If you can use Terraform, it is the most straightforward way to create the resources required for the Operations Manager and Concourse deployment. If you are unable to use Terraform, or prefer not to, see "Create resources manually" below for a list of the resources required to deploy Operations Manager and Concourse, so that you can create them yourself.

Create resources using Terraform

The paving repository contains Terraform templates for each supported IaaS: AWS, Azure, GCP, and vSphere. This includes infrastructure for the Operations Manager, BOSH Director, and Concourse.

  1. Clone the repo on the command line from the concourse-working-directory folder:

    git clone https://github.com/pivotal/paving.git
    
  2. In the checked-out repository, there are directories for each IaaS. Copy the Terraform templates for the IaaS of your choice to a new directory outside of the paving repo, so that you can modify them:

    # cp -Ra paving/${IAAS} paving-${IAAS}
    mkdir paving-${IAAS}
    cp -a paving/$IAAS/. paving-$IAAS
    cd paving-${IAAS}
    rm -f pas-*.tf
    rm -f pks-*.tf
    

    IAAS must be set to match one of the infrastructure directories at the top level of the paving repo: aws, azure, gcp, or nsxt. This was done in Get your Working Directory and Shell Set up, but if you're in a new shell, you may need to set it again.
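
    For example (use whichever value matches your IaaS directory):

    export IAAS=gcp    # or: aws, azure, nsxt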

  3. Within the new directory, the terraform.tfvars.example file shows what values are required for that IaaS. Remove the .example extension from the filename, and replace the example values with real ones.
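
    For example, a completed terraform.tfvars might look like the following sketch. The real variable names are whatever your IaaS's terraform.tfvars.example lists; the values shown here are placeholders only:

    environment_name = "test-env"
    region           = "us-west-2"
    # ...plus the remaining variables from the .example file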

  4. You'll be extending the Terraform files from the paving repo with an additional file that defines resources for Concourse. Create a new concourse.tf file in the new directory and copy the block for your IaaS into it.

    For AWS:

    resource "aws_route53_record" "concourse" {
      name = "ci.${var.environment_name}.${data.aws_route53_zone.hosted.name}"
    
      zone_id = data.aws_route53_zone.hosted.zone_id
      type    = "A"
    
      alias {
        name                   = aws_lb.concourse.dns_name
        zone_id                = aws_lb.concourse.zone_id
        evaluate_target_health = true
      }
    }
    
    //create a load balancer for concourse
    resource "aws_lb" "concourse" {
      name                             = "${var.environment_name}-concourse-lb"
      load_balancer_type               = "network"
      enable_cross_zone_load_balancing = true
      subnets                          = aws_subnet.public-subnet[*].id
    }
    
    resource "aws_lb_listener" "concourse-tcp" {
      load_balancer_arn = aws_lb.concourse.arn
      port              = 443
      protocol          = "TCP"
    
      default_action {
        type             = "forward"
        target_group_arn = aws_lb_target_group.concourse-tcp.arn
      }
    }
    
    resource "aws_lb_listener" "concourse-ssh" {
      load_balancer_arn = aws_lb.concourse.arn
      port              = 2222
      protocol          = "TCP"
    
      default_action {
        type             = "forward"
        target_group_arn = aws_lb_target_group.concourse-ssh.arn
      }
    }
    
    resource "aws_lb_listener" "concourse-credhub" {
      load_balancer_arn = aws_lb.concourse.arn
      port              = 8844
      protocol          = "TCP"
    
      default_action {
        type             = "forward"
        target_group_arn = aws_lb_target_group.concourse-credhub.arn
      }
    }
    
    resource "aws_lb_listener" "concourse-uaa" {
      load_balancer_arn = aws_lb.concourse.arn
      port              = 8443
      protocol          = "TCP"
    
      default_action {
        type             = "forward"
        target_group_arn = aws_lb_target_group.concourse-uaa.arn
      }
    }
    
    resource "aws_lb_target_group" "concourse-tcp" {
      name     = "${var.environment_name}-concourse-tg-tcp"
      port     = 443
      protocol = "TCP"
      vpc_id   = aws_vpc.vpc.id
    
      health_check {
        protocol = "TCP"
      }
    }
    
    resource "aws_lb_target_group" "concourse-ssh" {
      name     = "${var.environment_name}-concourse-tg-ssh"
      port     = 2222
      protocol = "TCP"
      vpc_id   = aws_vpc.vpc.id
    
      health_check {
        protocol = "TCP"
      }
    }
    
    resource "aws_lb_target_group" "concourse-credhub" {
      name     = "${var.environment_name}-concourse-tg-credhub"
      port     = 8844
      protocol = "TCP"
      vpc_id   = aws_vpc.vpc.id
    
      health_check {
        protocol = "TCP"
      }
    }
    
    resource "aws_lb_target_group" "concourse-uaa" {
      name     = "${var.environment_name}-concourse-tg-uaa"
      port     = 8443
      protocol = "TCP"
      vpc_id   = aws_vpc.vpc.id
    
      health_check {
        protocol = "TCP"
      }
    }
    
    //create a security group for concourse
    resource "aws_security_group" "concourse" {
      name   = "${var.environment_name}-concourse-sg"
      vpc_id = aws_vpc.vpc.id
    
      ingress {
        cidr_blocks = var.ops_manager_allowed_ips
        protocol    = "tcp"
        from_port   = 443
        to_port     = 443
      }
    
      ingress {
        cidr_blocks = var.ops_manager_allowed_ips
        protocol    = "tcp"
        from_port   = 2222
        to_port     = 2222
      }
    
      ingress {
        cidr_blocks = var.ops_manager_allowed_ips
        protocol    = "tcp"
        from_port   = 8844
        to_port     = 8844
      }
    
      ingress {
        cidr_blocks = var.ops_manager_allowed_ips
        protocol    = "tcp"
        from_port   = 8443
        to_port     = 8443
      }
    
      egress {
        cidr_blocks = ["0.0.0.0/0"]
        protocol    = "-1"
        from_port   = 0
        to_port     = 0
      }
    
      tags = merge(
        var.tags,
        { "Name" = "${var.environment_name}-concourse-sg" },
      )
    }
    
    output "concourse_url" {
      value = aws_route53_record.concourse.name
    }
    

    For Azure:

    resource "azurerm_public_ip" "concourse" {
      name                         = "${var.environment_name}-concourse-lb"
      location                     = var.location
      resource_group_name          = azurerm_resource_group.platform.name
      allocation_method            = "Static"
      sku                          = "Basic"
    
      tags = {
        environment = var.environment_name
      }
    }
    
    resource "azurerm_lb" "concourse" {
      name                = "${var.environment_name}-concourse-lb"
      resource_group_name = azurerm_resource_group.platform.name
      location            = var.location
      sku                 = "Basic"
    
      frontend_ip_configuration {
        name                 = "${var.environment_name}-concourse-frontend-ip-configuration"
        public_ip_address_id = azurerm_public_ip.concourse.id
      }
    }
    
    resource "azurerm_lb_rule" "concourse-https" {
      name                = "${var.environment_name}-concourse-https"
      resource_group_name = azurerm_resource_group.platform.name
      loadbalancer_id     = azurerm_lb.concourse.id
    
      frontend_ip_configuration_name = "${var.environment_name}-concourse-frontend-ip-configuration"
      protocol                       = "TCP"
      frontend_port                  = 443
      backend_port                   = 443
    
      backend_address_pool_id = azurerm_lb_backend_address_pool.concourse.id
      probe_id                = azurerm_lb_probe.concourse-https.id
    }
    
    resource "azurerm_lb_probe" "concourse-https" {
      name                = "${var.environment_name}-concourse-https"
      resource_group_name = azurerm_resource_group.platform.name
      loadbalancer_id     = azurerm_lb.concourse.id
      protocol            = "TCP"
      port                = 443
    }
    
    resource "azurerm_lb_rule" "concourse-http" {
      name                = "${var.environment_name}-concourse-http"
      resource_group_name = azurerm_resource_group.platform.name
      loadbalancer_id     = azurerm_lb.concourse.id
    
      frontend_ip_configuration_name = "${var.environment_name}-concourse-frontend-ip-configuration"
      protocol                       = "TCP"
      frontend_port                  = 80
      backend_port                   = 80
    
      backend_address_pool_id = azurerm_lb_backend_address_pool.concourse.id
      probe_id                = azurerm_lb_probe.concourse-http.id
    }
    
    resource "azurerm_lb_probe" "concourse-http" {
      name                = "${var.environment_name}-concourse-http"
      resource_group_name = azurerm_resource_group.platform.name
      loadbalancer_id     = azurerm_lb.concourse.id
      protocol            = "TCP"
      port                = 80
    }
    
    resource "azurerm_lb_rule" "concourse-uaa" {
      name                = "${var.environment_name}-concourse-uaa"
      resource_group_name = azurerm_resource_group.platform.name
      loadbalancer_id     = azurerm_lb.concourse.id
    
      frontend_ip_configuration_name = "${var.environment_name}-concourse-frontend-ip-configuration"
      protocol                       = "TCP"
      frontend_port                  = 8443
      backend_port                   = 8443
    
      backend_address_pool_id = azurerm_lb_backend_address_pool.concourse.id
      probe_id                = azurerm_lb_probe.concourse-uaa.id
    }
    
    resource "azurerm_lb_probe" "concourse-uaa" {
      name                = "${var.environment_name}-concourse-uaa"
      resource_group_name = azurerm_resource_group.platform.name
      loadbalancer_id     = azurerm_lb.concourse.id
      protocol            = "TCP"
      port                = 8443
    }
    
    resource "azurerm_lb_rule" "concourse-credhub" {
      name                = "${var.environment_name}-concourse-credhub"
      resource_group_name = azurerm_resource_group.platform.name
      loadbalancer_id     = azurerm_lb.concourse.id
    
      frontend_ip_configuration_name = "${var.environment_name}-concourse-frontend-ip-configuration"
      protocol                       = "TCP"
      frontend_port                  = 8844
      backend_port                   = 8844
    
      backend_address_pool_id = azurerm_lb_backend_address_pool.concourse.id
      probe_id                = azurerm_lb_probe.concourse-credhub.id
    }
    
    resource "azurerm_lb_probe" "concourse-credhub" {
      name                = "${var.environment_name}-concourse-credhub"
      resource_group_name = azurerm_resource_group.platform.name
      loadbalancer_id     = azurerm_lb.concourse.id
      protocol            = "TCP"
      port                = 8844
    }
    
    resource "azurerm_network_security_rule" "concourse-credhub-platform-vms" {
      name                        = "${var.environment_name}-credhub"
      priority                    = 300
      direction                   = "Inbound"
      access                      = "Allow"
      protocol                    = "Tcp"
      source_port_range           = "*"
      destination_port_range      = "8844"
      source_address_prefix       = "*"
      destination_address_prefix  = "*"
      resource_group_name         = azurerm_resource_group.platform.name
      network_security_group_name = azurerm_network_security_group.platform-vms.name
    }
    
    resource "azurerm_network_security_rule" "concourse-uaa-platform-vms" {
      name                        = "${var.environment_name}-uaa"
      priority                    = 3001
      direction                   = "Inbound"
      access                      = "Allow"
      protocol                    = "Tcp"
      source_port_range           = "*"
      destination_port_range      = "8443"
      source_address_prefix       = "*"
      destination_address_prefix  = "*"
      resource_group_name         = azurerm_resource_group.platform.name
      network_security_group_name = azurerm_network_security_group.platform-vms.name
    }
    
    resource "azurerm_network_security_rule" "concourse-credhub-ops-manager" {
      name                        = "${var.environment_name}-credhub"
      priority                    = 300
      direction                   = "Inbound"
      access                      = "Allow"
      protocol                    = "Tcp"
      source_port_range           = "*"
      destination_port_range      = "8844"
      source_address_prefix       = "*"
      destination_address_prefix  = "*"
      resource_group_name         = azurerm_resource_group.platform.name
      network_security_group_name = azurerm_network_security_group.ops-manager.name
    }
    
    resource "azurerm_network_security_rule" "concourse-uaa-ops-manager" {
      name                        = "${var.environment_name}-uaa"
      priority                    = 3001
      direction                   = "Inbound"
      access                      = "Allow"
      protocol                    = "Tcp"
      source_port_range           = "*"
      destination_port_range      = "8443"
      source_address_prefix       = "*"
      destination_address_prefix  = "*"
      resource_group_name         = azurerm_resource_group.platform.name
      network_security_group_name = azurerm_network_security_group.ops-manager.name
    }
    
    resource "azurerm_lb_backend_address_pool" "concourse" {
      name                = "${var.environment_name}-concourse-backend-pool"
      resource_group_name = azurerm_resource_group.platform.name
      loadbalancer_id     = azurerm_lb.concourse.id
    }
    
    resource "azurerm_dns_a_record" "concourse" {
      name                = "ci.${var.environment_name}"
      zone_name           = data.azurerm_dns_zone.hosted.name
      resource_group_name = data.azurerm_dns_zone.hosted.resource_group_name
      ttl                 = "60"
      records             = [azurerm_public_ip.concourse.ip_address]
    
      tags = merge(
        var.tags,
        { name = "ci.${var.environment_name}" },
      )
    }
    
    output "concourse_url" {
      value  = "${azurerm_dns_a_record.concourse.name}.${azurerm_dns_a_record.concourse.zone_name}"
    }
    

    For GCP:

    resource "google_dns_record_set" "concourse" {
      name = "ci.${var.environment_name}.${data.google_dns_managed_zone.hosted-zone.dns_name}"
      type = "A"
      ttl  = 60
    
      managed_zone = var.hosted_zone
    
      rrdatas = [google_compute_address.concourse.address]
    }
    
    //create a load balancer for concourse
    resource "google_compute_address" "concourse" {
      name = "${var.environment_name}-concourse"
    }
    
    resource "google_compute_firewall" "concourse" {
      allow {
        ports    = ["443", "2222", "8000", "8443"]
        protocol = "tcp"
      }
    
      direction     = "INGRESS"
      name          = "${var.environment_name}-concourse-open"
      network       = google_compute_network.network.self_link
      source_ranges = ["0.0.0.0/0"]
      target_tags   = ["concourse"]
    }
    
    resource "google_compute_forwarding_rule" "concourse_credhub" {
      ip_address  = google_compute_address.concourse.address
      ip_protocol = "TCP"
      name        = "${var.environment_name}-concourse-credhub"
      port_range  = "8000-8000"
      target      = google_compute_target_pool.concourse_target_pool.self_link
    }
    
    resource "google_compute_forwarding_rule" "concourse_ssh" {
      ip_address  = google_compute_address.concourse.address
      ip_protocol = "TCP"
      name        = "${var.environment_name}-concourse-ssh"
      port_range  = "2222-2222"
      target      = google_compute_target_pool.concourse_target_pool.self_link
    }
    
    resource "google_compute_forwarding_rule" "concourse_tcp" {
      ip_address  = google_compute_address.concourse.address
      ip_protocol = "TCP"
      name        = "${var.environment_name}-concourse-tcp"
      port_range  = "443-443"
      target      = google_compute_target_pool.concourse_target_pool.self_link
    }
    
    resource "google_compute_forwarding_rule" "concourse_uaa" {
      ip_address  = google_compute_address.concourse.address
      ip_protocol = "TCP"
      name        = "${var.environment_name}-concourse-uaa"
      port_range  = "8443-8443"
      target      = google_compute_target_pool.concourse_target_pool.self_link
    }
    
    resource "google_compute_target_pool" "concourse_target_pool" {
      name = "${var.environment_name}-concourse"
    }
    
    output "concourse_url" {
      value = replace(replace("${google_dns_record_set.concourse.name}", "/\\.$/", ""), "*.", "")
    }
    

    For vSphere + NSX-T:

    resource "nsxt_lb_service" "concourse_lb_service" {
      description  = "concourse lb_service"
      display_name = "${var.environment_name}_concourse_lb_service"
    
      enabled           = true
      logical_router_id = nsxt_logical_tier1_router.t1_infrastructure.id
      virtual_server_ids = ["${nsxt_lb_tcp_virtual_server.concourse_lb_virtual_server.id}"]
      error_log_level   = "INFO"
      size              = "SMALL"
    
      depends_on        = ["nsxt_logical_router_link_port_on_tier1.t1_infrastructure_to_t0"]
    
      tag {
        scope = "terraform"
        tag   = var.environment_name
      }
    }
    
    resource "nsxt_ns_group" "concourse_ns_group" {
      display_name = "${var.environment_name}_concourse_ns_group"
    
      tag {
        scope = "terraform"
        tag   = var.environment_name
      }
    }
    
    resource "nsxt_lb_tcp_monitor" "concourse_lb_tcp_monitor" {
      display_name = "${var.environment_name}_concourse_lb_tcp_monitor"
      interval     = 5
      monitor_port = 443
      rise_count   = 3
      fall_count   = 3
      timeout      = 15
    
      tag {
        scope = "terraform"
        tag   = var.environment_name
      }
    }
    
    resource "nsxt_lb_pool" "concourse_lb_pool" {
      description              = "concourse_lb_pool provisioned by Terraform"
      display_name             = "${var.environment_name}_concourse_lb_pool"
      algorithm                = "WEIGHTED_ROUND_ROBIN"
      min_active_members       = 1
      tcp_multiplexing_enabled = false
      tcp_multiplexing_number  = 3
      active_monitor_id        = "${nsxt_lb_tcp_monitor.concourse_lb_tcp_monitor.id}"
      snat_translation {
        type          = "SNAT_AUTO_MAP"
      }
      member_group {
        grouping_object {
          target_type = "NSGroup"
          target_id   = "${nsxt_ns_group.concourse_ns_group.id}"
        }
      }
    
      tag {
        scope = "terraform"
        tag   = var.environment_name
      }
    }
    
    resource "nsxt_lb_fast_tcp_application_profile" "tcp_profile" {
      display_name = "${var.environment_name}_concourse_fast_tcp_profile"
    
      tag {
        scope = "terraform"
        tag   = var.environment_name
      }
    }
    
    resource "nsxt_lb_tcp_virtual_server" "concourse_lb_virtual_server" {
      description                = "concourse lb_virtual_server provisioned by terraform"
      display_name               = "${var.environment_name}_concourse virtual server"
      application_profile_id     = "${nsxt_lb_fast_tcp_application_profile.tcp_profile.id}"
      ip_address                 = "${var.nsxt_lb_concourse_virtual_server_ip_address}"
      ports                       = ["443","8443","8844"]
      pool_id                    = "${nsxt_lb_pool.concourse_lb_pool.id}"
    
      tag {
        scope = "terraform"
        tag   = var.environment_name
      }
    }
    
    variable "nsxt_lb_concourse_virtual_server_ip_address" {
      default     = ""
      description = "IP Address for concourse loadbalancer"
      type        = "string"
    }
    
    output "concourse_url" {
      value = var.nsxt_lb_concourse_virtual_server_ip_address
    }
    
  5. With your variables and modifications in place, you can initialize Terraform, which downloads the required IaaS providers:

    terraform init
    
  6. Run terraform refresh to update the state with what currently exists on the IaaS:

    terraform refresh \
      -var-file=terraform.tfvars
    
  7. Next, you can run terraform plan to see what changes will be made to the infrastructure on the IaaS.

    terraform plan \
      -out=terraform.tfplan \
      -var-file=terraform.tfvars
    
  8. Finally, you can run terraform apply to create the required infrastructure on the IaaS.

    terraform apply \
      -parallelism=5 \
      terraform.tfplan
    
  9. Save the output from terraform output stable_config_opsmanager as terraform-outputs.yml one level up, in your working directory:

    terraform output --raw stable_config_opsmanager > ../terraform-outputs.yml
    

    Caution: Terraform v0.14.3+ introduced a backwards-incompatible change.
    When using the terraform output command with v0.14.3 or later, the additional --raw flag is required. This ensures the value is written as a plain string rather than being JSON encoded.

  10. Export CONCOURSE_URL from the terraform output concourse_url command:

    export CONCOURSE_URL="$(terraform output concourse_url)"
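
    Optionally, confirm the value before continuing. If your concourse_url is a DNS name (AWS, Azure, or GCP), you can also check that it resolves (assumes dig is installed):

    echo "${CONCOURSE_URL}"
    dig +short "${CONCOURSE_URL}"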
    
  11. Return to your working directory for the remaining post-Terraform steps:

    cd ..
    

Create resources manually

Required resources fall into one of the following categories:

  • "Collected": we expect that these resources are already created in your vCenter.

  • "Collected or Create": we expect that you may or may not already have these resources created already in your vCenter. If you do not have these resources already, they need to be created to continue.

  • "Determine": these resources may be defined based on other resources, local policy, or other factors.

  • "Remove": these resource names are created by the Terraform scripts. To proceed without error when you Deploy the Director, you will need to remove these values from the director-config.yml, as the director configuration included in this document assumes Terraform's outputs.

Resources named in a code block are used directly as variables in this documentation. Resources not named in a code block are still required for a successful Operations Manager, BOSH, or Concourse deployment, but are not used directly in a deploy script or config file.

Resources from Terraform template

These resources are based on the Terraform templates in the paving repo. For additional context, see the appropriate templates for your IaaS.

For AWS

Collect

  • region: Region to deploy the Tanzu Operations Manager VM and BOSH VMs.

Collect or Create

  • access_key: Access key for creating the Ops Manager VM and BOSH VMs.
  • secret_key: Matching secret key to access_key for creating the Ops Manager VM and BOSH VMs.
  • ops_manager_key_pair_name: Keypair name with which to deploy the Ops Manager VM.
  • management_subnet_cidrs: List of CIDRs for the subnet to deploy the BOSH director and VMs (total: 3).
  • management_subnet_gateways: List of gateways for the subnet to deploy the BOSH director and VMs (total: 3).
  • management_subnet_ids: List of IDs for the subnets to deploy the BOSH director and VMs (total: 3).
  • management_subnet_reserved_ip_ranges: List of reserved IP ranges for the subnet to deploy the BOSH director and VMs (total: 3).
  • ops_manager_public_ip: Public IP to assign the Ops Manager VM.
  • ops_manager_security_group_id: ID of the security group to deploy the Ops Manager VM to.
  • ops_manager_ssh_private_key: Private SSH key with which to connect to the BOSH director.
  • ops_manager_ssh_public_key: Public key for ops_manager_ssh_private_key.
  • ops_manager_subnet_id: ID of the subnet to deploy the Ops Manager VM to.
  • ops_manager_dns: DNS entry for the Ops Manager VM. This will be used to connect to the Ops Manager from the command line.
  • ops_manager_iam_instance_profile_name: Instance profile name for the BOSH director. BOSH will use this to deploy VMs.
  • ops_manager_iam_user_access_key: IAM user access key for the BOSH director. BOSH will use this to deploy VMs.
  • ops_manager_iam_user_secret_key: IAM user secret key for the BOSH director. BOSH will use this to deploy VMs.
  • platform_vms_security_group_id: Security group that will be assigned to the BOSH director and deployed VMs.
  • vpc_id: ID of the VPC that will be assigned to the BOSH director and deployed VMs.
  • A DNS record for Concourse. This will be your $CONCOURSE_URL later in this guide.
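
For example, once the DNS record exists, you can export it so that later steps can reference it (a sketch; the hostname shown is a placeholder):

export CONCOURSE_URL="ci.test-env.example.com"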

Determine

  • environment_name: Arbitrary name with which to prefix the name of the Ops Manager. NOTE: when creating load balancers for Concourse, this name should be used as a prefix.

Remove

  • pas_subnet_cidrs
  • pas_subnet_gateways
  • pas_subnet_ids
  • pas_subnet_reserved_ip_ranges
  • pks_api_lb_security_group_id
  • pks_subnet_cidrs
  • pks_subnet_gateways
  • pks_subnet_ids
  • pks_subnet_reserved_ip_ranges
  • services_subnet_cidrs
  • services_subnet_gateways
  • services_subnet_ids
  • services_subnet_reserved_ip_ranges
  • From the vmextensions-configuration section, remove the ssh-lb-security-groups, tcp-lb-security-groups, web_lb_security_group_id, and pks-api-lb-security-groups sections.

For Azure

Collect

  • subscription_id: Subscription ID for the Azure Cloud.
  • tenant_id: Tenant ID for the Azure Cloud.
  • client_id: Client ID for the Tanzu Operations Manager, BOSH, and VMs to use.
  • client_secret: Client secret for the Operations Manager, BOSH, and VMs to use.

Collect or Create

  • management_subnet_cidr: CIDR for the subnet to deploy the BOSH director and VMs.
  • management_subnet_gateway: Gateway for the subnet to deploy the BOSH director and VMs.
  • management_subnet_name: Name of the subnet to deploy the BOSH director and VMs.
  • management_subnet_range: Reserved IP ranges for the subnet to deploy the BOSH director and VMs (excludes gateway).
  • management_subnet_id: ID of the subnet to deploy the BOSH director and VMs.
  • bosh_storage_account_name: Storage account for BOSH to store VMs.
  • network_name: Network name to deploy BOSH and VMs.
  • ops_manager_dns: DNS entry for the Ops Manager VM. This will be used to connect to the Ops Manager from the command line.
  • ops_manager_public_ip: Public IP to assign the Ops Manager VM.
  • ops_manager_ssh_private_key: Private SSH key with which to connect to the BOSH director.
  • ops_manager_ssh_public_key: Public key for ops_manager_ssh_private_key.
  • platform_vms_security_group_name: Security group to assign to BOSH and VMs.
  • resource_group_name: Resource group to deploy Ops Manager, BOSH, and VMs.
  • iaas_configuration_environment_azurecloud: Which Azure cloud to deploy to (default: AzureCloud).
  • ops_manager_container_name: Container to deploy the Ops Manager VM.
  • ops_manager_security_group_name: Security group to attach to the Ops Manager VM.
  • ops_manager_storage_account_name: Storage account name to associate with the Ops Manager VM.
  • A DNS record for Concourse. This will be your $CONCOURSE_URL later in this guide.

Determine

  • environment_name: Arbitrary name with which to prefix the name of the Ops Manager. NOTE: when creating load balancers for Concourse, this name should be used as a prefix.
  • location: Location to deploy the Ops Manager VM
  • ops_manager_private_ip: Private IP to assign to the Ops Manager VM

Remove

  • pas_subnet_name
  • pas_subnet_gateway
  • pas_subnet_cidr
  • pas_subnet_range
  • pks_api_application_security_group_name
  • pks_api_network_security_group_name
  • pks_subnet_cidr
  • pks_subnet_gateway
  • pks_subnet_name
  • pks_subnet_range
  • services_subnet_cidr
  • services_subnet_gateway
  • services_subnet_range
  • services_subnet_name
  • From the vmextensions-configuration section, remove the pks-api-lb-security-groups section.

For GCP

Collect

  • service_account_key: Service account key to deploy the Tanzu Operations Manager VM.
  • ops_manager_service_account_key: Service account key to deploy BOSH and VMs.
  • project: Project name to deploy Operations Manager, BOSH, and VMs.
  • region: Region to deploy Operations Manager, BOSH, and VMs.

Collect or Create

  • management_subnet_cidr: CIDR for the subnet to deploy the BOSH director and VMs.
  • management_subnet_gateway: Gateway for the subnet to deploy the BOSH director and VMs.
  • management_subnet_name: Name of the subnet to deploy the BOSH director and VMs.
  • management_subnet_reserved_ip_ranges: Reserved IP ranges for the subnet to deploy the BOSH director and VMs (excludes gateway).
  • availability_zones: List of availability zones to deploy Ops Manager, BOSH, and VMs (total: 3).
  • network_name: Network name to deploy BOSH and VMs.
  • ops_manager_dns: DNS entry for the Ops Manager VM. This will be used to connect to the Ops Manager from the command line.
  • ops_manager_public_ip: Public IP to assign the Ops Manager VM.
  • ops_manager_ssh_private_key: Private SSH key with which to connect to the BOSH director.
  • ops_manager_ssh_public_key: Public key for ops_manager_ssh_private_key.
  • A DNS record for Concourse. This will be your $CONCOURSE_URL later in this guide.

Determine

  • environment_name: Arbitrary name with which to prefix the name of the Ops Manager. NOTE: when creating load balancers for Concourse, this name should be used as a prefix.
  • platform_vms_tag: Tag to assign to VMs created by BOSH.

Remove

  • pas_subnet_cidr
  • pas_subnet_gateway
  • pas_subnet_name
  • pas_subnet_reserved_ip_ranges
  • pks_subnet_cidr
  • pks_subnet_gateway
  • pks_subnet_name
  • pks_subnet_reserved_ip_ranges
  • services_subnet_cidr
  • services_subnet_gateway
  • services_subnet_name
  • services_subnet_reserved_ip_ranges

For vSphere (without NSX-T)

Collect

  • vcenter_host: Hostname for the vCenter
  • vcenter_username: Username for logging in to the vCenter
  • vcenter_password: Password for logging in to the vCenter
  • vcenter_datacenter: Datacenter to deploy the Tanzu Operations Manager, Concourse, and associated VMs.
  • vcenter_cluster: Cluster to deploy the Operations Manager, Concourse, and associated VMs.
  • vcenter_datastore: Datastore to deploy the Operations Manager, Concourse, and associated VMs. This guide assumes the same datastore for persistent and ephemeral storage, and that your resource pool is in that datastore.
  • ops_manager_dns_servers: The address(es) of your DNS server(s), comma-separated.
  • ops_manager_ntp: NTP server to set server time on Operations Manager and the BOSH director.

Collect or Create

  • ops_manager_dns: DNS record for the Ops Manager VM
  • concourse_url: DNS record for the Concourse web instance
  • ops_manager_folder: Folder to store Ops Manager, BOSH, and its deployed VMs
  • ops_manager_public_ip: (OPTIONAL) This guide does not make use of the public IP. You will need this set if you want to interact with the Ops Manager outside of the defined private network.
  • vcenter_resource_pool: Resource Pool to deploy the Ops Manager, Concourse, and associated VMs
  • management_subnet_name: Name of the subnet to deploy the Ops Manager, Concourse, and associated VMs
  • management_subnet_gateway: Gateway of the management_subnet. This is typically the first IP of the subnet.
  • management_subnet_cidr: Private CIDR of the management_subnet. We recommend a /24 subnet CIDR.
  • ops_manager_ssh_private_key: A private key (such as one generated by ssh-keygen) used to SSH to the BOSH director.
  • ops_manager_ssh_public_key: The public key paired with ops_manager_ssh_private_key. This key is used when creating the Ops Manager VM.
  • A load balancer with the following ports open: 443, 8443, 8844. This load balancer should have an IP assigned to it. This IP address will be used as the $CONCOURSE_URL later in this guide.

Determine

  • management_subnet_reserved_ip_ranges: IP addresses that will not be managed by BOSH. This range is typically x.x.x.1-x.x.x.10.
  • ops_manager_netmask: Netmask for the management_subnet.
  • ops_manager_private_ip: Private IP for the Ops Manager VM. This is typically x.x.x.10.
  • allow_unverified_ssl: Based on your local policies, this may be set to true or false. This is used by the om vm-lifecycle CLI to communicate with vCenter when creating the Ops Manager VM.
  • disable_ssl_verification: Based on your local policies, this may be set to true or false. This is used by the BOSH director to create VMs on the vCenter.

Terraform outputs

To follow the rest of this guide, you will also need to create a vars file that contains all of the outputs that would have been created by Terraform.
For simplicity, we recommend naming this file terraform-outputs.yml.
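
Later steps in this guide pass this file to commands as a vars file. As a rough sketch (assuming the om CLI and an opsman.yml config that is created in a later step), interpolating a config against it looks like:

om interpolate --config opsman.yml --vars-file terraform-outputs.yml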

For AWS

region: us-west-2
environment_name: test-env
access_key: AWESOMEACCESSKEY
secret_key: EQUALLYAWESOMESECRETKEY
ops_manager_key_pair_name: test-env-ops-manager-key
management_subnet_cidrs:
- 10.0.16.0/24
- 10.0.17.0/24
- 10.0.18.0/24
management_subnet_gateways:
- 10.0.16.1
- 10.0.17.1
- 10.0.18.1
management_subnet_ids:
- subnet-abcd
- subnet-efgh
- subnet-ijkl
management_subnet_reserved_ip_ranges:
- 10.0.16.1-10.0.16.9
- 10.0.17.1-10.0.17.9
- 10.0.18.1-10.0.18.9
ops_manager_public_ip: 1.2.3.4
ops_manager_security_group_id: sg-abc123
ops_manager_ssh_private_key: |
  -----BEGIN RSA PRIVATE KEY-----
  yourprivatekeyshouldnotlooklikethis
  buteveryoneskeyisdifferentsowhknows
  -----END RSA PRIVATE KEY-----
ops_manager_ssh_public_key: ssh-rsa yourpublickeyallonasingleline
ops_manager_subnet_id: subnet-1234abcd
ops_manager_dns: opsmanager-hostname.example.com
ops_manager_iam_instance_profile_name: test-env-ops-manager
ops_manager_iam_user_access_key: OPSMANAGERACCESSKEY
ops_manager_iam_user_secret_key: OPSMANAGERSECRETKEY
platform_vms_security_group_id: sg-def456
vpc_id: vpc-1234abcd

For Azure

bosh_storage_account_name: abcdefg
client_id: 1234-5678-abcd-efgh
client_secret: abcd1234
environment_name: test-env
iaas_configuration_environment_azurecloud: AzureCloud
location: West US 2
management_subnet_cidr: 10.0.16.0/26
management_subnet_gateway: 10.0.16.1
management_subnet_id: /subscriptions/abcd-1234-efgh-5678-ijkl/resourceGroups/test-env-rg/providers/Microsoft.Network/virtualNetworks/test-env-network/subnets/test-env-subnet
management_subnet_name: test-env-management-subnet
management_subnet_range: 10.0.16.10
network_name: test-env-network
ops_manager_container_name: opsmanagerimage
ops_manager_dns: opsmanager-hostname.example.com
ops_manager_private_ip: 5.6.7.8
ops_manager_public_ip: 1.2.3.4
ops_manager_security_group_name: test-env-sg
ops_manager_ssh_private_key: |
  -----BEGIN RSA PRIVATE KEY-----
  yourprivatekeyshouldnotlooklikethis
  buteveryoneskeyisdifferentsowhknows
  -----END RSA PRIVATE KEY-----
ops_manager_ssh_public_key: ssh-rsa yourpublickeyallonasingleline
ops_manager_storage_account_name: abcdefghijk
platform_vms_security_group_name: test-env-vms-sg
resource_group_name: test-env-rg
subscription_id: abcd-1234-efgh-5678-ijkl
tenant_id: lkji-8765-hgfe-4321-dcba

For GCP

availability_zones:
- us-central1-a
- us-central1-b
- us-central1-c
environment_name: test-env
management_subnet_cidr: 10.0.16.0/26
management_subnet_gateway: 10.0.16.1
management_subnet_name: test-env-management-subnet
management_subnet_reserved_ip_ranges: 10.0.16.1-10.0.16.9
network_name: test-env-network
ops_manager_dns: opsmanager-hostname.example.com
ops_manager_public_ip: 1.2.3.4
ops_manager_service_account_key: |
  {
    "type": "service_account",
    "project_id": "global-project",
    "private_key_id": "123456789abcdefg",
    "private_key": "-----BEGIN PRIVATE KEY-----\nyourprivatekeyshouldnotlooklikethis\nbuteveryoneskeyisdifferentsowhknows\n-----END PRIVATE KEY-----\n",
    "client_email": "[email protected]",
    "client_id": "0987654321234567890",
    "auth_uri": "https://accounts.google.com/o/oauth2/auth",
    "token_uri": "https://oauth2.googleapis.com/token",
    "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
    "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/test-env-ops-manager%test-env.iam.gserviceaccount.com"
  }
ops_manager_ssh_private_key: |
  -----BEGIN RSA PRIVATE KEY-----
  yourprivatekeyshouldnotlooklikethis
  buteveryoneskeyisdifferentsowhknows
  -----END RSA PRIVATE KEY-----
ops_manager_ssh_public_key: ssh-rsa yourpublickeyallonasingleline
platform_vms_tag: test-env-vms
project: global-project
region: us-central1
service_account_key: |
  {
    "type": "service_account",
    "project_id": "global-project",
    "private_key_id": "abcdefg1234567",
    "private_key": "-----BEGIN PRIVATE KEY-----\nyourprivatekeyshouldnotlooklikethis\nbuteveryoneskeyisdifferentsowhknows\n-----END PRIVATE KEY-----\n",
    "client_email": "[email protected]",
    "client_id": "1234567890987654321",
    "auth_uri": "https://accounts.google.com/o/oauth2/auth",
    "token_uri": "https://oauth2.googleapis.com/token",
    "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
    "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/global%project.iam.gserviceaccount.com"
  }

For vSphere (without NSX-T)

allow_unverified_ssl: false
disable_ssl_verification: false
management_subnet_cidr: 195.168.1.0/24
management_subnet_gateway: 195.168.1.1
management_subnet_name: opsmanager-deploy-subnet
management_subnet_reserved_ip_ranges: 195.168.1.1-195.168.1.10
ops_manager_dns: opsmanager-hostname.example.com
ops_manager_dns_servers: 1.2.3.4,5.6.7.8
ops_manager_folder: /Datacenter/vm/vm-folder-name
ops_manager_netmask: 255.255.255.0
ops_manager_ntp: time-a-b.nist.gov
ops_manager_private_ip: 195.168.1.10

# Optional in this documentation
# May be required in your opsman.yml depending on your network setup
# ops_manager_public_ip: 10.193.63.25

ops_manager_ssh_private_key: |
  -----BEGIN RSA PRIVATE KEY-----
  yourprivatekeyshouldnotlooklikethis
  buteveryoneskeyisdifferentsowhknows
  -----END RSA PRIVATE KEY-----
ops_manager_ssh_public_key: ssh-rsa yourpublickeyallonasingleline
vcenter_cluster: Cluster
vcenter_datacenter: Datacenter
vcenter_datastore: Datastore
vcenter_host: vcenter-url.example.com
vcenter_password: password
vcenter_resource_pool: resource-pool
vcenter_username: username


Next Steps

When you have created the required IaaS infrastructure, see Deploying and preparing the Director.
