# Example 1
  def _Create(self) -> None:
    """Creates the Glue database, then the crawler that populates it."""
    # Database description encodes the default PKB tags, one k=v per line.
    db_input = {
        'Name': self.db_name,
        'Description': '\n'.join(
            f'{k}={v}' for k, v in util.MakeDefaultTags().items()),
    }
    create_db_cmd = util.AWS_PREFIX + [
        'glue',
        'create-database',
        '--database-input', json.dumps(db_input),
        f'--region={self.region}',
    ]
    vm_util.IssueCommand(create_db_cmd)

    # The crawler scans the S3 data-discovery path; an optional sample size
    # limits how many files per prefix are crawled.
    crawl_targets = {'S3Targets': [{'Path': self.data_discovery_path}]}
    if self.sample_size is not None:
      crawl_targets['S3Targets'][0]['SampleSize'] = self.sample_size

    create_crawler_cmd = util.AWS_PREFIX + [
        'glue',
        'create-crawler',
        '--name', self.crawler_name,
        '--role', self.role,
        '--database-name', self.db_name,
        '--targets', json.dumps(crawl_targets),
        '--region', self.region,
        '--tags', ','.join(
            f'{k}={v}' for k, v in util.MakeDefaultTags().items()),
    ]
    vm_util.IssueCommand(create_crawler_cmd)
# Example 2
    def _Create(self):
        """Creates the cluster.

        Issues `aws elasticache create-replication-group` for a Redis
        replication group, extending the base command with failover-related
        arguments chosen by self.failover_style.
        """
        # Base command. --preferred-cache-cluster-a-zs is the last flag, so
        # bare values appended below extend its list of availability zones.
        cmd = [
            'aws', 'elasticache', 'create-replication-group', '--engine',
            'redis', '--engine-version', self.version,
            '--replication-group-id', self.cluster_name,
            '--replication-group-description', self.cluster_name, '--region',
            self.redis_region, '--cache-node-type', self.node_type,
            '--cache-subnet-group-name', self.subnet_group_name,
            '--preferred-cache-cluster-a-zs', self.spec.client_vm.zone
        ]

        # NOTE(review): these branches append only a zone value, no flag
        # name — they rely on extending --preferred-cache-cluster-a-zs
        # above, so statement order matters here. Confirm intent.
        if self.failover_style == cloud_redis.Failover.FAILOVER_SAME_REGION:
            cmd += [self.failover_zone]

        elif self.failover_style == cloud_redis.Failover.FAILOVER_SAME_ZONE:
            cmd += [self.spec.client_vm.zone]

        # Any failover mode needs automatic failover and a replica (2 nodes).
        if self.failover_style != cloud_redis.Failover.FAILOVER_NONE:
            cmd += [
                '--automatic-failover-enabled', '--num-cache-clusters', '2'
            ]

        # Tag the replication group with the default PKB tags.
        cmd += ['--tags']
        cmd += util.FormatTags(util.MakeDefaultTags())
        vm_util.IssueCommand(cmd)
  def ImportKeyfile(cls, region):
    """Imports the public keyfile to AWS."""
    with cls._lock:
      region_key = _GetKeyfileSetKey(region)
      # Already imported for this region; nothing to do.
      if region_key in cls.imported_keyfile_set:
        return
      # Read the public key material from disk.
      keyfile, _ = vm_util.IssueRetryableCommand(
          ['cat', vm_util.GetPublicKeyPath()])
      tag_spec = util.FormatTagSpecifications('key-pair',
                                              util.MakeDefaultTags())
      import_cmd = util.AWS_PREFIX + [
          'ec2', '--region=%s' % region,
          'import-key-pair',
          '--key-name=%s' % cls.GetKeyNameForRun(),
          '--public-key-material=%s' % keyfile,
          '--tag-specifications=%s' % tag_spec,
      ]
      _, stderr, retcode = vm_util.IssueCommand(
          import_cmd, raise_on_failure=False)
      if retcode:
        # Surface key-pair quota exhaustion as a quota failure so PKB can
        # classify it; anything else is a generic prepare failure.
        if 'KeyPairLimitExceeded' in stderr:
          raise errors.Benchmarks.QuotaFailure(
              'KeyPairLimitExceeded in %s: %s' % (region, stderr))
        raise errors.Benchmarks.PrepareException(stderr)

      cls.imported_keyfile_set.add(region_key)
      # Re-importing resurrects a previously deleted keyfile.
      cls.deleted_keyfile_set.discard(region_key)
 def _Create(self):
   """Creates the control plane and worker nodes.

   Builds an `eksctl create cluster` command line from eksctl_flags and
   runs it; flags with falsy values are dropped.
   """
   tags = util.MakeDefaultTags()
   eksctl_flags = {
       'kubeconfig': FLAGS.kubeconfig,
       'managed': True,
       'name': self.name,
       'nodegroup-name': 'eks',
       'nodes': self.num_nodes,
       'nodes-min': self.min_nodes,
       'nodes-max': self.max_nodes,
       'node-type': self.vm_config.machine_type,
       'node-volume-size': self.vm_config.boot_disk_size,
       'region': self.region,
       'tags': ','.join('{}={}'.format(k, v) for k, v in tags.items()),
       'ssh-public-key':
           aws_virtual_machine.AwsKeyFileManager.GetKeyNameForRun(),
       'version': self.cluster_version,
       # NAT mode uses an EIP.
       'vpc-nat-mode': 'Disable',
       'zones': ','.join(self.zones),
   }
   # Sort flags for a deterministic command line; falsy values are skipped.
   cmd = [FLAGS.eksctl, 'create', 'cluster'] + sorted(
       '--{}={}'.format(k, v) for k, v in eksctl_flags.items() if v)
   # Cluster creation is slow; allow up to 30 minutes.
   vm_util.IssueCommand(cmd, timeout=1800)
# Example 5
    def _Create(self):
        """Creates the Placement Group."""
        # Tag the group with the default PKB tags at creation time.
        tag_spec = util.FormatTagSpecifications('placement-group',
                                                util.MakeDefaultTags())

        cmd = util.AWS_PREFIX + [
            'ec2', 'create-placement-group',
            '--region=%s' % self.region,
            '--group-name=%s' % self.name,
            '--strategy=%s' % self.strategy,
            '--tag-specifications=%s' % tag_spec,
        ]

        vm_util.IssueCommand(cmd)
    def _Create(self):
        """Creates the cluster.

        Builds EMR instance-group specs for the CORE and MASTER groups
        (with optional EBS configuration), issues `aws emr create-cluster`,
        records the resulting cluster id, and applies the default PKB tags.
        """
        name = 'pkb_' + FLAGS.run_uri
        logs_bucket = FLAGS.aws_emr_loguri or self._CreateLogBucket()

        instance_groups = []
        for group_type, group_spec in [('CORE', self.spec.worker_group),
                                       ('MASTER', self.spec.master_group)]:
            instance_properties = {
                'InstanceCount': group_spec.vm_count,
                'InstanceGroupType': group_type,
                'InstanceType': group_spec.vm_spec.machine_type,
                'Name': group_type + ' group'
            }
            if group_spec.disk_spec:
                # Make sure nothing we are ignoring is included in the disk spec
                assert group_spec.disk_spec.device_path is None
                assert group_spec.disk_spec.disk_number is None
                assert group_spec.disk_spec.mount_point is None
                assert group_spec.disk_spec.iops is None
                ebs_configuration = {
                    'EbsBlockDeviceConfigs': [{
                        'VolumeSpecification': {
                            'SizeInGB': group_spec.disk_spec.disk_size,
                            'VolumeType': group_spec.disk_spec.disk_type
                        },
                        'VolumesPerInstance':
                        group_spec.disk_spec.num_striped_disks
                    }]
                }
                instance_properties.update(
                    {'EbsConfiguration': ebs_configuration})
            instance_groups.append(instance_properties)

        # we need to store the cluster id.
        cmd = self.cmd_prefix + [
            'emr', 'create-cluster', '--name', name, '--release-label',
            RELEASE_LABEL, '--use-default-roles', '--instance-groups',
            json.dumps(instance_groups), '--application', 'Name=Spark',
            'Name=Hadoop', '--log-uri', logs_bucket
        ]
        # Place the cluster in the PKB-managed subnet when one exists.
        if self.network:
            cmd += ['--ec2-attributes', 'SubnetId=' + self.network.subnet.id]
        stdout, _, _ = vm_util.IssueCommand(cmd)
        result = json.loads(stdout)
        self.cluster_id = result['ClusterId']
        logging.info('Cluster created with id %s', self.cluster_id)
        # Tagging is done post-creation, one CLI call per tag.
        for tag_key, tag_value in util.MakeDefaultTags().items():
            self._AddTag(tag_key, tag_value)
# Example 7
 def _GetNodeFlags(self, node_group: str, num_nodes: int,
                   vm_config) -> Dict[str, Any]:
     """Get common flags for creating clusters and node_groups."""
     default_tags = util.MakeDefaultTags()
     # Flags shared by cluster and node-group creation commands.
     flags = {
         'nodes': num_nodes,
         # Label nodes so benchmarks can schedule onto a specific pool.
         'node-labels': f'pkb_nodepool={node_group}',
         'node-type': vm_config.machine_type,
         'node-volume-size': vm_config.boot_disk_size,
         'region': self.region,
         'tags': ','.join(f'{k}={v}' for k, v in default_tags.items()),
         'ssh-public-key':
             aws_virtual_machine.AwsKeyFileManager.GetKeyNameForRun(),
     }
     return flags
# Example 8
    def _Create(self):
        """Creates the cluster.

        Builds CORE and MASTER instance-group specs (with optional EBS
        configuration), issues `aws emr create-cluster` with Spark, Hadoop
        and Hive applications, stores the resulting cluster id, and applies
        the default PKB tags.
        """
        name = 'pkb_' + FLAGS.run_uri

        # Set up ebs details if disk_spec is present in the config
        ebs_configuration = None
        if self.spec.worker_group.disk_spec:
            # Make sure nothing we are ignoring is included in the disk spec
            assert self.spec.worker_group.disk_spec.device_path is None
            assert self.spec.worker_group.disk_spec.disk_number is None
            assert self.spec.worker_group.disk_spec.iops is None
            self.dpb_hdfs_type = disk_to_hdfs_map[
                self.spec.worker_group.disk_spec.disk_type]
            # Local (instance-store) disks need no EBS block device config.
            if self.spec.worker_group.disk_spec.disk_type != disk.LOCAL:
                ebs_configuration = {
                    'EbsBlockDeviceConfigs': [{
                        'VolumeSpecification': {
                            'SizeInGB':
                            self.spec.worker_group.disk_spec.disk_size,
                            'VolumeType':
                            self.spec.worker_group.disk_spec.disk_type
                        },
                        'VolumesPerInstance':
                        self.spec.worker_group.disk_count
                    }]
                }

        # Create the specification for the master and the worker nodes
        instance_groups = []
        core_instances = {
            'InstanceCount': self.spec.worker_count,
            'InstanceGroupType': 'CORE',
            'InstanceType': self.spec.worker_group.vm_spec.machine_type
        }
        if ebs_configuration:
            core_instances.update({'EbsConfiguration': ebs_configuration})

        master_instance = {
            'InstanceCount': 1,
            'InstanceGroupType': 'MASTER',
            'InstanceType': self.spec.worker_group.vm_spec.machine_type
        }
        if ebs_configuration:
            master_instance.update({'EbsConfiguration': ebs_configuration})

        instance_groups.append(core_instances)
        instance_groups.append(master_instance)

        # Spark SQL needs to access Hive
        cmd = self.cmd_prefix + [
            'emr', 'create-cluster', '--name', name, '--release-label',
            self.dpb_version, '--use-default-roles', '--instance-groups',
            json.dumps(instance_groups), '--application', 'Name=Spark',
            'Name=Hadoop', 'Name=Hive', '--log-uri', self.base_dir
        ]

        ec2_attributes = [
            'KeyName=' +
            aws_virtual_machine.AwsKeyFileManager.GetKeyNameForRun(),
            'SubnetId=' + self.network.subnet.id,
            # Place all VMs in default security group for simplicity and speed of
            # provisioning
            'EmrManagedMasterSecurityGroup=' + self.security_group_id,
            'EmrManagedSlaveSecurityGroup=' + self.security_group_id,
        ]
        cmd += ['--ec2-attributes', ','.join(ec2_attributes)]

        # Optional user-supplied cluster properties become --configurations.
        if FLAGS.dpb_cluster_properties:
            cmd += ['--configurations', _GetClusterConfiguration()]

        stdout, _, _ = vm_util.IssueCommand(cmd)
        result = json.loads(stdout)
        self.cluster_id = result['ClusterId']
        logging.info('Cluster created with id %s', self.cluster_id)
        # Tagging is done post-creation, one CLI call per tag.
        for tag_key, tag_value in util.MakeDefaultTags().items():
            self._AddTag(tag_key, tag_value)
# Example 9
    def _Create(self):
        """Creates the cluster.

        Builds CORE and MASTER instance-group specs (with optional EBS
        configuration), creates a log bucket, issues `aws emr create-cluster`
        with Spark, Hadoop and Hive, stores the resulting cluster id, and
        applies the default PKB tags.
        """
        name = 'pkb_' + FLAGS.run_uri

        # Set up ebs details if disk_spec is present in the config
        ebs_configuration = None
        if self.spec.worker_group.disk_spec:
            # Make sure nothing we are ignoring is included in the disk spec
            assert self.spec.worker_group.disk_spec.device_path is None
            assert self.spec.worker_group.disk_spec.disk_number is None
            assert self.spec.worker_group.disk_spec.iops is None
            ebs_configuration = {
                'EbsBlockDeviceConfigs': [{
                    'VolumeSpecification': {
                        'SizeInGB': self.spec.worker_group.disk_spec.disk_size,
                        'VolumeType':
                        self.spec.worker_group.disk_spec.disk_type
                    },
                    'VolumesPerInstance':
                    self.spec.worker_group.disk_spec.num_striped_disks
                }]
            }
            # Record which HDFS storage type backs this cluster.
            self.dpb_hdfs_type = disk_to_hdfs_map[
                self.spec.worker_group.disk_spec.disk_type]

        # Create the specification for the master and the worker nodes
        instance_groups = []
        core_instances = {
            'InstanceCount': self.spec.worker_count,
            'InstanceGroupType': 'CORE',
            'InstanceType': self.spec.worker_group.vm_spec.machine_type
        }
        if ebs_configuration:
            core_instances.update({'EbsConfiguration': ebs_configuration})

        master_instance = {
            'InstanceCount': 1,
            'InstanceGroupType': 'MASTER',
            'InstanceType': self.spec.worker_group.vm_spec.machine_type
        }
        if ebs_configuration:
            master_instance.update({'EbsConfiguration': ebs_configuration})

        instance_groups.append(core_instances)
        instance_groups.append(master_instance)

        # Create the log bucket to hold job's log output
        # TODO(saksena): Deprecate aws_emr_loguri flag and move
        # the log bucket creation to Create dependencies.
        logs_bucket = self._CreateLogBucket()

        # Spark SQL needs to access Hive
        cmd = self.cmd_prefix + [
            'emr', 'create-cluster', '--name', name, '--release-label',
            self.dpb_version, '--use-default-roles', '--instance-groups',
            json.dumps(instance_groups), '--application', 'Name=Spark',
            'Name=Hadoop', 'Name=Hive', '--log-uri', logs_bucket
        ]

        ec2_attributes = [
            'KeyName=' +
            aws_virtual_machine.AwsKeyFileManager.GetKeyNameForRun()
        ]
        # Place the cluster in the PKB-managed subnet when one exists.
        if self.network:
            ec2_attributes.append('SubnetId=' + self.network.subnet.id)
        cmd += ['--ec2-attributes', ','.join(ec2_attributes)]

        stdout, _, _ = vm_util.IssueCommand(cmd)
        result = json.loads(stdout)
        self.cluster_id = result['ClusterId']
        logging.info('Cluster created with id %s', self.cluster_id)
        # Tagging is done post-creation, one CLI call per tag.
        for tag_key, tag_value in util.MakeDefaultTags().items():
            self._AddTag(tag_key, tag_value)
# Example 10
  def _Create(self):
    """Create a VM instance.

    Assembles an `aws ec2 run-instances` command from the VM's placement,
    networking, tagging, spot, and capacity-reservation settings, issues it
    without raising, then classifies any failure reported on stderr.

    Raises:
      errors.Resource.CreationError: On per-host memory capacity limits,
        SpotMaxPriceTooLow, or any other non-zero return code.
      errors.Resource.RetryableCreationError: After provisioning a new
        dedicated host, so creation can be retried on it.
      errors.Benchmarks.InsufficientCapacityCloudFailure: On an instance
        capacity stockout.
      errors.Benchmarks.QuotaFailure: When the instance limit is exceeded.
    """
    placement = []
    if not util.IsRegion(self.zone):
      placement.append('AvailabilityZone=%s' % self.zone)
    if self.use_dedicated_host:
      placement.append('Tenancy=host,HostId=%s' % self.host.id)
      # Snapshot host count to detect whether another thread already added
      # a new host before we retry below.
      num_hosts = len(self.host_list)
    elif self.placement_group:
      if IsPlacementGroupCompatible(self.machine_type):
        placement.append('GroupName=%s' % self.placement_group.name)
      else:
        # logging.warn is a deprecated alias (removed in Python 3.13);
        # use logging.warning.
        logging.warning(
            'VM not placed in Placement Group. VM Type %s not supported',
            self.machine_type)
    placement = ','.join(placement)
    block_device_map = GetBlockDeviceMap(self.machine_type,
                                         self.boot_disk_size,
                                         self.image,
                                         self.region)
    # Combine per-VM metadata with the default PKB tags.
    tags = {}
    tags.update(self.vm_metadata)
    tags.update(util.MakeDefaultTags())
    create_cmd = util.AWS_PREFIX + [
        'ec2',
        'run-instances',
        '--region=%s' % self.region,
        '--subnet-id=%s' % self.network.subnet.id,
        '--client-token=%s' % self.client_token,
        '--image-id=%s' % self.image,
        '--instance-type=%s' % self.machine_type,
        '--key-name=%s' % AwsKeyFileManager.GetKeyNameForRun(),
        '--tag-specifications=%s' %
        util.FormatTagSpecifications('instance', tags)]
    if FLAGS.aws_efa:
      create_cmd.extend([
          '--network-interfaces',
          ','.join(['%s=%s' % item for item in sorted(_EFA_PARAMS.items())])
      ])
    else:
      create_cmd.append('--associate-public-ip-address')
    if block_device_map:
      create_cmd.append('--block-device-mappings=%s' % block_device_map)
    if placement:
      create_cmd.append('--placement=%s' % placement)
    if FLAGS.aws_credit_specification:
      create_cmd.append('--credit-specification=%s' %
                        FLAGS.aws_credit_specification)
    if self.user_data:
      create_cmd.append('--user-data=%s' % self.user_data)
    if self.capacity_reservation_id:
      create_cmd.append(
          '--capacity-reservation-specification=CapacityReservationTarget='
          '{CapacityReservationId=%s}' % self.capacity_reservation_id)
    if self.use_spot_instance:
      instance_market_options = collections.OrderedDict()
      spot_options = collections.OrderedDict()
      spot_options['SpotInstanceType'] = 'one-time'
      spot_options['InstanceInterruptionBehavior'] = 'terminate'
      if self.spot_price:
        spot_options['MaxPrice'] = str(self.spot_price)
      instance_market_options['MarketType'] = 'spot'
      instance_market_options['SpotOptions'] = spot_options
      create_cmd.append(
          '--instance-market-options=%s' % json.dumps(instance_market_options))
    # Don't raise here; failures are classified from stderr below.
    _, stderr, retcode = vm_util.IssueCommand(create_cmd,
                                              raise_on_failure=False)

    machine_type_prefix = self.machine_type.split('.')[0]
    host_arch = _MACHINE_TYPE_PREFIX_TO_HOST_ARCH.get(machine_type_prefix)
    if host_arch:
      self.host_arch = host_arch

    if self.use_dedicated_host and 'InsufficientCapacityOnHost' in stderr:
      if self.num_vms_per_host:
        raise errors.Resource.CreationError(
            'Failed to create host: %d vms of type %s per host exceeds '
            'memory capacity limits of the host' %
            (self.num_vms_per_host, self.machine_type))
      else:
        logging.warning(
            'Creation failed due to insufficient host capacity. A new host will '
            'be created and instance creation will be retried.')
        with self._lock:
          # Only add a host if no other thread did so since our snapshot.
          if num_hosts == len(self.host_list):
            host = AwsDedicatedHost(self.machine_type, self.zone)
            self.host_list.append(host)
            host.Create()
          self.host = self.host_list[-1]
        # New client token so the retried request isn't deduplicated.
        self.client_token = str(uuid.uuid4())
        raise errors.Resource.RetryableCreationError()
    if 'InsufficientInstanceCapacity' in stderr:
      if self.use_spot_instance:
        self.spot_status_code = 'InsufficientSpotInstanceCapacity'
        self.early_termination = True
      raise errors.Benchmarks.InsufficientCapacityCloudFailure(stderr)
    if 'SpotMaxPriceTooLow' in stderr:
      self.spot_status_code = 'SpotMaxPriceTooLow'
      self.early_termination = True
      raise errors.Resource.CreationError(stderr)
    if 'InstanceLimitExceeded' in stderr:
      raise errors.Benchmarks.QuotaFailure(stderr)
    if retcode:
      raise errors.Resource.CreationError(
          'Failed to create VM: %s return code: %s' % (retcode, stderr))
# Example 11
    def _Create(self):
        """Creates the AWS CapacityReservation.

        A reservation will be created given the VM shape in self.vm_groups.
        Count is determined by the number of VMs in said group. The
        reservation will have a lifetime determined by the general PKB
        concept of timeout_minutes. If the reservation exceeds this timeout,
        AWS will cancel it automatically. The VMs in the reservation will
        not be deleted. Note that an empty capacity reservation will incur
        costs for the VM shape / count, even if no VMs are using it.

        After the reservation is created, this method updates all the VMs
        in self.vm_groups by setting the capacity_reservation_id, as well
        as the zone attributes on the VM, and the VM's network instance.

        Raises:
          UnsupportedOsTypeError: If creating a capacity reservation for the
            given os type is not supported.
          CreationError: If a capacity reservation cannot be created in the
            region (typically indicates a stockout).
        """
        if self.os_type in os_types.LINUX_OS_TYPES:
            instance_platform = 'Linux/UNIX'
        elif self.os_type in os_types.WINDOWS_OS_TYPES:
            instance_platform = 'Windows'
        else:
            raise UnsupportedOsTypeError(
                'Unsupported os_type for AWS CapacityReservation: %s.' %
                self.os_type)

        # If the user did not specify an AZ, we need to try to create the
        # CapacityReservation in a specific AZ until it succeeds.
        # Then update the zone attribute on all the VMs in the group,
        # as well as the zone attribute on the VMs' network instance.
        if util.IsRegion(self.zone_or_region):
            zones_to_try = util.GetZonesInRegion(self.region)
        else:
            zones_to_try = [self.zone_or_region]

        # The reservation expires at the run's timeout so AWS cleans it up.
        end_date = util.MakeDefaultTags()['timeout_utc']
        for zone in zones_to_try:
            cmd = util.AWS_PREFIX + [
                'ec2',
                'create-capacity-reservation',
                '--instance-type=%s' % self.machine_type,
                '--instance-platform=%s' % instance_platform,
                '--availability-zone=%s' % zone,
                '--instance-count=%s' % self.vm_count,
                '--instance-match-criteria=targeted',
                '--region=%s' % self.region,
                '--end-date-type=limited',
                '--end-date=%s' % end_date,
            ]
            # raise_on_failure=False so a stockout in one zone doesn't abort
            # the run; the retcode check below moves on to the next zone.
            # (Without it, IssueCommand raises and the retry loop is dead.)
            stdout, stderr, retcode = vm_util.IssueCommand(
                cmd, raise_on_failure=False)
            if retcode:
                logging.info(
                    'Unable to create CapacityReservation in %s. '
                    'This may be retried. Details: %s', zone, stderr)
                continue
            json_output = json.loads(stdout)
            self.capacity_reservation_id = (
                json_output['CapacityReservation']['CapacityReservationId'])
            self._UpdateVmsInGroup(self.capacity_reservation_id, zone)
            return
        raise CreationError(
            'Unable to create CapacityReservation in any of the '
            'following zones: %s.' % zones_to_try)
    def _Create(self):
        """Creates the cluster.

        Builds CORE and MASTER instance-group specs (with optional EBS
        configuration), creates a log bucket, issues `aws emr create-cluster`
        with Spark and Hadoop, stores the cluster id, and applies the
        default PKB tags.
        """
        name = 'pkb_' + FLAGS.run_uri

        # Set up ebs details if disk_spec is present in the config
        ebs_configuration = None
        if self.spec.worker_group.disk_spec:
            # Make sure nothing we are ignoring is included in the disk spec
            assert self.spec.worker_group.disk_spec.device_path is None
            assert self.spec.worker_group.disk_spec.disk_number is None
            assert self.spec.worker_group.disk_spec.mount_point is None
            assert self.spec.worker_group.disk_spec.iops is None
            ebs_configuration = {
                'EbsBlockDeviceConfigs': [{
                    'VolumeSpecification': {
                        'SizeInGB': self.spec.worker_group.disk_spec.disk_size,
                        'VolumeType':
                        self.spec.worker_group.disk_spec.disk_type
                    },
                    'VolumesPerInstance':
                    self.spec.worker_group.disk_spec.num_striped_disks
                }]
            }

        # Create the specification for the master and the worker nodes
        instance_groups = []
        core_instances = {
            'InstanceCount': self.spec.worker_count,
            'InstanceGroupType': 'CORE',
            'InstanceType': self.spec.worker_group.vm_spec.machine_type
        }
        if ebs_configuration:
            core_instances.update({'EbsConfiguration': ebs_configuration})

        master_instance = {
            'InstanceCount': 1,
            'InstanceGroupType': 'MASTER',
            'InstanceType': self.spec.worker_group.vm_spec.machine_type
        }
        if ebs_configuration:
            master_instance.update({'EbsConfiguration': ebs_configuration})

        instance_groups.append(core_instances)
        instance_groups.append(master_instance)

        # Create the log bucket to hold job's log output
        logs_bucket = FLAGS.aws_emr_loguri or self._CreateLogBucket()

        cmd = self.cmd_prefix + [
            'emr', 'create-cluster', '--name', name, '--release-label',
            self.emr_release_label, '--use-default-roles', '--instance-groups',
            json.dumps(instance_groups), '--application', 'Name=Spark',
            'Name=Hadoop', '--log-uri', logs_bucket
        ]

        # Place the cluster in the PKB-managed subnet when one exists.
        if self.network:
            cmd += ['--ec2-attributes', 'SubnetId=' + self.network.subnet.id]

        stdout, _, _ = vm_util.IssueCommand(cmd)
        result = json.loads(stdout)
        self.cluster_id = result['ClusterId']
        logging.info('Cluster created with id %s', self.cluster_id)
        # Tagging is done post-creation, one CLI call per tag.
        for tag_key, tag_value in util.MakeDefaultTags().items():
            self._AddTag(tag_key, tag_value)
def _BuildContext(launcher_vm, booter_template_vm):
    """Returns the context variables for Jinja2 template during rendering.

    Args:
      launcher_vm: The launcher VM whose name, network, and zone seed the
        template.
      booter_template_vm: The VM object describing the machines to boot.

    Returns:
      A dict of template variables, extended with cloud-specific entries
      for GCP, AWS, or Azure.
    """
    cloud = FLAGS.cloud
    # Cloud-agnostic variables shared by every template.
    context = {
        'boot_machine_type': booter_template_vm.machine_type,
        'cloud': cloud,
        'contact_launcher': FLAGS.vms_contact_launcher,
        'launcher_vm_name': launcher_vm.name,
        'os_type': 'linux' if _IsLinux() else 'windows',
        'server_ip': launcher_vm.internal_ip,
        'server_port': _PORT,
        'start_time_file': _START_TIME_FILE_PATH,
        'timeout': _TIMEOUT_SECONDS,
        'vm_count': FLAGS.boots_per_launcher,
        'zone': launcher_vm.zone,
        'use_public_ip': '' if FLAGS.use_public_ip else 'no-',
    }
    if cloud == 'GCP':
        context.update({
            'boot_disk_size': booter_template_vm.boot_disk_size,
            'boot_vm_name_prefix': _BOOT_VM_NAME_PREFIX.format(
                launcher_name=launcher_vm.name),
            'image_family': booter_template_vm.image_family,
            'image_project': booter_template_vm.image_project,
            'gcloud_path': FLAGS.gcloud_path,
            'project': FLAGS.project,
            'tags': gcp_util.MakeFormattedDefaultTags(),
        })
    elif cloud == 'AWS':
        # Default tags plus a launcher_id so booted VMs trace back here.
        tags = aws_util.MakeDefaultTags()
        tags.update({'launcher_id': launcher_vm.name})
        context.update({
            'group_name': booter_template_vm.placement_group.name,
            'image': booter_template_vm.image,
            'key_name': 'perfkit-key-{0}'.format(FLAGS.run_uri),
            'region': aws_util.GetRegionFromZone(launcher_vm.zone),
            'subnet_id': booter_template_vm.network.subnet.id,
            'tags': aws_util.FormatTagSpecifications('instance', tags),
        })
    elif cloud == 'Azure':
        context.update({
            'boot_vm_name_prefix': launcher_vm.name.split('-', 1)[1],
            'location': launcher_vm.region,
            'image': booter_template_vm.image,
            'storage_sku': booter_template_vm.os_disk.disk_type,
            'resource_group': launcher_vm.resource_group.name,
            'nic': _BOOT_NIC_NAME_PREFIX.format(run_uri=FLAGS.run_uri),
            'password': booter_template_vm.password,
            'start_id': GetAzBootVMStartIdByLauncher(launcher_vm.name),
        })

    return context