def _Exists(self):
   """Returns true if the VM exists."""
   getinstance_cmd = util.GcloudCommand(self, 'compute', 'instances',
                                        'describe', self.name)
   stdout, _, _ = getinstance_cmd.Issue(suppress_warning=True,
                                        raise_on_failure=False)
   try:
     response = json.loads(stdout)
   except ValueError:
     return False
   try:
     # The VM may exist before we can fully parse the describe response for the
     # IP address or ID of the VM. For example, if the VM has a status of
     # provisioning, we can't yet parse the IP address. If this is the case, we
     # will continue to invoke the describe command in _PostCreate above.
     # However, if we do have this information now, it's better to stash it and
     # avoid invoking the describe command again.
     self._ParseDescribeResponse(response)
   except (KeyError, IndexError):
     pass
   return True
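
The _ParseDescribeResponse helper referenced above is not shown on this page. A minimal sketch of what it might do, assuming the standard JSON layout of `gcloud compute instances describe` (field names follow the GCE API, but treat them as illustrative here):

def _ParseDescribeResponse(self, describe_response):
  """Sketch: stash the instance ID and IP addresses from a describe response.

  Raises KeyError/IndexError while the VM is still provisioning and the
  network fields are not yet populated, which is why the caller above
  catches those exceptions.
  """
  self.id = describe_response['id']
  network_interface = describe_response['networkInterfaces'][0]
  self.internal_ip = network_interface['networkIP']
  self.ip_address = network_interface['accessConfigs'][0]['natIP']
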
    def generate_data(self, source_dir, update_default_fs, num_files,
                      size_file):
        """Method to generate data using a distributed job on the cluster."""
        cmd = util.GcloudCommand(self, 'dataproc', 'jobs', 'submit', 'hadoop')
        cmd.flags['cluster'] = self.cluster_id
        cmd.flags['jar'] = TESTDFSIO_JAR_LOCATION

        self.append_region(cmd)

        job_arguments = [TESTDFSIO_PROGRAM]
        if update_default_fs:
            job_arguments.append('-Dfs.default.name={}'.format(source_dir))
        job_arguments.append('-Dtest.build.data={}'.format(source_dir))
        job_arguments.extend([
            '-write', '-nrFiles',
            str(num_files), '-fileSize',
            str(size_file)
        ])
        cmd.additional_flags = ['--'] + job_arguments
        stdout, stderr, retcode = cmd.Issue(timeout=None)
        return {dpb_service.SUCCESS: retcode == 0}
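
For reference, assuming TESTDFSIO_JAR_LOCATION points at the Hadoop test jar and TESTDFSIO_PROGRAM names the TestDFSIO benchmark (both constants are defined outside this snippet), the command issued above would look roughly like:

# gcloud dataproc jobs submit hadoop --cluster=<cluster_id> \
#     --jar=<TESTDFSIO_JAR_LOCATION> --region=<region> -- \
#     <TESTDFSIO_PROGRAM> -Dfs.default.name=<source_dir> \
#     -Dtest.build.data=<source_dir> -write -nrFiles <num_files> \
#     -fileSize <size_file>
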
Example #3
 def _Create(self):
   """Create Cloud TPU."""
   cmd = util.GcloudCommand(self, 'compute', 'tpus', 'create',
                            self.spec.tpu_name)
   cmd.flags['range'] = self.spec.tpu_cidr_range
   if self.spec.tpu_accelerator_type:
     cmd.flags['accelerator-type'] = self.spec.tpu_accelerator_type
   if self.spec.tpu_description:
     cmd.flags['description'] = self.spec.tpu_description
   if self.spec.tpu_network:
     cmd.flags['network'] = self.spec.tpu_network
   if self.spec.tpu_tf_version:
     cmd.flags['version'] = self.spec.tpu_tf_version
   if self.spec.tpu_zone:
     cmd.flags['zone'] = self.spec.tpu_zone
   if self.spec.tpu_preemptible:
     cmd.flags['preemptible'] = self.spec.tpu_preemptible
   cmd.flags['project'] = self.project
   _, _, retcode = cmd.Issue()
   if retcode != 0:
      logging.error('Failed to create GCP Cloud TPU.')
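
Assuming GcloudCommand renders each cmd.flags entry as a --key=value argument (consistent with every example on this page), the call above issues something like:

# gcloud compute tpus create <tpu_name> --range=<tpu_cidr_range> \
#     [--accelerator-type=... --description=... --network=...] \
#     --version=<tf_version> --zone=<zone> --project=<project> [--preemptible]
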
Example #4
 def _Exists(self):
     """Returns true if the instance exists."""
     cmd = util.GcloudCommand(self, 'beta', 'bigtable', 'instances', 'list')
     cmd.flags['format'] = 'json'
     cmd.flags['project'] = self.project
     # The zone flag makes this command fail.
     cmd.flags['zone'] = []
     stdout, stderr, retcode = cmd.Issue(suppress_warning=True)
     if retcode != 0:
          # This is not ideal, as we're returning False not because we know
          # the instance isn't there, but because we can't determine whether
          # it is there. This behavior is consistent with other
          # _Exists methods.
         logging.error(
             'Unable to list GCP Bigtable instances. Return code %s '
             'STDOUT: %s\nSTDERR: %s', retcode, stdout, stderr)
         return False
     result = json.loads(stdout)
     instances = {instance['name'] for instance in result}
     full_name = 'projects/{}/instances/{}'.format(self.project, self.name)
     return full_name in instances
Example #5
    def _Create(self):
        """Creates the cluster."""

        if self.cluster_id is None:
            self.cluster_id = 'pkb-' + FLAGS.run_uri
        cmd = util.GcloudCommand(self, 'dataproc', 'clusters', 'create',
                                 self.cluster_id)
        if self.project is not None:
            cmd.flags['project'] = self.project

        # The number of worker machines in the cluster
        cmd.flags['num-workers'] = self.spec.worker_count

        # Initialize applications on the dataproc cluster
        if self.spec.applications:
            logging.info('Include the requested applications')

        for role in ['worker', 'master']:
            # Set machine type
            if self.spec.worker_group.vm_spec.machine_type:
                self._AddToCmd(cmd, '{0}-machine-type'.format(role),
                               self.spec.worker_group.vm_spec.machine_type)

            # Set boot_disk_size
            if self.spec.worker_group.vm_spec.boot_disk_size:
                self._AddToCmd(cmd, '{0}-boot-disk-size'.format(role),
                               self.spec.worker_group.vm_spec.boot_disk_size)

            # Set ssd count
            if self.spec.worker_group.vm_spec.num_local_ssds:
                self._AddToCmd(cmd, 'num-{0}-local-ssds'.format(role),
                               self.spec.worker_group.vm_spec.num_local_ssds)

        self.append_region(cmd, True)

        if self.dpb_dataproc_image_version:
            cmd.flags['image-version'] = self.dpb_dataproc_image_version

        # TODO(saksena): Retrieve the cluster create time and hold in a var
        cmd.Issue()
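
The _AddToCmd helper used above is not shown on this page; given how cmd.flags is used in the other examples, a plausible (hypothetical) implementation is a one-liner:

    def _AddToCmd(self, cmd, cmd_property, cmd_value):
        """Sketch: set a single gcloud flag on the command."""
        cmd.flags[cmd_property] = cmd_value
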
Example #6
  def SubmitJob(self, jarfile, classname, pyspark_file=None, query_file=None,
                job_poll_interval=None, job_arguments=None,
                job_stdout_file=None, job_type=None):
    """See base class."""
    args = ['jobs', 'submit', job_type]

    if job_type == self.PYSPARK_JOB_TYPE:
      args.append(pyspark_file)

    cmd = util.GcloudCommand(self, 'dataproc', *args)

    cmd.flags['cluster'] = self.cluster_id
    cmd.flags['labels'] = util.MakeFormattedDefaultTags()

    if classname:
      cmd.flags['jars'] = jarfile
      cmd.flags['class'] = classname
    elif jarfile:
      cmd.flags['jar'] = jarfile

    if query_file:
      cmd.flags['file'] = query_file

    # Dataproc writes an object describing job execution to stdout. Its
    # stderr contains a mix of the job's stderr and the job's stdout. We
    # set the driver log level (FLAGS.dpb_log_level) to suppress driver
    # log messages so that we can then, hopefully, separate the job's
    # standard output from the log messages.
    cmd.flags['driver-log-levels'] = 'root={}'.format(FLAGS.dpb_log_level)

    if job_arguments:
      cmd.additional_flags = ['--'] + job_arguments

    stdout, stderr, retcode = cmd.Issue(timeout=None)
    if retcode != 0:
      return {dpb_service.SUCCESS: False}

    stats = self._GetStats(stdout)
    stats[dpb_service.SUCCESS] = True
    return stats
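
A hedged usage sketch of this method (the service object, bucket, jar, and class name are illustrative, not from this source):

  # Hypothetical call site.
  result = dataproc_service.SubmitJob(
      jarfile='gs://<bucket>/spark-examples.jar',
      classname='org.apache.spark.examples.SparkPi',
      job_type='spark')
  assert result[dpb_service.SUCCESS]
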
Example #7
    def _Create(self):
        """Creates the cluster."""

        if self.cluster_id is None:
            self.cluster_id = 'pkb-' + FLAGS.run_uri
        cmd = util.GcloudCommand(self, 'dataproc', 'clusters', 'create',
                                 self.cluster_id)
        if self.project is not None:
            cmd.flags['project'] = self.project
        cmd.flags['num-workers'] = self.spec.worker_group.vm_count

        for group_type, group_spec in [('worker', self.spec.worker_group),
                                       ('master', self.spec.master_group)]:
            flag_name = group_type + '-machine-type'
            cmd.flags[flag_name] = group_spec.vm_spec.machine_type

            if group_spec.vm_spec.num_local_ssds:
                ssd_flag = 'num-{0}-local-ssds'.format(group_type)
                cmd.flags[ssd_flag] = group_spec.vm_spec.num_local_ssds

            if group_spec.vm_spec.boot_disk_size:
                disk_flag = group_type + '-boot-disk-size'
                cmd.flags[disk_flag] = group_spec.vm_spec.boot_disk_size

            if group_spec.vm_spec.boot_disk_type:
                disk_flag = group_type + '-boot-disk-type'
                cmd.flags[disk_flag] = group_spec.vm_spec.boot_disk_type

        if FLAGS.gcp_dataproc_subnet:
            cmd.flags['subnet'] = FLAGS.gcp_dataproc_subnet
            cmd.additional_flags.append('--no-address')

        if FLAGS.gcp_dataproc_property:
            cmd.flags['properties'] = ','.join(FLAGS.gcp_dataproc_property)

        if FLAGS.gcp_dataproc_image:
            cmd.flags['image'] = FLAGS.gcp_dataproc_image

        cmd.flags['metadata'] = util.MakeFormattedDefaultTags()
        cmd.Issue()
    def _Create(self):
        """Creates the GCE placement group."""

        cmd = gcp_util.GcloudCommand(self, 'compute', 'resource-policies',
                                     'create', 'group-placement', self.name)

        placement_policy = {
            'format': 'json',
            'region': self.region,
            'vm-count': self.num_vms
        }

        if self.style == placement_group.PLACEMENT_GROUP_CLUSTER:
            placement_policy['collocation'] = 'COLLOCATED'

        else:
            placement_policy[
                'availability-domain-count'] = self.availability_domain_count

        cmd.flags.update(placement_policy)

        cmd.Issue()
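
With the cluster placement style, and the same --key=value flag rendering as the other examples, the assembled command looks roughly like:

# gcloud compute resource-policies create group-placement <name> \
#     --format=json --region=<region> --vm-count=<num_vms> \
#     --collocation=COLLOCATED
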
Example #9
  def _GenerateCreateCommand(self, ssh_keys_path):
    """Generates a command to create the VM instance.

    Args:
      ssh_keys_path: string. Path to a file containing the sshKeys metadata.

    Returns:
      GcloudCommand. gcloud command to issue in order to create the VM instance.
    """
    cmd = util.GcloudCommand(self, 'compute', 'instances', 'create', self.name)
    cmd.flags['network'] = self.network.network_resource.name
    cmd.flags['image'] = self.image
    cmd.flags['boot-disk-auto-delete'] = True
    if FLAGS.image_project:
      cmd.flags['image-project'] = FLAGS.image_project
    cmd.flags['boot-disk-size'] = self.BOOT_DISK_SIZE_GB
    cmd.flags['boot-disk-type'] = self.BOOT_DISK_TYPE
    if self.machine_type is None:
      cmd.flags['custom-cpu'] = self.cpus
      cmd.flags['custom-memory'] = '{0}MiB'.format(self.memory_mib)
    else:
      cmd.flags['machine-type'] = self.machine_type
    cmd.flags['tags'] = 'perfkitbenchmarker'
    cmd.flags['no-restart-on-failure'] = True
    cmd.flags['metadata-from-file'] = 'sshKeys=%s' % ssh_keys_path
    metadata = ['owner=%s' % FLAGS.owner]
    for key, value in self.boot_metadata.items():
      metadata.append('%s=%s' % (key, value))
    cmd.flags['metadata'] = ','.join(metadata)
    if not FLAGS.gce_migrate_on_maintenance:
      cmd.flags['maintenance-policy'] = 'TERMINATE'
    ssd_interface_option = NVME if NVME in self.image else SCSI
    cmd.flags['local-ssd'] = (['interface={0}'.format(ssd_interface_option)] *
                              self.max_local_disks)
    if FLAGS.gcloud_scopes:
      cmd.flags['scopes'] = ','.join(re.split(r'[,; ]', FLAGS.gcloud_scopes))
    if self.preemptible:
      cmd.flags['preemptible'] = True
    return cmd
Example #10
  def Attach(self, vm):
    """Attaches the disk to a VM.

    Args:
      vm: The GceVirtualMachine instance to which the disk will be attached.
    """
    self.attached_vm_name = vm.name
    cmd = util.GcloudCommand(self, 'compute', 'instances', 'attach-disk',
                             self.attached_vm_name)
    cmd.flags['device-name'] = self.name
    cmd.flags['disk'] = self.name
    stdout, stderr, retcode = cmd.Issue(raise_on_failure=False)
    # Gcloud attach-disk commands may still attach disks despite being rate
    # limited.
    if retcode:
      if (cmd.rate_limited and 'is already being used' in stderr and
          FLAGS.retry_on_rate_limited):
        return
      debug_text = ('Ran: {%s}\nReturnCode:%s\nSTDOUT: %s\nSTDERR: %s' %
                    (' '.join(cmd.GetCommand()), retcode, stdout, stderr))
      raise errors.VmUtil.CalledProcessException(
          'Command returned a non-zero exit code:\n{}'.format(debug_text))
  def _IsReady(self):
    """Return true if the underlying resource is ready.

    Supplying this method is optional.  Use it when a resource can exist
    without being ready.  If the subclass does not implement
    it then it just returns true.

    Returns:
      True if the resource was ready in time, False if the wait timed out.
    """
    cmd = util.GcloudCommand(self, 'sql', 'instances', 'describe',
                             self.instance_id)
    stdout, _, _ = cmd.Issue()
    try:
      json_output = json.loads(stdout)
      if json_output['state'] != 'RUNNABLE':
        return False
    except (ValueError, KeyError):
      logging.exception('Error attempting to read stdout. Creation failure.')
      return False
    self.endpoint = self._ParseEndpoint(json_output)
    self.port = self.MYSQL_DEFAULT_PORT
    return True
  def _IsDBInstanceReady(self, instance_id, timeout=IS_READY_TIMEOUT):
    cmd = util.GcloudCommand(self, 'sql', 'instances', 'describe',
                             instance_id)
    start_time = datetime.datetime.now()

    while True:
      if (datetime.datetime.now() - start_time).seconds > timeout:
        logging.error('Timeout waiting for sql instance to be ready.')
        return False
      stdout, _, _ = cmd.Issue(suppress_warning=True, raise_on_failure=False)

      try:
        json_output = json.loads(stdout)
        state = json_output['state']
        logging.info('Instance %s state: %s', instance_id, state)
        if state == 'RUNNABLE':
          break
      except (ValueError, KeyError):
        logging.exception('Error attempting to read stdout. Creation failure.')
        return False
      time.sleep(5)

    return True
Example #13
 def _Delete(self):
   """Deletes the node template."""
   cmd = util.GcloudCommand(self, 'alpha', 'compute', 'sole-tenancy',
                            'node-templates', 'delete', self.name)
   cmd.flags['region'] = self.region
   cmd.Issue()
 def _Delete(self):
     """Deletes the cluster."""
     cmd = util.GcloudCommand(self, 'container', 'clusters', 'delete',
                              self.name)
     cmd.Issue()
 def _Exists(self):
     """Returns True if the cluster exits."""
     cmd = util.GcloudCommand(self, 'container', 'clusters', 'describe',
                              self.name)
     _, _, retcode = cmd.Issue(suppress_warning=True)
     return retcode == 0
Example #16
 def DataprocGcloudCommand(self, *args):
   all_args = ('dataproc',) + args
   cmd = util.GcloudCommand(self, *all_args)
   cmd.flags['region'] = self.region
   return cmd
 def _Delete(self):
     """Deletes the host."""
     cmd = util.GcloudCommand(self, 'compute', 'sole-tenancy',
                              'node-groups', 'delete', self.name)
     cmd.Issue(raise_on_failure=False)
 def _SetNodes(self, nodes: int) -> None:
     """Sets the number of nodes on the Spanner instance."""
     cmd = util.GcloudCommand(self, 'spanner', 'instances', 'update',
                              self.name)
     cmd.flags['nodes'] = nodes
     cmd.Issue(raise_on_failure=True)
Example #19
    def SubmitJob(self,
                  jarfile,
                  classname,
                  job_poll_interval=None,
                  job_arguments=None,
                  job_stdout_file=None,
                  job_type=spark_service.SPARK_JOB_TYPE):
        cmd = util.GcloudCommand(self, 'dataproc', 'jobs', 'submit', job_type)
        cmd.flags['cluster'] = self.cluster_id
        # If we don't put this here, zone is automatically added to the
        # command, which breaks `dataproc jobs submit`.
        cmd.flags['zone'] = []

        if classname:
            cmd.flags['jars'] = jarfile
            cmd.flags['class'] = classname
        else:
            cmd.flags['jar'] = jarfile

        # Dataproc writes an object describing job execution to stdout. Its
        # stderr contains a mix of the job's stderr and the job's stdout.
        # We set the driver log level to FATAL to suppress those messages so
        # that we can then, hopefully, separate the job's standard output
        # from the log messages.
        cmd.flags['driver-log-levels'] = 'root=FATAL'
        if job_arguments:
            cmd.additional_flags = ['--'] + job_arguments
        stdout, stderr, retcode = cmd.Issue(timeout=None)
        if retcode != 0:
            return {spark_service.SUCCESS: False}

        stats = self._GetStats(stdout)
        stats[spark_service.SUCCESS] = True

        if job_stdout_file:
            with open(job_stdout_file, 'w') as f:
                lines = stderr.splitlines(True)
                if (not re.match(r'Job \[.*\] submitted.', lines[0]) or
                        not re.match(r'Waiting for job output...', lines[1])):
                    raise Exception('Dataproc output in unexpected format.')
                i = 2
                if job_type == spark_service.SPARK_JOB_TYPE:
                    if not re.match(r'\r', lines[i]):
                        raise Exception(
                            'Dataproc output in unexpected format.')
                    i += 1
                    # Eat these status lines.  They end in \r, so they overwrite
                    # themselves at the console or when you cat a file.  But they
                    # are part of this string.
                    while re.match(r'\[Stage \d+:', lines[i]):
                        i += 1
                    if not re.match(r' *\r$', lines[i]):
                        raise Exception(
                            'Dataproc output in unexpected format.')

                while i < len(lines) and not re.match(r'Job \[.*\]', lines[i]):
                    f.write(lines[i])
                    i += 1
                if i != len(lines) - 1:
                    raise Exception('Dataproc output in unexpected format.')
        return stats
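
The parsing above assumes Dataproc's stderr follows a fixed layout; a sketch of the expected shape (contents illustrative):

# Job [<job-id>] submitted.
# Waiting for job output...
# \r                                 <- spark only: a bare carriage return
# [Stage 0:>          (0 + 1) / 1]\r <- spark only: zero or more status lines
#  \r                                <- spark only: trailing blank status line
# ...the job's standard output, copied to job_stdout_file...
# Job [<job-id>] finished successfully.
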
Example #20
    def _CreateGcloudSqlInstance(self):
        storage_size = self.spec.disk_spec.disk_size
        instance_zone = self.spec.vm_spec.zone

        authorized_network = self._GetAuthorizedNetworks([self.client_vm])

        database_version_string = self._GetEngineVersionString(
            self.spec.engine, self.spec.engine_version)

        cmd_string = [
            self,
            'beta',
            'sql',
            'instances',
            'create',
            self.instance_id,
            '--quiet',
            '--format=json',
            '--activation-policy=ALWAYS',
            '--assign-ip',
            '--authorized-networks=%s' % authorized_network,
            '--zone=%s' % instance_zone,
            '--database-version=%s' % database_version_string,
            '--pricing-plan=%s' % self.GCP_PRICING_PLAN,
            '--storage-size=%d' % storage_size,
        ]
        if self.spec.engine == relational_db.MYSQL:
            cmd_string.append('--enable-bin-log')

        if (self.spec.vm_spec.cpus and self.spec.vm_spec.memory):
            self._ValidateSpec()
            memory = self.spec.vm_spec.memory
            cpus = self.spec.vm_spec.cpus
            self._ValidateMachineType(memory, cpus)
            cmd_string.append('--cpu={}'.format(cpus))
            cmd_string.append('--memory={}MiB'.format(memory))
        elif hasattr(self.spec.vm_spec, 'machine_type'):
            machine_type_flag = '--tier=%s' % self.spec.vm_spec.machine_type
            cmd_string.append(machine_type_flag)
        else:
            raise Exception('Unspecified machine type')

        if self.spec.high_availability:
            cmd_string.append(self._GetHighAvailabilityFlag())

        if self.spec.backup_enabled:
            cmd_string.append('--backup')
            cmd_string.append('--backup-start-time={}'.format(
                self.spec.backup_start_time))
        else:
            cmd_string.append('--no-backup')
        cmd = util.GcloudCommand(*cmd_string)
        cmd.flags['project'] = self.project

        _, _, _ = cmd.Issue()

        if FLAGS.mysql_flags:
            cmd_string = [
                self, 'sql', 'instances', 'patch', self.instance_id,
                '--database-flags=%s' % ','.join(FLAGS.mysql_flags)
            ]
            cmd = util.GcloudCommand(*cmd_string)
            _, stderr, _ = cmd.Issue()
            if stderr:
                raise Exception('Invalid MySQL flags: %s' % stderr)
 def _Exists(self):
     """Check to see whether the cluster exists."""
     cmd = util.GcloudCommand(self, 'dataproc', 'clusters', 'describe',
                              self.cluster_id)
     _, _, retcode = cmd.Issue()
     return retcode == 0
 def _Delete(self):
     """Deletes the cluster."""
     cmd = util.GcloudCommand(self, 'dataproc', 'clusters', 'delete',
                              self.cluster_id)
     cmd.Issue()
    def _GenerateCreateCommand(self, ssh_keys_path):
        """Generates a command to create the VM instance.

    Args:
      ssh_keys_path: string. Path to a file containing the sshKeys metadata.

    Returns:
      GcloudCommand. gcloud command to issue in order to create the VM instance.
    """
        args = ['compute', 'instances', 'create', self.name]

        cmd = util.GcloudCommand(self, *args)
        if self.network.subnet_resource is not None:
            cmd.flags['subnet'] = self.network.subnet_resource.name
        else:
            cmd.flags['network'] = self.network.network_resource.name
        if self.image:
            cmd.flags['image'] = self.image
        elif self.image_family:
            cmd.flags['image-family'] = self.image_family
        if self.image_project is not None:
            cmd.flags['image-project'] = self.image_project
        cmd.flags['boot-disk-auto-delete'] = True
        if self.boot_disk_size:
            cmd.flags['boot-disk-size'] = self.boot_disk_size
        if self.boot_disk_type:
            cmd.flags['boot-disk-type'] = self.boot_disk_type
        if self.machine_type is None:
            cmd.flags['custom-cpu'] = self.cpus
            cmd.flags['custom-memory'] = '{0}MiB'.format(self.memory_mib)
            if self.min_cpu_platform:
                cmd.flags['min-cpu-platform'] = self.min_cpu_platform
        else:
            cmd.flags['machine-type'] = self.machine_type
            if self.min_cpu_platform and 'n1-' in self.machine_type:
                cmd.flags['min-cpu-platform'] = self.min_cpu_platform
            elif self.min_cpu_platform:
                logging.warning('Cannot set min-cpu-platform for %s',
                                self.machine_type)
        if self.gpu_count and self.machine_type and 'a2-' not in self.machine_type:
            # A2 machine type already has predefined GPU type and count.
            cmd.flags['accelerator'] = GenerateAcceleratorSpecString(
                self.gpu_type, self.gpu_count)
        cmd.flags['tags'] = ','.join(['perfkitbenchmarker'] +
                                     (self.gce_tags or []))
        cmd.flags['no-restart-on-failure'] = True
        if self.node_group:
            cmd.flags['node-group'] = self.node_group.name
        if self.gce_shielded_secure_boot:
            cmd.flags['shielded-secure-boot'] = True

        if self.network.placement_group:
            self.metadata.update(
                self.network.placement_group.GetResourceMetadata())
            cmd.flags['resource-policies'] = self.network.placement_group.name
            cmd.flags['maintenance-policy'] = 'TERMINATE'
        else:
            self.metadata[
                'placement_group_style'] = placement_group.PLACEMENT_GROUP_NONE

        metadata_from_file = {'sshKeys': ssh_keys_path}
        parsed_metadata_from_file = flag_util.ParseKeyValuePairs(
            FLAGS.gcp_instance_metadata_from_file)
        for key, value in six.iteritems(parsed_metadata_from_file):
            if key in metadata_from_file:
                logging.warning(
                    'Metadata "%s" is set internally. Cannot be overridden '
                    'from command line.', key)
                continue
            metadata_from_file[key] = value
        cmd.flags['metadata-from-file'] = ','.join(
            ['%s=%s' % (k, v) for k, v in six.iteritems(metadata_from_file)])

        metadata = {}
        metadata.update(self.boot_metadata)
        metadata.update(util.GetDefaultTags())

        additional_metadata = {}
        additional_metadata.update(self.vm_metadata)
        additional_metadata.update(
            flag_util.ParseKeyValuePairs(FLAGS.gcp_instance_metadata))

        for key, value in six.iteritems(additional_metadata):
            if key in metadata:
                logging.warning(
                    'Metadata "%s" is set internally. Cannot be overridden '
                    'from command line.', key)
                continue
            metadata[key] = value

        if self.preemptible:
            cmd.flags['preemptible'] = True
            preemptible_status_bucket = (
                f'gs://{FLAGS.gcp_preemptible_status_bucket}/{FLAGS.run_uri}/')
            self.preempt_marker = f'{preemptible_status_bucket}{self.name}'
            metadata.update([self._PreemptibleMetadataKeyValue()])

        cmd.flags['metadata'] = util.FormatTags(metadata)

        # TODO(user): If GCE one day supports live migration on GPUs
        #                           this can be revised.
        if (FLAGS['gce_migrate_on_maintenance'].present
                and FLAGS.gce_migrate_on_maintenance and self.gpu_count):
            raise errors.Config.InvalidValue(
                'Cannot set flag gce_migrate_on_maintenance on instances with GPUs, '
                'as it is not supported by GCP.')
        if not FLAGS.gce_migrate_on_maintenance or self.gpu_count:
            cmd.flags['maintenance-policy'] = 'TERMINATE'
        cmd.flags['local-ssd'] = (
            ['interface={0}'.format(FLAGS.gce_ssd_interface)] *
            self.max_local_disks)
        if FLAGS.gcloud_scopes:
            cmd.flags['scopes'] = ','.join(
                re.split(r'[,; ]', FLAGS.gcloud_scopes))
        cmd.flags['network-tier'] = self.gce_network_tier.upper()
        cmd.flags['labels'] = util.MakeFormattedDefaultTags()

        return cmd
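
GenerateAcceleratorSpecString is used here and in the next example but not defined on this page. gcloud's --accelerator flag takes a 'type=...,count=...' value, so a minimal sketch (hypothetical; the real helper may also map GPU type names) is:

def GenerateAcceleratorSpecString(accelerator_type, accelerator_count):
  """Sketch: build the value for gcloud's --accelerator flag."""
  return 'type={0},count={1}'.format(accelerator_type, accelerator_count)
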
Example #24
  def _GenerateCreateCommand(self, ssh_keys_path):
    """Generates a command to create the VM instance.

    Args:
      ssh_keys_path: string. Path to a file containing the sshKeys metadata.

    Returns:
      GcloudCommand. gcloud command to issue in order to create the VM instance.
    """
    args = []
    if self.node_group:
      args = ['alpha']
    args.extend(['compute', 'instances', 'create', self.name])

    cmd = util.GcloudCommand(self, *args)
    if self.network.subnet_resource is not None:
      cmd.flags['subnet'] = self.network.subnet_resource.name
    else:
      cmd.flags['network'] = self.network.network_resource.name
    if self.image:
      cmd.flags['image'] = self.image
    elif self.image_family:
      cmd.flags['image-family'] = self.image_family
    if self.image_project is not None:
      cmd.flags['image-project'] = self.image_project
    cmd.flags['boot-disk-auto-delete'] = True
    cmd.flags['boot-disk-size'] = self.boot_disk_size or self.BOOT_DISK_SIZE_GB
    cmd.flags['boot-disk-type'] = self.boot_disk_type or self.BOOT_DISK_TYPE
    if self.machine_type is None:
      cmd.flags['custom-cpu'] = self.cpus
      cmd.flags['custom-memory'] = '{0}MiB'.format(self.memory_mib)
    else:
      cmd.flags['machine-type'] = self.machine_type
    if self.gpu_count:
      cmd.flags['accelerator'] = GenerateAcceleratorSpecString(self.gpu_type,
                                                               self.gpu_count)
    cmd.flags['tags'] = 'perfkitbenchmarker'
    cmd.flags['no-restart-on-failure'] = True
    if self.node_group:
      cmd.flags['node-group'] = self.node_group.name
    if self.min_cpu_platform:
      cmd.flags['min-cpu-platform'] = self.min_cpu_platform

    metadata_from_file = {'sshKeys': ssh_keys_path}
    parsed_metadata_from_file = flag_util.ParseKeyValuePairs(
        FLAGS.gcp_instance_metadata_from_file)
    for key, value in parsed_metadata_from_file.items():
      if key in metadata_from_file:
        logging.warning('Metadata "%s" is set internally. Cannot be overridden '
                        'from command line.', key)
        continue
      metadata_from_file[key] = value
    cmd.flags['metadata-from-file'] = ','.join([
        '%s=%s' % (k, v) for k, v in metadata_from_file.items()
    ])

    metadata = {'owner': FLAGS.owner} if FLAGS.owner else {}
    metadata.update(self.boot_metadata)
    parsed_metadata = flag_util.ParseKeyValuePairs(FLAGS.gcp_instance_metadata)
    for key, value in parsed_metadata.items():
      if key in metadata:
        logging.warning('Metadata "%s" is set internally. Cannot be overridden '
                        'from command line.', key)
        continue
      metadata[key] = value
    cmd.flags['metadata'] = ','.join(
        ['%s=%s' % (k, v) for k, v in metadata.items()])

    # TODO(gareth-ferneyhough): If GCE one day supports live migration on GPUs
    #                           this can be revised.
    if (FLAGS['gce_migrate_on_maintenance'].present and
        FLAGS.gce_migrate_on_maintenance and self.gpu_count):
      raise errors.Config.InvalidValue(
          'Cannot set flag gce_migrate_on_maintenance on instances with GPUs, '
          'as it is not supported by GCP.')
    if not FLAGS.gce_migrate_on_maintenance or self.gpu_count:
      cmd.flags['maintenance-policy'] = 'TERMINATE'
    ssd_interface_option = FLAGS.gce_ssd_interface
    cmd.flags['local-ssd'] = (['interface={0}'.format(ssd_interface_option)] *
                              self.max_local_disks)
    if FLAGS.gcloud_scopes:
      cmd.flags['scopes'] = ','.join(re.split(r'[,; ]', FLAGS.gcloud_scopes))
    if self.preemptible:
      cmd.flags['preemptible'] = True
    return cmd
Example #25
 def _Exists(self):
   """Returns True if the host exists."""
   cmd = util.GcloudCommand(self, 'alpha', 'compute', 'sole-tenancy',
                            'node-groups', 'describe', self.name)
   _, _, retcode = cmd.Issue(suppress_warning=True)
   return not retcode
Example #26
 def _Delete(self):
     """Deletes the instance."""
     cmd = util.GcloudCommand(self, 'redis', 'instances', 'delete',
                              self.name)
     cmd.flags['region'] = self.redis_region
     cmd.Issue(timeout=COMMAND_TIMEOUT, raise_on_failure=False)
Example #27
 def _Delete(self):
   """Deletes the host."""
   cmd = util.GcloudCommand(self, 'alpha', 'compute', 'sole-tenancy',
                            'node-groups', 'delete', self.name)
   cmd.Issue()
Example #28
 def _Delete(self):
     """Deletes the disk."""
     cmd = util.GcloudCommand(self, 'compute', 'disks', 'delete', self.name)
     cmd.Issue(raise_on_failure=False)
Example #29
 def _Delete(self):
   """Delete a GCE VM instance."""
   delete_cmd = util.GcloudCommand(self, 'compute', 'instances', 'delete',
                                   self.name)
   delete_cmd.Issue()
Example #30
 def _Create(self):
     """Creates the host."""
     cmd = util.GcloudCommand(self, 'alpha', 'compute', 'sole-tenancy',
                              'hosts', 'create', self.name)
     cmd.flags['host-type'] = self.host_type
     cmd.Issue()