def _Create(self):
  """Creates the cluster.

  Raises:
    errors.Benchmarks.InsufficientCapacityCloudFailure: if AWS reports
      insufficient cache-cluster capacity for the requested node type/zone.
  """
  cmd = [
      'aws', 'elasticache', 'create-replication-group',
      '--engine', 'redis',
      '--engine-version', self.version,
      '--replication-group-id', self.name,
      '--replication-group-description', self.name,
      '--region', self.redis_region,
      '--cache-node-type', self.node_type,
      '--cache-subnet-group-name', self.subnet_group_name,
      '--preferred-cache-cluster-a-zs', self.spec.vms[0].zone
  ]
  # The appended zone extends --preferred-cache-cluster-a-zs, which accepts
  # multiple values; the replica lands in the failover zone (or the same
  # zone, for same-zone failover).
  if self.failover_style == managed_memory_store.Failover.FAILOVER_SAME_REGION:
    cmd += [self.failover_zone]
  elif self.failover_style == managed_memory_store.Failover.FAILOVER_SAME_ZONE:
    cmd += [self.spec.vms[0].zone]
  if self.failover_style != managed_memory_store.Failover.FAILOVER_NONE:
    cmd += ['--automatic-failover-enabled', '--num-cache-clusters', '2']
  cmd += ['--tags']
  cmd += util.MakeFormattedDefaultTags()
  # Do not raise on CLI failure directly: surface capacity exhaustion as the
  # dedicated benchmark failure type (consistent with the companion
  # implementation in this file) so callers can retry elsewhere.
  _, stderr, _ = vm_util.IssueCommand(cmd, raise_on_failure=False)
  if 'InsufficientCacheClusterCapacity' in stderr:
    raise errors.Benchmarks.InsufficientCapacityCloudFailure(stderr)
def _Create(self):
  """Creates the cluster.

  Raises:
    errors.Benchmarks.InsufficientCapacityCloudFailure: when AWS reports
      InsufficientCacheClusterCapacity for the request.
  """
  primary_zone = self.spec.vms[0].zone
  cmd = [
      'aws', 'elasticache', 'create-replication-group',
      '--engine', 'redis',
      '--engine-version', self.version,
      '--replication-group-id', self.name,
      '--replication-group-description', self.name,
      '--region', self.redis_region,
      '--cache-node-type', self.node_type,
      '--cache-subnet-group-name', self.subnet_group_name,
      '--preferred-cache-cluster-a-zs', primary_zone,
  ]
  failover = self.failover_style
  # Append the replica's zone to --preferred-cache-cluster-a-zs (the flag
  # accepts multiple values).
  if failover == managed_memory_store.Failover.FAILOVER_SAME_REGION:
    cmd.append(self.failover_zone)
  elif failover == managed_memory_store.Failover.FAILOVER_SAME_ZONE:
    cmd.append(primary_zone)
  if failover != managed_memory_store.Failover.FAILOVER_NONE:
    cmd.extend(['--automatic-failover-enabled', '--num-cache-clusters', '2'])
  cmd.append('--tags')
  cmd.extend(util.MakeFormattedDefaultTags())
  # Inspect stderr ourselves so capacity exhaustion maps to a typed failure.
  _, stderr, _ = vm_util.IssueCommand(cmd, raise_on_failure=False)
  if 'InsufficientCacheClusterCapacity' in stderr:
    raise errors.Benchmarks.InsufficientCapacityCloudFailure(stderr)
def _Create(self) -> None:
  """Creates the dynamodb table.

  Builds an `aws dynamodb create-table` command, then rewrites the
  --attribute-definitions / --key-schema argument values in place (via the
  _SetAttrDefnArgs / _SetKeySchemaArgs helpers) when sort keys, LSIs, or
  GSIs are configured. Creation failures are logged, not raised.
  """
  cmd = util.AWS_PREFIX + [
      'dynamodb',
      'create-table',
      '--region',
      self.region,
      '--table-name',
      self.table_name,
      '--attribute-definitions',
      self._PrimaryAttrsJson(),
      '--key-schema',
      self._PrimaryKeyJson(),
      '--provisioned-throughput',
      self.throughput,
      '--tags'
  ] + util.MakeFormattedDefaultTags()
  if self.lsi_count > 0 and self.use_sort:
    # LSIs require a sort key: declare primary + sort attributes plus the
    # LSI's own attribute definition (lsi_indexes[1]); lsi_indexes[0] is the
    # index spec passed to --local-secondary-indexes.
    self._SetAttrDefnArgs(cmd, [
        self._PrimaryAttrsJson(),
        self._SortAttrsJson(), self.lsi_indexes[1]
    ])
    cmd.append('--local-secondary-indexes')
    cmd.append(self.lsi_indexes[0])
    self._SetKeySchemaArgs(
        cmd, [self._PrimaryKeyJson(), self._SortKeyJson()])
  elif self.use_sort:
    # Sort key without LSIs: composite key schema, no extra index attrs.
    self._SetAttrDefnArgs(
        cmd, [self._PrimaryAttrsJson(), self._SortAttrsJson()])
    self._SetKeySchemaArgs(
        cmd, [self._PrimaryKeyJson(), self._SortKeyJson()])
  if self.gsi_count > 0:
    # NOTE(review): cmd[10] is assumed to be the current
    # --attribute-definitions value (this offset depends on the length of
    # util.AWS_PREFIX), and strip('[]').split(',') re-parses that value
    # textually — it would mis-split if the JSON ever contained nested
    # commas. Fragile; confirm before changing the command layout.
    self._SetAttrDefnArgs(
        cmd, cmd[10].strip('[]').split(',') + [self.gsi_indexes[1]])
    cmd.append('--global-secondary-indexes')
    cmd.append(self.gsi_indexes[0])
  _, stderror, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False)
  if retcode != 0:
    # Best-effort: a failed create is reported but does not abort the run.
    logging.warning('Failed to create table! %s', stderror)
def Create(self):
  """Opens the memcached port, creates the subnet group and cluster, and
  records the (address, port) endpoints of every cache node."""
  # Allow client VMs to reach memcached on the ElastiCache port.
  aws_network.AwsFirewall.GetFirewall().AllowPortInSecurityGroup(
      self.region, self.security_group_id, ELASTICACHE_PORT)

  # Register a cache subnet group so the cluster lands in our subnet.
  vm_util.IssueCommand([
      'aws', 'elasticache', 'create-cache-subnet-group',
      '--region=%s' % self.region,
      '--cache-subnet-group-name=%s' % self.subnet_group_name,
      '--cache-subnet-group-description="PKB memcached_ycsb benchmark"',
      '--subnet-ids=%s' % self.subnet_id
  ])

  # Launch the memcached cluster itself, tagged with the PKB defaults.
  create_cluster_cmd = [
      'aws', 'elasticache', 'create-cache-cluster',
      '--engine=memcached',
      '--cache-subnet-group-name=%s' % self.subnet_group_name,
      '--cache-cluster-id=%s' % self.cluster_id,
      '--num-cache-nodes=%s' % self.num_servers,
      '--region=%s' % self.region,
      '--cache-node-type=%s' % self.node_type,
      '--tags'
  ] + util.MakeFormattedDefaultTags()
  vm_util.IssueCommand(create_cluster_cmd)

  # Block until the cluster reports itself up, then parse out endpoints.
  cluster_info = self._WaitForClusterUp()
  self.hosts = [(node['Endpoint']['Address'], node['Endpoint']['Port'])
                for node in cluster_info['CacheNodes']]
  assert len(self.hosts) == self.num_servers
def UpdateTimeout(self, timeout_minutes: int) -> None:
  """Updates the timeout associated with the table."""
  formatted_tags = util.MakeFormattedDefaultTags(timeout_minutes)
  tag_cmd = self._GetTagResourceCommand(formatted_tags)
  logging.info('Updating timeout tags on table %s with timeout minutes %s',
               self.table_name, timeout_minutes)
  # Tagging is retried since it is idempotent and occasionally throttled.
  util.IssueRetryableCommand(tag_cmd)
def _Create(self):
  """Creates the dynamodb table.

  Builds an `aws dynamodb create-table` command and, depending on the
  aws_dynamodb_use_sort / aws_dynamodb_lsi_count / aws_dynamodb_gsi_count
  flags, rewrites the --attribute-definitions and --key-schema values and
  appends index specifications. Creation failures are logged, not raised.
  """
  cmd = util.AWS_PREFIX + [
      'dynamodb',
      'create-table',
      '--region',
      self.region,
      '--table-name',
      self.table_name,
      '--attribute-definitions',
      self.part_attributes,
      '--key-schema',
      self.primary_key,
      '--provisioned-throughput',
      self.throughput,
      '--tags'
  ] + util.MakeFormattedDefaultTags()
  # Locate the argument value slots instead of hard-coding cmd[10]/cmd[12]:
  # fixed offsets silently corrupt the command if util.AWS_PREFIX ever
  # changes length.
  attr_idx = cmd.index('--attribute-definitions') + 1
  schema_idx = cmd.index('--key-schema') + 1
  if FLAGS.aws_dynamodb_lsi_count > 0 and FLAGS.aws_dynamodb_use_sort:
    # LSIs require a sort key; lsi_indexes[1] is the index's attribute
    # definition, lsi_indexes[0] the index spec itself.
    cmd[attr_idx] = ('[' + self.part_attributes + ', ' + self.sort_attributes +
                     ', ' + self.lsi_indexes[1] + ']')
    logging.info('adding to --attribute definitions')
    cmd.append('--local-secondary-indexes')
    cmd.append(self.lsi_indexes[0])
    cmd[schema_idx] = '[' + self.primary_key + ', ' + self.sort_key + ']'
    logging.info('adding to --key-schema')
  elif FLAGS.aws_dynamodb_use_sort:
    cmd[attr_idx] = ('[' + self.part_attributes + ', ' + self.sort_attributes +
                     ']')
    logging.info('adding to --attribute definitions')
    cmd[schema_idx] = '[' + self.primary_key + ', ' + self.sort_key + ']'
    logging.info('adding to --key-schema')
  if FLAGS.aws_dynamodb_gsi_count > 0:
    # Splice the GSI's attribute definition into the existing bracketed list.
    cmd[attr_idx] = cmd[attr_idx][:-1] + ', ' + self.gsi_indexes[1] + ']'
    logging.info('adding to --attribute definitions')
    cmd.append('--global-secondary-indexes')
    cmd.append(self.gsi_indexes[0])
  _, stderror, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False)
  if retcode != 0:
    logging.warning('Failed to create table! %s', stderror)
def AddTags(resource_arn, region):
  """Adds tags to a Redshift cluster created by PerfKitBenchmarker.

  Args:
    resource_arn: The arn of AWS resource to operate on.
    region: The AWS region resource was created in.
  """
  tag_cmd = util.AWS_PREFIX + [
      'redshift', 'create-tags',
      '--region=%s' % region,
      '--resource-name', resource_arn,
      '--tags',
  ]
  tag_cmd.extend(util.MakeFormattedDefaultTags())
  vm_util.IssueCommand(tag_cmd)
def AddTagsToExistingInstance(table_name, region):
  """Add tags to an existing DynamoDB table."""
  # Look up the table's ARN, which tag-resource requires.
  describe_cmd = util.AWS_PREFIX + [
      'dynamodb', 'describe-table',
      '--table-name', table_name,
      '--region', region,
  ]
  stdout, _, _ = vm_util.IssueCommand(describe_cmd)
  arn = json.loads(stdout)['Table']['TableArn']
  tag_cmd = util.AWS_PREFIX + [
      'dynamodb', 'tag-resource',
      '--resource-arn', arn,
      '--region', region,
      '--tags',
  ] + util.MakeFormattedDefaultTags()
  vm_util.IssueCommand(tag_cmd)
def _Create(self):
  """Create the load balancer."""
  create_cmd = util.AWS_PREFIX + [
      '--region', self.region,
      'elbv2', 'create-load-balancer',
      '--name', self.name,
      '--type', self.type,
      '--tags',
  ]
  create_cmd += util.MakeFormattedDefaultTags()
  # --subnets takes one or more subnet ids as positional values.
  create_cmd.append('--subnets')
  create_cmd += self.subnet_ids
  stdout, _, _ = vm_util.IssueCommand(create_cmd)
  # Record the ARN and DNS name of the newly created balancer.
  lb = json.loads(stdout)['LoadBalancers'][0]
  self.arn = lb['LoadBalancerArn']
  self.dns_name = lb['DNSName']
def _Create(self):
  """Creates the cache cluster."""
  node_count = str(managed_memory_store.MEMCACHED_NODE_COUNT)
  cmd = [
      'aws', 'elasticache', 'create-cache-cluster',
      '--engine', 'memcached',
      '--region', self.region,
      '--cache-cluster-id', self.name,
      '--preferred-availability-zone', self.zone,
      '--num-cache-nodes', node_count,
      '--engine-version', MEMCACHED_VERSION,
      '--cache-node-type', self.node_type,
      '--cache-subnet-group-name', self.subnet_group_name,
      '--tags',
  ]
  cmd.extend(util.MakeFormattedDefaultTags())
  vm_util.IssueCommand(cmd)
def MakeBucket(self, bucket_name):
  """Creates an S3 bucket in self.region and applies the default tag set.

  Args:
    bucket_name: string. Name of the bucket to create.
  """
  vm_util.IssueCommand(
      ['aws', 's3', 'mb',
       's3://%s' % bucket_name,
       '--region=%s' % self.region])
  # Tag the bucket with the persistent timeout flag so that buckets can
  # optionally stick around after PKB runs.
  default_tags = util.MakeFormattedDefaultTags(
      timeout_minutes=max(FLAGS.timeout_minutes,
                          FLAGS.persistent_timeout_minutes))
  tag_set = ','.join('{%s}' % tag for tag in default_tags)
  # Pass --region explicitly (matching the bucket-creation call above):
  # without it the tagging request goes to the profile's default region and
  # can miss the bucket that was just created.
  vm_util.IssueCommand(
      ['aws', 's3api', 'put-bucket-tagging',
       '--bucket', bucket_name,
       '--tagging', 'TagSet=[%s]' % tag_set,
       '--region=%s' % self.region])
def _Create(self):
  """Creates the cluster.

  Builds CORE and MASTER instance-group specifications (optionally with EBS
  volumes from the group's disk spec), invokes `emr create-cluster`, and
  stores the resulting cluster id on self.cluster_id.
  """
  name = 'pkb_' + FLAGS.run_uri
  # Use the caller-provided log URI when set; otherwise create a log bucket.
  logs_bucket = FLAGS.aws_emr_loguri or self._CreateLogBucket()

  instance_groups = []
  for group_type, group_spec in [('CORE', self.spec.worker_group),
                                 ('MASTER', self.spec.master_group)]:
    instance_properties = {'InstanceCount': group_spec.vm_count,
                          'InstanceGroupType': group_type,
                          'InstanceType': group_spec.vm_spec.machine_type,
                          'Name': group_type + ' group'}
    if group_spec.disk_spec:
      # Make sure nothing we are ignoring is included in the disk spec
      assert group_spec.disk_spec.device_path is None
      assert group_spec.disk_spec.disk_number is None
      assert group_spec.disk_spec.mount_point is None
      assert group_spec.disk_spec.iops is None
      ebs_configuration = {'EbsBlockDeviceConfigs': [
          {'VolumeSpecification': {
              'SizeInGB': group_spec.disk_spec.disk_size,
              'VolumeType': group_spec.disk_spec.disk_type},
           'VolumesPerInstance': group_spec.disk_spec.num_striped_disks}]}
      instance_properties.update({'EbsConfiguration': ebs_configuration})
    instance_groups.append(instance_properties)

  # we need to store the cluster id.
  cmd = self.cmd_prefix + ['emr', 'create-cluster',
                           '--name', name,
                           '--release-label', RELEASE_LABEL,
                           '--use-default-roles',
                           '--instance-groups', json.dumps(instance_groups),
                           '--application', 'Name=Spark', 'Name=Hadoop',
                           '--log-uri', logs_bucket,
                           '--tags'] + util.MakeFormattedDefaultTags()
  if self.network:
    # Place the cluster in the benchmark's subnet when a network exists.
    cmd += ['--ec2-attributes', 'SubnetId=' + self.network.subnet.id]
  stdout, _, _ = vm_util.IssueCommand(cmd)
  result = json.loads(stdout)
  self.cluster_id = result['ClusterId']
  logging.info('Cluster created with id %s', self.cluster_id)
def MakeBucket(self, bucket_name, raise_on_failure=True):
  """Creates an S3 bucket and applies the default PKB tag set.

  Args:
    bucket_name: string. Name of the bucket to create.
    raise_on_failure: bool. Whether a failed `s3 mb` raises
      errors.Benchmarks.BucketCreationError.

  Raises:
    errors.Benchmarks.BucketCreationError: when bucket creation fails and
      raise_on_failure is True.
  """
  mb_cmd = [
      'aws', 's3', 'mb',
      's3://%s' % bucket_name,
      '--region=%s' % self.region
  ]
  _, stderr, ret_code = vm_util.IssueCommand(mb_cmd, raise_on_failure=False)
  if ret_code and raise_on_failure:
    raise errors.Benchmarks.BucketCreationError(stderr)
  # Tag the bucket with the persistent timeout flag so that buckets can
  # optionally stick around after PKB runs.
  timeout = max(FLAGS.timeout_minutes, FLAGS.persistent_timeout_minutes)
  default_tags = util.MakeFormattedDefaultTags(timeout_minutes=timeout)
  tag_set = ','.join('{%s}' % tag for tag in default_tags)
  vm_util.IssueRetryableCommand(
      ['aws', 's3api', 'put-bucket-tagging',
       '--bucket', bucket_name,
       '--tagging', 'TagSet=[%s]' % tag_set,
       '--region=%s' % self.region])
def _CreateDbSubnetGroup(self, subnets):
  """Creates a new db subnet group.

  Args:
    subnets: a list of strings. The db subnet group will consist of all
      subnets in this list.
  """
  group_name = 'pkb-db-subnet-group-{0}'.format(FLAGS.run_uri)
  create_cmd = util.AWS_PREFIX + [
      'rds', 'create-db-subnet-group',
      '--db-subnet-group-name', group_name,
      '--db-subnet-group-description', 'pkb_subnet_group_for_db',
      '--region', self.region,
      '--subnet-ids',
  ]
  create_cmd += [subnet.id for subnet in subnets]
  create_cmd += ['--tags']
  create_cmd += util.MakeFormattedDefaultTags()
  vm_util.IssueCommand(create_cmd)
  # save for cleanup
  self.db_subnet_group_name = group_name
  self.security_group_id = (
      self.client_vm.network.regional_network.vpc.default_security_group_id)
def UpdateWithDefaultTags(self) -> None:
  """Adds default tags to the table."""
  logging.info('Setting default tags on table %s', self.table_name)
  default_tags = util.MakeFormattedDefaultTags()
  # Tagging is retried since it is idempotent and occasionally throttled.
  util.IssueRetryableCommand(self._GetTagResourceCommand(default_tags))
def _Create(self):
  """Creates the AWS RDS instance.

  For plain RDS engines, creates a single db instance. For Aurora engines,
  creates a db cluster and then one db instance per zone in self.zones (the
  first zone's instance keeps self.instance_id and is assumed to be the
  writer). All created instance ids are appended to self.all_instance_ids.

  Raises:
    Exception: if unknown how to create self.spec.engine, or if the zone
      count is inconsistent with self.spec.high_availability (Aurora only).
  """
  if self.spec.engine in _RDS_ENGINES:
    instance_identifier = self.instance_id
    self.all_instance_ids.append(instance_identifier)
    cmd = util.AWS_PREFIX + [
        'rds', 'create-db-instance',
        '--db-instance-identifier=%s' % instance_identifier,
        '--engine=%s' % self.spec.engine,
        '--master-username=%s' % self.spec.database_username,
        '--master-user-password=%s' % self.spec.database_password,
        '--allocated-storage=%s' % self.spec.disk_spec.disk_size,
        '--storage-type=%s' % self.spec.disk_spec.disk_type,
        '--db-instance-class=%s' % self.spec.vm_spec.machine_type,
        '--no-auto-minor-version-upgrade',
        '--region=%s' % self.region,
        '--engine-version=%s' % self.spec.engine_version,
        '--db-subnet-group-name=%s' % self.db_subnet_group_name,
        '--vpc-security-group-ids=%s' % self.security_group_id,
        '--availability-zone=%s' % self.spec.vm_spec.zone,
        '--tags'] + util.MakeFormattedDefaultTags()
    if self.spec.disk_spec.disk_type == aws_disk.IO1:
      # io1 volumes require an explicit provisioned-IOPS value.
      cmd.append('--iops=%s' % self.spec.disk_spec.iops)
    # TODO(ferneyhough): add backup_enabled and backup_window
    vm_util.IssueCommand(cmd)
  elif self.spec.engine in _AURORA_ENGINES:
    # HA requires >1 zone and non-HA requires exactly one; reject mismatches.
    zones_needed_for_high_availability = len(self.zones) > 1
    if zones_needed_for_high_availability != self.spec.high_availability:
      raise Exception('When managed_db_high_availability is true, multiple '
                      'zones must be specified. When '
                      'managed_db_high_availability is false, one zone '
                      'should be specified. '
                      'managed_db_high_availability: {0} '
                      'zone count: {1} '.format(
                          zones_needed_for_high_availability,
                          len(self.zones)))
    cluster_identifier = 'pkb-db-cluster-' + FLAGS.run_uri
    # Create the cluster.
    # NOTE(review): the cluster uses self.spec.zones[0] while the instance
    # loop below iterates self.zones — presumably these agree; confirm.
    cmd = util.AWS_PREFIX + [
        'rds', 'create-db-cluster',
        '--db-cluster-identifier=%s' % cluster_identifier,
        '--engine=%s' % self.spec.engine,
        '--engine-version=%s' % self.spec.engine_version,
        '--master-username=%s' % self.spec.database_username,
        '--master-user-password=%s' % self.spec.database_password,
        '--region=%s' % self.region,
        '--db-subnet-group-name=%s' % self.db_subnet_group_name,
        '--vpc-security-group-ids=%s' % self.security_group_id,
        '--availability-zones=%s' % self.spec.zones[0],
        '--tags'] + util.MakeFormattedDefaultTags()
    self.cluster_id = cluster_identifier
    vm_util.IssueCommand(cmd)
    for zone in self.zones:
      # The first instance is assumed to be writer -
      # and so use the instance_id for that id.
      if zone == self.zones[0]:
        instance_identifier = self.instance_id
      else:
        instance_identifier = self.instance_id + '-' + zone
      self.all_instance_ids.append(instance_identifier)
      cmd = util.AWS_PREFIX + [
          'rds', 'create-db-instance',
          '--db-instance-identifier=%s' % instance_identifier,
          '--db-cluster-identifier=%s' % cluster_identifier,
          '--engine=%s' % self.spec.engine,
          '--engine-version=%s' % self.spec.engine_version,
          '--no-auto-minor-version-upgrade',
          '--db-instance-class=%s' % self.spec.vm_spec.machine_type,
          '--region=%s' % self.region,
          '--availability-zone=%s' % zone,
          '--tags'] + util.MakeFormattedDefaultTags()
      vm_util.IssueCommand(cmd)
  else:
    raise Exception('Unknown how to create AWS data base engine {0}'.format(
        self.spec.engine))
def testAddTags(self):
  """Verifies AddTagsToFiler issues create-tags with the default tag set."""
  self._SetResponse()
  self.aws.AddTagsToFiler(_FILE_ID)
  expected_tags = util.MakeFormattedDefaultTags()
  self.assertCalled('create-tags', '--file-system-id', _FILE_ID, '--tags',
                    *expected_tags)
def AddTagsToFiler(self, filer_id):
  """Applies the default PKB tags to the given EFS filer."""
  cmd = ['create-tags', '--file-system-id', filer_id, '--tags']
  cmd += util.MakeFormattedDefaultTags()
  self._IssueAwsCommand(cmd, False)