def test_cli_connect_timeout_for_blocking(self):
    parsed_args = FakeParsedArgs(connect_timeout='0')
    session = get_session()
    globalargs.resolve_cli_connect_timeout(parsed_args, session)
    self.assertEqual(parsed_args.connect_timeout, None)
    self.assertEqual(
        session.get_default_client_config().connect_timeout, None)
def test_cli_read_timeout(self):
    parsed_args = FakeParsedArgs(read_timeout='60')
    session = get_session()
    globalargs.resolve_cli_read_timeout(parsed_args, session)
    self.assertEqual(parsed_args.read_timeout, 60)
    self.assertEqual(
        session.get_default_client_config().read_timeout, 60)
def _get_registry_info(self):
    ecr_client = get_session().create_client(
        'ecr', self.region, config=Config(signature_version='v4'))
    result = ecr_client.get_authorization_token()
    auth = result['authorizationData'][0]
    auth_token = b64decode(auth['authorizationToken']).decode()
    username, password = auth_token.split(':')
    return username, password, 'none', auth['proxyEndpoint']
def fetch_tracks(name):
    """Download the movie's tracks (flac, srt) from S3.

    :param name: movie name, used as the S3 key prefix
    :return: temporary file objects (flac, srt)
    """
    bucket_name = config.get('s3.buckets.tracks')
    sess = session.get_session()
    sess.set_credentials(config.get('s3.access_key'),
                         config.get('s3.access_secret'))
    s3 = sess.create_client('s3')

    key = op.join(name, name + '.flac')
    response = s3.get_object(
        Bucket=bucket_name,
        Key=key,
    )
    stream = response['Body']
    temp_flac = write_stream_to_temp_file(stream)

    key = op.join(name, name + '.srt')
    response = s3.get_object(
        Bucket=bucket_name,
        Key=key,
    )
    stream = response['Body']
    temp_srt = write_stream_to_temp_file(stream)

    return temp_flac, temp_srt
def test_service_names_are_valid():
    session = get_session()
    loader = session.get_component('data_loader')
    service_names = loader.list_available_services('service-2')
    for service_name in service_names:
        yield _assert_name_length, service_name
        yield _assert_name_pattern, service_name
def session(self):
    """Returns a valid botocore session, creating it lazily on first use."""
    if self._session is None:
        self._session = get_session()
    return self._session
def _init_sqs_client(self, parsed_url, **kwargs):
    params = parse_qs(parsed_url.query)
    session = get_session()

    queue_url = params.get('queue_url')
    if queue_url:
        queue_url = queue_url[-1]
    else:
        queue_url = None

    region = params.get('region')
    if region:
        region = region[-1]
    else:
        region = None

    return AsyncSQSClient(
        session,
        self.logger,
        queue_name=parsed_url.netloc,
        queue_url=queue_url,
        region=region,
        http_client=AsyncHTTPClient(
            self.loop,
            force_instance=True,
            defaults=dict(
                request_timeout=AsyncSQSClient.long_poll_timeout + 5,
            )
        )
    )
def upload_file_to_s3():
    # redirect_url = "http://songty.ucmpcs.org:8888/send_annotation_request"
    redirect_url = str(request.url) + "/send_annotation_request"
    print(redirect_url)

    expiration = datetime.timedelta(days=1) + datetime.datetime.today()

    # Define the S3 POST policy document.
    policy = {
        "expiration": expiration.strftime('%Y-%m-%dT%H:%M:%S.000Z'),
        "conditions": [
            {"bucket": "gas-inputs"},
            {"acl": "private"},
            ["starts-with", "$key", "songty/"],
            ["starts-with", "$success_action_redirect", redirect_url],
        ]
    }

    # https://docs.python.org/2/library/base64.html
    policy_code = base64.b64encode(str(policy)).encode('utf8')
    s3_key = str(uuid.uuid4())

    session = botosession.get_session()
    credentials = session.get_credentials()
    access_key = credentials.access_key
    secret_key = credentials.secret_key

    # Sign the encoded policy with the secret key.
    # https://docs.python.org/2/library/hmac.html
    my_hmac = hmac.new(secret_key.encode(), policy_code, hashlib.sha1)
    digest = my_hmac.digest()
    signature = base64.b64encode(digest)

    tpl = template("upload.tpl",
                   bucket_name="gas-inputs",
                   policy=policy_code,
                   aws_key=access_key,
                   signature=signature,
                   redirect=redirect_url,
                   s3_key=s3_key)
    return tpl
def setUp(self):
    super(TestServiceDocumenter, self).setUp()
    self.add_shape_to_params('Biz', 'String')
    self.setup_client()
    with mock.patch('botocore.session.create_loader',
                    return_value=self.loader):
        session = get_session()
        self.service_documenter = ServiceDocumenter(
            'myservice', session)
def setUp(self):
    self.session = session.get_session()
    self.region = self.test_args['region']
    self.client = self.session.create_client(
        'swf', self.region)
    self.domain = self.test_args['domain']
    self.task_list = self.test_args['tasklist']
    self.workflow_execution = None
    self.workflow_executions = []
    self.serializer = JSONDataConverter()
def setUp(self):
    self.session = get_session()
    self.files = FileCreator()
    # Create our own loader for the unit test and not rely on the
    # customer's actual ~/.aws/models nor the builtin botocore data
    # directory.
    self.customer_data_root = os.path.join(self.files.rootdir, 'customer')
    os.mkdir(self.customer_data_root)
    self.builtin_data_root = os.path.join(self.files.rootdir, 'builtin')
    os.mkdir(self.builtin_data_root)
    self.data_loader = Loader(
        [self.customer_data_root, self.builtin_data_root],
        include_default_search_paths=False
    )
    self.data_loader.CUSTOMER_DATA_PATH = self.customer_data_root
    self.session.register_component('data_loader', self.data_loader)
    # Since we are using a custom data loader, we need to remove
    # retries since these try to get loaded when the service model
    # is loaded.
    self.session.unregister(
        'service-data-loaded', register_retries_for_service)

    # Add some models into the builtin model directory.
    # We are going to add two models: one with a matching service name
    # and endpoint, and another without.
    self.matching_service = 'matching'
    self.non_matching_service = 'nonmatching'
    self.non_matching_prefix = 'nonmatching-prefix'
    self.default_api_version = '2015-10-01'

    matching_service_path = os.path.join(
        self.builtin_data_root, self.matching_service,
        self.default_api_version, 'service-2.json'
    )
    os.makedirs(os.path.dirname(matching_service_path))
    non_matching_service_path = os.path.join(
        self.builtin_data_root, self.non_matching_service,
        self.default_api_version, 'service-2.json'
    )
    os.makedirs(os.path.dirname(non_matching_service_path))

    # Write the models to the builtin directory.
    with open(matching_service_path, 'w') as f:
        json.dump(self._create_service_definition(
            self.matching_service, self.default_api_version), f)
    with open(non_matching_service_path, 'w') as f:
        json.dump(self._create_service_definition(
            self.non_matching_prefix, self.default_api_version), f)
def test_service_name_matches_endpoint_prefix():
    # Generates tests for each service to verify that the endpoint prefix
    # matches the service name unless there is an explicit exception.
    session = get_session()
    loader = session.get_component("data_loader")

    # Load the list of available services. The names here represent what
    # will become the client names.
    services = loader.list_available_services("service-2")

    for service in services:
        yield _assert_service_name_matches_endpoint_prefix, loader, service
def assumed_session(role_arn, session_name, session=None, region=None,
                    external_id=None):
    """STS role-assume a boto3.Session with automatic credential renewal.

    Args:
        role_arn: IAM role ARN to assume.
        session_name: client session identifier.
        session: an optional extant session; note the session is captured
            in a function closure for renewing the STS assumed role.

    :return: a boto3 session using the STS assumed-role credentials

    Notes: We have to poke at botocore internals a few times.
    """
    if session is None:
        session = Session()

    retry = get_retry(('Throttling',))

    def refresh():
        parameters = {"RoleArn": role_arn, "RoleSessionName": session_name}
        if external_id is not None:
            parameters['ExternalId'] = external_id
        credentials = retry(
            session.client('sts').assume_role, **parameters)['Credentials']
        return dict(
            access_key=credentials['AccessKeyId'],
            secret_key=credentials['SecretAccessKey'],
            token=credentials['SessionToken'],
            # Silly that we basically stringify so it can be parsed again
            expiry_time=credentials['Expiration'].isoformat())

    session_credentials = RefreshableCredentials.create_from_metadata(
        metadata=refresh(),
        refresh_using=refresh,
        method='sts-assume-role')

    # So dirty it hurts; there is no clean way to set this outside of the
    # internals poke. There's some work upstream on making this nicer,
    # but it's pretty baroque as well with upstream support.
    # https://github.com/boto/boto3/issues/443
    # https://github.com/boto/botocore/issues/761
    s = get_session()
    s._credentials = session_credentials
    if region is None:
        region = s.get_config_variable('region') or 'us-east-1'
    s.set_config_variable('region', region)
    return Session(botocore_session=s)
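def _example_assumed_session_usage():
    # Hedged usage sketch for assumed_session() above; the role ARN, session
    # name, and region are illustrative placeholders, not values from the
    # original source.
    assumed = assumed_session(
        role_arn='arn:aws:iam::123456789012:role/example-role',
        session_name='example-session',
        region='us-east-1')
    # The returned boto3.Session refreshes its STS credentials automatically
    # whenever they expire, so long-running clients keep working.
    return assumed.client('s3')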
def setUp(self):
    self.create_client_patch = patch(
        'botocore.session.Session.create_client'
    )
    self.mock_create_client = self.create_client_patch.start()
    self.session = get_session()

    self.client = Mock()
    self.client.describe_cluster.return_value = describe_cluster_response()
    self.mock_create_client.return_value = self.client

    self.command = UpdateKubeconfigCommand(self.session)
def test_service_name_matches_endpoint_prefix():
    # Generates tests for each service to verify that the computed service
    # name based on the service id matches the service name used to
    # create a client (i.e. the directory name in botocore/data),
    # unless there is an explicit exception.
    session = get_session()
    loader = session.get_component('data_loader')

    # Load the list of available services. The names here represent what
    # will become the client names.
    services = loader.list_available_services('service-2')

    for service in services:
        yield _assert_service_name_matches_endpoint_prefix, session, service
def setUp(self):
    self.session = get_session()
    loader = self.session.get_component('data_loader')
    loader.data_path = os.path.join(ROOT, 'data')
    self.docs = docs_for(
        'todo', session=self.session,
        resource_filename=os.path.join(ROOT, 'data', 'resources',
                                       'todo-2015-04-01.resources.json'))
    self.resource_docs = ''
    if 'Service Resource' in self.docs:
        self.resource_docs = self.docs.split('Service Resource')[-1]
def __init__(self, region, access, secret):
    self.region = region
    self.connection_data = {
        'config_file': (None, 'AWS_CONFIG_FILE',
                        "/home/vishnu/vobs/jcsapitests/functional_tests.conf"),
        'region': ('region', 'BOTO_DEFAULT_REGION', self.region),
        'ca_bundle': ('ca_bundle', 'AWS_CA_BUNDLE',
                      "/home/vishnu/vobs/jcsapitests/ca-certificates.crt")
    }
    if not access or not secret:
        raise Exception('Auth params were not provided')
    if BotocoreClientBase.session is None:
        BotocoreClientBase.session = session.get_session(self.connection_data)
        BotocoreClientBase.session.set_credentials(access, secret)
        BotocoreClientBase.session.set_debug_logger()
def setUp(self):
    self.create_client_patch = mock.patch(
        "botocore.session.Session.create_client")
    self.mock_create_client = self.create_client_patch.start()
    self.session = get_session()

    self.gamelift_client = mock.Mock()
    self.s3_client = mock.Mock()
    self.mock_create_client.side_effect = [
        self.gamelift_client, self.s3_client]

    self.file_creator = FileCreator()

    self.upload_file_patch = mock.patch(
        "awscli.customizations.gamelift.uploadbuild.S3Transfer.upload_file")
    self.upload_file_mock = self.upload_file_patch.start()

    self.cmd = UploadBuildCommand(self.session)
    self._setup_input_output()
def setUp(self):
    self.create_client_patch = mock.patch(
        "botocore.session.Session.create_client")
    self.mock_create_client = self.create_client_patch.start()
    self.session = get_session()

    self.client = mock.Mock()
    self.mock_create_client.return_value = self.client

    self.cmd = GetGameSessionLogCommand(self.session)

    self.contents = b"mycontents"
    self.file_creator = FileCreator()

    self.urlopen_patch = mock.patch(
        "awscli.customizations.gamelift.getlog.urlopen")
    self.urlopen_mock = self.urlopen_patch.start()
    self.urlopen_mock.return_value = six.BytesIO(self.contents)
def test_endpoint_matches_service():
    backwards_renames = dict((v, k) for k, v in SERVICE_RENAMES.items())
    session = get_session()
    loader = session.get_component('data_loader')
    expected_services = set(loader.list_available_services('service-2'))

    pdir = os.path.dirname
    endpoints_path = os.path.join(pdir(pdir(pdir(__file__))),
                                  'botocore', 'data', 'endpoints.json')
    with open(endpoints_path, 'r') as f:
        data = json.loads(f.read())

    for partition in data['partitions']:
        for service in partition['services'].keys():
            service = backwards_renames.get(service, service)
            if service not in BLACKLIST:
                yield _assert_endpoint_is_service, service, expected_services
def test_all_uses_of_h2_are_known():
    session = get_session()
    loader = session.get_component('data_loader')
    services = loader.list_available_services('service-2')

    for service in services:
        service_model = session.get_service_model(service)
        h2_config = service_model.metadata.get(
            'protocolSettings', {}).get('h2')
        if h2_config == 'required':
            yield _assert_h2_service_is_known, service
        elif h2_config == 'eventstream':
            for operation in service_model.operation_names:
                operation_model = service_model.operation_model(operation)
                if operation_model.has_event_stream_output:
                    yield _assert_h2_operation_is_known, service, operation
def test_generate_docs(self):
    session = get_session()
    # Have the rst files get written to the temporary directory.
    generate_docs(self.docs_root, session)

    reference_services_path = os.path.join(
        self.docs_root, 'reference', 'services')
    reference_service_path = os.path.join(
        reference_services_path, 'myservice.rst')
    self.assertTrue(os.path.exists(reference_service_path))

    # Make sure the rst file has some of the expected contents.
    with open(reference_service_path, 'r') as f:
        contents = f.read()
        self.assertIn('AWS MyService', contents)
        self.assertIn('Client', contents)
        self.assertIn('Paginators', contents)
        self.assertIn('Waiters', contents)
def test_endpoint_matches_service():
    # This verifies client names match up with data from the endpoints.json
    # file. We want to verify that every entry in the endpoints.json
    # file corresponds to a client we can construct via
    # session.create_client(...).
    # So first we get a list of all the service names in the endpoints
    # file.
    session = get_session()
    loader = session.get_component('data_loader')
    endpoints = loader.load_data('endpoints')
    # A service can be in multiple partitions, so we're using
    # a set here to remove dupes.
    services_in_endpoints_file = set([])
    for partition in endpoints['partitions']:
        for service in partition['services']:
            # There are some services we don't support in the SDK,
            # so we don't need to add them to the list of services
            # we need to check.
            if service not in NOT_SUPPORTED_IN_SDK:
                services_in_endpoints_file.add(service)

    # Now we need to cross check them against services we know about.
    # The entries in endpoints.json are keyed off of the endpoint
    # prefix. We don't directly have that data, so we have to load
    # every service model and look up its endpoint prefix in its
    # ``metadata`` section.
    known_services = loader.list_available_services('service-2')
    known_endpoint_prefixes = [
        session.get_service_model(service_name).endpoint_prefix
        for service_name in known_services
    ]

    # Now we go through every known endpoint prefix in the endpoints.json
    # file and ensure it maps to an endpoint prefix we've seen
    # in a service model.
    for endpoint_prefix in services_in_endpoints_file:
        # Check for an override where we know that an entry
        # in the endpoints.json actually maps to a different endpoint
        # prefix.
        endpoint_prefix = ENDPOINT_PREFIX_OVERRIDE.get(endpoint_prefix,
                                                       endpoint_prefix)
        yield (_assert_known_endpoint_prefix,
               endpoint_prefix,
               known_endpoint_prefixes)
def upload_tracks(name, flac_fpath, srt_fpath):
    bucket_name = config.get('s3.buckets.tracks')
    sess = session.get_session()
    sess.set_credentials(config.get('s3.access_key'),
                         config.get('s3.access_secret'))
    s3 = sess.create_client('s3')

    # Open the files in binary mode so the bytes are uploaded unchanged.
    key = op.join(name, name + '.flac')
    with open(flac_fpath, 'rb') as f:
        s3.put_object(
            Body=f,
            Bucket=bucket_name,
            Key=key,
        )

    key = op.join(name, name + '.srt')
    with open(srt_fpath, 'rb') as f:
        s3.put_object(
            Body=f,
            Bucket=bucket_name,
            Key=key,
        )
def load():
    parser = argparse.ArgumentParser(
        prog="dynamodb-loader",
        description="""DynamoDB Loader: restore tables dumped by
                       dynamodb-dumper with ease."""
    )
    parser.add_argument(
        '-r', '--region',
        type=str,
        default=get_session().get_config_variable('region'),
        help="The region to connect to."
    )
    parser.add_argument(
        '-o', '--host',
        type=str,
        help="The host url to connect to (for use with DynamoDB Local)."
    )
    parser.add_argument(
        '-l', '--load-files',
        type=str,
        nargs='*',
        required=True,
        help="The list of filenames of dump files created by ddb-dumper "
             "that you wish to load."
    )
    parser.add_argument(
        '-p', '--parallelism',
        type=int,
        default=multiprocessing.cpu_count(),
        help="The number of processes to use (defaults to the number of "
             "processors you have)."
    )
    parser.add_argument(
        'table_name',
        type=str,
        help="The name of the table to load into."
    )
    kwargs = vars(parser.parse_args())
    loader.load_table(**kwargs)
def __init__(self):
    self.seen = set()
    self.session = get_session()
    try:
        if CONFIG.get("output_s3", "access_key_id") and \
                CONFIG.get("output_s3", "secret_access_key"):
            self.session.set_credentials(
                CONFIG.get("output_s3", "access_key_id"),
                CONFIG.get("output_s3", "secret_access_key"),
            )
    except NoOptionError:
        log.msg("No AWS credentials found in config - "
                "using botocore global settings.")
    self.client = self.session.create_client(
        's3',
        region_name=CONFIG.get("output_s3", "region"),
        endpoint_url=CONFIG.get("output_s3", "endpoint") or None,
        verify=False if CONFIG.get("output_s3", "verify") == "no" else True,
    )
    self.bucket = CONFIG.get("output_s3", "bucket")
    cowrie.core.output.Output.__init__(self)
def upload_lines(name, lines, wav_data, wav_params):
    """Take the movie name, the lines of the movie, the list of wav chunks,
    and the params of the wav, and upload them all to S3.

    :param name: movie name, used as the S3 key prefix
    :param lines: lines of the movie
    :param wav_data: list of wav chunks (raw frame data)
    :param wav_params: wave parameters to apply to each chunk
    """
    bucket_name = config.get('s3.buckets.data')
    sess = session.get_session()
    sess.set_credentials(config.get('s3.access_key'),
                         config.get('s3.access_secret'))
    s3 = sess.create_client('s3')

    line_data = '\n'.join(lines)
    key = op.join(name, name + '.txt')
    s3.put_object(
        Body=line_data,
        Bucket=bucket_name,
        Key=key,
    )

    for i, data in enumerate(wav_data):
        key = op.join(name, '%s.%05d.wav' % (name, i))
        with NamedTemporaryFile('w+b') as f:
            outwav = wave.open(f.name, 'wb')
            outwav.setparams(wav_params)
            outwav.writeframes(data)
            # Close the wave writer so the header and frames are flushed
            # to disk before the file is re-read for upload.
            outwav.close()
            f.flush()
            f.seek(0)
            s3.put_object(
                Body=f,
                Bucket=bucket_name,
                Key=key,
            )
def s3_get_object_request_maker(region_name=None, credentials=None, ssl=True):
    session = get_session()

    if region_name is None:
        region_name = auto_find_region(session)

    if credentials is None:
        managed_credentials = session.get_credentials()
        credentials = managed_credentials.get_frozen_credentials()
    else:
        managed_credentials = None

    protocol = "https" if ssl else "http"
    auth = S3SigV4Auth(credentials, "s3", region_name)

    def maybe_refresh_credentials():
        nonlocal credentials
        nonlocal auth

        if not managed_credentials:
            return

        creds = managed_credentials.get_frozen_credentials()
        if creds is credentials:
            return

        log.debug("Refreshed credentials (s3_get_object_request_maker)")

        credentials = creds
        auth = S3SigV4Auth(credentials, "s3", region_name)

    def build_request(bucket=None,
                      key=None,
                      url=None,
                      range=None):  # pylint: disable=redefined-builtin
        if key is None and url is None:
            if bucket is None:
                raise ValueError("Have to supply bucket,key or url")
            # assume bucket is url
            url = bucket

        if url is not None:
            bucket, key = s3_url_parse(url)

        if isinstance(range, (tuple, list)):
            range = "bytes={}-{}".format(range[0], range[1] - 1)

        maybe_refresh_credentials()

        headers = {}
        if range is not None:
            headers["Range"] = range

        req = AWSRequest(
            method="GET",
            url="{}://s3.{}.amazonaws.com/{}/{}".format(
                protocol, region_name, bucket, key),
            headers=headers,
        )

        auth.add_auth(req)

        return Request(req.url, headers=dict(**req.headers), method="GET")

    return build_request
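def _example_build_s3_get_request():
    # Hedged usage sketch for s3_get_object_request_maker() above; the
    # region, bucket, key, and byte range are illustrative placeholders,
    # not values from the original source.
    make_request = s3_get_object_request_maker(region_name='us-east-1')
    # Build a signed GET request for the first kilobyte of an object;
    # the (start, stop) tuple is rendered as "bytes=0-1023" by build_request.
    return make_request(bucket='example-bucket',
                        key='example/key.bin',
                        range=(0, 1024))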
def _service_names():
    session = get_session()
    loader = session.get_component('data_loader')
    return loader.list_available_services('service-2')
def test_cli_read_timeout(self):
    parsed_args = FakeParsedArgs(read_timeout='60')
    session = get_session()
    globalargs.resolve_cli_read_timeout(parsed_args, session)
    self.assertEqual(parsed_args.read_timeout, 60)
    self.assertEqual(
        session.get_default_client_config().read_timeout, 60)
def client(self):
    try:
        from botocore.session import get_session
    except ImportError:
        raise DistutilsModuleError('botocore is required')

    return get_session().create_client('s3')
def get_boto_session() -> boto.Session:
    session = boto.get_session()
    _patch_boto(session)
    return session
def setUp(self) -> None:
    super().setUp()
    if not self.client.iam_enabled:
        self.client = self.client_builder.with_iam(get_session()).build()
def test_vm_mgr_setup_botocore(self):
    """ Python 3/botocore version of the test code. """

    # DEBUG
    print("\nBOTOCORE-BASED CODE")
    # END
    sss = session.get_session()
    for r_ndx, r_name in enumerate(REGIONS):
        # DEBUG
        print(("\n%s" % r_name.upper()))
        # END
        ec2 = sss.create_client('ec2', region_name=r_name)

        inst = ec2.describe_instances()['Reservations'][0]['Instances']
        icount = len(inst)
        print("There is/are %d instances" % icount)
        for ndx in range(icount):
            instance = inst[ndx]
            # there are 28 keys
            print('  instance ID:   %s' % instance['InstanceId'])
            print('  instance type: %s' % instance['InstanceType'])
            print('  image ID:      %s' % instance['ImageId'])
            print('  key name:      %s' % instance['KeyName'])
            print('  public IP:     %s' % instance['PublicIpAddress'])
            print('  private IP:    %s' % instance['PrivateIpAddress'])
            print('  subnet ID:     %s' % instance['SubnetId'])
            print('  root device:   %s' % instance['RootDeviceName'])

            ifaces = instance['NetworkInterfaces']
            print("  There is/are %d interfaces" % len(ifaces))
            for iface in ifaces:
                # there are 14 keys
                print("    ID:           %s" % iface['NetworkInterfaceId'])
                print("    Status:       %s" % iface['Status'])
                print("    SubnetId:     %s" % iface['SubnetId'])
                print("    VpcId:        %s" % iface['VpcId'])
                print("    priv ip addr: %s" % iface['PrivateIpAddress'])
                print("    association:")
                assoc = iface['Association']
                print("      IpOwnerId: %s" % assoc['IpOwnerId'])
                print("      PublicIp:  %s" % assoc['PublicIp'])
                print("    groups:")
                groups = iface['Groups']
                for which, group in enumerate(groups):
                    print("      %d: id %s, name %s" % (
                        which, group['GroupId'], group['GroupName']))

            blkdev = instance['BlockDeviceMappings']
            print("  There is/are %d block devices" % len(blkdev))
            for ndx2, dev in enumerate(blkdev):
                name = dev['DeviceName']
                ebs = dev['Ebs']
                status = ebs['Status']
                vol_id = ebs['VolumeId']
                # del_on_term = ebs['DeleteOnTermination']  # boolean
                print("    %d name %-9s status %s vol_id %s" % (
                    ndx2, name, status, vol_id))

        volumes = ec2.describe_volumes()['Volumes']
        vcount = len(volumes)
        print("  There are %d volumes" % vcount)
        for ndx in range(vcount):
            volume = volumes[ndx]
            # there are 9 keys
            print('  volume ID:   %s' % volume['VolumeId'])
            print('  volume type: %s' % volume['VolumeType'])
            print('  size:        %s' % volume['Size'])
            print('  state:       %s' % volume['State'])
            print('  zone:        %s' % volume['AvailabilityZone'])
            print('  attachments:')
            att = volume['Attachments'][0]
            print('    volume ID:   %s' % att['VolumeId'])
            print('    device:      %s' % att['Device'])
            print('    state:       %s' % att['State'])
            print('    instance ID: %s' % att['InstanceId'])
            print('    del on term  %s' % att['DeleteOnTermination'])

        continue    # XXX
        # END
        # DEBUG
        # print("region_info: %s" % region_info)
        # print("  cnx:      %s" % region_info.connection)
        # print("  endpoint: %s" % region_info.endpoint)
        # END

        ##################################
        # XXX THIS CODE IS NOW UNREACHABLE
        ##################################

        vpc = ec2.Vpc(VPC_IDS[r_ndx])       # gets a ResourceWarning
        self.assertTrue(vpc is not None)
        self.assertEqual(vpc.cidr_block, VPC_CIDRS[r_ndx])

        # alternative approach ----------------------------------

        # CLIENT --------------------------------------
        client = boto3.client('ec2', region_name=r_name)

        # GATEWAYS ------------------------------------
        desc = client.describe_internet_gateways()
        igws = desc['InternetGateways']
        self.assertEqual(len(igws), 1)
        igw = igws[0]
        # DEBUG
        print(("IGW: %s" % igws[0]['InternetGatewayId']))
        sys.stdout.flush()
        # END
        my_id = igw['InternetGatewayId']
        self.assertEqual(my_id, IGATEWAY_IDS[r_ndx])
def guided_prompts(self, parameter_override_keys):
    default_stack_name = self.stack_name or "sam-app"
    default_region = (self.region
                      or get_session().get_config_variable("region")
                      or "us-east-1")
    default_capabilities = self.capabilities[0] or ("CAPABILITY_IAM",)
    default_config_env = self.config_env or DEFAULT_ENV
    default_config_file = self.config_file or DEFAULT_CONFIG_FILE_NAME
    input_capabilities = None
    config_env = None
    config_file = None

    click.echo(
        self.color.yellow(
            "\n\tSetting default arguments for 'sam deploy'"
            "\n\t========================================="
        )
    )

    stack_name = prompt(f"\t{self.start_bold}Stack Name{self.end_bold}",
                        default=default_stack_name,
                        type=click.STRING)
    region = prompt(f"\t{self.start_bold}AWS Region{self.end_bold}",
                    default=default_region,
                    type=click.STRING)
    input_parameter_overrides = self.prompt_parameters(
        parameter_override_keys, self.parameter_overrides_from_cmdline,
        self.start_bold, self.end_bold)
    stacks = SamLocalStackProvider.get_stacks(
        self.template_file,
        parameter_overrides=sanitize_parameter_overrides(
            input_parameter_overrides))
    image_repositories = self.prompt_image_repository(stacks)

    click.secho(
        "\t#Shows you resources changes to be deployed and require a 'Y' to initiate deploy"
    )
    confirm_changeset = confirm(
        f"\t{self.start_bold}Confirm changes before deploy{self.end_bold}",
        default=self.confirm_changeset)
    click.secho(
        "\t#SAM needs permission to be able to create roles to connect to the resources in your template"
    )
    capabilities_confirm = confirm(
        f"\t{self.start_bold}Allow SAM CLI IAM role creation{self.end_bold}",
        default=True)

    if not capabilities_confirm:
        input_capabilities = prompt(
            f"\t{self.start_bold}Capabilities{self.end_bold}",
            default=list(default_capabilities),
            type=FuncParamType(func=_space_separated_list_func_type),
        )

    self.prompt_authorization(stacks)
    self.prompt_code_signing_settings(stacks)

    save_to_config = confirm(
        f"\t{self.start_bold}Save arguments to configuration file{self.end_bold}",
        default=True)
    if save_to_config:
        config_file = prompt(
            f"\t{self.start_bold}SAM configuration file{self.end_bold}",
            default=default_config_file,
            type=click.STRING,
        )
        config_env = prompt(
            f"\t{self.start_bold}SAM configuration environment{self.end_bold}",
            default=default_config_env,
            type=click.STRING,
        )

    s3_bucket = self.s3_bucket
    if s3_bucket:
        click.echo(f"\n\t\tUsing defined S3 bucket: {s3_bucket}")
    else:
        s3_bucket = manage_stack(profile=self.profile, region=region)
        click.echo(f"\n\t\tManaged S3 bucket: {s3_bucket}")
        click.echo(
            "\t\tA different default S3 bucket can be set in samconfig.toml"
        )

    self.guided_stack_name = stack_name
    self.guided_s3_bucket = s3_bucket
    self.guided_image_repositories = image_repositories
    self.guided_s3_prefix = stack_name
    self.guided_region = region
    self.guided_profile = self.profile
    self._capabilities = (input_capabilities if input_capabilities
                          else default_capabilities)
    self._parameter_overrides = (input_parameter_overrides
                                 if input_parameter_overrides
                                 else self.parameter_overrides_from_cmdline)
    self.save_to_config = save_to_config
    self.config_env = config_env if config_env else default_config_env
    self.config_file = config_file if config_file else default_config_file
    self.confirm_changeset = confirm_changeset
def setUp(self) -> None:
    super().setUp()
    self.client = self.client_builder.with_iam(get_session()).build()
def setUp(self):
    self._session = get_session()
# botocore documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 2 07:26:23 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os

from botocore.session import get_session
from botocore.docs import generate_docs

generate_docs(os.path.dirname(os.path.abspath(__file__)), get_session())

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
def _available_services():
    # Load the list of available services. The names here represent what
    # will become the client names.
    session = get_session()
    loader = session.get_component('data_loader')
    return loader.list_available_services('service-2')
def get_endpoint(service_name):
    this_session = session.get_session()
    client = this_session.create_client(service_name, region_name=REGION)
    return client.meta.endpoint_url