def run(self, asg_name):
    session = botocore.session.Session(profile=self.profile)
    asg = session.create_client('autoscaling', self.region)
    ec2 = session.create_client('ec2', self.region)

    groups = asg.describe_auto_scaling_groups()
    matching_groups = [g for g in groups['AutoScalingGroups']
                       for t in g['Tags']
                       if t['Key'] == 'Name' and t['Value'] == asg_name]

    groups_to_instances = {group['AutoScalingGroupName']:
                           [instance['InstanceId'] for instance in group['Instances']]
                           for group in matching_groups}
    instances_to_groups = {instance['InstanceId']: group['AutoScalingGroupName']
                           for group in matching_groups
                           for instance in group['Instances']}

    # We only need to check for ASGs in an ELB if we have more than 1.
    # If a cluster is running with an ASG out of the ELB, then there are larger problems.
    active_groups = defaultdict(dict)
    if len(matching_groups) > 1:
        elb = session.create_client('elb', self.region)
        for group in matching_groups:
            for load_balancer_name in group['LoadBalancerNames']:
                instances = elb.describe_instance_health(LoadBalancerName=load_balancer_name)
                active_instances = [instance['InstanceId']
                                    for instance in instances['InstanceStates']
                                    if instance['State'] == 'InService']
                for instance_id in active_instances:
                    active_groups[instances_to_groups[instance_id]] = 1

        if len(active_groups) > 1:
            # When we have more than a single active ASG, we need to bail out
            # as we don't know which ASG to pick an instance from.
            return
    else:
        active_groups = {g['AutoScalingGroupName']: 1 for g in matching_groups}

    for group in active_groups.keys():
        for group_instance in groups_to_instances[group]:
            instance = ec2.describe_instances(
                InstanceIds=[group_instance])['Reservations'][0]['Instances'][0]
            if 'PrivateIpAddress' in instance:
                print("{},".format(instance['PrivateIpAddress']))
                return  # We only want a single IP

def run(self, asg_name):
    session = botocore.session.Session(profile=self.profile)
    asg = session.create_client('autoscaling', self.region)
    ec2 = session.create_client('ec2', self.region)

    asg_paginator = asg.get_paginator('describe_auto_scaling_groups')
    asg_iterator = asg_paginator.paginate()
    matching_groups = []
    for groups in asg_iterator:
        for g in groups['AutoScalingGroups']:
            for t in g['Tags']:
                if t['Key'] == 'Name' and t['Value'] == asg_name:
                    matching_groups.append(g)

    groups_to_instances = {group['AutoScalingGroupName']:
                           [instance['InstanceId'] for instance in group['Instances']]
                           for group in matching_groups}
    instances_to_groups = {instance['InstanceId']: group['AutoScalingGroupName']
                           for group in matching_groups
                           for instance in group['Instances']}

    # We only need to check for ASGs in an ELB if we have more than 1.
    # If a cluster is running with an ASG out of the ELB, then there are larger problems.
    active_groups = defaultdict(dict)
    if len(matching_groups) > 1:
        elb = session.create_client('elb', self.region)
        for group in matching_groups:
            for load_balancer_name in group['LoadBalancerNames']:
                instances = elb.describe_instance_health(LoadBalancerName=load_balancer_name)
                active_instances = [instance['InstanceId']
                                    for instance in instances['InstanceStates']
                                    if instance['State'] == 'InService']
                for instance_id in active_instances:
                    active_groups[instances_to_groups[instance_id]] = 1

        # If we found no active groups, because there are no ELBs (edxapp workers normally)
        elbs = list(chain.from_iterable([group['LoadBalancerNames'] for group in matching_groups]))
        if not (active_groups or elbs):
            # This implies we're in a worker cluster since we have no ELB
            # and we didn't find an active group above
            for group in matching_groups:
                # Asgard marks a deleting ASG with SuspendedProcesses.
                # If the ASG doesn't have those, then it's "Active" and a worker
                # since there was no ELB above.
                if not {'Launch', 'AddToLoadBalancer'} <= {i['ProcessName'] for i in group['SuspendedProcesses']}:
                    active_groups[group['AutoScalingGroupName']] = 1

        if len(active_groups) > 1:
            # When we have more than a single active ASG, we need to bail out
            # as we don't know which ASG to pick an instance from.
            print("Multiple active ASGs - unable to choose an instance", file=sys.stderr)
            return
    else:
        active_groups = {g['AutoScalingGroupName']: 1 for g in matching_groups}

    for group in active_groups.keys():
        for group_instance in groups_to_instances[group]:
            instance = random.choice(
                ec2.describe_instances(InstanceIds=[group_instance])['Reservations'][0]['Instances'])
            if 'PrivateIpAddress' in instance:
                print("{},".format(instance['PrivateIpAddress']))
                return  # We only want a single IP

def create_default_deployer(session, prompter=None):
    # type: (botocore.session.Session, NoPrompt) -> Deployer
    if prompter is None:
        prompter = NoPrompt()
    aws_client = TypedAWSClient(session)
    api_gateway_deploy = APIGatewayDeployer(
        aws_client, session.create_client('apigateway'),
        session.create_client('lambda'))
    packager = LambdaDeploymentPackager()
    osutils = OSUtils()
    lambda_deploy = LambdaDeployer(
        aws_client, packager, prompter, osutils)
    return Deployer(api_gateway_deploy, lambda_deploy)

def __init__(self, uri, access_key=None, secret_key=None):
    # BEGIN Backwards compatibility for initialising without keys (and
    # without using from_crawler)
    no_defaults = access_key is None and secret_key is None
    if no_defaults:
        from scrapy.conf import settings
        if 'AWS_ACCESS_KEY_ID' in settings or 'AWS_SECRET_ACCESS_KEY' in settings:
            import warnings
            from scrapy.exceptions import ScrapyDeprecationWarning
            warnings.warn(
                "Initialising `scrapy.extensions.feedexport.S3FeedStorage` "
                "without AWS keys is deprecated. Please supply credentials or "
                "use the `from_crawler()` constructor.",
                category=ScrapyDeprecationWarning,
                stacklevel=2
            )
            access_key = settings['AWS_ACCESS_KEY_ID']
            secret_key = settings['AWS_SECRET_ACCESS_KEY']
    # END Backwards compatibility
    u = urlparse(uri)
    self.bucketname = u.hostname
    self.access_key = u.username or access_key
    self.secret_key = u.password or secret_key
    self.is_botocore = is_botocore()
    self.keyname = u.path[1:]  # remove first "/"
    if self.is_botocore:
        import botocore.session
        session = botocore.session.get_session()
        self.s3_client = session.create_client(
            's3', aws_access_key_id=self.access_key,
            aws_secret_access_key=self.secret_key)
    else:
        import boto
        self.connect_s3 = boto.connect_s3

def handle_visualize(parsed, session):
    identity_response = session.create_client('sts').get_caller_identity()
    account = identity_response['Account']
    graphfile = get_graph_file(parsed, account, "r")
    graph = graph_from_file(graphfile)
    perform_visualization(parsed, account, session, graph)

def handle_query(parsed, session):
    identity_response = session.create_client('sts').get_caller_identity()
    account = identity_response['Account']
    graphfile = get_graph_file(parsed, account, "r")
    graph = graph_from_file(graphfile)
    perform_query(parsed.query_string, session, graph, parsed.skip_admin)

class epsc_Spider(CrawlSpider):
    # e-preservation science
    # http://www.morana-rtd.com/e-preservationscience/TOC.html
    name = "epsc"
    allowed_domains = ['www.morana-rtd.com']
    # Boto3 session for S3.
    session = botocore.session.get_session()
    client = session.create_client('s3', region_name='eu-central-1')
    # List of volumes
    start_urls = ['http://www.morana-rtd.com/e-preservationscience/TOC.html']
    # Base URL used for navigation requests.
    BASE_URL = 'http://www.morana-rtd.com'

    def parse(self, response):
        raise CloseSpider('Not Implemented yet.')
        # Create article
        article = Article()
        # For every article on a page:
        for a in response.css('tbody tr td p'):
            # Extract metadata.
            article['file_urls'] = a.css('a[href$=".pdf"]::attr(href)').extract()
            yield article
        return article

def lambda_handler(event, context):
    i_id = str(uuid.uuid4())
    default_namespace = 'default'
    account_id = 'AWS-Account-ID'
    ds_id = 'SPICE-ID'
    session = botocore.session.get_session()
    client = session.create_client("quicksight", region_name='us-east-1')
    response = client.create_ingestion(AwsAccountId=account_id,
                                       DataSetId=ds_id,
                                       IngestionId=i_id)
    responseIngestionArn = response['IngestionArn']
    describeResponse = client.describe_ingestion(AwsAccountId=account_id,
                                                 DataSetId=ds_id,
                                                 IngestionId=i_id)
    # Poll until the SPICE ingestion completes; the `else` clause runs once
    # the while loop exits normally.
    while describeResponse['Ingestion']['IngestionStatus'] != 'COMPLETED':
        describeResponse = client.describe_ingestion(AwsAccountId=account_id,
                                                     DataSetId=ds_id,
                                                     IngestionId=i_id)
    else:
        sns = boto3.client('sns')
        responsesns = sns.publish(
            TopicArn='arn:aws:sns:us-east-1:<AwsAccountId>:IngestionDone',
            Message='Ingestion is finished successfully!!')
    return {
        'statusCode': 200,
    }

def setUp(self): """Setup users, projects, and start a test server.""" super(S3APITestCase, self).setUp() tempdir = self.useFixture(fixtures.TempDir()) conf = self.useFixture(config_fixture.Config()) conf.config(buckets_path=tempdir.path, s3_listen='127.0.0.1', s3_listen_port=0) self.server = s3server.get_wsgi_server() # NOTE(ft): this requires eventlet.monkey_patch, which is called in # tests/unit/__init__.py. Remove it out from there if you get these # tests rid of server run self.server.start() self.addCleanup(self.server.stop) s3_url = 'http://' + CONF.s3_listen + ':' + str(self.server.port) region = 'FakeRegion' connection_data = { 'config_file': (None, 'AWS_CONFIG_FILE', None, None), 'region': ('region', 'BOTO_DEFAULT_REGION', region, None), } session = botocore.session.get_session(connection_data) conn = session.create_client( 's3', region_name=region, endpoint_url=s3_url, aws_access_key_id='fake', aws_secret_access_key='fake') self.conn = conn def get_http_connection(*args): """Get a new S3 connection, don't attempt to reuse connections.""" return self.conn.new_http_connection(*args) self.conn.get_http_connection = get_http_connection
def get_s3_bucket_policies(session: botocore.session.Session,
                           client_args_map: Optional[dict] = None) -> List[Policy]:
    """Using a botocore Session object, return a list of Policy objects
    representing the bucket policies of each S3 bucket in this account.
    """
    result = []
    s3args = (client_args_map or {}).get('s3', {})  # guard against the None default
    s3client = session.create_client('s3', **s3args)
    buckets = [x['Name'] for x in s3client.list_buckets()['Buckets']]
    for bucket in buckets:
        bucket_arn = 'arn:aws:s3:::{}'.format(bucket)  # TODO: allow different partition
        try:
            bucket_policy = json.loads(s3client.get_bucket_policy(Bucket=bucket)['Policy'])
            result.append(Policy(bucket_arn, bucket, bucket_policy))
            logger.info('Caching policy for {}'.format(bucket_arn))
        except botocore.exceptions.ClientError as ex:
            if 'NoSuchBucketPolicy' in str(ex):
                logger.info('Bucket {} does not have a bucket policy, '
                            'adding a "stub" policy instead.'.format(bucket))
                result.append(Policy(bucket_arn, bucket,
                                     {"Statement": [], "Version": "2012-10-17"}))
            else:
                logger.info('Unable to retrieve bucket policy for {}. '
                            'You should add this manually. Continuing.'.format(bucket))
                logger.debug('Exception was: {}'.format(ex))
    return result

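# Hedged usage sketch (not part of the original source): driving
# get_s3_bucket_policies above. The profile name and the per-service region
# override are illustrative assumptions.
if __name__ == '__main__':
    import botocore.session
    session = botocore.session.Session(profile='example-profile')
    policies = get_s3_bucket_policies(
        session, client_args_map={'s3': {'region_name': 'us-east-1'}})
    print('Cached {} bucket policies'.format(len(policies)))
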
def launch_and_get_instance_id(launch_args, jobid):
    try:  # capturing stdout from the launch command
        os.environ['AWS_DEFAULT_REGION'] = 'us-east-1'  # necessary? not sure, just put it in there
        session = botocore.session.get_session()
        x = session.create_client('ec2')
    except Exception:
        raise Exception("Failed to create a client for EC2")
    try:
        res = 0
        res = x.run_instances(**launch_args)
    except Exception as e:
        raise Exception(
            "failed to launch instance for job {jobid}: {log}. %s".format(
                jobid=jobid, log=res) % e)
    try:
        instance_id = res['Instances'][0]['InstanceId']
    except Exception:
        raise Exception(
            "failed to retrieve instance ID for job {jobid}".format(jobid=jobid))
    return instance_id

def create_machine(key_name, create_security_group=False):
    # Use the tsid-test auth credentials
    session = botocore.session.Session(
        session_vars={'profile': (None, None, 'tsid-test')}
    )
    ec2 = session.create_client('ec2', region_name='eu-west-1')

    if create_security_group:
        # Firewall rules
        ec2.create_security_group(
            GroupName='project-work-1',
            Description='project work sec group 1'
        )
        ec2.authorize_security_group_ingress(
            GroupName='project-work-1',
            IpProtocol='tcp',
            FromPort=22,
            ToPort=22,
            CidrIp='0.0.0.0/0'
        )

    kp = ec2.create_key_pair(KeyName=key_name)
    with open('key_{}.pem'.format(key_name), 'x') as key_file:
        key_file.write(kp['KeyMaterial'])

    instances = ec2.run_instances(
        ImageId='ami-00b11177',
        KeyName=key_name,
        SecurityGroups=['project-work-1'],
        InstanceType='t1.micro',
        MinCount=1,
        MaxCount=1
    )
    instance_id = instances['Instances'][0]['InstanceId']
    with open('instance_id_{}.txt'.format(key_name), 'w') as info_file:
        info_file.write(instance_id)

    ec2.describe_instances(InstanceIds=['i-047f42e2'])

def _create_xray_client(self, ip='127.0.0.1', port='2000'):
    session = botocore.session.get_session()
    url = 'http://%s:%s' % (ip, port)
    return session.create_client('xray', endpoint_url=url,
                                 region_name='us-west-2',
                                 config=Config(signature_version=UNSIGNED))

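# Hedged usage sketch (an assumption, not part of the original test class):
# sending one segment document through the unsigned client created above.
# put_trace_segments is a real botocore X-Ray operation; the method name and
# all field values here are illustrative.
import json
import time

def _send_example_segment(self):
    client = self._create_xray_client()
    segment = json.dumps({
        'name': 'example-segment',
        'id': '70de5b6f19ff9a0a',
        'trace_id': '1-581cf771-a006649127e371903a2de979',
        'start_time': time.time() - 1,
        'end_time': time.time(),
    })
    client.put_trace_segments(TraceSegmentDocuments=[segment])
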
def test_ec2():
    session = botocore.session.get_session()
    client = session.create_client('ec2', region_name=AWS_REGION,
                                   aws_access_key_id=AWS_ACCESS_KEY_ID,
                                   aws_secret_access_key=AWS_SECRET_ACCESS_KEY)

    # Create instance
    resp = client.run_instances(
        ImageId=UBUNTU_14_04_PARAVIRTUAL_AMI,
        InstanceType='m1.small',
        MinCount=1,
        MaxCount=1,
    )
    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
    assert len(resp['Instances']) == 1
    instance_id = resp['Instances'][0]['InstanceId']

    # Describe instance
    resp = client.describe_instances(InstanceIds=[instance_id])
    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
    assert resp['Reservations'][0]['Instances'][0]['InstanceId'] == instance_id

    # Delete instance
    resp = client.terminate_instances(InstanceIds=[instance_id])
    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
    assert resp['TerminatingInstances'][0]['InstanceId'] == instance_id

def upload_to_sandbox(path):
    print('File \'%s\' uploading started...' % path.replace('api/test/../../', ''))
    start_time = str(int(time.time()))
    session = botocore.session.get_session()
    session.set_credentials(access_key="YOUR_ACCESS_KEY",
                            secret_key="YOUR_SECRET_KEY")
    client = session.create_client('s3', region_name='us-west-2')
    f = open(path, 'rb')
    key = '/'.join(str(uuid4()).split('-') + [os.path.basename(path)])
    response = client.put_object(
        Body=f.read(),
        Bucket='bkstg-sandbox',
        Key=key,
        ContentType=mimetypes.guess_type(path)[0]
    )
    end_time = str(int(time.time()))
    print('File was uploaded in %s seconds.\n' % (int(end_time) - int(start_time)))
    return key

# if __name__ == '__main__':
#     Upload.upload_to_sandbox(path=TestData.image_path)

def get_annotation_details(job_id):
    auth.require(fail_redirect='/')
    dynamodb = boto3.resource('dynamodb')
    ann_table = dynamodb.Table('songty_annotations')
    response = ann_table.scan(FilterExpression=Attr('job_id').eq(job_id))

    timestamp = response["Items"][0]["submit_time"]
    time_tuple = time.localtime(timestamp)
    request_date_str = time.strftime("%Y-%m-%d %H:%M", time_tuple)

    status = response["Items"][0]["job_status"]
    complete_date_str = ""
    is_over = False
    if status == "COMPLETE":
        timestamp = response["Items"][0]["complete_time"]
        time_tuple = time.localtime(timestamp)
        complete_date_str = time.strftime("%Y-%m-%d %H:%M", time_tuple)
        current_time = int(time.time())
        if current_time - response["Items"][0]["complete_time"] >= 7200:
            is_over = True

    input_file_key = "songty/" + job_id + "~" + response["Items"][0]["input_file_name"]
    result_file_key = ("songty/" + job_id + "~" +
                       response["Items"][0]["input_file_name"].split('.')[0] + ".annot.vcf")

    # https://github.com/boto/boto3/issues/110
    session = botocore.session.get_session()
    client = session.create_client('s3')
    input_file_url = client.generate_presigned_url(
        'get_object', Params={'Bucket': "gas-inputs", 'Key': input_file_key})
    result_file_url = client.generate_presigned_url(
        'get_object', Params={'Bucket': "gas-results", 'Key': result_file_key})

    return template(request.app.config['mpcs.env.templates'] + 'annotation_details',
                    auth=auth,
                    request_id=job_id,
                    request_time=request_date_str,
                    input_file=response["Items"][0]["input_file_name"],
                    status=response["Items"][0]["job_status"],
                    complete_time=complete_date_str,
                    input_url=input_file_url,
                    result_url=result_file_url,
                    over_hours=is_over)

def __init__(
    self, uri, access_key=None, secret_key=None, acl=None, *, feed_options=None
):
    if not is_botocore_available():
        raise NotConfigured("missing botocore library")
    u = urlparse(uri)
    self.bucketname = u.hostname
    self.access_key = u.username or access_key
    self.secret_key = u.password or secret_key
    self.keyname = u.path[1:]  # remove first "/"
    self.acl = acl
    import botocore.session

    session = botocore.session.get_session()
    self.s3_client = session.create_client(
        "s3",
        aws_access_key_id=self.access_key,
        aws_secret_access_key=self.secret_key,
    )
    if feed_options and feed_options.get("overwrite", True) is False:
        logger.warning(
            "S3 does not support appending to files. To "
            "suppress this warning, remove the overwrite "
            "option from your FEEDS setting or set it to True."
        )

def test_invalid_bucket_name_raises_error(self):
    session = botocore.session.get_session()
    s3 = session.create_client('s3')
    with self.assertRaises(ParamValidationError):
        s3.put_object(Bucket='adfgasdfadfs/bucket/name',
                      Key='foo', Body=b'asdf')

def get_dns_name(instance_id):
    session = botocore.session.Session(
        session_vars={'profile': (None, None, 'tsid-test')}
    )
    ec2 = session.create_client('ec2', region_name='eu-west-1')
    inst_desc = ec2.describe_instances(InstanceIds=[instance_id])
    return inst_desc['Reservations'][0]['Instances'][0]['PublicDnsName']

def _get_firehose_client(region_name):
    session = botocore.session.get_session()
    client = session.create_client(
        service_name='firehose',
        region_name=region_name,
    )
    return client

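# Hedged usage sketch (not in the original source): writing a single record
# with the client returned by _get_firehose_client above. put_record is a real
# Firehose operation; the stream name is a placeholder.
firehose = _get_firehose_client('us-east-1')
firehose.put_record(
    DeliveryStreamName='example-stream',
    Record={'Data': b'hello world\n'},
)
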
def test_client_can_retry_request_properly():
    session = botocore.session.get_session()
    for service_name in SMOKE_TESTS:
        client = session.create_client(service_name, region_name=REGION)
        for operation_name in SMOKE_TESTS[service_name]:
            kwargs = SMOKE_TESTS[service_name][operation_name]
            yield (_make_client_call_with_errors, client,
                   operation_name, kwargs)

def test_list_parameter_counting():
    """
    Test that special parameters with a list shape are recorded as a count,
    based on `para_whitelist.json`.
    """
    sqs = session.create_client('sqs', region_name='us-west-2')
    queue_urls = ['url1', 'url2']
    queue_name_prefix = 'url'
    response = {
        'QueueUrls': queue_urls,
        'ResponseMetadata': {
            'RequestId': '1234',
            'HTTPStatusCode': 200,
        }
    }

    with Stubber(sqs) as stubber:
        stubber.add_response('list_queues', response,
                             {'QueueNamePrefix': queue_name_prefix})
        sqs.list_queues(QueueNamePrefix='url')

    subsegment = xray_recorder.current_segment().subsegments[0]
    assert subsegment.http['response']['status'] == 200

    aws_meta = subsegment.aws
    assert aws_meta['queue_count'] == len(queue_urls)
    # All whitelisted input parameters will be converted to snake case
    # unless there is an explicit 'rename_to' attribute in the JSON key.
    assert aws_meta['queue_name_prefix'] == queue_name_prefix

def main(master_account_id, remove_master=False, profile=None,
         role_arn=None, region=None):
    region = region or None
    profile = profile or None
    role_arn = role_arn or None

    session = botocore.session.Session(profile=profile)
    session.set_default_client_config(
        botocore.client.Config(region_name=region))
    if role_arn:
        session = assume_role(session, role_arn, profile=profile)
    sechub = session.create_client("securityhub", region_name=region)

    if remove_master:
        sechub.disassociate_from_master_account()
        log.info("Disassociated member from master account: %s",
                 master_account_id)
    else:
        invite_id = get_pending_invite_id(sechub, master_account_id)
        if not invite_id:
            raise NoPendingInviteException(
                "No pending invite found for Security Hub:")
        log.info("Found pending invite_id: %s", invite_id)
        accept_pending_invite(sechub, master_account_id, invite_id)
        log.info("Accepted invite from SecurityHub master: %s",
                 master_account_id)

def _verify_expected_endpoint_url(region, bucket, key, s3_config,
                                  is_secure=True,
                                  customer_provided_endpoint=None,
                                  expected_url=None,
                                  signature_version=None):
    http_response = mock.Mock()
    http_response.status_code = 200
    http_response.headers = {}
    http_response.content = b''
    environ = {}
    with mock.patch('os.environ', environ):
        environ['AWS_ACCESS_KEY_ID'] = 'access_key'
        environ['AWS_SECRET_ACCESS_KEY'] = 'secret_key'
        environ['AWS_CONFIG_FILE'] = 'no-exist-foo'
        environ['AWS_SHARED_CREDENTIALS_FILE'] = 'no-exist-foo'
        session = create_session()
        session.config_filename = 'no-exist-foo'
        config = Config(signature_version=signature_version, s3=s3_config)
        s3 = session.create_client('s3', region_name=region,
                                   use_ssl=is_secure, config=config,
                                   endpoint_url=customer_provided_endpoint)
        with mock.patch('botocore.endpoint.Session.send') as mock_send:
            mock_send.return_value = http_response
            s3.put_object(Bucket=bucket, Key=key, Body=b'bar')
            request_sent = mock_send.call_args[0][0]
            assert_equal(request_sent.url, expected_url)

def __init__(self, service, operation, region_name, endpoint_url=None,
             session=None):
    # set credentials manually
    session = session or botocore.session.get_session()
    # get_session accepts access_key, secret_key
    self.client = session.create_client(
        service,
        region_name=region_name,
        endpoint_url=endpoint_url
    )
    self.endpoint = self.client._endpoint
    self.operation = operation
    self.http_client = AsyncHTTPClient()

    self.proxy_host = None
    self.proxy_port = None
    https_proxy = getproxies_environment().get('https')
    if https_proxy:
        proxy_parts = https_proxy.split(':')
        if len(proxy_parts) == 2 and proxy_parts[-1].isdigit():
            self.proxy_host, self.proxy_port = proxy_parts
            self.proxy_port = int(self.proxy_port)
        else:
            proxy = urlparse(https_proxy)
            self.proxy_host = proxy.hostname
            self.proxy_port = proxy.port

def get_sqs_queue_policies(session: botocore.session.Session, account_id: str,
                           region_allow_list: Optional[List[str]] = None,
                           region_deny_list: Optional[List[str]] = None) -> List[Policy]:
    """Using a botocore Session object, return a list of Policy objects
    representing the queue policies of each SQS queue in this account.

    The region allow/deny lists are mutually exclusive (i.e. at least one of
    them has the value None) lists of allowed/denied regions to pull data from.
    """
    result = []
    # Iterate through all regions of SQS where possible
    for sqs_region in get_regions_to_search(session, 'sqs',
                                            region_allow_list, region_deny_list):
        try:
            # Grab the queue names
            queue_urls = []
            sqsclient = session.create_client('sqs', region_name=sqs_region)
            response = sqsclient.list_queues()
            if 'QueueUrls' in response:
                queue_urls.extend(response['QueueUrls'])
            else:
                continue

            # Grab the queue policies
            for queue_url in queue_urls:
                queue_name = queue_url.split('/')[-1]
                queue_arn = 'arn:aws:sqs:{}:{}:{}'.format(sqs_region, account_id,
                                                          queue_name)
                sqs_policy_response = sqsclient.get_queue_attributes(
                    QueueUrl=queue_url, AttributeNames=['Policy'])
                # get_queue_attributes nests its results under 'Attributes'
                attributes = sqs_policy_response.get('Attributes', {})
                if 'Policy' in attributes:
                    sqs_policy_doc = json.loads(attributes['Policy'])
                    result.append(Policy(queue_arn, queue_name, sqs_policy_doc))
                    logger.info('Caching policy for {}'.format(queue_arn))
                else:
                    result.append(Policy(queue_arn, queue_name,
                                         {"Statement": [], "Version": "2012-10-17"}))
                    logger.info('Queue {} does not have a queue policy, '
                                'adding a "stub" policy instead.'.format(queue_name))
        except botocore.exceptions.ClientError as ex:
            logger.info(
                'Unable to search SQS in region {} for queues. The region may be '
                'disabled, or the current principal may not be authorized to access '
                'the service. Continuing.'.format(sqs_region))
            logger.debug('Exception was: {}'.format(ex))
    return result

def _get_client(service_name):
    aws_access_key_id = _id
    aws_secret_key = _key
    if service_name in _api_clients:
        return _api_clients[service_name]

    session = _get_botocore_session()
    if service_name == 'elasticbeanstalk':
        endpoint_url = _endpoint_url
    else:
        endpoint_url = None
    try:
        LOG.debug('Creating new Botocore Client for ' + str(service_name))
        client = session.create_client(
            service_name,
            endpoint_url=endpoint_url,
            # region_name=_region_name,
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_key,
            verify=_verify_ssl,
            config=Config(signature_version='s3v4'))
    except botocore.exceptions.ProfileNotFound as e:
        raise InvalidProfileError(e)
    LOG.debug('Successfully created session for ' + service_name)
    _api_clients[service_name] = client
    return client

def get_connection3(self, conn_type='ec2', cloud_type=None, region=None,
                    access_key=None, secret_key=None, security_token=None):
    # For this spike, rely on existing model/auth.py code to do the hard stuff.
    # Later, we'd convert all that from the ground up.
    conn2 = self.get_connection(conn_type, cloud_type, region,
                                access_key, secret_key, security_token)
    if conn2 is None:
        # return because of unit tests
        return None
    # Convert the boto2 connection to a botocore client
    endpoint_url = '{protocol}://{host}:{port}{path}'.format(
        protocol=('https' if conn2.is_secure else 'http'),
        host=conn2.host,
        port=conn2.port,
        path=conn2.path)
    session = botocore.session.get_session()
    client_args = dict(
        aws_access_key_id=conn2.aws_access_key_id,
        aws_secret_access_key=conn2.aws_secret_access_key,
        aws_session_token=conn2.provider.security_token,
        use_ssl=conn2.is_secure,
        verify=False
    )
    if self.cloud_type == 'euca':
        client_args.update(dict(
            api_version=conn2.APIVersion,
            endpoint_url=endpoint_url,
        ))
    conn3 = session.create_client(
        conn_type,
        conn2.region.name,
        **client_args
    )
    return conn3

def test_region_mentioned_in_invalid_region(self):
    session = botocore.session.get_session()
    client = session.create_client('cloudformation',
                                   region_name='bad-region-name')
    with self.assertRaisesRegexp(EndpointConnectionError,
                                 'Could not connect to the endpoint URL'):
        client.list_stacks()

def _verify_expected_endpoint_url(region, bucket, key, s3_config,
                                  is_secure=True,
                                  customer_provided_endpoint=None,
                                  expected_url=None,
                                  signature_version=None):
    environ = {}
    with mock.patch('os.environ', environ):
        environ['AWS_ACCESS_KEY_ID'] = 'access_key'
        environ['AWS_SECRET_ACCESS_KEY'] = 'secret_key'
        environ['AWS_CONFIG_FILE'] = 'no-exist-foo'
        environ['AWS_SHARED_CREDENTIALS_FILE'] = 'no-exist-foo'
        session = create_session()
        session.config_filename = 'no-exist-foo'
        config = Config(signature_version=signature_version, s3=s3_config)
        s3 = session.create_client('s3', region_name=region,
                                   use_ssl=is_secure, config=config,
                                   endpoint_url=customer_provided_endpoint)
        with ClientHTTPStubber(s3) as http_stubber:
            http_stubber.add_response()
            s3.put_object(Bucket=bucket, Key=key, Body=b'bar')
            assert_equal(http_stubber.requests[0].url, expected_url)

def perform_visualization(session, graph):
    iamclient = session.create_client('iam')
    pydot_node_dict = {}
    dot_graph = pydot.Dot(graph_type='digraph', overlap='scale',
                          layout='neato', concentrate='true', splines='true')
    admins = []
    for node in graph.nodes:
        n_e_tuples = get_relevant_nodes(graph, node)
        result = PrivEscQuery.run_query(iamclient, graph, node, n_e_tuples)
        color = 'white'
        if result[0] == 2:  # can use another principal to priv-esc
            color = '#FADBD8'
        elif result[0] == 1:  # already admin
            color = '#BFEFFF'
            admins.append(node)
        pydot_node_dict[node] = pydot.Node(str(node), style='filled',
                                           fillcolor=color, shape='box')
        dot_graph.add_node(pydot_node_dict[node])

    for edge in graph.edges:
        if edge.nodeX not in admins:
            dot_graph.add_edge(
                pydot.Edge(pydot_node_dict[edge.nodeX], pydot_node_dict[edge.nodeY]))

    graphfile = open('output.dot', 'w')
    graphfile.write(dot_graph.to_string())
    graphfile.close()
    dot_graph.write_svg('output.svg')

def test_region_mentioned_in_invalid_region(self):
    session = botocore.session.get_session()
    client = session.create_client(
        'cloudformation', region_name='us-east-999')
    with self.assertRaisesRegexp(EndpointConnectionError,
                                 'Could not connect to the endpoint URL'):
        client.list_stacks()

def _verify_expected_endpoint_url(region, bucket, key, s3_config,
                                  is_secure=True,
                                  customer_provided_endpoint=None,
                                  expected_url=None):
    http_response = mock.Mock()
    http_response.status_code = 200
    http_response.headers = {}
    http_response.content = b''
    environ = {}
    with mock.patch('os.environ', environ):
        environ['AWS_ACCESS_KEY_ID'] = 'access_key'
        environ['AWS_SECRET_ACCESS_KEY'] = 'secret_key'
        environ['AWS_CONFIG_FILE'] = 'no-exist-foo'
        session = create_session()
        session.config_filename = 'no-exist-foo'
        config = None
        if s3_config is not None:
            config = Config(s3=s3_config)
        s3 = session.create_client('s3', region_name=region,
                                   use_ssl=is_secure, config=config,
                                   endpoint_url=customer_provided_endpoint)
        with mock.patch('botocore.endpoint.Session.send') as mock_send:
            mock_send.return_value = http_response
            s3.put_object(Bucket=bucket, Key=key, Body=b'bar')
            request_sent = mock_send.call_args[0][0]
            assert_equal(request_sent.url, expected_url)

def test_lint_waiter_configs():
    session = botocore.session.get_session()
    for service_name in session.get_available_services():
        client = session.create_client(service_name, 'us-east-1')
        service_model = client.meta.service_model
        for waiter_name in client.waiter_names:
            yield _lint_single_waiter, client, waiter_name, service_model

def connect_to_region(cls, region, session=None, access_key=None,
                      secret_key=None, **kwargs):
    """
    Connect to an AWS region.

    This method has been deprecated in favor of :meth:`~.connect`

    Parameters
    ----------
    region : str
        Name of an AWS region
    session : :class:`~botocore.session.Session`, optional
        The Session object to use for the connection
    access_key : str, optional
        If session is None, set this access key when creating the session
    secret_key : str, optional
        If session is None, set this secret key when creating the session
    **kwargs : dict
        Keyword arguments to pass to the constructor

    """
    warnings.warn("connect_to_region is deprecated and will be removed. "
                  "Use connect instead.")
    if session is None:
        session = botocore.session.get_session()
        if access_key is not None:
            session.set_credentials(access_key, secret_key)
    client = session.create_client('dynamodb', region)
    return cls(client, **kwargs)

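# Hedged usage sketch (not from the original source): calling the deprecated
# classmethod above. `DynamoDBConnection` is an assumed name for the enclosing
# class, and the credentials are placeholders.
conn = DynamoDBConnection.connect_to_region(
    'us-west-2',
    access_key='AKIDEXAMPLE',
    secret_key='SECRETEXAMPLE',
)
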
def __init__(self, service, operation, region_name, endpoint_url=None,
             session=None, connect_timeout=None, request_timeout=None):
    # set credentials manually
    session = session or botocore.session.get_session()
    # get_session accepts access_key, secret_key
    self.client = session.create_client(
        service,
        region_name=region_name,
        endpoint_url=endpoint_url
    )
    try:
        self.endpoint = self.client.endpoint
    except AttributeError:
        self.endpoint = self.client._endpoint
    self.operation = operation
    self.http_client = AsyncHTTPClient()

    self.proxy_host = None
    self.proxy_port = None
    https_proxy = getproxies_environment().get('https')
    if https_proxy:
        self._enable_curl_httpclient()
        proxy_parts = https_proxy.split(':')
        if len(proxy_parts) == 2 and proxy_parts[-1].isdigit():
            self.proxy_host, self.proxy_port = proxy_parts
            self.proxy_port = int(self.proxy_port)
        else:
            proxy = urlparse(https_proxy)
            self.proxy_host = proxy.hostname
            self.proxy_port = proxy.port

    self.request_timeout = request_timeout
    self.connect_timeout = connect_timeout

def get_client(region=None, profile=None):
    try:
        print("get_client(): starting...")
        session = botocore.session.get_session()
        region = region or discover_region()
        if not region:
            raise ValueError('Region was not provided and could not be determined')
        session.set_config_variable('region', region)
        if profile:
            session.set_config_variable('profile', profile)
        print("create_client() will be called...")
        try:
            return session.create_client('kms', region_name=region)
        except Exception as e:
            print("\nget_client() failed calling session.create_client()")
            print("  ERROR:{}\n".format(e))
            sys.exit(1)
        print("client created")
    except Exception as e:
        print("get_client() failed:{}".format(e))
        sys.exit(1)
    print("get_client() finished")

def handle_preset_priv_esc(tokens, session, graph, skip_admin):
    """This function handles the "priv_esc / change_perms / privesc" preset queries."""
    if len(tokens) != 3:
        PrivEscQuery.print_help()
        return

    iamclient = session.create_client('iam')
    if tokens[2] != '*':
        node = grab_node_by_name(tokens[2], graph)
        if node is None:
            print('Could not find a principal matching: ' + tokens[2])
            return
        node_edgelist_tuples = get_relevant_nodes(graph, node)
        tuple_result = PrivEscQuery.run_query(iamclient, graph, node,
                                              node_edgelist_tuples)
        if tuple_result[0] != 0:
            print('Discovered a path to escalate privileges:')
            print(tuple_result[1])
        else:
            print('Did not find a path to escalate privileges')
    else:
        for node in graph.nodes:
            if skip_admin and 'is_admin' in node.properties and node.properties['is_admin']:
                continue
            node_edgelist_tuples = get_relevant_nodes(graph, node)
            tuple_result = PrivEscQuery.run_query(iamclient, graph, node,
                                                  node_edgelist_tuples)
            if tuple_result[0] != 0:
                print(tuple_result[1])
                print("")

def tag_instance():
    """
    Apply an instance tag to the instance running the container, using botocore.
    """
    instance_id = _retrieve_instance_id()
    region = _retrieve_instance_region()
    args = parse_args()
    framework, framework_version, container_type = (
        args.framework, args.framework_version, args.container_type)
    py_version = sys.version.split(" ")[0]
    device = _retrieve_device()
    cuda_version = f"_cuda{_retrieve_cuda()}" if device == "gpu" else ""
    os_version = _retrieve_os()
    tag = (f"{framework}_{container_type}_{framework_version}_"
           f"python{py_version}_{device}{cuda_version}_{os_version}")
    tag_struct = {"Key": "aws-dlc-autogenerated-tag-do-not-delete", "Value": tag}
    request_status = None
    if instance_id and region:
        try:
            session = botocore.session.get_session()
            ec2_client = session.create_client("ec2", region_name=region)
            response = ec2_client.create_tags(Resources=[instance_id], Tags=[tag_struct])
            request_status = response.get("ResponseMetadata").get("HTTPStatusCode")
            if os.environ.get("TEST_MODE") == str(1):
                with open(os.path.join(os.sep, "tmp", "test_tag_request.txt"), "w+") as rf:
                    rf.write(json.dumps(tag_struct, indent=4))
        except Exception as e:
            logging.error(f"Error. {e}")
        logging.debug("Instance tagged successfully: {}".format(request_status))
    else:
        logging.error("Failed to retrieve instance_id or region")
    return request_status

def setUp(self):
    session = botocore.session.get_session()
    config = botocore.config.Config(
        signature_version=botocore.UNSIGNED,
        s3={'addressing_style': 'path'}
    )
    self.client = session.create_client(
        's3', region_name='us-east-1', config=config)
    self.stubber = Stubber(self.client)

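# Hedged sketch (not part of the original tests) of how the stubbed client
# from setUp above is typically exercised; the test name, bucket name, and
# response body are illustrative.
def test_list_buckets_stubbed(self):
    response = {'Buckets': [{'Name': 'example-bucket'}]}
    # Queue a canned response; {} means list_buckets expects no parameters.
    self.stubber.add_response('list_buckets', response, {})
    with self.stubber:
        result = self.client.list_buckets()
    self.assertEqual(result['Buckets'][0]['Name'], 'example-bucket')
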
def lambda_handler(event, context):
    print('Checking at {} for environment {}...'.format(event['time'], ENVIRONMENT_NAME))
    session = botocore.session.get_session()

    eb_client = session.create_client('elasticbeanstalk', region_name=REGION)
    resources = eb_client.describe_environment_resources(
        EnvironmentName=ENVIRONMENT_NAME)['EnvironmentResources']
    instance = resources['Instances'][0]['Id']
    print('Found instance {}'.format(instance))

    ec2_client = session.create_client('ec2', region_name=REGION)
    inst_detail = ec2_client.describe_instances(
        InstanceIds=[instance])['Reservations'][0]['Instances'][0]
    print('Described details of instance {}'.format(instance))
    private_ip = inst_detail['NetworkInterfaces'][0]['PrivateIpAddress']
    print('Private IP is {}'.format(private_ip))

    r53_client = session.create_client('route53')
    record_sets = r53_client.list_resource_record_sets(
        HostedZoneId=HOSTED_ZONE)['ResourceRecordSets']
    for record in record_sets:
        if record['Name'] == TARGET_DOMAIN:
            value = record['ResourceRecords'][0]['Value']
            if value == private_ip:
                print("Record '{}' has same value {} as current IP address {}".format(
                    TARGET_DOMAIN, value, private_ip))
            else:
                print("Record doesn't match. Changing...")
                r53_client.change_resource_record_sets(
                    HostedZoneId=HOSTED_ZONE,
                    ChangeBatch={
                        'Comment': 'string',
                        'Changes': [
                            {
                                'Action': 'UPSERT',
                                'ResourceRecordSet': {
                                    'Name': TARGET_DOMAIN,
                                    'Type': 'A',
                                    'ResourceRecords': [
                                        {'Value': private_ip},
                                    ],
                                    'TTL': 300,
                                }
                            },
                        ]
                    })
                print("Record changed from {} to {}".format(value, private_ip))

def test_can_retry_request_properly():
    session = botocore.session.get_session()
    for service_name in SMOKE_TESTS:
        client = session.create_client(service_name, region_name=REGION)
        for operation_name in SMOKE_TESTS[service_name]:
            kwargs = SMOKE_TESTS[service_name][operation_name]
            yield (_make_call_with_errors, session, service_name,
                   REGION, operation_name, kwargs)

def test_can_make_request_and_understand_errors_with_client():
    session = botocore.session.get_session()
    for service_name in ERROR_TESTS:
        client = session.create_client(service_name, region_name=REGION)
        for operation_name in ERROR_TESTS[service_name]:
            kwargs = ERROR_TESTS[service_name][operation_name]
            method_name = xform_name(operation_name)
            yield _make_error_client_call, client, method_name, kwargs

def code():
    context = request.environ.get("context")
    session = botocore.session.get_session()
    # The region name is detected from the Lambda environment.
    client = session.create_client("lambda")
    code = client.get_function(FunctionName=context.function_name,
                               Qualifier=context.function_version)
    return redirect(code["Code"]["Location"], code=303)

def _create_client(self):
    session = botocore.session.get_session()
    if aws_creds:
        session.set_credentials(**aws_creds)
    else:
        session.set_config_variable('profile', self.profile)
    return session.create_client(
        self.service_name, region_name=self.region_name)

def get_cluster_info(cluster):
    session = botocore.session.get_session()
    cfn_client = session.create_client('cloudformation')
    asg_client = session.create_client('autoscaling')

    info = []
    stack = get_stack_info(cfn_client, cluster)
    info.extend(stack.items())
    if stack['stack_status'] != "CREATE_COMPLETE":
        return info
    info.extend(get_bastion_info(cfn_client, cluster).items())
    info.extend(get_worker_info(asg_client, cluster).items())
    return info

def assert_monitoring_host_and_port(self, session, host, port):
    with mock.patch('botocore.monitoring.SocketPublisher',
                    spec=True) as mock_publisher:
        client = session.create_client('ec2', 'us-west-2')
    self.assertEqual(mock_publisher.call_count, 1)
    _, args, kwargs = mock_publisher.mock_calls[0]
    self.assertEqual(kwargs.get('host'), host)
    self.assertEqual(kwargs.get('port'), port)

def get_s3_file(bucket=None, key=None, local_path=None, method="boto"):
    """Get a file from S3 (quickly)."""
    local_dir = local_path.rpartition('/')[0]
    if local_dir:
        mkdir_p(local_dir)
    conn = boto3.client('s3')
    if method == "boto":
        # TODO: this method hasn't been tested at all, and is
        # probably broken since I never moved it from boto to boto3
        bbucket = conn.get_bucket(bucket, validate=False)
        bkey = bbucket.get_key(key)
        with open(local_path, 'w') as local_log:
            tries = 5
            while True:
                try:
                    bkey.get_contents_to_file(local_log)
                    break
                except Exception as oops:
                    tries -= 1
                    if __debug__:
                        print("Getting {}:{} failed with {}".format(
                            bbucket.name, bkey.name, oops), file=sys.stderr)
                    if not tries:
                        raise
    else:
        failures = 0
        session = botocore.session.get_session()
        client = session.create_client('s3')
        s3_url = client.generate_presigned_url(
            'get_object',
            Params={
                'Bucket': bucket,
                'Key': key
            }
        )
        if method == "axel":
            command = 'axel --num-connections 8 --quiet --output {} {}'.format(
                local_path, s3_url).split()
        elif method == "aria":
            command = ('aria2c --min-split-size=20M --max-connection-per-server=8 '
                       '--split=8 --out={} {}').format(local_path, s3_url).split()
        elif method == "wget":
            command = 'wget --quiet --output-document={} {}'.format(
                local_path, s3_url).split()
        elif method == "curl":
            command = ('curl --fail --silent --speed-limit 5000 --speed-time 20 '
                       '--output {} {}').format(local_path, s3_url).split()

        have_file = False
        while not have_file:
            try:
                subprocess.check_call('/bin/rm -rf {}'.format(local_path).split())
            except Exception as oops:
                pass  # this should fail since we often won't have the file
            try:
                subprocess.check_call(command,
                                      stderr=open(os.devnull, 'w'),
                                      stdout=open(os.devnull, 'w'))
                have_file = True
                return
            except Exception as oops:
                failures += 1
                if failures >= 20:
                    print("get failed (after {} attempts):".format(failures),
                          oops, file=sys.stderr)
                    print(" ".join(command), file=sys.stderr)
                    raise

def create_client(self, service, api_version=None):
    return session.create_client(
        service_name=service,
        region_name=self.region,
        api_version=api_version,
        aws_access_key_id=self.access_key_id,
        aws_secret_access_key=self.secret_access_key,
        aws_session_token=self.session_token,
    )

def test_get_ses_waiter(self):
    # We're checking this because ses is not the endpoint prefix
    # for the service, it's email. We want to make sure this does
    # not affect the lookup process.
    session = botocore.session.get_session()
    client = session.create_client('ses', 'us-east-1')
    # If we have at least one waiter in the list, we know that we have
    # actually loaded the waiters and this test has passed.
    self.assertTrue(len(client.waiter_names) > 0)

def __init__(self, service, operation, region_name, endpoint_url=None,
             session=None):
    # set credentials manually
    session = session or botocore.session.get_session()
    # get_session accepts access_key, secret_key
    self.client = session.create_client(
        service, region_name=region_name, endpoint_url=endpoint_url)
    self.endpoint = self.client._endpoint
    self.operation = operation
    self.http_client = AsyncHTTPClient()

def _get_client(client_name, url, region, access, secret):
    connection_data = {
        'config_file': (None, 'AWS_CONFIG_FILE', None, None),
        'region': ('region', 'BOTO_DEFAULT_REGION', region, None),
    }
    session = botocore.session.get_session(connection_data)
    return session.create_client(
        client_name, region_name=region, endpoint_url=url,
        aws_access_key_id=access, aws_secret_access_key=secret)

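# Hedged usage sketch (an assumption, not in the original source): pointing the
# helper above at a local S3-compatible endpoint; every value is a placeholder.
s3 = _get_client('s3', 'http://127.0.0.1:8080', 'FakeRegion', 'fake', 'fake')
print(s3.list_buckets()['Buckets'])
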
def test_can_make_request_with_client():
    # Same as test_can_make_request, but with Client objects
    # instead of service/operations.
    session = botocore.session.get_session()
    for service_name in SMOKE_TESTS:
        client = session.create_client(service_name, region_name=REGION)
        for operation_name in SMOKE_TESTS[service_name]:
            kwargs = SMOKE_TESTS[service_name][operation_name]
            method_name = xform_name(operation_name)
            yield _make_client_call, client, method_name, kwargs