Example #1
    def test_get_snsname_arn_auth_exception_handling(self, aws_res_mock):
        """AuthorizationError is intercepted and re-raised as AssertionError"""
        # local imports of code-under-test ensure moto has mocks
        # registered before any possible calls out to AWS
        from awstools.awstools import get_snsname_arn

        # create a mock SNS client that returns what we tell it to
        client = boto3.client('sns')
        stub = Stubber(client)
        stub.add_client_error('create_topic', service_error_code='AuthorizationError')
        stub.activate()


        # since firesim manager code doesn't take clients as method parameters
        # now we mock boto3.client to return our stubbed client
        with patch.object(boto3._get_default_session(), 'client', return_value=client) as mock_session:
            topic_arn = get_snsname_arn()

            stub.assert_no_pending_responses()
            assert topic_arn is None

            # TODO: we could mock rootLogger.critical to capture its calls and args and validate that we're seeing the correct "nice" message

            # make sure get_snsname_arn() actually called out to get a sns
            # client, otherwise we aren't testing what we think we are
            mock_session.assert_called_once_with('sns')

        aws_res_mock.assert_called_once()
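
The pattern above (stub a client error, then patch the default session's client factory) also works for any code that builds its own clients at call time. Below is a minimal, self-contained sketch of the same idea; the lookup_topic function is a hypothetical stand-in for the code under test, not part of the example above.

import boto3
import pytest
from botocore.exceptions import ClientError
from botocore.stub import Stubber
from unittest.mock import patch

def lookup_topic():
    # Builds its own client, so tests must intercept the default session.
    sns = boto3.client('sns')
    return sns.create_topic(Name='demo')['TopicArn']

def test_lookup_topic_auth_error():
    client = boto3.client('sns', region_name='us-east-1')
    stub = Stubber(client)
    stub.add_client_error('create_topic', service_error_code='AuthorizationError')
    stub.activate()

    # boto3.client() delegates to the default session, so patching its
    # 'client' method routes lookup_topic() onto the stubbed client.
    with patch.object(boto3._get_default_session(), 'client', return_value=client):
        with pytest.raises(ClientError):
            lookup_topic()

    stub.assert_no_pending_responses()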
Example #2
def check_in_whitelist(resource_id, resource_type, is_global=False) -> bool:
    """Checks if the resource id is in the corresponding resources whitelist for the region

    Args:
        is_global: If this resource is in the global region
        resource_id (str): The resource id which will reside in the whitelist
        resource_type (str): The resource type e.g. s3_bucket or ec2_instance which will be in the whitelist

    Returns:
        bool True if in whitelist false if not

    """
    if is_global is True:
        region = "global"
    else:
        region = boto3._get_default_session().region_name  # Ugly hack to get current session
    try:
        # If in whitelist for region
        return resource_id in WHITELIST[region][resource_type]
    except KeyError:  # Resource type not present in the region's whitelist
        global KEEP_GOING
        while KEEP_GOING != "y" and KEEP_GOING != "n":
            KEEP_GOING = input("Incorrect whitelist in {0}-{1}."
                               " Do you still want to continue (y/n)? "
                               "".format(region, resource_type)).lower()
        if KEEP_GOING == "y":
            return False
        sys.exit(0)
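
The "Ugly hack" comment above refers to reaching into boto3._get_default_session(); the same region value is also available through the public Session API. A minimal sketch:

import boto3

# Public equivalent: a fresh Session resolves the region from the same
# environment/config chain as the default session.
region = boto3.session.Session().region_name
print(region)  # e.g. 'us-east-1', or None if no region is configured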
Example #3
    def test_must_set_aws_region_in_boto_session(self):
        region = "myregion"
        ctx = Context()

        ctx.region = region
        self.assertEqual(ctx.region, region)
        self.assertEqual(region, boto3._get_default_session().region_name)
Example #4
    def test_obtain_sts_creds(self, rmock):
        self.mock_sts_creds(rmock, 'A')

        # Should fetch credentials
        creds = boto3._get_default_session().get_credentials()
        self.assertEqual(creds.access_key, "access-key-A")
        self.assertEqual(creds.secret_key, "secret-key-A")
        self.assertEqual(creds.token, "security-token-A")

        self.mock_sts_creds(rmock, 'B')

        # Should use cached credentials
        creds = boto3._get_default_session().get_credentials()
        self.assertEqual(creds.access_key, "access-key-A")
        self.assertEqual(creds.secret_key, "secret-key-A")
        self.assertEqual(creds.token, "security-token-A")
Example #5
    def test_must_set_aws_region_in_boto_session(self):
        region = "myregion"
        ctx = Context()

        ctx.region = region
        self.assertEqual(ctx.region, region)
        self.assertEqual(region, boto3._get_default_session().region_name)
Example #6
 def boto_session(self):
     claims = self.user_claims
     if claims:
         group = self.cognito_group(claims['cognito:groups'], claims['iss'])
         if group:
             return self.create_boto_session(group['RoleArn'])
     return boto3._get_default_session()
Example #7
def aws_session():
    os.environ['AWS_ACCESS_KEY_ID'] = 'fake_access_key'
    os.environ['AWS_SECRET_ACCESS_KEY'] = 'fake_secret_key'
    os.environ['AWS_DEFAULT_REGION'] = 'us-east-1'
    boto3.setup_default_session()
    with mock.patch('ml_git.storages.s3store.boto3.Session') as mock_session:
        mock_session.return_value = boto3._get_default_session()
        yield
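
Assuming aws_session is registered as a pytest fixture (the @pytest.fixture decorator is not shown in the excerpt), a test can simply request it. A hedged usage sketch:

import boto3

def test_default_session_uses_fake_credentials(aws_session):
    # The fixture exported fake keys and a region into the environment and
    # called setup_default_session(), so the default session reflects them.
    session = boto3._get_default_session()
    assert session.get_credentials().access_key == 'fake_access_key'
    assert session.region_name == 'us-east-1'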
Example #8
def validate_using_player_account_gem(interface_url, user, cognito_id):
    client = cgf_service_client.for_url(interface_url,
                                        verbose=True,
                                        session=boto3._get_default_session())
    result = client.navigate('accountinfo', cognito_id).GET()
    if result.DATA is None:
        return False
    # TODO: add support for playername or other attributes
    return result.DATA['CognitoUsername'] == user
Example #9
def check_player_account_gem_for_ban(interface_url, user):
    # get cognito id from identity map
    cognito_id = identity_validator.get_id_from_user(user)
    client = cgf_service_client.for_url(interface_url,
                                        verbose=True,
                                        session=boto3._get_default_session())
    result = client.navigate('accountinfo', cognito_id).GET()
    # ask player account if that player is banned
    return result.DATA.get('AccountBlacklisted', False)
Example #10
def get(request):
    interface_url = cgf_lambda_settings.get_service_url(
        "CloudGemPlayerAccount_banplayer_1_0_0")
    if not interface_url:
        return {"players": ban_handler.get_banned_players()}

    service_client = cgf_service_client.for_url(
        interface_url, verbose=True, session=boto3._get_default_session())
    result = service_client.navigate('list').GET()
    return result.DATA
Example #11
def post(request, user=None):
    interface_url = cgf_lambda_settings.get_service_url("CloudGemPlayerAccount_banplayer_1_0_0")

    if not interface_url:
        return {
            "status": ban_handler.ban(user)
        }

    service_client = cgf_service_client.for_url(interface_url, verbose=True, session=boto3._get_default_session())
    result = service_client.navigate('playerban').POST({"id":  identity_validator.get_id_from_user(user)})
    return result.DATA
Example #12
    def test_renew_sts_creds(self, rmock):
        now = datetime.now(tz=pytz.UTC)
        self.mock_sts_creds(rmock, 'A')

        # Should fetch credentials
        with freeze_time(now):
            creds = boto3._get_default_session().get_credentials()
            self.assertEqual(creds.access_key, "access-key-A")
            self.assertEqual(creds.secret_key, "secret-key-A")
            self.assertEqual(creds.token, "security-token-A")
            self.assertFalse(creds.refresh_needed())

        self.mock_sts_creds(rmock, 'B')

        # Should fetch credentials again, since the time changed
        with freeze_time(now + timedelta(hours=2)):
            creds = boto3._get_default_session().get_credentials()
            self.assertTrue(creds.refresh_needed())
            self.assertEqual(creds.access_key, "access-key-B")
            self.assertEqual(creds.secret_key, "secret-key-B")
            self.assertEqual(creds.token, "security-token-B")
Example #13
def main(message, context):
    interface_urls = cgf_lambda_settings.list_service_urls()
    event = message['emitted']

    for interface_url in interface_urls:
        if c.INT_METRICS_LISTENER in interface_url.keys()[0]:
            service_client = cgf_service_client.for_url(
                interface_url.values()[0],
                verbose=True,
                session=boto3._get_default_session())
            source = service_client.navigate('source').GET()

            if event.get('source', None) == source.name:
                print("#######Emitting")
                service_client.navigate('emit').POST(message)
Example #14
def __send_communicator_broadcast(message):
    if not __do_communicator_updates():
        return
        
    interface_url = cgf_lambda_settings.get_service_url("CloudGemWebCommunicator_sendmessage_1_0_0")
    if not interface_url:
        print 'Messaging interface not found'
        return
        
    client = cgf_service_client.for_url(interface_url, verbose=True, session=boto3._get_default_session())
    try:
        result = client.navigate('broadcast').POST({"channel": "CloudGemDynamicContent", "message": message})
        print 'Got send result {}'.format(result)
    except Exception as error:
        raise errors.ClientError('Failed to broadcast {} due to error: {}'.format(message, error))
Example #15
    def wrapper(RoleArn: str,
                *,
                RoleSessionName: str = None,
                PolicyArns: typing.List[typing.Dict[str, str]] = None,
                Policy: typing.Union[str, typing.Dict] = None,
                DurationSeconds: typing.Union[int, datetime.timedelta] = None,
                Tags: typing.List[typing.Dict[str, str]] = None,
                TransitiveTagKeys: typing.List[str] = None,
                ExternalId: str = None,
                SerialNumber: str = None,
                TokenCode: str = None,
                region_name: typing.Union[str, bool] = None,
                validate: bool = True,
                cache: dict = None,
                additional_kwargs: typing.Dict = None) -> boto3.Session:
        """Produce a boto3 Session using the given role.

        Unlike creating a session with the credentials returned directly
        by sts.AssumeRole, the returned session will refresh the credentials
        automatically when they expire by calling AssumeRole again.

        By default, the parameters are checked so that errors can be raised
        at this point, rather than more confusingly when the first call
        is made using the child session.
        This can be disabled by setting validate=False.

        The parent session is available on the child session
        in the property assume_role_parent_session.

        Additional arguments for AssumeRole, if any are added in the future,
        can be passed in additional_kwargs."""
        session = boto3._get_default_session()
        return assume_role(session,
                           RoleArn,
                           RoleSessionName=RoleSessionName,
                           PolicyArns=PolicyArns,
                           Policy=Policy,
                           DurationSeconds=DurationSeconds,
                           Tags=Tags,
                           TransitiveTagKeys=TransitiveTagKeys,
                           ExternalId=ExternalId,
                           SerialNumber=SerialNumber,
                           TokenCode=TokenCode,
                           region_name=region_name,
                           validate=validate,
                           cache=cache,
                           additional_kwargs=additional_kwargs)
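
A hedged usage sketch for the wrapper above (the role ARN is a placeholder, and the surrounding package may expose the callable under a different public name):

# Placeholder role ARN for illustration only.
child = wrapper('arn:aws:iam::123456789012:role/example-role',
                RoleSessionName='demo',
                DurationSeconds=900)
s3 = child.client('s3')  # clients built from the child session refresh credentials automatically
assert child.assume_role_parent_session is boto3._get_default_session()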
Example #16
    def __init__(
        self,
        log_group_name,
        region_name=None,
        start_time=None,
        end_time=None,
        boto_client_kwargs=None,
        profile_name=None
    ):
        boto_client_kwargs = boto_client_kwargs or {}
        boto_profile_name = profile_name or 'default'

        print('log group name: %s' % log_group_name)
        print('region: %s' % region_name)
        print('using aws profile: %s' % boto_profile_name)
        print('boto kwargs: %s' % boto_client_kwargs)

        # If a specific region is requested, use it.
        # If not, try to use the environment's configuration (i.e. the
        # AWS_DEFAULT_REGION variable of ~/.aws/config file).
        # If that doesn't work, use a default region.
        if region_name is not None:
            boto3.setup_default_session(profile_name=boto_profile_name, region_name=region_name)
            session = boto3._get_default_session()
            print('default session set up with %s...' % session.profile_name)
            self.logs_client = boto3.client('logs', **boto_client_kwargs)
        else:
            try:
                self.logs_client = boto3.client('logs', **boto_client_kwargs)
            except NoRegionError:
                boto3.setup_default_session(profile_name=boto_profile_name, region_name=DEFAULT_REGION_NAME)
                self.logs_client = boto3.client('logs', **boto_client_kwargs)

        self.log_group_name = log_group_name

        # If no time filters are given use the last hour
        now = datetime.utcnow()
        start_time = start_time or now - timedelta(hours=1)
        end_time = end_time or now

        self.start_ms = timegm(start_time.utctimetuple()) * 1000
        self.end_ms = timegm(end_time.utctimetuple()) * 1000
Example #17
    def __init__(self,
                 log_group_name,
                 region_name=None,
                 start_time=None,
                 end_time=None,
                 boto_client_kwargs=None,
                 profile_name=None):
        boto_client_kwargs = boto_client_kwargs or {}
        boto_profile_name = profile_name or 'default'

        print('log group name: %s' % log_group_name)
        print('region: %s' % region_name)
        print('using aws profile: %s' % boto_profile_name)
        print('boto kwargs: %s' % boto_client_kwargs)

        # If a specific region is requested, use it.
        # If not, try to use the environment's configuration (i.e. the
        # AWS_DEFAULT_REGION variable of ~/.aws/config file).
        # If that doesn't work, use a default region.
        if region_name is not None:
            boto3.setup_default_session(profile_name=boto_profile_name,
                                        region_name=region_name)
            session = boto3._get_default_session()
            print('default session set up with %s...' % session.profile_name)
            self.logs_client = boto3.client('logs', **boto_client_kwargs)
        else:
            try:
                self.logs_client = boto3.client('logs', **boto_client_kwargs)
            except NoRegionError:
                boto3.setup_default_session(profile_name=boto_profile_name,
                                            region_name=DEFAULT_REGION_NAME)
                self.logs_client = boto3.client('logs', **boto_client_kwargs)

        self.log_group_name = log_group_name

        # If no time filters are given use the last hour
        now = datetime.utcnow()
        start_time = start_time or now - timedelta(hours=1)
        end_time = end_time or now

        self.start_ms = timegm(start_time.utctimetuple()) * 1000
        self.end_ms = timegm(end_time.utctimetuple()) * 1000
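
The millisecond bounds computed above match what the CloudWatch Logs API expects. A short sketch of how they would typically be consumed, assuming reader is an instance of the class whose __init__ is shown above (the variable name is hypothetical; the calls are standard boto3 'logs' client methods):

paginator = reader.logs_client.get_paginator('filter_log_events')
for page in paginator.paginate(logGroupName=reader.log_group_name,
                               startTime=reader.start_ms,
                               endTime=reader.end_ms):
    for event in page.get('events', []):
        print(event['timestamp'], event['message'])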
Example #18
def delete(request, user=None):
    """
    Call PlayerAccount to unban the player
    
    Player must be a registered user in the PlayerAccount Gem, and Leaderboards must have seen the player
    via a data request so there is a mapping between the user name and the Cognito identity (for get_id_from_user)
    """
    print("Handling player unban for {}".format(user))
    interface_url = cgf_lambda_settings.get_service_url(
        "CloudGemPlayerAccount_banplayer_1_0_0")
    if not interface_url:
        return {"status": ban_handler.lift_ban(user)}

    service_client = cgf_service_client.for_url(
        interface_url, verbose=True, session=boto3._get_default_session())
    navigation = service_client.navigate('playerban')
    cog_id = identity_validator.get_id_from_user(user)
    if cog_id is None:
        raise errors.ClientError(UNKNOWN_PLAYER_ERROR_MESSAGE.format(user))

    result = navigation.DELETE({"id": cog_id})
    return result.DATA
Example #19
def process_sqs_queue(queue_url):
    log = getLogger('accounts.sqs')
    log.info('Processing account events from %s', queue_url)
    try:
        region = queue_url.split('.')[1]
        available_regions = boto3._get_default_session().get_available_regions('sqs')
        if region not in available_regions:
            log.error(
                'SQS misconfigured, expected region, got %s from %s'
                % (region, queue_url)
            )
        # Connect to the SQS queue.
        # Credentials are specified in EC2 as an IAM role on prod/stage/dev.
        # If you're testing locally see boto3 docs for how to specify:
        # http://boto3.readthedocs.io/en/latest/guide/configuration.html
        sqs = boto3.client('sqs', region_name=region)
        # Poll for messages indefinitely.
        while True:
            response = sqs.receive_message(
                QueueUrl=queue_url,
                WaitTimeSeconds=settings.FXA_SQS_AWS_WAIT_TIME,
                MaxNumberOfMessages=10,
            )
            msgs = response.get('Messages', []) if response else []
            for message in msgs:
                try:
                    process_fxa_event(message.get('Body', ''))
                    # This intentionally deletes the event even if it was some
                    # unrecognized type.  No point leaving a backlog.
                    if 'ReceiptHandle' in message:
                        sqs.delete_message(
                            QueueUrl=queue_url, ReceiptHandle=message['ReceiptHandle']
                        )
                except Exception as exc:
                    log.exception('Error while processing message: %s' % exc)
    except Exception as exc:
        log.exception('Error while processing account events: %s' % exc)
        raise exc
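
The queue_url.split('.')[1] parse relies on the standard SQS URL layout; a one-line worked example (the account id and queue name are placeholders):

url = 'https://sqs.us-east-1.amazonaws.com/123456789012/account-events'
print(url.split('.')[1])  # -> 'us-east-1'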
Example #20
def _get_template(template, base_name, task_name, task_id, func_id, concurrent,
                  event_enabled, event_batch, runtime_memory, msg_max_retry):
    if template not in _cached_templates:
        data = pkgutil.get_data(__package__, template)
        _cached_templates[template] = Template(data.decode('utf-8'))
    stscli = boto3.client('sts')
    aws_account = stscli.get_caller_identity().get('Account')
    aws_region = boto3._get_default_session().region_name
    kwargs = {
        'aws_account': aws_account,
        'aws_region': aws_region,
        'base_name': base_name,
        'task_name': task_name,
        'task_id': "0" if task_id is None else task_id,
        'func_id': func_id,
        'concurrent': concurrent,
        'event_enabled': "true" if event_enabled else "false",
        'event_batch': event_batch,
        'runtime_memory': runtime_memory,
        'msg_max_retry': msg_max_retry,
    }
    data = _cached_templates[template].substitute(kwargs)
    return yaml.load(data)
Example #21
File: core.py  Project: ysm001/ebi
def main():
    """ Main function called from console_scripts
    """
    logger = logging.getLogger('ebi')
    logger.propagate = True
    logger.setLevel(logging.INFO)
    logger.addHandler(logging.StreamHandler())

    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    parser_bgdeploy = subparsers.add_parser('bgdeploy')
    parser_clonedeploy = subparsers.add_parser('clonedeploy')
    parser_create = subparsers.add_parser('create')
    parser_deploy = subparsers.add_parser('deploy')

    apply_args_bgdeploy(parser_bgdeploy)
    apply_args_clonedeploy(parser_clonedeploy)
    apply_args_create(parser_create)
    apply_args_deploy(parser_deploy)

    parsed = parser.parse_args()

    if not hasattr(parsed, 'func'):
        parser.print_help()
        return

    conf = {}
    if parsed.profile:
        conf['profile_name'] = parsed.profile
    if parsed.region:
        conf['region_name'] = parsed.region
    boto3.setup_default_session(**conf)
    session = boto3._get_default_session()
    ebaws.set_region(session._session.get_config_variable('region'))
    ebaws.set_profile(session.profile_name)
    parsed.func(parsed)
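
For context, setup_default_session replaces the module-level default that later boto3.client()/boto3.resource() calls reuse. A minimal sketch of that behaviour (profile and region values are placeholders):

import boto3

boto3.setup_default_session(profile_name='dev', region_name='ap-northeast-1')
session = boto3._get_default_session()
print(session.profile_name)                 # 'dev'
print(session.region_name)                  # 'ap-northeast-1'
print(boto3.client('s3').meta.region_name)  # built from the same default session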
Example #22
def process_sqs_queue(queue_url):
    log = getLogger('accounts.sqs')
    log.info('Processing account events from %s', queue_url)
    try:
        region = queue_url.split('.')[1]
        available_regions = (boto3._get_default_session()
                             .get_available_regions('sqs'))
        if region not in available_regions:
            log.error('SQS misconfigured, expected region, got %s from %s' % (
                region, queue_url))
        # Connect to the SQS queue.
        # Credentials are specified in EC2 as an IAM role on prod/stage/dev.
        # If you're testing locally see boto3 docs for how to specify:
        # http://boto3.readthedocs.io/en/latest/guide/configuration.html
        sqs = boto3.client('sqs', region_name=region)
        # Poll for messages indefinitely.
        while True:
            response = sqs.receive_message(
                QueueUrl=queue_url,
                WaitTimeSeconds=settings.FXA_SQS_AWS_WAIT_TIME,
                MaxNumberOfMessages=10)
            msgs = response.get('Messages', []) if response else []
            for message in msgs:
                try:
                    process_fxa_event(message.get('Body', ''))
                    # This intentionally deletes the event even if it was some
                    # unrecognized type.  No point leaving a backlog.
                    if 'ReceiptHandle' in message:
                        sqs.delete_message(
                            QueueUrl=queue_url,
                            ReceiptHandle=message['ReceiptHandle'])
                except Exception as exc:
                    log.exception('Error while processing message: %s' % exc)
    except Exception as exc:
        log.exception('Error while processing account events: %s' % exc)
        raise exc
Example #23
def describe():
    # client = boto3.client('ec2')
    client = hp.getBotoClient('ec2')
    #VPC
    vpcs = client.describe_vpcs()['Vpcs']
    vpcIDs = []
    vpcCIDRs = []
    vpcNames = []
    igwNames = []
    igwIDs = []
    regions = []
    #Region
    region_dict = {
        'us-east-1': 'N. Virginia',
        'us-east-2': 'Ohio',
        'us-west-1': 'N. California',
        'us-west-2': 'Oregon',
        'af-south-1': 'Cape Town',
        'ap-east-1': 'Hong Kong',
        'ap-south-1': 'Mumbai',
        'ap-northeast-2': 'Seoul',
        'ap-southeast-1': 'Singapore',
        'ap-southeast-2': 'Sydney',
        'ap-northeast-1': 'Tokyo',
        'ca-central-1': 'Central',
        'eu-central-1': 'Frankfurt',
        'eu-west-1': 'Ireland',
        'eu-west-2': 'London',
        'eu-south-1': 'Milan',
        'eu-west-3': 'Paris',
        'eu-north-1': 'Stockholm',
        'me-south-1': 'Bahrain',
        'sa-east-1': 'Sao Paulo'
    }
    region = boto3._get_default_session().region_name

    for vpc in vpcs:
        if vpc['IsDefault']:
            continue
        id = hp.getFromJSON(vpc, 'VpcId')
        cidr = hp.getFromJSON(vpc, 'CidrBlock')
        vpcIDs.append(id)
        vpcCIDRs.append(cidr)
        name = hp.findNameinTags(vpc)
        vpcNames.append(name)
        regions.append(region_dict[region])
        # IGW
        filters = [{'Name': "attachment.vpc-id", 'Values': [id]}]
        igws = client.describe_internet_gateways(
            Filters=filters)['InternetGateways']
        if igws:
            igw = igws[0]
            name = hp.findNameinTags(igw)
            igwNames.append(name)
            igwId = igw['InternetGatewayId']
            igwIDs.append(igwId)

    vpcdf = pd.DataFrame({
        "Region": regions,
        "VPC Name": vpcNames,
        "VPC ID": vpcIDs,
        "CIDR": vpcCIDRs
    })
    igwdf = pd.DataFrame({"IGW Name": igwNames, "IGW ID": igwIDs})

    #Subnet
    filters = [{'Name': "vpc-id", 'Values': vpcIDs}]
    subnetdf, vpcrtdf = describe_subnet(filters, client)

    #NAT
    filters = [{'Name': "vpc-id", 'Values': vpcIDs}]
    natdf = describe_nat(filters, client)

    #VPC Flow Log
    filters = [{'Name': "resource-id", 'Values': vpcIDs}]
    flowdf = describe_flow(filters, client)

    return vpcdf, subnetdf, vpcrtdf, igwdf, natdf, flowdf
Example #24
            if res != 0:
                raise RuntimeError(
                    "Something went wrong executing {0}. Got exit: {1}".format(
                        cmd, res))

        print("Starting elasticsearch configuration...")
        # create a temporary config file
        with open("templates/elasticsearch.yml.tmp", "w") as tmpfile:
            with open("templates/elasticsearch.yml", "r") as f:
                # copy over the template
                for line in f:
                    tmpfile.write(line)

                # add cloud credentials
                # hack: boto3 doesn't yet offer a way to access the stored configuration values
                S = boto3._get_default_session()
                profile = S._session.full_config['profiles']['default']

                # add profile information to elasticsearch config to enable cloud discovery
                tmpfile.write("cloud.aws.access_key: {0}\n".format(
                    profile['aws_access_key_id']))
                tmpfile.write("cloud.aws.secret_key: {0}\n".format(
                    profile['aws_secret_access_key']))
                tmpfile.write("cloud.aws.region: {0}\n".format(
                    profile['region']))
                tmpfile.write("discovery.type: ec2\n")
                tmpfile.write("discovery.ec2.groups: {0}\n".format(
                    get_tag('elasticsearch-security-group')))
                #tmpfile.write("discovery.ec2.host_type: public_ip\n")
                tmpfile.write("cluster.name: {0}\n".format(
                    get_tag('elasticsearch-cluster')))
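
The full_config lookup above is flagged as a hack; frozen credentials and the region can also be read through public session accessors, which additionally covers credentials that do not come from the profile file. A hedged alternative sketch:

import boto3

session = boto3.Session()
frozen = session.get_credentials().get_frozen_credentials()
print(frozen.access_key, frozen.secret_key, session.region_name)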
Example #25
 def init_app(self, app: Flask, assume_user_role=True):
     self.base_session = boto3._get_default_session()
     if assume_user_role:
         app.before_request(self.assume_role)
Example #26
 def _run_(self, conf):
     print "Using key pair to set up default session: " + conf.test_user_key_pair.id + ":" + conf.test_user_key_pair.secret
     boto3.setup_default_session(aws_access_key_id=conf.test_user_key_pair.id,
                                 aws_secret_access_key=conf.test_user_key_pair.secret,
                                 region_name=conf.aws_test_region)
     print "Default session set up to: " + str(boto3._get_default_session())
Example #27
    args = parser0.parse_args()

    logfile = open('%s.log' % args.name, 'w')

    if args.command == 'deploy':
        print '%s: Starting deployment of %s' % (timestamp(), args.name)
        repo = clone_workers_repo(logfile)
        # create stream and RDS database
        stream = create_stream(args.name)
        db = create_database(args.name, args.password, dbclass=args.dbclass)
        os.environ['DATABASE_URL'] = db['URL']
        migrate_database(repo, logfile)

        # set up environment variables
        session = boto3._get_default_session()._session
        env = [
            'DEPLOY_NAME=%s' % args.name,
            'KINESIS_STREAM=%s' % args.name,
            'DATABASE_URL=%s' % db['URL'],
            'AWS_REGION=%s' % session.get_config_variable('region'),
            'AWS_ACCESS_KEY_ID=%s' % session.get_credentials().access_key,
            'AWS_SECRET_ACCESS_KEY=%s' % session.get_credentials().secret_key,
        ]
        # create environment variable file
        with open('%s.env' % args.name, 'w') as f:
            for e in env:
                f.write(e + '\n')
        add_env(args.name, repo, logfile)
        # create lambda function
        zfile = '%s/%s.zip' % (repo, repo)
        func = create_function(args.name, zfile, lsize=int(args.lsize), timeout=int(args.ltimeout))
Example #28
 def sessionDefault(self):
     self._session = boto3._get_default_session()
     return self._session
Example #29
def test_patch_botocore_credentials(make_test_session):
    """Test to the default boto3 session credentials get patched correctly."""
    session = boto3._get_default_session()
    localstack = make_test_session()

    credentials = session.get_credentials()
    initial_access_key = credentials.access_key if credentials else None
    initial_secret_key = credentials.secret_key if credentials else None
    initial_token = credentials.token if credentials else None
    initial_method = credentials.method if credentials else None

    assert initial_access_key != constants.DEFAULT_AWS_ACCESS_KEY_ID
    assert initial_secret_key != constants.DEFAULT_AWS_SECRET_ACCESS_KEY
    assert initial_token != constants.DEFAULT_AWS_SESSION_TOKEN
    assert initial_method != "localstack-default"

    with localstack:
        # should prefer access credentials from environment variables.
        with mock.patch.dict(
            os.environ,
            AWS_ACCESS_KEY_ID=str(mock.sentinel.AWS_ACCESS_KEY_ID),
            AWS_SECRET_ACCESS_KEY=str(mock.sentinel.AWS_SECRET_ACCESS_KEY),
            AWS_SESSION_TOKEN=str(mock.sentinel.AWS_SESSION_TOKEN),
        ):
            with localstack.botocore.patch_botocore():
                credentials = session.get_credentials()
                assert credentials is not None
                assert credentials.access_key == str(mock.sentinel.AWS_ACCESS_KEY_ID)
                assert credentials.secret_key == str(
                    mock.sentinel.AWS_SECRET_ACCESS_KEY
                )
                assert credentials.token == str(mock.sentinel.AWS_SESSION_TOKEN)
                assert credentials.method == "env"

        # check credentials get unpatched correctly
        credentials = session.get_credentials()
        assert (credentials.access_key if credentials else None) == initial_access_key
        assert (credentials.secret_key if credentials else None) == initial_secret_key
        assert (credentials.token if credentials else None) == initial_token
        assert (credentials.method if credentials else None) == initial_method

        # should fallback to default credentials if none in the environment
        with mock.patch.dict(
            os.environ,
            AWS_ACCESS_KEY_ID="",
            AWS_SECRET_ACCESS_KEY="",
            AWS_SESSION_TOKEN="",
        ):
            os.environ.pop("AWS_ACCESS_KEY_ID", None)
            os.environ.pop("AWS_SECRET_ACCESS_KEY", None)
            os.environ.pop("AWS_SESSION_TOKEN", None)
            with localstack.botocore.patch_botocore():
                credentials = session.get_credentials()
                assert credentials is not None
                assert credentials.access_key == constants.DEFAULT_AWS_ACCESS_KEY_ID
                assert credentials.secret_key == constants.DEFAULT_AWS_SECRET_ACCESS_KEY
                assert credentials.token == constants.DEFAULT_AWS_SESSION_TOKEN
                assert credentials.method == "localstack-default"

        # check credentials get unpatched correctly
        credentials = session.get_credentials()
        assert (credentials.access_key if credentials else None) == initial_access_key
        assert (credentials.secret_key if credentials else None) == initial_secret_key
        assert (credentials.token if credentials else None) == initial_token
        assert (credentials.method if credentials else None) == initial_method
Example #30
#!/usr/bin/env python3

import boto3
import itertools
import collections
import csv
import sys
import datetime
import json
import re
from dateutil.tz import tzutc

DIR_INSTANCE_METADATA = 'out/instance-metadata'

REGION = boto3._get_default_session().region_name
ACCOUNT = boto3.client('sts').get_caller_identity()['Account']


def safe_list_get(l, idx, default):
    try:
        return l[idx]
    except IndexError:
        return default


def get_ec2_metadata(ec2, region):
    print("[{} - {}] Getting instances metadata...".format(ACCOUNT, REGION))
    instances_pag = ec2.get_paginator('describe_instances')
    metadata = [{
        'instance_id':
Example #31
File: aws.py  Project: brennerm/aws-top
def get_region():
    return boto3._get_default_session().region_name