def get_sts_credentials(self, role_session_name, role_arn):
     """
     We persist the STS credentials so that we don't create a new session with each request
     """
     if not self.sts_credentials:
         sts_connection = sts.STSConnection()
         assumed_role = sts_connection.assume_role(
             role_arn=role_arn, role_session_name=role_session_name)
         self.sts_credentials = assumed_role.credentials
     return self.sts_credentials
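A minimal usage sketch (the instance name and ARN below are illustrative assumptions; the method is presumed to live on a class whose `sts_credentials` attribute starts as None, with `from boto import sts` at module level):

# Hypothetical usage: `client` is an instance of the class defining the method above
creds = client.get_sts_credentials('example-session',
                                   'arn:aws:iam::123456789012:role/example')
# A second call returns the cached credentials instead of assuming the role again
assert client.get_sts_credentials('example-session',
                                  'arn:aws:iam::123456789012:role/example') is creds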
Example #2
def update_kinesis(project_model, message_type):
    """
    notifies media atom of a project update or create by pushing a message onto its kinesis stream.
    the kinesis stream is indicated in settings.
    :param project_model: ProjectModel instance that has been created/updated
    :param message_type: either `media_atom.MSG_PROJECT_CREATED` or `media_atom.MSG_PROJECT_UPDATED`
    :return:
    """
    from portal.plugins.gnm_vidispine_utils.vs_helpers import site_id
    from boto import sts, kinesis
    from django.conf import settings
    import json, logging

    SESSION_NAME = 'pluto-media-atom-integration'

    project_id = site_id + "-" + str(project_model.collection_id)
    logger.info("{0}: Project updated, notifying {1} via role {2}".format(
        project_id, settings.MEDIA_ATOM_STREAM_NAME,
        settings.MEDIA_ATOM_ROLE_ARN))

    sts_connection = sts.STSConnection(
        aws_access_key_id=settings.MEDIA_ATOM_AWS_ACCESS_KEY_ID,
        aws_secret_access_key=settings.MEDIA_ATOM_AWS_SECRET_ACCESS_KEY)

    assume_role_result = sts_connection.assume_role(
        role_arn=settings.MEDIA_ATOM_ROLE_ARN, role_session_name=SESSION_NAME)

    credentials = assume_role_result.credentials

    logger.debug("{0}: Got kinesis credentials".format(project_id))
    kinesis_connection = kinesis.connect_to_region(
        region_name='eu-west-1',
        aws_access_key_id=credentials.access_key,
        aws_secret_access_key=credentials.secret_key,
        security_token=credentials.session_token)

    message_content = {
        'type': message_type,
        'id': project_id,
        'title': project_model.gnm_project_headline,
        'status': project_model.gnm_project_status,
        'commissionId': site_id + "-" + str(project_model.commission.collection_id),
        'commissionTitle': project_model.commission.gnm_commission_title,
        'productionOffice': project_model.gnm_project_production_office,
        'created': project_model.created.isoformat()
    }
    logger.debug("{0}: Message is {1}".format(project_id, message_content))

    kinesis_connection.put_record(stream_name=settings.MEDIA_ATOM_STREAM_NAME,
                                  data=json.dumps(message_content),
                                  partition_key=project_id)
    logger.info("{0}: Project update sent".format(project_id))
# Module-level imports assumed by copytoredshift, copyinitialdata and
# copyseconddata below
import os
import subprocess
import time
import boto
import boto.dynamodb
import psycopg2
from boto import sts


def copytoredshift(record):
    dynamo_table_name = os.environ["DYN_TABLENAME"]
    redshift_username = os.environ["RSDB_USERNAME"]
    redshift_password = os.environ["RSDB_PASSWORD"]
    redshift_database = os.environ["RSDB_DATABASE"]
    redshift_port = os.environ["RSDB_PORT"]
    customer = os.environ["CUSTOMER"]
    cage = os.environ["CAGE"]

    role_name = "NucleatorBucketandqDistributorServiceRunner"

    iam_conn = boto.connect_iam()
    role = iam_conn.get_role(role_name)
    role_arn = role["get_role_response"]["get_role_result"]["role"]["arn"]

    stsconn = sts.STSConnection()
    response = stsconn.assume_role(role_arn, "redshift_copy_session")
    access_key = response.credentials.access_key
    secret_key = response.credentials.secret_key
    session_token = response.credentials.session_token

    if customer == "47Lining":
        endpoint = "redshift.%s.%s.com" % (cage, customer)
    else:
        endpoint = "redshift.%s.%s.47lining.com" % (cage, customer)

    print "Connecting to redshift cluster: %s" % endpoint
    conn = psycopg2.connect(dbname=redshift_database,
                            host=endpoint,
                            port=redshift_port,
                            user=redshift_username,
                            password=redshift_password)
    cur = conn.cursor()

    print "Connected. Creating table"
    cur.execute(
        "CREATE TABLE IF NOT EXISTS imageproccessingtable("
        "key varchar(50) NOT NULL, "
        "url varchar(200) NOT NULL, "
        "dateoriginal timestamp NOT NULL, "
        "gpslatitude float8 NOT NULL, "
        "gpslongitude float8 NOT NULL, "
        "image varchar(100));")
    conn.commit()

    print "Table recreated. Running copy command..."
    cur.execute(
        "copy imageproccessingtable from 'dynamodb://%s' credentials 'aws_access_key_id=%s;aws_secret_access_key=%s;token=%s' readratio 100;"
        % (dynamo_table_name, access_key, secret_key, session_token))
    conn.commit()

    print "Copy command completed"
    def __init__(self,
                 aws_access_key_id=None,
                 aws_secret_access_key=None,
                 **kwargs):
        # only import boto when needed to allow top-level s3 module import
        import boto
        import boto.s3.connection
        from boto.s3.key import Key

        options = self._get_s3_config()
        options.update(kwargs)
        # Removing key args would break backwards compatibility
        role_arn = options.get('aws_role_arn')
        role_session_name = options.get('aws_role_session_name')

        aws_session_token = None

        if role_arn and role_session_name:
            from boto import sts

            sts_client = sts.STSConnection()
            assumed_role = sts_client.assume_role(role_arn, role_session_name)
            aws_secret_access_key = assumed_role.credentials.secret_key
            aws_access_key_id = assumed_role.credentials.access_key
            aws_session_token = assumed_role.credentials.session_token

        else:
            if not aws_access_key_id:
                aws_access_key_id = options.get('aws_access_key_id')

            if not aws_secret_access_key:
                aws_secret_access_key = options.get('aws_secret_access_key')

        for key in [
                'aws_access_key_id', 'aws_secret_access_key',
                'aws_role_session_name', 'aws_role_arn'
        ]:
            if key in options:
                options.pop(key)

        self.s3 = boto.s3.connection.S3Connection(
            aws_access_key_id,
            aws_secret_access_key,
            security_token=aws_session_token,
            **options)
        self.Key = Key
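A hypothetical construction call, assuming this `__init__` belongs to luigi's S3Client; the ARN and session name are placeholders:

# Illustrative only: the role options trigger the assume_role branch above
client = S3Client(aws_role_arn='arn:aws:iam::123456789012:role/example-role',
                  aws_role_session_name='luigi-session')
# client.s3 is now an S3Connection backed by the temporary credentials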
Example #5
File: s3.py  Project: zhengge2017/luigi
    @property
    def s3(self):
        # only import boto when needed to allow top-level s3 module import
        import boto
        import boto.s3.connection

        if self._s3:
            return self._s3

        options = dict(self._options)

        aws_access_key_id = options.get('aws_access_key_id')
        aws_secret_access_key = options.get('aws_secret_access_key')

        # Removing key args would break backwards compatibility
        role_arn = options.get('aws_role_arn')
        role_session_name = options.get('aws_role_session_name')

        aws_session_token = None

        if role_arn and role_session_name:
            from boto import sts

            sts_client = sts.STSConnection()
            assumed_role = sts_client.assume_role(role_arn, role_session_name)
            aws_secret_access_key = assumed_role.credentials.secret_key
            aws_access_key_id = assumed_role.credentials.access_key
            aws_session_token = assumed_role.credentials.session_token

        for key in [
                'aws_access_key_id', 'aws_secret_access_key',
                'aws_role_session_name', 'aws_role_arn'
        ]:
            if key in options:
                options.pop(key)
        self._s3 = boto.s3.connection.S3Connection(
            aws_access_key_id,
            aws_secret_access_key,
            security_token=aws_session_token,
            **options)
        return self._s3
def copyinitialdata(record):
    region = os.environ["REGION"]
    dest_bucket_name = os.environ["S3_BUCKET_NAME"]
    source_bucket_name = os.environ["S3_SOURCE_FIRST_BUCKET_NAME"]
    dynamo_table_name = os.environ["DYN_TABLENAME"]

    role_name = "NucleatorBucketandqDistributorServiceRunner"
    iam_conn = boto.connect_iam()
    role = iam_conn.get_role(role_name)
    role_arn = role["get_role_response"]["get_role_result"]["role"]["arn"]
    stsconn = sts.STSConnection()
    response = stsconn.assume_role(role_arn, "redshift_copy_session")
    access_key = response.credentials.access_key
    secret_key = response.credentials.secret_key
    session_token = response.credentials.session_token

    print "Running S3 Copy Command"
    command = "export AWS_ACCESS_KEY_ID=%s; export AWS_SECRET_ACCESS_KEY=%s; export AWS_SESSION_TOKEN=%s; aws s3 cp s3://%s/ s3://%s/ --recursive --include '*' > /dev/null" % (
        access_key, secret_key, session_token, source_bucket_name,
        dest_bucket_name)
    subprocess.call(command, shell=True)

    copytoredshift(record)
def copyseconddata(record):
    region = os.environ["REGION"]
    dest_bucket_name = os.environ["S3_BUCKET_NAME"]
    source_bucket_name = os.environ["S3_SOURCE_SECOND_BUCKET_NAME"]
    dynamo_table_name = os.environ["DYN_TABLENAME"]

    print "Deleting and recreating dynamo table so only new records are inserted into redshift"
    dynamo_conn = boto.dynamodb.connect_to_region(region)
    table = dynamo_conn.get_table(dynamo_table_name)
    dynamo_conn.delete_table(table)
    dynamo_schema = dynamo_conn.create_schema(hash_key_name='key',
                                              hash_key_proto_value=str)
    print "Sleeping for 5 seconds to let table delete"
    time.sleep(5)
    table = dynamo_conn.create_table(name=dynamo_table_name,
                                     schema=dynamo_schema,
                                     read_units=500,
                                     write_units=150)

    role_name = "NucleatorBucketandqDistributorServiceRunner"
    iam_conn = boto.connect_iam()
    role = iam_conn.get_role(role_name)
    role_arn = role["get_role_response"]["get_role_result"]["role"]["arn"]
    stsconn = sts.STSConnection()
    response = stsconn.assume_role(role_arn, "redshift_copy_session")
    access_key = response.credentials.access_key
    secret_key = response.credentials.secret_key
    session_token = response.credentials.session_token

    print "Running S3 Copy Command"
    command = "export AWS_ACCESS_KEY_ID=%s; export AWS_SECRET_ACCESS_KEY=%s; export AWS_SESSION_TOKEN=%s; aws s3 cp s3://%s/ s3://%s/ --recursive --include '*' > /dev/null" % (
        access_key, secret_key, session_token, source_bucket_name,
        dest_bucket_name)
    subprocess.call(command, shell=True)

    copytoredshift(record)
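Exporting the credentials inside a shell string works, but passing them through the child process environment avoids quoting problems and keeps the secrets off the `sh -c` command line. A sketch of that alternative, using the same variables as above:

# Alternative sketch: hand the temporary credentials to the AWS CLI via env
env = dict(os.environ,
           AWS_ACCESS_KEY_ID=access_key,
           AWS_SECRET_ACCESS_KEY=secret_key,
           AWS_SESSION_TOKEN=session_token)
subprocess.call(["aws", "s3", "cp",
                 "s3://%s/" % source_bucket_name,
                 "s3://%s/" % dest_bucket_name,
                 "--recursive"], env=env)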
Example #8
def assume_identity(config, profile):
    # if AWS_PROFILE was the option last used, and it didn't require assuming a role
    if config.get('AWS_PROFILE_REFRESH_NOT_NEEDED'):
        return None

    _config_lock = config.get('CONFIG_LOCK') or config_lock
    _config_lock.acquire()
    if 'assumed_roles' not in config:
        config['assumed_roles'] = {}
    if 'role_last_updated' not in config:
        config['role_last_updated'] = {}

    try:
        assumed_roles = config.get('assumed_roles', {})
        assumed_role = assumed_roles.get(profile)
        if assumed_role and not assumed_role.credentials.is_expired(
                time_offset_seconds=900):
            return False

        # fetch the credentials from the aws configs
        shared_credentials = config.get('AWS_SHARED_CREDENTIALS')

        if not shared_credentials:
            config_path = config.get('AWS_CONFIG_FILE') or os.environ.get(
                'AWS_CONFIG_FILE') or os.path.join(expanduser('~'), '.aws',
                                                   'config')
            credentials_path = (config.get('AWS_CONFIG_FILE')
                                or os.environ.get('AWS_CONFIG_FILE')
                                or os.path.join(expanduser('~'), '.aws',
                                                'credentials')).replace(
                                                    '/config', '/credentials')

            shared_credentials = Config(do_load=False)
            if os.path.isfile(credentials_path):
                shared_credentials.load_from_path(credentials_path)
            if os.path.isfile(config_path):
                shared_credentials.load_from_path(config_path)
            config['AWS_SHARED_CREDENTIALS'] = shared_credentials

        profile_key = profile
        if not shared_credentials.has_section(profile_key):
            profile_key = 'profile {}'.format(profile_key)
        if not shared_credentials.has_section(profile_key):
            raise ProfileNotFoundError('Profile {} not found'.format(profile))

        # no matter what, get the access and secret key pair
        if all([
                shared_credentials.has_option(profile_key, x)
                for x in ('aws_access_key_id', 'aws_secret_access_key')
        ]):
            aws_access_key_id = shared_credentials.get(profile_key,
                                                       'aws_access_key_id')
            aws_secret_access_key = shared_credentials.get(
                profile_key, 'aws_secret_access_key')
        elif shared_credentials.has_option(profile_key, 'source_profile'):
            source_profile_key = shared_credentials.get(
                profile_key, 'source_profile')
            if not shared_credentials.has_section(source_profile_key):
                source_profile_key = 'profile {}'.format(source_profile_key)
            if not shared_credentials.has_section(source_profile_key):
                raise ProfileNotFoundError(
                    'Source profile {} for profile {} not found'.format(
                        shared_credentials.get(profile_key, 'source_profile'),
                        profile))

            # source_section = shared_credentials['_sections'][source_profile_key]
            if all([
                    shared_credentials.has_option(source_profile_key, x)
                    for x in ('aws_access_key_id', 'aws_secret_access_key')
            ]):
                aws_access_key_id = shared_credentials.get(
                    source_profile_key, 'aws_access_key_id')
                aws_secret_access_key = shared_credentials.get(
                    source_profile_key, 'aws_secret_access_key')
            else:
                raise ProfileNotFoundError(
                    'Source profile {} for profile {} has no access or secret key'
                    .format(
                        shared_credentials.get(profile_key, 'source_profile'),
                        profile))

        # if there's a role_arn, use it to assume a role
        if shared_credentials.has_option(profile_key, 'role_arn'):
            role_arn = shared_credentials.get(profile_key, 'role_arn')
            sts_connection = sts.STSConnection(
                aws_access_key_id=aws_access_key_id,
                aws_secret_access_key=aws_secret_access_key)
            config['assumed_roles'][profile] = sts_connection.assume_role(
                role_arn, ROLE_SESSION_NAME, policy=None, duration_seconds=960)
            config['role_last_updated'][profile] = (
                datetime.datetime.utcnow().isoformat()[:19] + 'Z')

        return True

    finally:
        _config_lock.release()
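A minimal usage sketch; the profile name is a placeholder and the config shape follows the function above:

# Hypothetical caller: refresh the role for a profile, then read credentials
config = {}
assume_identity(config, 'example-profile')
role = config.get('assumed_roles', {}).get('example-profile')
if role:
    creds = role.credentials
    # creds.access_key, creds.secret_key and creds.session_token are usable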