Example #1
def create_bucket(region, session, bucket_prefix):
    bucket = create_bucket_name(bucket_prefix)

    if region != 'us-east-1':
        session.resource('s3').create_bucket(
            Bucket=bucket,
            CreateBucketConfiguration={'LocationConstraint': region})
    else:
        session.resource('s3').create_bucket(Bucket=bucket)
    return bucket
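A minimal usage sketch for the function above (assuming create_bucket_name derives a unique name from the prefix, e.g. by appending a UUID, and that default AWS credentials are configured; the region and prefix below are illustrative):

import boto3

session = boto3.Session()
bucket = create_bucket(region='eu-west-1', session=session,
                       bucket_prefix='my-app-data')
print('Created bucket:', bucket)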
Example #2
 def getResource(self, resourceType):
     session = self.getSession()
     resource = session.resource(resourceType)
     if hasattr(session, "aws_session_token"):
         return session.resource(
             resourceType,
             aws_access_key_id=session.aws_access_key,
             aws_secret_access_key=session.aws_secret_key,
             aws_session_token=session.aws_session_token)
     else:
         return session.resource(
             resourceType,
             aws_access_key_id=session.aws_access_key,
             aws_secret_access_key=session.aws_secret_key)
Example #3
    def run(self):
        session_config = Config(
            connect_timeout=self.config.C_TIME,
            read_timeout=self.config.R_TIME,
            # MAX_RETRIES is currently set to 0, so failed requests are not retried
            retries={'max_attempts': self.config.MAX_RETRIES}
        )
        session = boto3.session.Session()
        #boto3.set_stream_logger('')
        self.s3 = session.resource(
            's3',
            aws_access_key_id=self.config.key_id,
            aws_secret_access_key=self.config.access_key,
            region_name=self.device.region,
            config=session_config)

        #if isConnected():
        try:
            device_folder = self.device.location + '-' + self.device.name
            file_timestamp = self.file_path.split('/')[-1]
            key = os.path.join(device_folder, self.config.DATA_FOLDER,
                               file_timestamp)
            self.s3.Bucket(self.config.BUCKET_NAME).upload_file(
                self.file_path, key)
            #print("[LOGS] Uploaded image {0} to {1}".format(self.file_path, key))
            return 200

        except ClientError as e:
            print('[ALERT] Boto upload FAILED: ', e)
            return 400
Example #4
def test_can_create_all_resources(resource_args):
    """Verify we can create all existing resources."""
    session, service_name = resource_args
    resource = session.resource(service_name)
    # Verifying we have a "meta" attr is just an arbitrary
    # sanity check.
    assert hasattr(resource, 'meta')
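One way the resource_args fixture might be built, assuming pytest and a plain boto3 session; the fixture shown here is an illustrative sketch, not the project's actual conftest:

import boto3
import pytest

_session = boto3.Session(region_name='us-east-1')

@pytest.fixture(params=_session.get_available_resources())
def resource_args(request):
    # One (session, service_name) pair per service that exposes a resource interface.
    return _session, request.param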
Example #5
 def test_can_inject_method_onto_resource(self):
     session = boto3.Session(botocore_session=self.botocore_session)
     self.botocore_session.register('creating-resource-class.s3',
                                    self.add_new_method(name='my_method'))
     resource = session.resource('s3')
     self.assertTrue(hasattr(resource, 'my_method'))
     self.assertEqual(resource.my_method('anything'), 'anything')
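add_new_method is defined elsewhere in the test class; a minimal sketch of the pattern it presumably uses, based on boto3's documented creating-resource-class event, could look like this (names are illustrative):

def add_new_method(name):
    def handler(class_attributes, **kwargs):
        # Inject an extra method onto the resource class while it is being built.
        def method(self, value):
            return value
        class_attributes[name] = method
    return handler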
Example #6
def run_annotation(job_id, user, filename, bucket_name, s3key):

    # Get the s3 bucket (session, ann_table and response are assumed to be module-level objects)
    s3 = session.resource('s3')
    bucket = s3.Bucket(bucket_name)

    # Create the user's directory if it does not exist yet
    d = 'AnnotationResult/user/' + user + '/'
    if not os.path.exists(d):
        os.mkdir(d)

    # Create a directory for each submitted job_id request
    directory = 'AnnotationResult/user/' + user + '/' + job_id + '/'
    os.mkdir(directory)

    # Download the input file from the S3 input bucket
    bucket.download_file(s3key, 'AnnotationResult/user/' + user + '/' + job_id + '/' + filename)

    # Update the job status only if the current status is pending
    DBresponse = ann_table.get_item(Key={'job_id': job_id})
    item = DBresponse['Item']
    if item['job_status'] == 'PENDING':
        ann_table.update_item(Key={'job_id': job_id},
                              UpdateExpression='SET job_status = :v',
                              ExpressionAttributeValues={':v': 'RUNNING'})

    # Launch annotation job as a background process
    subprocess.Popen(["python", "run.py", 'AnnotationResult/user/' + user + '/' + job_id + '/' + filename])
    response.status = 200
    f = open('AnnotationResult/user/' + user + '/' + job_id + '/status.txt', 'w+')
    f.write("code:\t" + str(response.status) + "\tdata:\t" + str(job_id) + "\tfilename:\t" + filename + '\n')
    f.close()
Example #7
    def __init__(self,
                 keyname=None,
                 use_akms=None,
                 host=None,
                 region='us-east-1',
                 *args,
                 **kwargs):

        import botocore.session

        # DEPRECATED - `host`
        if host:
            logger.warning('the host param is deprecated')

        use_akms = (use_akms
                    if use_akms is not None else 'USE_AKMS' in os.environ)

        args = (keyname, use_akms)
        with self.lock:
            if args not in self.sessions:
                session = Session(botocore_session=botocore.session.Session(
                    session_vars={
                        'akms_keyname': keyname,
                        'use_akms': use_akms,
                    }))
                client = session.client('s3')
                s3 = session.resource('s3')
                self.sessions[args] = (session, client, s3)

        (self.session, self.client, self.s3) = self.sessions[args]
Example #8
 def save(self, commit=True):
     short_url_data = self.data.get('short_url')
     url = expandedurl.objects.create()
     url.short_url = short_url_data
     site = requests.get(short_url_data)
     url.http_status_code = site.status_code
     url.destination_url = site.url
     session = Session(aws_access_key_id=AWS_ACCESS_KEY_ID,
                       aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
                       region_name='us-west-2')
     client = session.resource('s3')
     driver = webdriver.PhantomJS(
         service_log_path=os.path.devnull)  # or add to your PATH
     driver.set_window_size(1024, 768)  # optional
     driver.get(str(url.destination_url))
     url.page_title = driver.title
     filepath = '/tmp/' + str(url.id) + '.png'
     s3path = 'captures/' + str(url.id) + '.png'
     driver.save_screenshot(filepath)
     data = open(filepath, 'rb')  # read the screenshot back from disk
     client.Object(AWS_STORAGE_BUCKET_NAME, s3path).put(
         Body=data
     )  #upload_file(filepath, AWS_STORAGE_BUCKET_NAME, AWS_ACCESS_KEY_ID)
     url.screen_capture = 'https://s3.amazonaws.com/%s' % AWS_STORAGE_BUCKET_NAME + '/' + s3path
     driver.service.process.kill()
     os.remove(filepath)
     return url
Example #9
    def get_resource_type(
        self,
        resource_type: Optional[str] = None,
        region_name: Optional[str] = None,
        config: Optional[Config] = None,
    ) -> boto3.resource:
        """Get the underlying boto3 resource using boto3 session"""
        session, endpoint_url = self._get_credentials(region_name=region_name)

        if resource_type:
            warnings.warn(
                "resource_type is deprecated. Set resource_type from class attribute.",
                DeprecationWarning,
                stacklevel=2,
            )
        else:
            resource_type = self.resource_type

        # No AWS Operators use the config argument to this method.
        # Keep backward compatibility with other users who might use it
        if config is None:
            config = self.config

        return session.resource(resource_type,
                                endpoint_url=endpoint_url,
                                config=config,
                                verify=self.verify)
Example #10
 def test_can_inject_method_onto_resource(self):
     session = boto3.Session(botocore_session=self.botocore_session)
     self.botocore_session.register('creating-resource-class.s3',
                                    self.add_new_method(name='my_method'))
     resource = session.resource('s3')
     assert hasattr(resource, 'my_method')
     assert resource.my_method('anything') == 'anything'
Example #11
 def resource(self, *args, **kwargs):
     """
     Outside of a context established by `.assumed_role_credentials()` or
     `.direct_access_credentials()` this is the same as boto3.resource.
     Within such a context, it returns boto3 clients that use different,
     temporary credentials.
     """
     session = self._per_thread.session or boto3
     return session.resource(*args, **kwargs)
Example #12
def suspend_bucket_versioning(session, bucket_name):
    """
    Suspending the bucket versioning
    :param session:
    :param bucket_name:
    :return:
    """
    s3 = session.resource('s3')
    bucket_versioning = s3.BucketVersioning(bucket_name)
    response = bucket_versioning.suspend()
    logger.info(response)
    return
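A brief usage sketch (bucket name is illustrative); the versioning status can be read back through the same resource to confirm the change:

import boto3

session = boto3.Session()
suspend_bucket_versioning(session, 'my-example-bucket')
status = session.resource('s3').BucketVersioning('my-example-bucket').status
print(status)  # expected to report 'Suspended'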
Example #13
    def get_resource_type(
        self,
        resource_type: str,
        region_name: Optional[str] = None,
        config: Optional[Config] = None,
    ) -> boto3.resource:
        """Get the underlying boto3 resource using boto3 session"""
        session, endpoint_url = self._get_credentials(region_name)

        # No AWS Operators use the config argument to this method.
        # Keep backward compatibility with other users who might use it
        if config is None:
            config = self.config

        return session.resource(resource_type, endpoint_url=endpoint_url, config=config, verify=self.verify)
Example #14
    def resource(session=None,
                 region_name=None,
                 api_version=None,
                 use_ssl=False,
                 verify=None,
                 endpoint_url=None,
                 aws_access_key_id=None,
                 aws_secret_access_key=None,
                 aws_session_token=None,
                 config=None,
                 endpoints=None):
        ''' Create a high-level Boto Resource matching DynamoDB. 
        
        Instead of
            ddb = boto3.resource('dynamodb')

        use
            dax = AmazonDaxClient.resource()
        '''
        # Local import so it doesn't fail if boto3 is not available
        from .Resource import is_boto3_session, DaxSession

        if session is None or isinstance(session, botocore.session.Session):
            session = DaxSession(aws_access_key_id=aws_access_key_id,
                                 aws_secret_access_key=aws_secret_access_key,
                                 aws_session_token=aws_session_token,
                                 region_name=region_name,
                                 botocore_session=session,
                                 endpoints=endpoints)
        else:
            if is_boto3_session(session):
                session = DaxSession.from_boto3(session, endpoints)
            else:
                raise ValueError('session must be a botocore or boto3 session')

        # Create the resource
        res = session.resource('dynamodb',
                               region_name=region_name,
                               api_version=api_version,
                               use_ssl=use_ssl,
                               verify=verify,
                               endpoint_url=endpoint_url,
                               aws_access_key_id=aws_access_key_id,
                               aws_secret_access_key=aws_secret_access_key,
                               aws_session_token=aws_session_token,
                               config=config)

        return res
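Following the docstring, a usage sketch of the DAX-backed resource (the endpoint and table name are placeholders; the returned object is used like a normal DynamoDB resource):

dax = AmazonDaxClient.resource(
    endpoints=['my-dax-cluster.example.com:8111'],
    region_name='us-east-1')
table = dax.Table('my-table')
item = table.get_item(Key={'id': '123'}).get('Item')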
Example #15
def main(aws_profile, output_file):
    """
    This script audits invalid records in the signature table and generates a report
    """
    start = datetime.now()
    try:
        if os.environ.get("STAGE") is None:
            logging.warning(
                "Please set the 'STAGE' environment variable - typically one of: {dev, staging, prod}"
            )
            return
        stage = os.environ.get("STAGE", "dev")
        signatures_table_name = "cla-{}-signatures".format(stage)
        companies_table_name = "cla-{}-companies".format(stage)
        users_table_name = "cla-{}-users".format(stage)
        session = boto3.Session(profile_name=aws_profile)
        dynamodb = session.resource("dynamodb")
        signatures_table = dynamodb.Table(signatures_table_name)
        companies_table = dynamodb.Table(companies_table_name)
        users_table = dynamodb.Table(users_table_name)
        signatures = signatures_table.scan()["Items"]
        logging.info("{} number of signatures".format(len(signatures)))

        # set tables used in the audit process
        audit_signature = AuditSignature(dynamodb, batch=signatures)
        audit_signature.set_signatures_table(signatures_table)
        audit_signature.set_companies_table(companies_table)
        audit_signature.set_users_table(users_table)
        invalid_fields = audit_signature.process_batch()
        columns = ["signature_id", "error_type", "column", "data"]
        with open(output_file, "w", newline="") as csv_file:
            writer = csv.DictWriter(csv_file,
                                    fieldnames=columns,
                                    delimiter=" ")
            writer.writeheader()
            writer.writerows({
                "signature_id": audit["signature_id"],
                "error_type": audit["error_type"],
                "column": audit["column"],
                "data": audit["data"],
            } for audit in invalid_fields)

        logging.info("Auditing Duration : {}".format(datetime.now() - start))
    except (Exception, ClientError) as err:
        logging.error(err, exc_info=True)
Example #16
def run_annotation():
    # Extract job parameters from the request body (NOT the URL query string!)
    body = request.body.read()
    res = json.loads(body)

    bucket_name = res['s3_inputs_bucket']
    job_id = res['job_id']
    filename = res['input_file_name']
    s3key = res['s3_key_input_file']
    user = res['username']

    # Get the input file S3 object and copy it to a local file
    session = Session(aws_access_key_id=ACCESS_KEY,
                      aws_secret_access_key=SECRET_KEY)
    s3 = session.resource('s3')
    bucket = s3.Bucket(bucket_name)

    # Check if the user exists
    d = 'AnnotationResult/user/' + user + '/'
    if not os.path.exists(d):
        os.mkdir(d)

    directory = 'AnnotationResult/user/' + user + '/' + job_id + '/'
    os.mkdir(directory)

    # Download the input file from s3 input bucket
    bucket.download_file(
        s3key, 'AnnotationResult/user/' + user + '/' + job_id + '/' + filename)
    # Launch annotation job as a background process
    subprocess.Popen([
        "python", "run.py",
        'AnnotationResult/user/' + user + '/' + job_id + '/' + filename
    ])
    response.status = 200
    f = open('AnnotationResult/user/' + user + '/' + job_id + '/status.txt',
             'w+')
    f.write("code:\t" + response.status + "\tdata:\t" + str(job_id) +
            "\tfilename:\t" + filename + '\n')
    f.close()

    # Return response to notify user of successful submission
    data = {'id': job_id, 'input_file': filename}
    return json.dumps({'code': response.status, 'data': data})
Example #17
 def capture(self, url):
     session = Session(aws_access_key_id=AWS_ACCESS_KEY_ID,
                       aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
                       region_name='us-west-2')
     s3 = session.resource('s3')
     driver = webdriver.PhantomJS(
         service_log_path=os.path.devnull)  # or add to your PATH
     driver.set_window_size(1024, 768)  # optional
     driver.get(str(url.destination_url))
     url.page_title = driver.title
     filepath = '/tmp/' + str(url.id) + '.png'
     s3path = 'captures/' + str(url.id) + '.png'
     driver.save_screenshot(filepath)
     data = open(filepath, 'rb')  # read the screenshot back from disk
     s3.Object(AWS_STORAGE_BUCKET_NAME, s3path).put(
         Body=data
     )  #upload_file(filepath, AWS_STORAGE_BUCKET_NAME, AWS_ACCESS_KEY_ID)
     url.screen_capture = 'https://s3.amazonaws.com/%s' % AWS_STORAGE_BUCKET_NAME + '/' + s3path
     driver.service.process.kill()
     os.remove(filepath)
     return url
Example #18
def delete_s3_bucket(session, s3conn, bucket_name):
    """
    Clean up all S3 buckets
    :param session:
    :param s3conn:
    :param bucket_name:
    :return:
    """
    client = session.client('s3')
    resource = session.resource('s3')
    list_of_s3_buckets = client.list_buckets()['Buckets']
    for bucket in list_of_s3_buckets:
        if bucket['Name'] == bucket_name:
            try:
                if re.search('portal', bucket_name):
                    logger.info("Portal bucket %s will not be deleted only delete the files." % bucket_name)
                    s3_delete_files_with_prefix(client, bucket_name, 'admin')
                    s3_delete_files_with_prefix(client, bucket_name, 'auth')
                    s3_delete_files_with_prefix(client, bucket_name, 'logs')
                else:
                    client.get_bucket_location(Bucket=bucket_name)
                    logger.info("Bucket %s OK" % bucket_name)
                    s3_bucket_logging_disable(s3conn, bucket_name)
                    remove_bucket_replication(client, bucket_name)
                    disable_bucket_versioning(client, bucket_name)
                    logger.info("Delete s3 bucket lifecycle configuration.")
                    client.delete_bucket_lifecycle(Bucket=bucket_name)
                    delete_objects_in_bucket(resource, bucket_name)
                    full_bucket = s3conn.get_bucket(bucket_name)
                    for key in full_bucket.list():
                        key.delete()
                    logger.info("Delete bucket %s" % bucket_name)
                    s3conn.delete_bucket(bucket_name)
            except (S3ResponseError, ClientError) as e:
                message = e.response['Error']['Message']
                logger.info("Bucket %s - %s" % (bucket_name, message))
                pass
            break
    return
Example #19
def init(profile, conf, bucket=None, path=None):
    """Init

	Initialises the module so that it can be used

	Args:
		profile (str): The name of the profile to use to connect
		conf (dict): The configuration parameters, see boto3.resource for more info
		bucket (str): Optional bucket (needed if not passed in init)
		path (str): Optional path to prepend to all keys

	Returns:
		None
	"""

    # Pull in the module variable
    global _c, _r, _s3

    # Init the conf
    _s3 = {
        "profile": profile,
        "conf": BotoConfig(**conf),
        "bucket": bucket,
        "path": path
    }

    # Create a new session using the profile
    session = boto3.Session(profile_name=profile)

    # Get an S3 resource
    _r = session.resource('s3', config=_s3['conf'])

    # Get a client
    _c = session.client('s3',
                        config=boto3.session.Config(
                            s3={'addressing_style': 'path'},
                            signature_version='s3v4'))
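A possible call to init, assuming the module is imported as s3_helper (a hypothetical name); the conf dict is passed straight through to botocore's Config:

import s3_helper  # hypothetical module name for the code above

s3_helper.init(
    profile='my-profile',
    conf={'region_name': 'us-east-1',
          'retries': {'max_attempts': 5}},
    bucket='my-example-bucket',
    path='uploads/')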
Example #20
def main(aws_profile, output_file):
    """
    This script audits invalid records in the projects table - specifically projects template pdfs
    """
    try:
        if os.environ.get("STAGE") is None:
            logging.warning(
                "Please set the 'STAGE' environment variable - typically one of: {dev, staging, prod}"
            )
            return
        stage = os.environ.get("STAGE", "dev")
        projects_table_name = "cla-{}-projects".format(stage)
        session = boto3.Session(profile_name=aws_profile)
        dynamodb = session.resource("dynamodb")
        projects_table = dynamodb.Table(projects_table_name)
        projects = projects_table.scan()["Items"]

        # set the projects table used in the audit process
        audit_project = ProjectAudit(dynamodb, batch=projects)
        invalid_fields = audit_project.process_batch()

        columns = ["project_id", "error_type", "column", "data"]
        with open(output_file, "w", newline="") as csv_file:
            writer = csv.DictWriter(csv_file,
                                    fieldnames=columns,
                                    delimiter=" ")
            writer.writeheader()
            writer.writerows({
                "project_id": audit["project_id"],
                "error_type": audit["error_type"],
                "column": audit["column"],
                "data": audit["data"],
            } for audit in invalid_fields)

    except (Exception, ClientError) as err:
        logging.error(err, exc_info=True)
Example #21
def archive_data(job_id, user_role, result_file):

    # Pass in user role, job_id, completion_time, username
    # (session and ann_table are assumed to be module-level objects)
    glacier_client = boto3.client('glacier')
    s3 = session.resource('s3')
    client = boto3.client('s3')
    bucket = s3.Bucket('gas-results')

    # Get the corresponding job id
    res = ann_table.query(KeyConditionExpression=Key('job_id').eq(job_id))
    items = res['Items']

    # When the 30-minute window has elapsed, check the user's role again in
    # case they upgraded to premium in the meantime.
    # Only archive the result file if the user has the free_user role.
    if items[0]['user_role'] == 'free_user':
        for obj in bucket.objects.filter(Prefix=result_file):
            # Archive the result file to Glacier
            archive_res = glacier_client.upload_archive(
                vaultName='ucmpcs', body=obj.get()['Body'].read())

            print('Archive id: ' + archive_res['archiveId'])
            archive_id = archive_res['archiveId']

            # Update the job details within DynamoDB
            updateData = ann_table.update_item(
                Key={'job_id': job_id},
                UpdateExpression="set results_file_archive_id = :a",
                ExpressionAttributeValues={':a': archive_id},
                ReturnValues="UPDATED_NEW")

            # Remove the result file from the S3 bucket
            client.delete_object(Bucket='gas-results', Key=result_file)
Example #22
def _test_create_resource(session, service_name):
    resource = session.resource(service_name)
    # Verifying we have a "meta" attr is just an arbitrary
    # sanity check.
    assert_true(hasattr(resource, 'meta'))
Example #23
def create_keypair(region, session, key_name, save_path):
    new_keypair = session.resource('ec2').create_key_pair(KeyName=key_name)
    with open(save_path, 'w') as file:
        file.write(new_keypair.key_material)
    
    print(new_keypair.key_fingerprint)
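A short usage sketch (values are illustrative); tightening the key file's permissions is the usual follow-up so SSH clients will accept it:

import os
import boto3

session = boto3.Session(region_name='us-east-1')
create_keypair('us-east-1', session, key_name='my-keypair', save_path='my-keypair.pem')
os.chmod('my-keypair.pem', 0o400)  # private keys must not be world-readable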
Example #24
def connect(account_name, connection_type, **args):
    """

    Examples of use:
    ec2 = sts_connect.connect(environment, 'ec2', region=region, validate_certs=False)
    ec2 = sts_connect.connect(environment, 'ec2', validate_certs=False, debug=1000)
    ec2 = sts_connect.connect(environment, 'ec2')
    where environment is ( test, prod, dev )
    s3  = sts_connect.connect(environment, 's3')
    ses = sts_connect.connect(environment, 'ses')

    :param account: Account to connect with (i.e. test, prod, dev)

    :raises Exception: RDS Region not valid
                       AWS Tech not supported.

    :returns: STS Connection Object for given tech

    :note: To use this method a SecurityMonkey role must be created
            in the target account with full read only privileges.
    """
    if 'assumed_role' in args:
        role = args['assumed_role']
    else:
        account = Account.query.filter(Account.name == account_name).first()
        sts = boto3.client('sts')
        role_name = 'SecurityMonkey'
        if account.getCustom(
                "role_name") and account.getCustom("role_name") != '':
            role_name = account.getCustom("role_name")
        role = sts.assume_role(RoleArn='arn:aws:iam::' + account.identifier +
                               ':role/' + role_name,
                               RoleSessionName='secmonkey')

    from security_monkey import app

    if connection_type == 'botocore':
        botocore_session = botocore.session.get_session()
        botocore_session.set_credentials(
            role['Credentials']['AccessKeyId'],
            role['Credentials']['SecretAccessKey'],
            token=role['Credentials']['SessionToken'])
        return botocore_session

    region = 'us-east-1'
    if 'region' in args:
        region = args.pop('region')
        if hasattr(region, 'name'):
            region = region.name

    if 'boto3' in connection_type:
        # Should be called in this format: boto3.iam.client
        _, tech, api = connection_type.split('.')
        session = boto3.Session(
            aws_access_key_id=role['Credentials']['AccessKeyId'],
            aws_secret_access_key=role['Credentials']['SecretAccessKey'],
            aws_session_token=role['Credentials']['SessionToken'],
            region_name=region)
        if api == 'resource':
            return session.resource(tech)
        return session.client(tech)

    module = __import__("boto.{}".format(connection_type))
    for subm in connection_type.split('.'):
        module = getattr(module, subm)

    return module.connect_to_region(
        region,
        aws_access_key_id=role['Credentials']['AccessKeyId'],
        aws_secret_access_key=role['Credentials']['SecretAccessKey'],
        security_token=role['Credentials']['SessionToken'])
Example #25
        # Get job_id, s3key and file name
        input_file_name = sys.argv[1]
        files = input_file_name.split('/')
        first = input_file_name.find('/')
        second = input_file_name.rindex('/')
        s3key = input_file_name[first + 1:second]
        job_id = files[3]

        # Measure how long the annotation job request takes to run
        with Timer() as t:
            driver.run(input_file_name, 'vcf')
        print("Total runtime: %s seconds" % t.secs)

        # Get the input file upload path
        s3 = session.resource('s3')
        bucket = s3.Bucket('gas-results')
        index = input_file_name.rindex('/')
        prefix = input_file_name[:index]
        rindex = input_file_name.find('.')
        filename = input_file_name[index + 1:rindex]
        findex = prefix.find('/')
        path = prefix[findex + 1:index]

        # Upload the result file
        bucket.upload_file(prefix + '/' + filename + '.annot.vcf',
                           'zhuoyuzhu/' + path + '/' + filename + '.annot.vcf')

        # Upload the log file
        bucket.upload_file(
            prefix + '/' + filename + '.vcf.count.log',
            'zhuoyuzhu/' + path + '/' + filename + '.vcf.count.log')
Example #26
def connect(account_name, connection_type, **args):
    """

    Examples of use:
    ec2 = sts_connect.connect(environment, 'ec2', region=region, validate_certs=False)
    ec2 = sts_connect.connect(environment, 'ec2', validate_certs=False, debug=1000)
    ec2 = sts_connect.connect(environment, 'ec2')
    where environment is ( test, prod, dev )
    s3  = sts_connect.connect(environment, 's3')
    ses = sts_connect.connect(environment, 'ses')

    :param account: Account to connect with (i.e. test, prod, dev)

    :raises Exception: RDS Region not valid
                       AWS Tech not supported.

    :returns: STS Connection Object for given tech

    :note: To use this method a SecurityMonkey role must be created
            in the target account with full read only privileges.
    """
    account = Account.query.filter(Account.name == account_name).first()
    sts = boto.connect_sts()
    role_name = 'SecurityMonkey'
    if account.role_name and account.role_name != '':
        role_name = account.role_name
    role = sts.assume_role('arn:aws:iam::' + account.number + ':role/' + role_name, 'secmonkey')

    if connection_type == 'botocore':
        botocore_session = botocore.session.get_session()
        botocore_session.set_credentials(
            role.credentials.access_key,
            role.credentials.secret_key,
            token=role.credentials.session_token
        )
        return botocore_session

    if connection_type == 'iam_boto3':
        session = boto3.Session(
            aws_access_key_id=role.credentials.access_key,
            aws_secret_access_key=role.credentials.secret_key,
            aws_session_token=role.credentials.session_token
        )
        return session.resource('iam')

    region = 'us-east-1'
    if 'region' in args:
        region = args.pop('region')
        if hasattr(region, 'name'):
            region = region.name

    # ElasticSearch Service:
    if connection_type == 'es':
        session = boto3.Session(
            aws_access_key_id=role.credentials.access_key,
            aws_secret_access_key=role.credentials.secret_key,
            aws_session_token=role.credentials.session_token,
            region_name=region
        )
        return session.client('es')

    module = __import__("boto.{}".format(connection_type))
    for subm in connection_type.split('.'):
        module = getattr(module, subm)

    return module.connect_to_region(
        region,
        aws_access_key_id=role.credentials.access_key,
        aws_secret_access_key=role.credentials.secret_key,
        security_token=role.credentials.session_token
    )
Example #27
def connect(account_name, connection_type, **args):
    """

    Examples of use:
    ec2 = sts_connect.connect(environment, 'ec2', region=region, validate_certs=False)
    ec2 = sts_connect.connect(environment, 'ec2', validate_certs=False, debug=1000)
    ec2 = sts_connect.connect(environment, 'ec2')
    where environment is ( test, prod, dev )
    s3  = sts_connect.connect(environment, 's3')
    ses = sts_connect.connect(environment, 'ses')

    :param account: Account to connect with (i.e. test, prod, dev)

    :raises Exception: RDS Region not valid
                       AWS Tech not supported.

    :returns: STS Connection Object for given tech

    :note: To use this method a SecurityMonkey role must be created
            in the target account with full read only privileges.
    """
    if 'assumed_role' in args:
        role = args['assumed_role']
    else:
        account = Account.query.filter(Account.name == account_name).first()
        sts = boto3.client('sts')
        role_name = 'SecurityMonkey'
        if account.role_name and account.role_name != '':
            role_name = account.role_name
        role = sts.assume_role(RoleArn='arn:aws:iam::' + account.number +
                               ':role/' + role_name, RoleSessionName='secmonkey')

    from security_monkey import app


    if connection_type == 'botocore':
        botocore_session = botocore.session.get_session()
        botocore_session.set_credentials(
            role['Credentials']['AccessKeyId'],
            role['Credentials']['SecretAccessKey'],
            token=role['Credentials']['SessionToken']
        )
        return botocore_session

    region = 'us-east-1'
    if 'region' in args:
        region = args.pop('region')
        if hasattr(region, 'name'):
            region = region.name

    if 'boto3' in connection_type:
        # Should be called in this format: boto3.iam.client
        _, tech, api = connection_type.split('.')
        session = boto3.Session(
            aws_access_key_id=role['Credentials']['AccessKeyId'],
            aws_secret_access_key=role['Credentials']['SecretAccessKey'],
            aws_session_token=role['Credentials']['SessionToken'],
            region_name=region
        )
        if api == 'resource':
            return session.resource(tech)
        return session.client(tech)

    module = __import__("boto.{}".format(connection_type))
    for subm in connection_type.split('.'):
        module = getattr(module, subm)

    return module.connect_to_region(
        region,
        aws_access_key_id=role['Credentials']['AccessKeyId'],
        aws_secret_access_key=role['Credentials']['SecretAccessKey'],
        security_token=role['Credentials']['SessionToken']
    )
Example #28
    def writeObjectsToDatabase(self, lookupTableObjectList,
                               objectTableObjectList, lookupTableName,
                               objectTableName):
        client = None
        lookupTable = None
        objectTable = None

        try:
            session = boto3.Session(profile_name=self.profile)
            client = session.resource('dynamodb', region_name=str(self.region))
        except Exception as e:
            return {
                "status": "success",
                "payload": {
                    "error":
                    "There was a problem obtaining the connection to DynamoDB.",
                    "traceback": ''.join(traceback.format_exc())
                }
            }

        try:
            lookupTable = client.Table(str(lookupTableName))
            objectTable = client.Table(str(objectTableName))
        except Exception as e:
            return {
                "status": "success",
                "payload": {
                    "error":
                    "There was a problem connecting to the DynamoDB tables.",
                    "traceback": ''.join(traceback.format_exc())
                }
            }

        try:
            with lookupTable.batch_writer() as batch:
                for item in lookupTableObjectList:
                    batch.put_item(Item=item)
        except Exception as e:
            return {
                "status": "success",
                "payload": {
                    "error":
                    "There was a problem adding the provided data to the Lookup DynamoDB table.",
                    "traceback": ''.join(traceback.format_exc())
                }
            }

        try:
            with objectTable.batch_writer() as batch:
                for item in objectTableObjectList:
                    batch.put_item(Item=item)
        except Exception as e:
            return {
                "status": "success",
                "payload": {
                    "error":
                    "There was a problem adding the provided data to the Object DynamoDB table.",
                    "traceback": ''.join(traceback.format_exc())
                }
            }

        return {
            "status":
            "success",
            "payload":
            "Successfully pre-populated the Lookup and Object database tables with the provided information."
        }
Example #29
def connect(account_name, connection_type, **args):
    """

    Examples of use:
    ec2 = sts_connect.connect(environment, 'ec2', region=region, validate_certs=False)
    ec2 = sts_connect.connect(environment, 'ec2', validate_certs=False, debug=1000)
    ec2 = sts_connect.connect(environment, 'ec2')
    where environment is ( test, prod, dev )
    s3  = sts_connect.connect(environment, 's3')
    ses = sts_connect.connect(environment, 'ses')

    :param account: Account to connect with (i.e. test, prod, dev)

    :raises Exception: RDS Region not valid
                       AWS Tech not supported.

    :returns: STS Connection Object for given tech

    :note: To use this method a SecurityMonkey role must be created
            in the target account with full read only privileges.
    """
    account = Account.query.filter(Account.name == account_name).first()
    sts = boto.connect_sts()
    role_name = 'SecurityMonkey'
    if account.role_name and account.role_name != '':
        role_name = account.role_name
    role = sts.assume_role('arn:aws:iam::' + account.number + ':role/' + role_name, 'secmonkey')

    if connection_type == 'botocore':
        botocore_session = botocore.session.get_session()
        botocore_session.set_credentials(
            role.credentials.access_key,
            role.credentials.secret_key,
            token=role.credentials.session_token
        )
        return botocore_session

    if connection_type == 'ec2':
        return boto.connect_ec2(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'elb':
        if 'region' in args:
            region = args['region']
            del args['region']
        else:
            region = 'us-east-1'

        return boto.ec2.elb.connect_to_region(
            region,
            aws_access_key_id=role.credentials.access_key,
            aws_secret_access_key=role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 's3':
        if 'region' in args:
            region = args['region']
            # drop region key-val pair from args or you'll get an exception
            del args['region']
            return boto.s3.connect_to_region(
                region,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)

        return boto.connect_s3(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'ses':
        if 'region' in args:
            region = args['region']
            del args['region']
            return boto.ses.connect_to_region(
                region,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)

        return boto.connect_ses(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'iam_boto3':
        session = boto3.Session(
            aws_access_key_id=role.credentials.access_key,
            aws_secret_access_key=role.credentials.secret_key,
            aws_session_token=role.credentials.session_token
        )
        return session.resource('iam')

    if connection_type == 'iam':
        if 'region' in args:
            region = args['region']
            # drop region key-val pair from args or you'll get an exception
            del args['region']
            return boto.iam.connect_to_region(
                region,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)

        return boto.connect_iam(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'route53':
        return boto.connect_route53(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'sns':
        if 'region' in args:
            region = args['region']
            del args['region']
            return boto.sns.connect_to_region(
                region.name,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)

        return boto.connect_sns(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'sqs':
        if 'region' in args:
            region = args['region']
            del args['region']
            return boto.sqs.connect_to_region(
                region.name,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)

        return boto.connect_sqs(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'vpc':
        return boto.connect_vpc(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'rds':
        if 'region' in args:
            reg = args['region']
            rds_region = None
            for boto_region in boto.rds.regions():
                if reg.name == boto_region.name:
                    rds_region = boto_region

            if rds_region is None:
                raise Exception('The supplied region {0} is not in boto.rds.regions. {1}'.format(reg, boto.rds.regions()))

        return boto.connect_rds(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'redshift':
        if 'region' in args:
            region = args['region']
            del args['region']
            return boto.redshift.connect_to_region(
                region.name,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)

        return boto.connect_redshift(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'vpc':
        if 'region' in args:
            region = args['region']
            del args['region']
            return boto.vpc.connect_to_region(
                region.name,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)

        return boto.connect_vpc(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    err_msg = 'The connection_type supplied (%s) is not implemented.' % connection_type
    raise Exception(err_msg)
Example #30
import boto3
import botocore.session

session = boto3.session.Session(profile_name='power-user')
dynamodb = session.resource('dynamodb')
table = dynamodb.Table('prod-visual-schedules-data-table')
print(table.creation_date_time)

with table.batch_writer() as batch:
    batch.put_item(
        Item={
            'pk': "KID_SCHEDULE|2020|Kiera",
            'sk': "PERIOD|1",
            'start': "08:45",
            'end': "09:35",
            'TERMS|Nine Weeks 1': {
                "Class": "Pre AP English 2",
                "Category": "English"
            },
            'TERMS|Nine Weeks 2': {
                "Class": "Pre AP English 2",
                "Category": "English"
            },
            'TERMS|Nine Weeks 3': {
                "Class": "Pre AP English 2",
                "Category": "English"
            },
            'TERMS|Nine Weeks 4': {
                "Class": "Pre AP English 2",
                "Category": "English"
            },
        })
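Reading one of the items back with the same composite key is a quick sanity check (a sketch using the standard Table.get_item call):

item = table.get_item(Key={'pk': 'KID_SCHEDULE|2020|Kiera',
                           'sk': 'PERIOD|1'}).get('Item')
print(item['start'], item['end'])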
Example #31
AWS_KEY_ID = 'YOUR_AWS_ACCESS_KEY_ID'          # placeholder; never hard-code real credentials
AWS_SECRET_KEY = 'YOUR_AWS_SECRET_ACCESS_KEY'  # placeholder
AWS_BUCKET_NAME = 'memrix1'
ALLOWED_EXTENSIONS = {'jpg', 'jpeg', 'gif', 'png'}

GMAPS_API_KEY = 'YOUR_GMAPS_API_KEY'           # placeholder

from boto3.session import Session
import botocore
import botocore.session

session = Session(aws_access_key_id=AWS_KEY_ID,
                  aws_secret_access_key=AWS_SECRET_KEY,
                  region_name='us-west-2')

s3 = session.resource('s3')
s3Client = session.client('s3')
bucket = s3.Bucket('mybucket')
exists = True
try:
    s3.meta.client.head_bucket(Bucket='mybucket')

except botocore.exceptions.ClientError as e:
    # If a client error is thrown, then check that it was a 404 error.
    # If it was a 404 error, then the bucket does not exist.
    error_code = int(e.response['Error']['Code'])
    if error_code == 404:
        exists = False

# for getting image urls
# session = botocore.session.get_session()
Example #32
def connect(account_name, connection_type, **args):
    """

    Examples of use:
    ec2 = sts_connect.connect(environment, 'ec2', region=region, validate_certs=False)
    ec2 = sts_connect.connect(environment, 'ec2', validate_certs=False, debug=1000)
    ec2 = sts_connect.connect(environment, 'ec2')
    where environment is ( test, prod, dev )
    s3  = sts_connect.connect(environment, 's3')
    ses = sts_connect.connect(environment, 'ses')

    :param account: Account to connect with (i.e. test, prod, dev)

    :raises Exception: RDS Region not valid
                       AWS Tech not supported.

    :returns: STS Connection Object for given tech

    :note: To use this method a SecurityMonkey role must be created
            in the target account with full read only privileges.
    """
    account = Account.query.filter(Account.name == account_name).first()
    sts = boto.connect_sts()
    role = sts.assume_role('arn:aws:iam::' + account.number + ':role/SecurityMonkey', 'secmonkey')

    if connection_type == 'botocore':
        botocore_session = botocore.session.get_session()
        botocore_session.set_credentials(
            role.credentials.access_key,
            role.credentials.secret_key,
            token=role.credentials.session_token
        )
        return botocore_session

    if connection_type == 'ec2':
        return boto.connect_ec2(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'elb':
        if 'region' in args:
            region = args['region']
            del args['region']
        else:
            region = 'us-east-1'

        return boto.ec2.elb.connect_to_region(
            region,
            aws_access_key_id=role.credentials.access_key,
            aws_secret_access_key=role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 's3':
        if 'region' in args:
            region = args['region']
            # drop region key-val pair from args or you'll get an exception
            del args['region']
            return boto.s3.connect_to_region(
                region,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)

        return boto.connect_s3(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'ses':
        if 'region' in args:
            region = args['region']
            del args['region']
            return boto.ses.connect_to_region(
                region,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)

        return boto.connect_ses(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'iam_boto3':
        session = boto3.Session(
            aws_access_key_id=role.credentials.access_key,
            aws_secret_access_key=role.credentials.secret_key,
            aws_session_token=role.credentials.session_token
        )
        return session.resource('iam')

    if connection_type == 'iam':
        if 'region' in args:
            region = args['region']
            # drop region key-val pair from args or you'll get an exception
            del args['region']
            return boto.iam.connect_to_region(
                region,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)

        return boto.connect_iam(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'route53':
        return boto.connect_route53(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'sns':
        if 'region' in args:
            region = args['region']
            del args['region']
            return boto.sns.connect_to_region(
                region.name,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)

        return boto.connect_sns(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'sqs':
        if 'region' in args:
            region = args['region']
            del args['region']
            return boto.sqs.connect_to_region(
                region.name,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)

        return boto.connect_sqs(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'vpc':
        return boto.connect_vpc(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'rds':
        if 'region' in args:
            reg = args['region']
            rds_region = None
            for boto_region in boto.rds.regions():
                if reg.name == boto_region.name:
                    rds_region = boto_region

            if rds_region is None:
                raise Exception('The supplied region {0} is not in boto.rds.regions. {1}'.format(reg, boto.rds.regions()))

        return boto.connect_rds(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'redshift':
        if 'region' in args:
            region = args['region']
            del args['region']
            return boto.redshift.connect_to_region(
                region.name,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)

        return boto.connect_redshift(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'vpc':
        if 'region' in args:
            region = args['region']
            del args['region']
            return boto.vpc.connect_to_region(
                region.name,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)

        return boto.connect_vpc(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    err_msg = 'The connection_type supplied (%s) is not implemented.' % connection_type
    raise Exception(err_msg)