コード例 #1
0
def get_user_data_configuration():
    """Retrieve and update the application configuration with information from the user-data

    Returns:
        `None`
    """
    from cloud_inquisitor import get_local_aws_session, app_config

    region = app_config.kms_region
    local_session = get_local_aws_session()

    # Instance-role credentials can talk to KMS directly; otherwise assume
    # the instance role first and build the KMS client from the temporary
    # credentials returned by STS.
    if local_session.get_credentials().method == 'iam-role':
        kms = local_session.client('kms', region_name=region)
    else:
        creds = local_session.client('sts').assume_role(
            RoleArn=app_config.aws_api.instance_role_arn,
            RoleSessionName='cloud_inquisitor'
        )['Credentials']
        kms = boto3.session.Session(
            creds['AccessKeyId'],
            creds['SecretAccessKey'],
            creds['SessionToken'],
        ).client('kms', region_name=region)

    res = requests.get(app_config.user_data_url)
    if res.status_code != 200:
        raise RuntimeError('Failed loading user-data, cannot continue: {}: {}'.format(res.status_code, res.content))

    # user-data is base64-encoded, KMS-encrypted, zlib-compressed JSON
    decrypted = kms.decrypt(CiphertextBlob=b64decode(res.content))
    kms_config = json.loads(zlib.decompress(decrypted['Plaintext']).decode('utf-8'))
    app_config.database_uri = kms_config['db_uri']
コード例 #2
0
def copy_objects_by_thread(thread_id, copied_objects):
    """Worker loop: copy every object under each queued prefix from the
    source bucket to the destination bucket.

    Args:
        thread_id: index of this worker, used for tracing and for recording
            its running copy count in ``copied_objects``.
        copied_objects: shared mapping updated with the number of objects
            this thread has copied so far.
    """
    # HOWTO: each thread should have its own session
    # http://boto3.readthedocs.io/en/latest/guide/resources.html#multithreading
    session = boto3.session.Session()

    if args.optimal:
        # HOWTO: low-level control
        # http://boto3.readthedocs.io/en/latest/_modules/boto3/s3/transfer.html
        client_config = botocore.config.Config(
            max_pool_connections=args.max_concurrency)
        transfer_config = boto3.s3.transfer.TransferConfig(
            multipart_threshold=8 * 1024 * 1024,
            multipart_chunksize=8 * 1024 * 1024,
            max_concurrency=args.max_concurrency,
            num_download_attempts=5,
            max_io_queue=100,
            io_chunksize=256 * 1024)
        client = session.client('s3', config=client_config)
    else:
        s3 = session.resource('s3')
        client = session.client('s3')

    count = 0
    while True:
        prefix = tasks.get()
        # HOWTO: list objects
        # BUG FIX: this previously called the undefined name ``s3_client``;
        # use the per-thread ``client`` created above.
        response = client.list_objects_v2(
            Bucket=src_bucket.name,
            Prefix=prefix)  # Important: using prefix to limit listing
        # A prefix can match zero keys, in which case 'Contents' is absent.
        for content in response.get('Contents', []):
            key = content['Key']
            trace('thread %d copy object: s3://%s/%s' %
                  (thread_id, src_bucket.name, key))
            if not args.dryrun:
                if args.optimal:
                    client.copy(CopySource={
                        'Bucket': src_bucket.name,
                        'Key': key
                    },
                                Bucket=dst_bucket.name,
                                Key=key,
                                Config=transfer_config)
                else:
                    obj = s3.Object(dst_bucket.name, key)
                    obj.copy_from(CopySource={
                        'Bucket': src_bucket.name,
                        'Key': key
                    }, )
                count += 1
        copied_objects[thread_id] = count
        tasks.task_done()
コード例 #3
0
def get_aws_session(account):
    """Function to return a boto3 Session based on the account passed in the first argument.

    Args:
        account (:obj:`Account`): Account to create the session object for

    Returns:
        :obj:`boto3:boto3.session.Session`
    """
    from cloud_inquisitor.config import dbconfig
    from cloud_inquisitor.plugins.types.accounts import AWSAccount

    if not isinstance(account, AWSAccount):
        raise InquisitorError('Non AWSAccount  passed to get_aws_session, got {}'.format(account.__class__.__name__))

    # If no keys are on supplied for the account, use sts.assume_role instead
    local_session = get_local_aws_session()
    if local_session.get_credentials().method == 'iam-role':
        # Already running with instance-role credentials: call STS directly
        sts = local_session.client('sts')
    else:
        # If we are not running on an EC2 instance, assume the instance role
        # first, then assume the remote role
        instance_creds = local_session.client('sts').assume_role(
            RoleArn=app_config.aws_api.instance_role_arn,
            RoleSessionName='inquisitor'
        )['Credentials']
        sts = boto3.session.Session(
            instance_creds['AccessKeyId'],
            instance_creds['SecretAccessKey'],
            instance_creds['SessionToken']
        ).client('sts')

    # Hop into the per-account audit role
    remote_creds = sts.assume_role(
        RoleArn='arn:aws:iam::{}:role/{}'.format(
            account.account_number,
            dbconfig.get('role_name', default='cinq_role')
        ),
        RoleSessionName='inquisitor'
    )['Credentials']

    return boto3.session.Session(
        remote_creds['AccessKeyId'],
        remote_creds['SecretAccessKey'],
        remote_creds['SessionToken']
    )
コード例 #4
0
def plugin_write(vl, config):
    """collectd write callback: push one value-list to CloudWatch.

    Args:
        vl: collectd value list (carries ``plugin``, ``time``, ``values``).
        config: plugin configuration carrying ``aws_region``.
    """
    try:
        session = boto3.session.Session(region_name=config.aws_region)
        client_config = botocore.client.Config(connect_timeout=5,
                                               read_timeout=5)
        client = session.client('cloudwatch', config=client_config)
        metrics_list = list(metrics(vl, config))
        ts = datetime.fromtimestamp(vl.time)
        data = []

        for i, v in enumerate(vl.values):
            fullname, unit, dims = metrics_list[i]
            # CloudWatch caps metric names at 255 characters
            name = fullname[:255]
            if len(name) < len(fullname):
                collectd.warning(
                    'Metric name was truncated for CloudWatch: {}'.format(
                        fullname))

            data.append(
                dict(MetricName=name,
                     Timestamp=ts,
                     Value=v,
                     Unit=unit,
                     Dimensions=dims))

        client.put_metric_data(Namespace=vl.plugin, MetricData=data)
    # BUG FIX: ``except Exception, e`` is Python-2-only syntax; the ``as``
    # form is valid on Python 2.6+ and Python 3.
    except Exception as e:
        collectd.error(str(e))
コード例 #5
0
def set_client():
    """Build and return a client for the module-level SERVICE_NAME using the
    module-level session, endpoint and static credentials."""
    return session.client(
        service_name=SERVICE_NAME,
        region_name=REGION_NAME,
        endpoint_url=ENDPOINT_URL,
        aws_access_key_id=ACCESS_ID,
        aws_secret_access_key=SECRET_KEY,
    )
コード例 #6
0
def create_client_with_profile(profile_name, region, resource_name='ec2'):
    """ Create a new boto3 client with a boto3 profile  in ~/.aws/credentials
    Args:
        profile_name (str): The name of the profile that you have set in your
            ~/.aws/credentials profile.
        region (str): The aws region you want to connect to.
        resource_name (str): Valid aws resource.
            default=ec2

    Basic Usage:
        >>> client, err_msg = create_client_with_profile('lab01', 'us-west-2')

    Returns:
        Tuple (botocore.client.EC2, str)
    """
    err_msg = ''
    client = None
    try:
        profile_session = boto3.session.Session(profile_name=profile_name,
                                                region_name=region)
        client = profile_session.client(resource_name)
    except Exception as e:
        # On any failure the caller gets (None, <message>) instead of a raise
        err_msg = str(e)

    return client, err_msg
コード例 #7
0
def demo():
    """Interactive demo: pick a region, build EC2 client/resource, and print
    the availability zones reported by the EC2_Operate helper."""
    region_id = region_select()
    session = boto3.session.Session()
    client = session.client('ec2', region_name=region_id)
    ec2 = session.resource('ec2', region_name=region_id)
    ec2operat = EC2_Operate(client, ec2, region_id)
    # BUG FIX: the Py2 ``print x`` statement is a syntax error on Python 3;
    # the call form works identically on both.
    print(ec2operat.get_availbility_zone())
コード例 #8
0
def plugin_write(vl, config):
    """collectd write callback: push one value-list to CloudWatch.

    Args:
        vl: collectd value list (carries ``plugin``, ``time``, ``values``).
        config: plugin configuration carrying ``aws_region``.
    """
    try:
        session = boto3.session.Session(region_name=config.aws_region)
        client_config = botocore.client.Config(connect_timeout=5, read_timeout=5)
        client = session.client('cloudwatch', config=client_config)
        metrics_list = list(metrics(vl, config))
        ts = datetime.fromtimestamp(vl.time)
        data = []

        for i, v in enumerate(vl.values):
            fullname, unit, dims = metrics_list[i]
            # CloudWatch caps metric names at 255 characters
            name = fullname[:255]
            if len(name) < len(fullname):
                collectd.warning('Metric name was truncated for CloudWatch: {}'.format(fullname))

            data.append(dict(
                MetricName=name,
                Timestamp=ts,
                Value=v,
                Unit=unit,
                Dimensions=dims
            ))

        client.put_metric_data(Namespace=vl.plugin, MetricData=data)
    # BUG FIX: ``except Exception, e`` is Python-2-only syntax; the ``as``
    # form is valid on Python 2.6+ and Python 3.
    except Exception as e:
        collectd.error(str(e))
コード例 #9
0
ファイル: deploy.py プロジェクト: ryantuck/boto3-deploy
def deploy_iam_roles(session):
    """Create every IAM role defined under ``iam/roles``.

    Each ``iam/roles/<name>.json`` file names a policy; the matching
    ``iam/policies/<policy>.json`` document is used as the role's assume-role
    policy. Creation is best-effort: an existing role is reported and skipped.

    Args:
        session: a boto3-style session exposing ``client('iam')``.
    """
    role_dir = 'iam/roles'

    iam_client = session.client('iam')

    for role_file in os.listdir(role_dir):

        role_name, _ = os.path.splitext(role_file)

        # load in role and policy configs; close the files promptly
        with open(os.path.join(role_dir, role_file)) as fh:
            role = json.load(fh)
        with open('iam/policies/{}.json'.format(role['policy'])) as fh:
            policy = json.load(fh)

        # Py2 ``print`` statements replaced with the portable call form
        print('creating role: {}'.format(role_name))
        try:
            iam_client.create_role(
                    RoleName=role_name,
                    AssumeRolePolicyDocument=json.dumps(policy))
        except Exception:
            # narrowed from a bare except; keeps the best-effort semantics
            print('\trole already exists')
コード例 #10
0
def get_secret(secret_name: str) -> Optional[str]:
    """
    Retrieve secret with name 'secret_name' from AWS Secrets Manager.

    :param secret_name: Name of the secret to retrieve.
    :return: The secret, or None when the lookup fails.
    """
    AWS_REGION = current_app.config["AWS_REGION"]

    # Create a session with the Secrets Manager
    client = boto3.session.Session().client(service_name="secretsmanager",
                                            region_name=AWS_REGION)

    # Try to retrieve the secret from AWS Secrets Manager
    try:
        secret_value = client.get_secret_value(SecretId=secret_name)
    except ClientError as e:
        logging.error(f"AWS error: {e}. (Region={AWS_REGION}).")
        return None

    # Decrypts secret using the associated KMS CMK.
    if "SecretString" in secret_value:
        return json.loads(secret_value["SecretString"])[secret_name]
    return base64.b64decode(secret_value["SecretBinary"])
コード例 #11
0
    def ReadSecrets(self):
        """Fetch the certificate and private key stored as a JSON secret in
        AWS Secrets Manager and cache them (decoded) on the instance."""
        # Create a Secrets Manager client, honouring an explicit profile when
        # one was configured, otherwise falling back to the environment.
        if self.profilename:
            session = boto3.session.Session(profile_name=self.profilename)
        else:
            session = boto3.session.Session()
        client = session.client(service_name='secretsmanager',
                                region_name=self.region)

        # Only the 'GetSecretValue' failure modes are handled here; see
        # https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html
        # Everything is logged and re-raised.
        try:
            response = client.get_secret_value(SecretId=self.secretname)
            data = json.loads(response['SecretString'])
            # The secret holds base64-encoded PEM blobs under 'cert' / 'key'
            self.property_certfilebin = base64.b64decode(data['cert'])
            self.property_privatekeyfilebin = base64.b64decode(data['key'])

        except ClientError as e:
            self.logging.error(
                f"Get secrets failed: {e.response['Error']['Code']}")
            raise e
コード例 #12
0
ファイル: aws.py プロジェクト: malywonsz/mpu
def list_files(bucket: str,
               prefix: str = "",
               profile_name: Optional[str] = None) -> List[str]:
    """
    List up to 1000 files in a bucket.

    Parameters
    ----------
    bucket : str
    prefix : str
    profile_name : str, optional
        AWS profile

    Returns
    -------
    s3_paths : List[str]
    """
    session = boto3.session.Session(profile_name=profile_name)
    conn = session.client("s3")
    ret = conn.list_objects_v2(Bucket=bucket, Prefix=prefix)
    if "Contents" not in ret:
        return []
    # BUG FIX: the original issued the identical list_objects_v2 request a
    # second time; reuse the response already in hand.
    # Make this a generator in future and use the marker:
    # https://boto3.readthedocs.io/en/latest/reference/services/
    #     s3.html#S3.Client.list_objects
    return ["s3://" + bucket + "/" + key["Key"] for key in ret["Contents"]]
コード例 #13
0
def all_regions(args):
    """Return the names of every EC2 region visible to the given credentials.

    Args:
        args: namespace carrying ``access_key_id`` and ``secret_access_key``.
    """
    ec2 = boto3.session.Session(
        aws_access_key_id=args.access_key_id,
        aws_secret_access_key=args.secret_access_key,
    ).client('ec2', region_name='us-west-2')
    return [region['RegionName'] for region in ec2.describe_regions()['Regions']]
コード例 #14
0
 def __init__(self, service_name, aws_access_key_id, aws_secret_access_key,
              region_name):
     """Build and keep a boto3 client for *service_name* using the explicit
     credentials and region supplied by the caller."""
     session = boto3.session.Session(
         aws_access_key_id=aws_access_key_id,
         aws_secret_access_key=aws_secret_access_key,
         region_name=region_name)
     # Single client reused for the lifetime of this object
     self.aws_client = session.client(service_name)
コード例 #15
0
ファイル: s3upload.py プロジェクト: zahedul/s3upload
def create_presigned_post(bucket_name,
                          object_name,
                          fields=None,
                          conditions=None,
                          expiration=3600):
    """Generate a presigned URL S3 POST request to upload a file

    :param bucket_name: string
    :param object_name: string
    :param fields: Dictionary of prefilled form fields
    :param conditions: List of conditions to include in the policy
    :param expiration: Time in seconds for the presigned URL to remain valid
    :return: Dictionary with the following keys:
        url: URL to post to
        fields: Dictionary of form fields and values to submit with the POST
    :return: None if error.
    """
    # Generate a presigned S3 POST URL using the configured AWS profile
    s3_client = boto3.session.Session(profile_name=AWS_PROFILE).client('s3')
    try:
        # The response contains the presigned URL and required form fields
        return s3_client.generate_presigned_post(bucket_name,
                                                 object_name,
                                                 Fields=fields,
                                                 Conditions=conditions,
                                                 ExpiresIn=expiration)
    except ClientError as e:
        logging.error(e)
        return None
コード例 #16
0
def get_rds_status():
    """Report the state of the 'temp-min' RDS instance as (message, color).

    During the night window a running instance is expected (green) and a
    missing one is an alert (red); during the day the expectation flips.
    A non-'available' state is always orange.

    Returns:
        tuple(str, color): human-readable status and the dashboard color.
    """
    session = boto3.Session(
        profile_name='--profile name goes here--')  # aws credentials
    rds = session.client('rds')

    # The two original branches were identical except for these two colors;
    # compute them once from the time window instead of duplicating the body.
    is_night = night_begin <= now <= night_end
    running_color = green if is_night else red
    missing_color = red if is_night else green

    try:
        dbs = rds.describe_db_instances(
            DBInstanceIdentifier='temp-min'
        )  # checking RDS instance named temp-min
        state = dbs['DBInstances'][0]['DBInstanceStatus']
        if state == 'available':
            rds_status = 'The RDS temp instance is RUNNING'
            rds_color = running_color
        else:
            rds_status = 'The RDS temp instance is ' + state
            rds_color = orange
    except Exception:
        # describe_db_instances raises when the instance does not exist
        rds_status = 'The RDS temp instance does not exist'
        rds_color = missing_color
    return rds_status, rds_color
コード例 #17
0
 def test_transfer_methods_injected_to_client(self):
     """The S3 customizations must attach the transfer helpers to clients."""
     session = boto3.session.Session(region_name='us-west-2')
     client = session.client('s3')
     expected = (
         ('upload_file', 'upload_file was not injected onto S3 client'),
         ('download_file', 'download_file was not injected onto S3 client'),
     )
     for method_name, message in expected:
         self.assertTrue(hasattr(client, method_name), message)
コード例 #18
0
ファイル: aws.py プロジェクト: malywonsz/mpu
def s3_read(source: str, profile_name: Optional[str] = None) -> bytes:
    """
    Read a file from an S3 source.

    Parameters
    ----------
    source : str
        Path starting with s3://, e.g. 's3://bucket-name/key/foo.bar'
    profile_name : str, optional
        AWS profile

    Returns
    -------
    content : bytes

    Raises
    ------
    botocore.exceptions.NoCredentialsError
        Botocore is not able to find your credentials. Either specify
        profile_name or add the environment variables AWS_ACCESS_KEY_ID,
        AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN.
        See https://boto3.readthedocs.io/en/latest/guide/configuration.html
    """
    # Split the s3:// URL into its bucket and key components first
    bucket_name, key = _s3_path_split(source)
    s3 = boto3.session.Session(profile_name=profile_name).client("s3")
    s3_object = s3.get_object(Bucket=bucket_name, Key=key)
    # Drain the streaming body into bytes before returning
    return s3_object["Body"].read()
コード例 #19
0
ファイル: test_s3.py プロジェクト: ashutoshsonu/boto3
 def test_transfer_methods_injected_to_client(self):
     """The S3 customizations must attach the transfer helpers to clients."""
     session = boto3.session.Session(region_name='us-west-2')
     client = session.client('s3')
     checks = (
         ('upload_file', 'upload_file was not injected onto S3 client'),
         ('download_file', 'download_file was not injected onto S3 client'),
     )
     for attr, failure_message in checks:
         self.assertTrue(hasattr(client, attr), failure_message)
コード例 #20
0
ファイル: utils.py プロジェクト: lloesche/cloudkeeper
def aws_session(aws_account=None, aws_role=None):
    """Return a boto3 session, assuming a role in *aws_account* when both an
    account and a role are given (the role may be forced via CLI override).

    Args:
        aws_account: target account id, or None for a plain session.
        aws_role: role name to assume in the target account.
    """
    if ArgumentParser.args.aws_role_override:
        aws_role = ArgumentParser.args.aws_role

    base_credentials = dict(
        aws_access_key_id=ArgumentParser.args.aws_access_key_id,
        aws_secret_access_key=ArgumentParser.args.aws_secret_access_key,
    )

    if not (aws_role and aws_account):
        # No cross-account hop requested: use the configured keys directly
        return boto3.session.Session(**base_credentials)

    role_arn = f"arn:aws:iam::{aws_account}:role/{aws_role}"
    sts = boto3.session.Session(
        region_name="us-east-1", **base_credentials).client("sts")
    token = sts.assume_role(
        RoleArn=role_arn,
        RoleSessionName=f"{aws_account}-{str(uuid.uuid4())}")
    credentials = token["Credentials"]
    return boto3.session.Session(
        aws_access_key_id=credentials["AccessKeyId"],
        aws_secret_access_key=credentials["SecretAccessKey"],
        aws_session_token=credentials["SessionToken"],
    )
コード例 #21
0
ファイル: s3_backup.py プロジェクト: Levtastic/kittenbot
 def do_auth_command(self, bot, connection, event, command, parameters, reply_target, auth_level):
     """Handle the 'backup' auth command: upload the bot database to S3.

     Returns True when the command was handled, False otherwise.
     """
     if command not in self.auth_commands:
         return False # not for us

     if command != 'backup':
         return False

     # <access key>|<secret key>|<bucket name>|<uploaded file name>
     akey, skey, bname, uname = bot.db.get('s3_credentials').split('|')

     client = boto3.session.Session(
         aws_access_key_id = akey,
         aws_secret_access_key = skey,
     ).client('s3')
     transfer = boto3.s3.transfer.S3Transfer(client)

     # Infrequent-access storage is enough for periodic database backups
     transfer.upload_file(
         bot.module_parameters['database:name'],
         bname,
         uname,
         extra_args = {
             'StorageClass': 'STANDARD_IA',
         },
     )

     bot.send(connection, reply_target, bot.db.get_random('yes'), event)
     return True
コード例 #22
0
def get_org_accounts(filter_current_account=False):
    """Return the ids of all AWS Organizations accounts, optionally
    excluding the account the code is currently running in.

    Args:
        filter_current_account: drop the current account id from the result.
    """
    client = aws_session().client('organizations')
    accounts = []
    try:
        # Walk every page of the organizations listing
        response = client.list_accounts()
        accounts = response.get('Accounts', [])
        while response.get('NextToken') is not None:
            response = client.list_accounts(NextToken=response['NextToken'])
            accounts.extend(response.get('Accounts', []))
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == 'AccessDeniedException':
            log.error(
                'AWS error - missing permissions to list organization accounts'
            )
        else:
            raise
    filter_account_id = current_account_id() if filter_current_account else -1
    account_ids = []
    for aws_account in accounts:
        if aws_account['Id'] != filter_account_id:
            account_ids.append(aws_account['Id'])
    for account in account_ids:
        log.debug('AWS found org account {}'.format(account))
    log.info('AWS found a total of {} org accounts'.format(len(account_ids)))
    return account_ids
コード例 #23
0
ファイル: killer3.py プロジェクト: dmark/instance-killer
def main():
    """Scan EC2 instances in one region, terminating those older than the
    kill threshold (when --yes is given) and reporting those older than the
    warn threshold."""
    from boto3 import session
    from pprint import PrettyPrinter

    utc = UTC()
    pp = PrettyPrinter(indent=4)
    args = get_args()

    # Instances launched before these cutoffs are candidates
    delta_kill = datetime.now(utc) - timedelta(hours=args.terminate)
    delta_warn = datetime.now(utc) - timedelta(hours=args.warn)

    session = session.Session(region_name=args.region,
                              profile_name=args.profile)
    ec2client = session.client('ec2')
    response = ec2client.describe_instances()

    warn_instances = []
    for reservation in response['Reservations']:
        for instance in reservation['Instances']:
            launchtime = instance['LaunchTime']
            if launchtime < delta_kill and args.yes:
                print("Terminating instance", instance['InstanceId'])
                # ec2client.terminate_instances(InstanceIds=[instance[u'InstanceId']])
            elif launchtime < delta_kill and not args.yes:
                print("Skipping instance", instance['InstanceId'])
            elif launchtime < delta_warn:
                # BUG FIX: ``warn_instances += instance`` extended the list
                # with the dict's *keys*; append the whole instance record.
                warn_instances.append(instance)

    if warn_instances:
        print("The following instances are more than ", args.warn, "hrs old.")
        pp.pprint(warn_instances)
コード例 #24
0
ファイル: models.py プロジェクト: muminoff/hippo
    def create_s3_account(sender, instance, created, **kwargs):
        """On user creation, provision a Riak CS account via the backend API,
        persist the returned keys, and create an initial bucket for the user."""
        if not created:
            return

        import requests
        import json

        # Ask the Riak CS backend to create the account
        url = settings.BACKEND_ENDPOINT_URL + '/riak-cs/user'
        payload = {'email': instance.email, 'name': instance.get_full_name()}
        response_dict = json.loads(requests.post(url, json=payload).text)

        S3Account.objects.create(user=instance,
                                 id=response_dict['id'],
                                 key_id=response_dict['key_id'],
                                 key_secret=response_dict['key_secret'],
                                 status=response_dict['status'])

        # Connect back to the same backend with the freshly issued keys
        s3client = boto3.session.Session().client(
            's3',
            use_ssl=False,
            endpoint_url=settings.BACKEND_ENDPOINT_URL,
            aws_access_key_id=response_dict['key_id'],
            aws_secret_access_key=response_dict['key_secret'])

        try:
            s3client.create_bucket(Bucket=str(uuid.uuid4()))
        except ClientError as e:
            print(str(e))
コード例 #25
0
	def download_aws(self):
		"""Download and unzip the monthly AWS billing report from S3.

		The manifest is fetched first (its key embeds the YYYYMMDD-YYYYMMDD
		billing period), then the report archive it points at is downloaded
		and extracted next to this doctype.

		BUG FIX: the original body mixed tabs and spaces, which is an
		IndentationError/TabError under Python 3; indentation is normalized
		to tabs throughout. Unused/dead code was removed.
		"""
		bucket_name = frappe.db.get_value("Monthly Recurring Setup", None, "s3_bucket")
		report_name = frappe.db.get_value("Monthly Recurring Setup", None, "report_name")

		session = boto3.session.Session(region_name='us-east-1')
		s3client = session.client('s3', config=boto3.session.Config(signature_version='s3v4'))

		# Billing period boundaries: first day of this month and of the next
		first_day_of_this_month = frappe.utils.data.get_first_day(self.transaction_date, 0, 0)
		next_month = frappe.utils.data.get_first_day(self.transaction_date, 0, 1)

		path = "/home/ubuntu/frappe-bench/apps/aptitudetech_private/aptitudetech_private/aptitudetech_private/doctype/aws_transaction/"

		# Manifest first: it points at the actual report archive key
		s3client.download_file(bucket_name, report_name + "/Billing-aptech/" + str(first_day_of_this_month).replace("-", "") + "-" + str(next_month).replace("-", "") + "/Billing-aptech-Manifest.json", path + "tmp1.json")
		j = json.loads(open(path + "tmp1.json", 'r').read())

		# reportKeys is serialized like "[u'<key>']"; strip the wrapper
		s3client.download_file(bucket_name, str(j["reportKeys"]).replace("[u'", "").replace("']", ""), path + "tmp2.zip")

		zip_ref = zipfile.ZipFile(path + "tmp2.zip", 'r')
		zip_ref.extractall(path)
		zip_ref.close()
    def aws_sts_role(self, api_call, role_arn):
        """Assume *role_arn* through a client for *api_call* (expected to be
        'sts') using the 'betaDev' profile, and return the AssumeRole
        response. Exits the process on any failure."""
        try:
            # experimenting the aws STS roles configurations
            session = boto3.session.Session(profile_name="betaDev",
                                            region_name=self.region)
            client = session.client(api_call)
            logger.debug(client)

            # Session policy restricts the assumed role to the custom policy
            assume_role = client.assume_role(
                RoleArn=role_arn,
                RoleSessionName='AssumeRoleSession',
                PolicyArns=[
                    {
                        "arn":
                        "arn:aws:iam::938819073054:policy/awsCustomPolicy"
                    },
                ],
                #        ExternalId= 'ec2filter'
            )
            logger.info(assume_role)

        except Exception as end_point_err:
            logger.exception("Logging Exception: " + str(end_point_err) + "\n")
            # raise
            sys.exit(1)

        else:
            return assume_role
コード例 #27
0
ファイル: cfUpdate.py プロジェクト: owms/ecs-deploy
    def process_cf_file(self, args):
        """Create or update one CloudFormation stack from a template file.

        Args:
            args: positional tuple of
                (cf_params, cluster, elb_name_suffix, env, filename,
                 has_ecs_service, listener_port, region).

        Side effects: calls CloudFormation, logs progress, and pushes a
        "<filename> Succeeded" / "<filename> Failed" marker onto self.q.
        """
        try:
            # Deep-copy so per-file mutations don't leak into the shared params
            cf_params_local = copy.deepcopy(args[0])
            cluster = args[1]
            elb_name_suffix = args[2]
            env = args[3]
            filename = args[4]
            has_ecs_service = args[5]
            listener_port = args[6]
            region = args[7]
            session = boto3.session.Session()
            if has_ecs_service:
                elb_name = 'ecs-elb-' + cluster
                if elb_name_suffix is not None:
                    elb_name = "-".join([elb_name, elb_name_suffix])
                self.populate_ecs_service_params(session, cf_params_local, cluster, elb_name, env, region, listener_port)
            # Skip non-cf files
            ext = filename.split('.')[-1]
            if ext != 'template' and ext != 'yml':
                return
            cf_client = session.client('cloudformation', region_name=region)
            # Stack base name = file name without directory or extension
            name = filename.split('/')[-1].split('.')[0]
            logging.info("%s: Processing CloudFormation Template" % filename)
            cf_params_local['name'] = name
            parameters = [{'ParameterKey': 'name', 'ParameterValue': name}]
            if name is None or name in filename:
                with open(filename, 'r') as f_h:
                    try:
                        cf_template = f_h.read()
                    except:
                        logging.exception("%s: Error reading file." % (filename))
                        self.catfile(filename)
                        raise
                    validate_response = self.validate_template(cf_client, cf_template, filename)

                    service_name = "%s-%s-%s" % (env, name, cluster)
                    if elb_name_suffix is not None:
                        service_name = "-".join([service_name, elb_name_suffix])
                    # Default to create; switch to update when the stack exists
                    cf_command = cf_client.create_stack
                    existing_stack_id = self.find_existing_stack(cf_client, cf_params_local, service_name)
                    if existing_stack_id is not None:
                        cf_command = cf_client.update_stack
                    self.populate_cf_params(cf_params_local, existing_stack_id, filename, parameters, validate_response)
                    logging.info("%s: Updating CloudFormation Stack" % (service_name))
                    try:
                        cf_response = cf_command(StackName=service_name, TemplateBody=cf_template, Parameters=parameters, Capabilities=["CAPABILITY_IAM"])
                        creating_stack_id = cf_response['StackId']
                        stack_status = self.wait_for_stack_creation(cf_client, creating_stack_id, service_name)
                    except botocore.exceptions.ClientError as e:
                        # A no-op update is reported as an error by CF; treat it as success
                        if e.response["Error"]["Message"] == 'No updates are to be performed.':
                            logging.info("%s: No updates to be performed, CF update succeeded." % service_name)
                        else:
                            raise
                    self.q.put("%s Succeeded" % filename)
                    logging.info("%s Succeeded" % filename)
        except Exception as e:
            logging.error("%s: Error executing CloudFormation Stack" % filename)
            logging.exception(e)
            self.q.put("%s Failed" % filename)
コード例 #28
0
ファイル: check_keys.py プロジェクト: traboukos/smart_open
def check(session):
    """Return the bucket names visible to *session*, or None when the
    list_buckets call fails (the failure is logged)."""
    client = session.client('s3')
    try:
        response = client.list_buckets()
    except Exception as e:
        logging.exception(e)
        return None
    return [bucket['Name'] for bucket in response['Buckets']]
コード例 #29
0
async def start_up(app, loop):
    """Initialise the app: empty cache, S3 client, and a Redis pool."""
    app.cache = {}
    app.client = boto3.session.Session(
        region_name=os.environ['AWS_REGION']).client('s3')
    # Pool of 5-10 Redis connections bound to the server's event loop
    app.redis = await aioredis.create_pool(
        address=(os.environ['REDIS_HOST'], os.environ['REDIS_PORT']),
        minsize=5,
        maxsize=10,
        loop=loop)
コード例 #30
0
 def __init__(self, aws_access_key, aws_secret_key, bucket_region):
     """Store the credentials and build a SigV4-signed S3 client for the
     given bucket region."""
     self.aws_access_key = aws_access_key
     self.aws_secret_key = aws_secret_key
     regional_session = boto3.session.Session(region_name=bucket_region)
     self.s3_obj = regional_session.client(
         's3',
         aws_access_key_id=aws_access_key,
         aws_secret_access_key=aws_secret_key,
         config=boto3.session.Config(signature_version='s3v4'))
コード例 #31
0
def __send_ses_email(sender, recipients, subject, html_body, text_body):
    """Send an email using SES

    Args:
        sender (str): From email address
        recipients (`list` of `str`): List of recipient email addresses
        subject (str): Subject of the email
        html_body (str): HTML body of the email
        text_body (str): Text body of the email

    Returns:
        `None`
    """
    access_key = app.config.get('AWS_API_ACCESS_KEY')
    secret_key = app.config.get('AWS_API_SECRET_KEY')
    source_arn = dbconfig.get('source_arn', NS_EMAIL)
    return_arn = dbconfig.get('return_path_arn', NS_EMAIL)

    # Explicit API keys take precedence; otherwise fall back to the
    # default boto3 credential chain.
    if access_key and secret_key:
        session = boto3.session.Session(access_key, secret_key)
    else:
        session = boto3.session.Session()
    ses = session.client('ses', region_name=dbconfig.get('ses_region', NS_EMAIL, 'us-west-2'))

    body = {}
    if html_body:
        body['Html'] = {'Data': html_body}
    if text_body:
        body['Text'] = {'Data': text_body}

    ses_options = {
        'Source': sender,
        'Destination': {'ToAddresses': recipients},
        'Message': {
            'Subject': {'Data': subject},
            'Body': body
        }
    }

    # Cross-account sending needs both ARNs, so only set them when both
    # are configured.
    if source_arn and return_arn:
        ses_options['SourceArn'] = source_arn
        ses_options['ReturnPathArn'] = return_arn

    ses.send_email(**ses_options)
コード例 #32
0
 def get_latest_ubuntu_ami() -> str:
     """Return the ImageId of the most recently created Ubuntu 20.04 (focal) HVM AMI."""
     ec2 = boto3.session.Session().client('ec2')
     name_filter = {
         'Name': 'name',
         'Values': ['ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server*'],
     }
     found = ec2.describe_images(Filters=[name_filter])['Images']
     # Newest image == last element after sorting by creation date.
     return sorted(found, key=lambda img: img['CreationDate'])[-1]['ImageId']
コード例 #33
0
def create_instance(region, instance_name, centos_version,
                    instancetype, vpc, publicaccess, azid):
    """Create an EC2 instance in *region* and record it in the database.

    Returns:
        The tag of the newly created instance.
    """
    session = boto3.session.Session()
    ec2_client = session.client('ec2', region_name=region)
    ec2_resource = session.resource('ec2', region_name=region)
    operator = EC2_Operate(ec2_client, ec2_resource, region)
    instancetag = operator.ec2_create(
        instance_name, centos_version, instancetype, vpc, publicaccess, azid)
    # Persist the new instance's details into the database.
    get_instance_info_bytags(region, [instancetag])
    return instancetag
コード例 #34
0
def getResourceFromSession(resourceName, region):
    """Build a boto3 client for *resourceName* in *region* using temporary credentials.

    The credentials (access key / secret / session token) come from
    getCredentials(), e.g. an assumed role.
    """
    creds = getCredentials()
    return boto3.session.Session().client(
        resourceName,
        region,
        aws_access_key_id=creds['AccessKeyId'],
        aws_secret_access_key=creds['SecretAccessKey'],
        aws_session_token=creds['SessionToken'])
コード例 #35
0
ファイル: models.py プロジェクト: muminoff/hippo
    def get_buckets(self):
        """Return the names of all buckets reachable with this object's key pair."""
        # Plain-HTTP client against the configured backend endpoint,
        # authenticated with this instance's stored credentials.
        client = boto3.session.Session().client(
            's3',
            use_ssl=False,
            endpoint_url=settings.BACKEND_ENDPOINT_URL,
            aws_access_key_id=self.key_id,
            aws_secret_access_key=self.key_secret)
        listing = client.list_buckets()
        return [entry['Name'] for entry in listing['Buckets']]
コード例 #36
0
ファイル: cfUpdate.py プロジェクト: owms/ecs-deploy
 def populate_ecs_service_params(self, session, cf_params, cluster, elb_name, env, region, listener_port):
     """Fill *cf_params* with the VPC id, listener ARN and a free listener-rule priority.

     Raises if the listener already has 11 or more distinct rule priorities.
     """
     elb = session.client('elbv2', region_name=region)
     balancer_arn, vpc_id = ApplyECS.get_load_balancer(elb, elb_name, cluster, env)
     listener_arn = ApplyECS.get_elb_listener(elb, balancer_arn, port=listener_port)
     cf_params['vpcid'] = vpc_id
     cf_params['listenerarn'] = listener_arn
     rules = elb.describe_rules(ListenerArn=listener_arn)['Rules']
     taken = set(rule['Priority'] for rule in rules)
     if len(taken) >= 11:
         message = "Listener %s already has %d rules, cannot add more services" % (listener_arn, len(taken))
         logging.error(message)
         raise Exception(message)
     # Reserve the first unused priority in the 10-20 service range.
     for candidate in range(10, 21):
         if str(candidate) not in taken:
             cf_params['priority'] = str(candidate)
             break
コード例 #37
0
ファイル: deploy.py プロジェクト: ryantuck/boto3-deploy
def deploy_lambda_function(session, function_name):
    """Create or update the AWS Lambda function *function_name*.

    Configuration is read from lambda/conf/defaults.json overlaid with
    lambda/conf/<function_name>.json; the code bundle is read from
    _deploy/_zip/<function_name>.zip.

    Args:
        session: boto3 session used to build the lambda/iam clients
        function_name (str): name of the Lambda function to deploy

    Returns:
        None
    """
    # Load the function-specific config on top of the defaults.
    with open('lambda/conf/{}.json'.format(function_name)) as f:
        function_cfg = json.load(f)
    with open('lambda/conf/defaults.json') as f:
        cfg = json.load(f)
    cfg.update(function_cfg)

    lambda_client = session.client('lambda')

    # Read the zipped code bundle once; both the create and update paths use it.
    with open('_deploy/_zip/{}.zip'.format(function_name), 'rb') as f:
        zip_bytes = f.read()

    role_arn = session.resource('iam').Role(cfg['role']).arn

    try:
        # Raises ResourceNotFoundException when the function does not exist yet.
        print('checking if function exists: {}'.format(function_name))
        lambda_client.get_function_configuration(FunctionName=function_name)
    except lambda_client.exceptions.ResourceNotFoundException:
        # Function is new, so create it. Only this specific error triggers
        # creation -- other failures (permissions, throttling) propagate
        # instead of causing a doomed create_function attempt.
        print('\tcreating new function: {}'.format(function_name))
        lambda_client.create_function(
                FunctionName=function_name,
                Runtime=cfg['runtime'],
                Role=role_arn,
                Code={'ZipFile': zip_bytes},
                Handler=cfg['handler'],
                Description=cfg.get('description'),
                Timeout=cfg['timeout'],
                MemorySize=cfg['memory'],
                VpcConfig=cfg.get('vpc', {}))
    else:
        # Function already exists: update its configuration, then its code.
        print('\tupdating function configuration: {}'.format(function_name))
        lambda_client.update_function_configuration(
                FunctionName=function_name,
                Role=role_arn,
                Handler=cfg['handler'],
                Description=cfg.get('description'),
                Timeout=cfg['timeout'],
                MemorySize=cfg['memory'],
                VpcConfig=cfg.get('vpc', {}))

        print('\tupdating function code: {}'.format(function_name))
        lambda_client.update_function_code(
                FunctionName=function_name,
                ZipFile=zip_bytes)
コード例 #38
0
ファイル: service.py プロジェクト: graphaelli/ectou-metadata
def security_credentials_role_name():
    """Serve IAM credentials for the configured role, refreshing them near expiry.

    Returns a JSON string mimicking the EC2 instance-metadata credentials
    document, or a 404 JSON error document if the role cannot be assumed.
    """
    role_arn = _get_role_arn()
    credentials = _credential_map.get(role_arn)

    # Refresh credentials if missing or going to expire soon.
    now = datetime.datetime.now(tz=dateutil.tz.tzutc())
    if not credentials or credentials['Expiration'] < now + _refresh_timeout:
        try:
            # Use any boto3 credential provider except the instance metadata
            # provider, which would loop back into this very service.
            botocore_session = botocore.session.Session()
            botocore_session.get_component('credential_provider').remove('iam-role')
            session = boto3.session.Session(botocore_session=botocore_session)

            credentials = session.client('sts').assume_role(RoleArn=role_arn,
                                                            RoleSessionName="ectou-metadata")['Credentials']
            credentials['LastUpdated'] = now

            _credential_map[role_arn] = credentials

        except Exception as e:
            bottle.response.status = 404
            bottle.response.content_type = 'text/plain'  # EC2 serves json as text/plain
            return json.dumps({
                'Code': 'Failure',
                # str(e), not e.message: Exception.message does not exist in Python 3.
                'Message': str(e),
            }, indent=2)

    # Return current credential.
    bottle.response.content_type = 'text/plain'  # EC2 serves json as text/plain
    return json.dumps({
        'Code': 'Success',
        'LastUpdated': _format_iso(credentials['LastUpdated']),
        "Type": "AWS-HMAC",
        'AccessKeyId': credentials['AccessKeyId'],
        'SecretAccessKey': credentials['SecretAccessKey'],
        'Token': credentials['SessionToken'],
        'Expiration': _format_iso(credentials['Expiration'])
    }, indent=2)
コード例 #39
0
ファイル: boto3.py プロジェクト: iquaba/salt
def get_connection(service, module=None, region=None, key=None, keyid=None,
                   profile=None):
    '''
    Return a boto connection for the service.

    .. code-block:: python

        conn = __utils__['boto.get_connection']('ec2', profile='custom_profile')
    '''

    # Default the boto3 client module name to the service name.
    module = module or service

    # Resolve region/credentials and a cache key from the salt profile config.
    cxkey, region, key, keyid = _get_profile(service, region, key,
                                             keyid, profile)
    cxkey = cxkey + ':conn'

    # Reuse a cached connection for this service/region/credential combination.
    if cxkey in __context__:
        return __context__[cxkey]

    try:
        session = boto3.session.Session(aws_access_key_id=keyid,
                          aws_secret_access_key=key,
                          region_name=region)
        # NOTE(review): boto3.session.Session() raises rather than returning
        # None on bad input, so this guard looks unreachable -- confirm.
        if session is None:
            raise SaltInvocationError('Region "{0}" is not '
                                      'valid.'.format(region))
        conn = session.client(module)
        # NOTE(review): session.client() likewise raises on failure; this
        # None check also appears to be dead code.
        if conn is None:
            raise SaltInvocationError('Region "{0}" is not '
                                      'valid.'.format(region))
    # NOTE(review): boto.exception is the legacy boto2 module; boto3 raises
    # botocore exceptions instead, so this handler may never fire -- verify.
    except boto.exception.NoAuthHandlerFound:
        raise SaltInvocationError('No authentication credentials found when '
                                  'attempting to make boto {0} connection to '
                                  'region "{1}".'.format(service, region))
    # Cache the connection for subsequent calls in this salt run.
    __context__[cxkey] = conn
    return conn
コード例 #40
0
log_lvl = os.getenv('LOG_LVL', default='WARNING')

# Map the LOG_LVL environment variable onto a logging level;
# anything unrecognised falls back to WARNING.
_level_map = {
    'ERROR': logging.ERROR,
    'INFO': logging.INFO,
    'DEBUG': logging.DEBUG,
}
logger.setLevel(_level_map.get(log_lvl, logging.WARNING))

logger.addHandler(logging.StreamHandler())


# Shared boto3 session and service clients for this module.
session = boto3.session.Session(region_name=REGION)
sqs = session.client('sqs')
s3 = session.client('s3')


def pull_files(message_URL):
    '''
    Pull a message from the SQS queue at *message_URL*.

    Polls until a response containing 'Messages' is received; ClientError
    responses are logged at debug level and the poll is retried.
    '''
    msgs = {}
    # NOTE(review): the outer and inner loops test the same condition, so the
    # outer loop is redundant as written -- possibly a truncated snippet that
    # originally processed msgs between the loops; confirm against the source.
    while 'Messages' not in msgs:
        logger.debug('pull message from SQS: {}'.format(message_URL))
        while 'Messages' not in msgs:
            try:
                msgs = sqs.receive_message(QueueUrl=message_URL)
            except botocore.exceptions.ClientError as err:
                # Transient API error: log the traceback and keep polling.
                logger.debug(traceback.format_exc())
コード例 #41
0
ファイル: as3.py プロジェクト: reciproco/delma
import boto3
import boto3.session
from util import AESCipher

session = boto3.session.Session(region_name='eu-west-1')
s3Client = session.client('s3')


# Read the local file and upload an AES-encrypted copy to S3.
# (with-block fixes the original's leaked file handle.)
with open('manage.py', 'rb') as data:
    cipher = AESCipher(key='1234567890123456')
    encrypted = cipher.encrypt(data.read().decode('utf-8'))
print(encrypted)
s3Client.put_object(Bucket='reciproco',Key='manage.py', Body=encrypted)


# Presigned URL grants temporary (100 s) read access to the uploaded object.
print(s3Client.generate_presigned_url('get_object', Params = {'Bucket': 'reciproco', 'Key': 'manage.py'}, ExpiresIn = 100))

# Download the object again and verify it decrypts back to the original text.
new_cipher = AESCipher(key='1234567890123456')
response = s3Client.get_object(Bucket='reciproco',Key='manage.py')
i = response["Body"].read()
print(i)
decrypted = new_cipher.decrypt(i)
print(decrypted.decode('utf-8'))
コード例 #42
0
ファイル: cli.py プロジェクト: nagyistge/zalando-stups.berry
def run_berry(args):
    """Continuously sync mint credential files from S3 into a local directory.

    Loads configuration from args.config_file (YAML), then loops: for each of
    the 'user' and 'client' credential files, downloads the JSON document from
    the mint S3 bucket and atomically rewrites the local copy whenever its
    contents changed. Runs a single pass when args.once is set; otherwise
    sleeps args.interval seconds between passes.

    Raises:
        UsageError: when application_id or mint_bucket is not configured.
    """
    try:
        with open(args.config_file) as fd:
            # NOTE(review): yaml.load without an explicit Loader is deprecated
            # (PyYAML >= 5.1) and unsafe on untrusted input; consider safe_load.
            config = yaml.load(fd)
    except Exception as e:
        logging.warn('Could not load configuration from {}: {}'.format(args.config_file, e))
        config = {}

    # Command-line arguments take precedence over the configuration file.
    application_id = args.application_id or config.get('application_id')
    mint_bucket = args.mint_bucket or config.get('mint_bucket')
    local_directory = args.local_directory

    if not application_id:
        raise UsageError('Application ID missing, please set "application_id" in your configuration YAML')

    if not mint_bucket:
        raise UsageError('Mint Bucket is not configured, please set "mint_bucket" in your configuration YAML')

    if args.aws_credentials_file:
        aws_credentials = use_aws_credentials(application_id, args.aws_credentials_file)
    else:
        aws_credentials = {}

    session = boto3.session.Session(**aws_credentials)
    s3 = session.client('s3')
    while True:
        for fn in ['user', 'client']:
            key_name = '{}/{}.json'.format(application_id, fn)
            try:
                local_file = os.path.join(local_directory, '{}.json'.format(fn))
                tmp_file = local_file + '.tmp'
                response = None
                retry = 3
                while retry:
                    try:
                        response = s3.get_object(Bucket=mint_bucket, Key=key_name)
                        retry = False
                    except botocore.exceptions.ClientError as e:
                        # more friendly error messages
                        # https://github.com/zalando-stups/berry/issues/2
                        status_code = e.response.get('ResponseMetadata', {}).get('HTTPStatusCode')
                        msg = e.response['Error'].get('Message')
                        error_code = e.response['Error'].get('Code')
                        endpoint = e.response['Error'].get('Endpoint', '')
                        retry -= 1
                        if error_code == 'InvalidRequest' and 'Please use AWS4-HMAC-SHA256.' in msg:
                            logging.debug(('Invalid Request while trying to read "{}" from mint S3 bucket "{}". ' +
                                           'Retrying with signature version v4! ' +
                                           '(S3 error message: {})').format(
                                         key_name, mint_bucket, msg))
                            # Rebuild the client with SigV4 signing and retry.
                            s3 = session.client('s3', config=Config(signature_version='s3v4'))
                        elif error_code == 'PermanentRedirect' and endpoint.endswith('.amazonaws.com'):
                            region = get_bucket_region(s3, mint_bucket, endpoint)
                            logging.debug(('Got Redirect while trying to read "{}" from mint S3 bucket "{}". ' +
                                           'Retrying with region {}, endpoint {}! ' +
                                           '(S3 error message: {})').format(
                                         key_name, mint_bucket, region, endpoint, msg))
                            # Rebuild the client against the bucket's real region and retry.
                            s3 = session.client('s3', region)
                        elif status_code == 403:
                            logging.error(('Access denied while trying to read "{}" from mint S3 bucket "{}". ' +
                                           'Check your IAM role/user policy to allow read access! ' +
                                           '(S3 error message: {})').format(
                                          key_name, mint_bucket, msg))
                            retry = False
                        elif status_code == 404:
                            logging.error(('Credentials file "{}" not found in mint S3 bucket "{}". ' +
                                           'Mint either did not sync them yet or the mint configuration is wrong. ' +
                                           '(S3 error message: {})').format(
                                          key_name, mint_bucket, msg))
                            retry = False
                        else:
                            logging.error('Could not read from mint S3 bucket "{}": {}'.format(
                                          mint_bucket, e))
                            retry = False

                if response:
                    body = response['Body']
                    json_data = body.read()

                    # check that the file contains valid JSON
                    new_data = json.loads(json_data.decode('utf-8'))

                    try:
                        with open(local_file, 'r') as fd:
                            old_data = json.load(fd)
                    # except Exception (not bare except): do not swallow
                    # SystemExit/KeyboardInterrupt here.
                    except Exception:
                        # Missing or corrupt local file: treat as "no previous data".
                        old_data = None
                    # check whether the file contents changed
                    if new_data != old_data:
                        # Write to a temp file then rename, so readers never
                        # observe a partially-written credentials file.
                        with open(tmp_file, 'wb') as fd:
                            fd.write(json_data)
                        os.rename(tmp_file, local_file)
                        logging.info('Rotated {} credentials for {}'.format(fn, application_id))
            # except Exception (not bare except): keep Ctrl-C working while
            # still logging any per-file failure and moving on.
            except Exception:
                logging.exception('Failed to download {} credentials'.format(fn))

        if args.once:
            break

        time.sleep(args.interval)  # pragma: no cover