示例#1
0
    def __init__(self, args):
        """Build a boto3 session that shares the aws-cli MFA credential
        cache, then create SSM and EC2 clients from it.

        :param args: parsed CLI arguments; must expose ``profile`` and
            ``region`` attributes.
        """
        # aws-cli compatible MFA cache — same path the AWS CLI uses, so
        # cached MFA/assume-role credentials are shared with the CLI.
        cli_cache = os.path.join(os.path.expanduser('~'),'.aws/cli/cache')

        # Construct boto3 session with MFA cache
        session = boto3.session.Session(profile_name=args.profile, region_name=args.region)
        # NOTE(review): reaches into boto3's private `_session` attribute to
        # install the file cache on the assume-role provider — fragile across
        # boto3/botocore upgrades; re-verify on version bumps.
        session._session.get_component('credential_provider').get_provider('assume-role').cache = botocore.credentials.JSONFileCache(cli_cache)

        # Create boto3 clients from session
        self.ssm_client = session.client('ssm')
        self.ec2_client = session.client('ec2')
示例#2
0
def setup_clients(profile_name, region_name):
    """Create autoscaling, EC2 and ELB boto3 clients for a profile/region.

    :param profile_name: AWS credentials profile used to build the session.
    :param region_name: region applied to each created client.
    :return: tuple ``(asg_client, ec2_client, elb_client)``.

    Exits the process with status 3 when boto3 is not installed.
    """
    try:
        import boto3
        import botocore.session
    except ImportError:
        # BUG FIX: was a bare `except:`, which would also trap SystemExit,
        # KeyboardInterrupt and unrelated errors raised during import.
        logging.error("Could not import 'boto3' library. Please install.")
        sys.exit(3)
    session = boto3.session.Session(profile_name=profile_name)
    asg_client = session.client('autoscaling', region_name=region_name)
    ec2_client = session.client('ec2', region_name=region_name)
    elb_client = session.client('elb', region_name=region_name)
    return asg_client, ec2_client, elb_client
示例#3
0
def setup_clients(profile_name, region_name):
    """Create autoscaling, EC2 and ELB boto3 clients for a profile/region.

    :param profile_name: AWS credentials profile used to build the session.
    :param region_name: region applied to each created client.
    :return: tuple ``(asg_client, ec2_client, elb_client)``.

    Exits the process with status 3 when boto3 is not installed.
    """
    try:
        import boto3
        import botocore.session
    except ImportError:
        # BUG FIX: was a bare `except:`, which would also trap SystemExit,
        # KeyboardInterrupt and unrelated errors raised during import.
        logging.error("Could not import 'boto3' library. Please install.")
        sys.exit(3)
    session = boto3.session.Session(profile_name=profile_name)
    asg_client = session.client('autoscaling', region_name=region_name)
    ec2_client = session.client('ec2', region_name=region_name)
    elb_client = session.client('elb', region_name=region_name)
    return asg_client, ec2_client, elb_client
示例#4
0
    def _impersonate_to_role(
            self, role_arn: str, session: boto3.session.Session,
            session_kwargs: Dict[str, Any]) -> boto3.session.Session:
        """Return a new boto3 session whose credentials come from assuming
        ``role_arn``.

        The strategy is picked by the ``assume_role_method`` key of the
        connection extra config: ``assume_role`` (the default when unset),
        ``assume_role_with_saml`` or ``assume_role_with_web_identity``.

        :param role_arn: ARN of the IAM role to impersonate.
        :param session: base session used to build the STS client.
        :param session_kwargs: extra keyword args for the returned Session.
        :raises NotImplementedError: for an unrecognized assume_role_method.
        """
        assume_role_kwargs = self.extra_config.get("assume_role_kwargs", {})
        assume_role_method = self.extra_config.get('assume_role_method')
        self.log.info("assume_role_method=%s", assume_role_method)
        if not assume_role_method or assume_role_method == 'assume_role':
            sts_client = session.client("sts", config=self.config)
            sts_response = self._assume_role(
                sts_client=sts_client,
                role_arn=role_arn,
                assume_role_kwargs=assume_role_kwargs)
        elif assume_role_method == 'assume_role_with_saml':
            sts_client = session.client("sts", config=self.config)
            sts_response = self._assume_role_with_saml(
                sts_client=sts_client,
                role_arn=role_arn,
                assume_role_kwargs=assume_role_kwargs)
        elif assume_role_method == 'assume_role_with_web_identity':
            # The web-identity flow yields a refreshable botocore session
            # rather than one-shot STS credentials, so return early here.
            botocore_session = self._assume_role_with_web_identity(
                role_arn=role_arn,
                assume_role_kwargs=assume_role_kwargs,
                base_session=session._session,
            )
            return boto3.session.Session(
                region_name=session.region_name,
                botocore_session=botocore_session,
                **session_kwargs,
            )
        else:
            # BUG FIX: the previous message omitted
            # "assume_role_with_web_identity" even though it is handled above.
            raise NotImplementedError(
                f'assume_role_method={assume_role_method} in Connection {self.conn.conn_id} Extra.'
                'Currently "assume_role", "assume_role_with_saml" and '
                '"assume_role_with_web_identity" are supported.'
                '(Exclude this setting will default to "assume_role").')
        # Use credentials retrieved from STS
        credentials = sts_response["Credentials"]
        aws_access_key_id = credentials["AccessKeyId"]
        aws_secret_access_key = credentials["SecretAccessKey"]
        aws_session_token = credentials["SessionToken"]
        self.log.info(
            "Creating session with aws_access_key_id=%s region_name=%s",
            aws_access_key_id,
            session.region_name,
        )

        return boto3.session.Session(
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            region_name=session.region_name,
            aws_session_token=aws_session_token,
            **session_kwargs,
        )
示例#5
0
    def test_runtime_error_raised_when_shadowing_client_method(self):
        """Registering a handler that overwrites an existing client method
        must make client creation fail with RuntimeError."""
        core_session = botocore.session.get_session()
        boto_session = boto3.session.Session(
            region_name='us-west-2', botocore_session=core_session)

        def shadowing_handler(class_attributes, **kwargs):
            # Try to clobber the real put_object method with a junk value.
            utils.inject_attribute(class_attributes, 'put_object', 'invalid')

        core_session.register('creating-client-class', shadowing_handler)

        # Creating the client fires the handler above; boto3 must refuse
        # to shadow an existing client method.
        with self.assertRaises(RuntimeError):
            boto_session.client('s3')
示例#6
0
    def test_runtime_error_raised_when_shadowing_client_method(self):
        """Client construction must raise RuntimeError if an event handler
        attempts to replace an existing client method."""
        bc_session = botocore.session.get_session()
        s = boto3.session.Session(region_name='us-west-2',
                                  botocore_session=bc_session)

        def handler(class_attributes, **kwargs):
            # Attempt to overwrite put_object with an invalid attribute.
            utils.inject_attribute(class_attributes, 'put_object', 'invalid')

        bc_session.register('creating-client-class', handler)

        # boto3 detects the shadowing attempt while building the client
        # class and raises rather than silently replacing the method.
        with self.assertRaises(RuntimeError):
            s.client('s3')
def test_all_collections_have_paginators_if_needed():
    """Generator test: yield one check per resource collection verifying a
    paginator exists for any paginated operation the collection relies on."""
    # If a collection relies on an operation that is paginated, it
    # will require a paginator to iterate through all of the resources
    # with the all() method. If there is no paginator, it will only
    # make it through the first page of results. So we need to make sure
    # if a collection looks like it uses a paginated operation then there
    # should be a paginator applied to it.
    botocore_session = botocore.session.get_session()
    session = Session(botocore_session=botocore_session)
    loader = botocore_session.get_component('data_loader')
    for service_name in session.get_available_resources():
        client = session.client(service_name, region_name='us-east-1')
        json_resource_model = loader.load_service_model(
            service_name, 'resources-1')
        resource_defs = json_resource_model['resources']
        resource_models = []
        # Get the service resource model
        service_resource_model = ResourceModel(
            service_name, json_resource_model['service'], resource_defs)
        resource_models.append(service_resource_model)
        # Generate all of the resource models for a service
        for resource_name, resource_defintion in resource_defs.items():
            resource_models.append(ResourceModel(
                resource_name, resource_defintion, resource_defs))
        for resource_model in resource_models:
            # Iterate over all of the collections for each resource model
            # and ensure that the collection has a paginator if it needs one.
            # NOTE(review): `resource_name` below is whatever the defs loop
            # above left it as (the last key of resource_defs), even when the
            # model being yielded is the service-level one — confirm this
            # labeling is intentional.
            for collection_model in resource_model.collections:
                yield (
                    _assert_collection_has_paginator_if_needed, client,
                    service_name, resource_name, collection_model)
示例#8
0
def createClient(region):
    """Return a DynamoDB client.

    Inside CodeBuild (detected via CODEBUILD_BUILD_ID) the default
    credential chain is used; locally, a session is built from PROFILE.
    """
    in_codebuild = environ.get('CODEBUILD_BUILD_ID') is not None
    if in_codebuild:
        return boto3.client('dynamodb', region_name=region)
    logging.info('using profile {}'.format(PROFILE))
    session = boto3.Session(profile_name=PROFILE)
    return session.client('dynamodb', region_name=region)
def test_all_collections_have_paginators_if_needed():
    """Generator test: yield one check per resource collection verifying a
    paginator exists for any paginated operation the collection relies on."""
    # If a collection relies on an operation that is paginated, it
    # will require a paginator to iterate through all of the resources
    # with the all() method. If there is no paginator, it will only
    # make it through the first page of results. So we need to make sure
    # if a collection looks like it uses a paginated operation then there
    # should be a paginator applied to it.
    botocore_session = botocore.session.get_session()
    session = Session(botocore_session=botocore_session)
    loader = botocore_session.get_component('data_loader')
    for service_name in session.get_available_resources():
        client = session.client(service_name, region_name='us-east-1')
        json_resource_model = loader.load_service_model(
            service_name, 'resources-1')
        resource_defs = json_resource_model['resources']
        resource_models = []
        # Get the service resource model
        service_resource_model = ResourceModel(
            service_name, json_resource_model['service'], resource_defs)
        resource_models.append(service_resource_model)
        # Generate all of the resource models for a service
        for resource_name, resource_defintion in resource_defs.items():
            resource_models.append(ResourceModel(
                resource_name, resource_defintion, resource_defs))
        for resource_model in resource_models:
            # Iterate over all of the collections for each resource model
            # and ensure that the collection has a paginator if it needs one.
            # NOTE(review): `resource_name` below is whatever the defs loop
            # above left it as (the last key of resource_defs), even when the
            # model being yielded is the service-level one — confirm this
            # labeling is intentional.
            for collection_model in resource_model.collections:
                yield (
                    _assert_collection_has_paginator_if_needed, client,
                    service_name, resource_name, collection_model)
示例#10
0
def _collection_test_args():
    """Yield ``(client, service_name, resource_name, collection_model)``
    tuples covering every collection of every available resource type."""
    core_session = botocore.session.get_session()
    session = Session(botocore_session=core_session)
    loader = core_session.get_component('data_loader')
    for service_name in session.get_available_resources():
        client = session.client(service_name, region_name='us-east-1')
        model_json = loader.load_service_model(service_name, 'resources-1')
        resource_defs = model_json['resources']
        # Start with the service-level resource model, then append a model
        # for every individually named resource.
        models = [Session and ResourceModel(service_name,
                                            model_json['service'],
                                            resource_defs)]
        for resource_name, definition in resource_defs.items():
            models.append(ResourceModel(resource_name, definition,
                                        resource_defs))
        for model in models:
            # One test-arg tuple per collection on each resource model.
            # (resource_name intentionally carries the last key of
            # resource_defs here, matching the original behavior.)
            for collection_model in model.collections:
                yield (client, service_name, resource_name, collection_model)
示例#11
0
def kmsDataKeyDemo():
    """Client-side encryption demo using AES with a KMS data key.

    Generates a data key under the master key, encrypts a sample payload
    locally with AES, then decrypts the data key via KMS and verifies the
    payload round-trips.
    """
    # Build the KMS client from the configured profile/region.
    session = Session(profile_name=PROFILE_NAME, region_name=REGION)
    kms = session.client('kms')

    # Ask KMS for a data key; the response contains both the plaintext key
    # and its encrypted (cipher) form.
    datakey = kms.generate_data_key(KeyId=MASTER_KEY_ARN, KeySpec='AES_256')
    plain_key = datakey['Plaintext']
    cipher_key = datakey['CiphertextBlob']
    print("Data key olusturuldu!")
    # Encrypt a sample message locally with AES using the plaintext key.
    encryptor = AES.new(plain_key, AES.MODE_EAX, IV)
    message = b'This is an important data'
    encrypted_message = encryptor.encrypt(message)

    # Printed for demonstration purposes only.
    print('Plain text data key', base64.b64encode(plain_key))
    print('Plain text data', message)
    print('Cipher text data', base64.b64encode(encrypted_message))

    # At this point the plaintext data key could be discarded; only the
    # encrypted data key must be kept to allow later decryption.

    # Hand the encrypted data key back to KMS to recover the plaintext key.
    recovered = kms.decrypt(CiphertextBlob=cipher_key)
    decryptor = AES.new(recovered['Plaintext'], AES.MODE_EAX, IV)
    roundtrip = decryptor.decrypt(encrypted_message)
    print("Decrypt edildi.")
    print("Plaintext data=", roundtrip)
    # The decrypted payload must match the original message exactly.
    assert roundtrip == message
示例#12
0
def delete_secrets_with_force(region, session, secret_names):
    """Permanently delete each named secret with no recovery window.

    :param region: region passed to the secretsmanager client.
    :param session: boto3-like session providing ``client``.
    :param secret_names: iterable of secret IDs/names to delete.
    """
    sm_client = session.client('secretsmanager', region)
    for name in secret_names:
        # The original wrapped this in ``try/except: raise`` — a no-op.
        # Errors from the service still propagate to the caller unchanged.
        sm_client.delete_secret(SecretId=name, ForceDeleteWithoutRecovery=True)
示例#13
0
    def __init__(self,
                 keyname=None,
                 use_akms=None,
                 host=None,
                 region='us-east-1',
                 *args,
                 **kwargs):
        """Resolve (and memoize) an S3 session/client/resource triple.

        Sessions are cached per ``(keyname, use_akms)`` pair in the shared
        ``self.sessions`` map, guarded by ``self.lock``.

        :param keyname: AKMS key name forwarded via botocore session vars.
        :param use_akms: force AKMS on/off; defaults to whether USE_AKMS is
            present in the environment.
        :param host: DEPRECATED — ignored except for a warning.
        :param region: NOTE(review): accepted but never used below —
            confirm whether it should feed the session/clients.
        """

        import botocore.session

        # DEPRECATED - `host`
        if host:
            logger.warning('the host param is deprecated')

        # Explicit argument wins; otherwise fall back to the env flag.
        use_akms = (use_akms
                    if use_akms is not None else 'USE_AKMS' in os.environ)

        # Cache key (this intentionally shadows the *args parameter).
        args = (keyname, use_akms)
        with self.lock:
            if args not in self.sessions:
                session = Session(botocore_session=botocore.session.Session(
                    session_vars={
                        'akms_keyname': keyname,
                        'use_akms': use_akms,
                    }))
                client = session.client('s3')
                s3 = session.resource('s3')
                self.sessions[args] = (session, client, s3)

        (self.session, self.client, self.s3) = self.sessions[args]
示例#14
0
    def get_client_type(
        self,
        client_type: Optional[str] = None,
        region_name: Optional[str] = None,
        config: Optional[Config] = None,
    ) -> boto3.client:
        """Get the underlying boto3 client using boto3 session"""
        session, endpoint_url = self._get_credentials(region_name=region_name)

        if not client_type:
            # Default to the class-level client_type attribute.
            client_type = self.client_type
        else:
            warnings.warn(
                "client_type is deprecated. Set client_type from class attribute.",
                DeprecationWarning,
                stacklevel=2,
            )

        # No AWS Operators use the config argument to this method.
        # Keep backward compatibility with other users who might use it
        effective_config = self.config if config is None else config

        return session.client(
            client_type,
            endpoint_url=endpoint_url,
            config=effective_config,
            verify=self.verify,
        )
示例#15
0
文件: views.py 项目: dankiy/asker
def vote(request, question_id):
    """Record the signed-in user's vote on the given poll question.

    Re-renders the detail page when no valid choice was submitted, the
    results page when the user already voted, and otherwise records the
    vote, optionally publishes an SNS notification, and redirects to the
    results view.
    """
    question = get_object_or_404(Question, pk=question_id)
    try:
        # EAFP: the POST may lack 'choice', or reference a choice that
        # does not belong to this question.
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        return render(request, 'polls/detail.html', {
            'question': question,
            'error_message': "You didn't select a choice.",
        })
    if request.user.choice_set.filter(question=question).exists():
        # One vote per user per question.
        return render(request, 'polls/results.html', {
            'question': question,
            'error_message': "You have already voted.",
        })
    else:
        selected_choice.votes.add(request.user)
        # NOTE(review): read-modify-write on num_votes is racy under
        # concurrent requests; an F('num_votes') + 1 update would be
        # atomic — confirm before changing.
        selected_choice.num_votes += 1
        selected_choice.save()

        if question.notifications:
            # Credentials come from the environment; region must match the
            # account hosting the topic below.
            session = boto3.Session(
                aws_access_key_id=os.environ['AWS_ACCESS_KEY'],
                aws_secret_access_key=os.environ['AWS_SECRET_KEY'],
                aws_session_token=os.environ['AWS_SESSION_TOKEN'],
                region_name=os.environ['AWS_REGION'])
            sns = session.client('sns')
            response = sns.publish(
                TopicArn='arn:aws:sns:us-east-1:716390431917:Polls',
                Message=request.user.username+' has voted for '+selected_choice.choice_text+\
                ' in '+question.question_text
            )

        return HttpResponseRedirect(
            reverse('polls:results', args=(question.id, )))
示例#16
0
def kmsMasterKeyEncrypt():
    """Client-side encryption using the KMS Customer Master Key directly.

    Suitable for small amounts of arbitrary data, such as a personal
    identifier or database password, or other sensitive information —
    KMS encrypt/decrypt round-trips the raw bytes itself.
    """
    # Build the KMS client from the configured profile/region.
    session = Session(profile_name=PROFILE_NAME, region_name=REGION)
    kms = session.client('kms')
    dbPassword = b"this is my super secret password"

    # Encrypt with the KMS Customer Master Key; the response carries
    # CiphertextBlob, KeyId, EncryptionAlgorithm and ResponseMetadata.
    obj = kms.encrypt(KeyId=MASTER_KEY_ARN, Plaintext=dbPassword)
    # BUG FIX: this print statement was corrupted in the source (a
    # credential scrubber replaced part of the line with ******);
    # reconstructed to print the base64-encoded ciphertext blob.
    print('Cipher  text db password=', base64.b64encode(obj['CiphertextBlob']))
    ciphertextBlob = obj['CiphertextBlob']
    # Key management stays inside AWS KMS, so nothing else needs storing.

    # For decryption only the returned ciphertext blob is needed.
    # decrypt returns KeyId, Plaintext, EncryptionAlgorithm, ResponseMetadata.
    decrypt_obj = kms.decrypt(CiphertextBlob=ciphertextBlob)
    print("Plaintext DB Passowrd=", decrypt_obj['Plaintext'].decode('utf-8'))
    assert dbPassword == decrypt_obj['Plaintext']
示例#17
0
    def get_session(self) -> boto3.Session:
        """Build a boto3 session for the configured profile/region and
        cache the caller's identity (user id and ARN) on this object."""
        session = boto3.Session(
            profile_name=self.aws_profile, region_name=self.region_name)

        # Resolve who we are via STS and remember it for later use.
        identity = session.client("sts").get_caller_identity()
        self.user_id = identity["UserId"]
        self.user_arn = identity["Arn"]
        return session
示例#18
0
 def client(self, *args, **kwargs):
     """
     Outside of a context established by `.assumed_role_credentials()` or
     `.direct_access_credentials()` this is the same as boto3.client. Within
     such a context, it returns boto3 clients that use different, temporary
     credentials.
     """
     # Use the thread-local session installed by a credentials context,
     # falling back to the module-level boto3 helpers otherwise.
     provider = self._per_thread.session or boto3
     return provider.client(*args, **kwargs)
示例#19
0
def get_client(profile_name=None, region_name=None):
    """Return an API Gateway client backed by a customized botocore session.

    The underlying botocore session pins the profile, points data_path at
    the bundled service models, and installs the ``fix_response`` handler
    on every apigateway after-call event.
    """
    core = botocore.session.get_session()
    core.set_config_variable('profile', profile_name)
    core.set_config_variable('data_path', _get_path())
    core.register('after-call.apigateway.*', fix_response)
    wrapped = boto3.Session(profile_name=profile_name,
                            region_name=region_name,
                            botocore_session=core)
    return wrapped.client('apigateway')
示例#20
0
def get_sgs_and_update_secret(region, session, rds_id, rds_secret_name):
    """Describe an RDS instance, sync its endpoint hostname into a secret,
    and return the instance's VPC security groups.

    :param region: AWS region for the RDS client and the secret update.
    :param session: boto3-like session providing ``client``.
    :param rds_id: DB instance identifier to describe.
    :param rds_secret_name: Secrets Manager secret to refresh with the host.
    :return: the ``VpcSecurityGroups`` list of the described instance.
    """
    rds_client = session.client('rds', region)
    # The original ``try/except: raise`` was a no-op wrapper; any error
    # from the describe call still propagates to the caller unchanged.
    resp = rds_client.describe_db_instances(DBInstanceIdentifier=rds_id)
    instance = resp['DBInstances'][0]
    hostname = instance['Endpoint']['Address']
    update_rds_secret_with_hostname(region, session, rds_secret_name, hostname)
    return instance['VpcSecurityGroups']
示例#21
0
def update_rds_secret_with_hostname(region, session, secret_name, hostname):
    """Rewrite the ``host`` field of a JSON-valued Secrets Manager secret.

    :param region: region for the secretsmanager client.
    :param session: boto3-like session providing ``client``.
    :param secret_name: secret ID whose SecretString holds JSON.
    :param hostname: value to store under the ``host`` key.
    """
    sm_client = session.client('secretsmanager', region_name=region)
    # The original ``try/except: raise`` added nothing; client errors
    # still propagate to the caller unchanged.
    data = sm_client.get_secret_value(SecretId=secret_name)
    secret = json.loads(data['SecretString'])
    secret['host'] = hostname
    sm_client.update_secret(SecretId=secret_name, SecretString=json.dumps(secret))
示例#22
0
def detele_rds_instance(region, session, rds_id):
    """Delete an RDS instance, skipping the final snapshot and automated
    backups; tolerates instances that are already gone.

    (The "detele" typo in the name is preserved for caller compatibility.)

    :param region: region for the RDS client.
    :param session: boto3-like session providing ``client``.
    :param rds_id: DB instance identifier to delete.
    """
    rds_client = session.client('rds', region)
    try:
        resp = rds_client.delete_db_instance(
            DBInstanceIdentifier=rds_id,
            SkipFinalSnapshot=True,
            DeleteAutomatedBackups=True)
        print(resp)
    except ClientError as e:
        # Already-deleted instances surface as InvalidDBInstanceState.
        if e.response['Error']['Code'] == "InvalidDBInstanceState":
            print("DB might have been deleted already")
        else:
            # BUG FIX: the original silently swallowed every other
            # ClientError; surface unexpected failures to the caller.
            raise
示例#23
0
def create_rds_secret(region, session, secret_name, rds_id, host, port, username, password):
    """Create (or refresh) a Secrets Manager secret describing a MySQL RDS
    instance. If the secret already exists it is updated in place.

    :param region: region for the secretsmanager client.
    :param session: boto3-like session providing ``client``.
    :param secret_name: name/ID of the secret to create or update.
    :param rds_id: DB instance identifier stored in the secret payload.
    :param host: endpoint hostname stored in the payload.
    :param port: endpoint port stored in the payload.
    :param username: DB username stored in the payload.
    :param password: DB password stored in the payload.
    """
    sm_client = session.client('secretsmanager', region_name=region)
    data = {"username": username, "password": password, "engine": 'mysql',
            "host": host, "port": port, 'dbInstanceIdentifier': rds_id}
    payload = json.dumps(data)
    try:
        sm_client.create_secret(Name=secret_name, SecretString=payload)
    except ClientError as e:
        if e.response['Error']['Code'] == "ResourceExistsException":
            print("secret exists, update instead")
            sm_client.update_secret(SecretId=secret_name, SecretString=payload)
        else:
            # BUG FIX: the original silently swallowed every other
            # ClientError; surface unexpected failures to the caller.
            raise
示例#24
0
def obtain_new_iam_auth_token(url: URL,
                              region_name: str = "auto",
                              profile_name: Optional[str] = None) -> str:
    """Generate a short-lived RDS IAM authentication token for *url*.

    boto3 is imported lazily: it is not a core requirement, and an
    ImportError here is the appropriate failure mode when it is missing.
    """
    from boto3.session import Session as Boto3Session

    rds = Boto3Session(profile_name=profile_name).client(
        "rds", region_name=region_name)
    return rds.generate_db_auth_token(
        DBHostname=url.host,
        Port=url.port,
        DBUsername=url.username,
        Region=region_name,
    )
示例#25
0
def main(argv):
    """CLI entry point: delete all objects under a prefix in an S3 bucket,
    optionally deleting the bucket itself afterwards.

    :param argv: unused; options are read via parse_args().
    :return: ``1`` when a required argument is missing, else ``None``.
    """
    global logger, logging, backup_region, aws_profile, aws_region
    logger.info('Started')

    args = parse_args()
    if args.loglevel:
        loglevel = args.loglevel
        logger.info("Changing logging level: " + loglevel.upper())
        # Map the textual level onto the root logger; unrecognized values
        # are silently ignored and leave the default level in place.
        if loglevel == "debug":
            logging.getLogger().setLevel(logging.DEBUG)
        elif loglevel == "info":
            logging.getLogger().setLevel(logging.INFO)
        elif loglevel == "error":
            logging.getLogger().setLevel(logging.ERROR)
        elif loglevel == "warning":
            logging.getLogger().setLevel(logging.WARNING)
        elif loglevel == "critical":
            logging.getLogger().setLevel(logging.CRITICAL)
    else:
        logger.info("Using default logging level: INFO")

    # --profile is mandatory: all AWS access goes through this profile.
    if args.aws_profile:
        aws_profile = args.aws_profile
        logger.info("AWS Profile: " + aws_profile)
    else:
        logger.error("--profile parameter is missing")
        return 1

    if args.aws_region:
        aws_region = args.aws_region
    else:
        aws_region = 'us-east-1'

    # --bucket is mandatory; a missing --prefix means "delete everything".
    if args.bucket_name:
        bucket_name = args.bucket_name
    else:
        logger.error("--bucket parameter is missing.")
        return 1
    if args.prefix:
        prefix = args.prefix
    else:
        prefix = None

    # initiate connection to AWS
    session = boto3.Session(profile_name=aws_profile, region_name=aws_region)
    s3conn = S3Connection(profile_name=aws_profile)
    client = session.client('s3')
    s3_delete_files_with_prefix(client, bucket_name, prefix)
    if args.delete_bucket:
        logger.info("Delete bucket %s" % bucket_name)
        delete_s3_bucket(session, s3conn, bucket_name)
    logger.info('Finished')
    return
示例#26
0
    def add_regional_client(self, region_name):
        """Adds a regional client for the specified region if it does not already exist.

        :param str region_name: AWS Region ID (ex: us-east-1)
        """
        # Guard clause: nothing to do when a client is already cached.
        if region_name in self._regional_clients:
            return
        regional_session = boto3.session.Session(
            region_name=region_name,
            botocore_session=self.config.botocore_session)
        kms_client = regional_session.client(
            "kms", config=self._user_agent_adding_config)
        self._register_client(kms_client, region_name)
        self._regional_clients[region_name] = kms_client
示例#27
0
def get_reserved_vpc_cidr(session, regions):
    """Collect the CIDR blocks of all non-default VPCs across *regions*.

    Blocks already collected from earlier regions are skipped; the
    accumulation order of the original implementation is preserved.
    """
    collected = []
    for region in regions:
        ec2 = session.client('ec2', region)
        described = ec2.describe_vpcs(Filters=[{
            "Name": "isDefault",
            "Values": ["false"]
        }])
        # Only blocks not seen in previous regions are appended.
        fresh = [entry['CidrBlock'] for entry in described['Vpcs']
                 if entry['CidrBlock'] not in collected]
        collected.extend(fresh)
    return collected
示例#28
0
    def expand_role(self, role: str, region_name: Optional[str] = None) -> str:
        """
        If the IAM role is a role name, get the Amazon Resource Name (ARN) for the role.
        If IAM role is already an IAM role ARN, no change is made.

        :param role: IAM role name or ARN
        :param region_name: Optional region name to get credentials for
        :return: IAM role ARN
        """
        # A '/' only appears in fully-qualified ARNs — pass those through.
        if "/" in role:
            return role
        session, endpoint_url = self._get_credentials(region_name=region_name)
        iam_client = session.client(
            'iam', endpoint_url=endpoint_url, config=self.config,
            verify=self.verify)
        return iam_client.get_role(RoleName=role)["Role"]["Arn"]
示例#29
0
    def get_client_type(
        self,
        client_type: str,
        region_name: Optional[str] = None,
        config: Optional[Config] = None,
    ) -> boto3.client:
        """Get the underlying boto3 client using boto3 session"""
        session, endpoint_url = self._get_credentials(region_name)

        # No AWS Operators use the config argument to this method.
        # Keep backward compatibility with other users who might use it
        effective_config = self.config if config is None else config

        return session.client(
            client_type,
            endpoint_url=endpoint_url,
            config=effective_config,
            verify=self.verify,
        )
示例#30
0
def clean_up_portal_buckets(session):
    """
    Empty the known prefixes of every bucket whose name contains 'portal'.
    The buckets themselves are never deleted.
    :param session: boto3 session used to build the S3 client
    :return: None
    """
    client = session.client('s3')
    for bucket in client.list_buckets()['Buckets']:
        name = bucket['Name']
        try:
            if re.search('portal', name):
                logger.info("Portal bucket %s will not be deleted only delete the files." % name)
                # Only these well-known prefixes are emptied.
                for prefix in ('admin', 'auth', 'logs'):
                    s3_delete_files_with_prefix(client, name, prefix)
        except (S3ResponseError, ClientError) as e:
            # Best effort: log the AWS error and continue with the rest.
            logger.info("Bucket %s - %s" % (name, e.response['Error']['Message']))
    return
示例#31
0
def delete_s3_bucket(session, s3conn, bucket_name):
    """
    Delete the named S3 bucket (or, for 'portal' buckets, only empty the
    known prefixes without deleting the bucket).
    :param session: boto3 session used for client/resource access
    :param s3conn: legacy S3 connection used for key listing and bucket delete
    :param bucket_name: name of the bucket to remove
    :return: None
    """
    client = session.client('s3')
    resource = session.resource('s3')
    list_of_s3_buckets = client.list_buckets()['Buckets']
    for bucket in list_of_s3_buckets:
        if bucket['Name'] == bucket_name:
            try:
                if re.search('portal', bucket_name):
                    # Portal buckets are preserved; only the known prefixes
                    # are emptied.
                    logger.info("Portal bucket %s will not be deleted only delete the files." % bucket_name)
                    s3_delete_files_with_prefix(client, bucket_name, 'admin')
                    s3_delete_files_with_prefix(client, bucket_name, 'auth')
                    s3_delete_files_with_prefix(client, bucket_name, 'logs')
                else:
                    client.get_bucket_location(Bucket=bucket_name)
                    logger.info("Bucket %s OK" % bucket_name)
                    # Strip everything that would block deletion: logging,
                    # replication, versioning and lifecycle rules, then the
                    # objects themselves.
                    s3_bucket_logging_disable(s3conn, bucket_name)
                    remove_bucket_replication(client, bucket_name)
                    disable_bucket_versioning(client, bucket_name)
                    logger.info("Delete s3 bucket lifecycle configuration.")
                    client.delete_bucket_lifecycle(Bucket=bucket_name)
                    delete_objects_in_bucket(resource, bucket_name)
                    # Second pass via the legacy connection to remove any
                    # remaining keys before deleting the bucket itself.
                    full_bucket = s3conn.get_bucket(bucket_name)
                    for key in full_bucket.list():
                        key.delete()
                    logger.info("Delete bucket %s" % bucket_name)
                    s3conn.delete_bucket(bucket_name)
            except (S3ResponseError, ClientError) as e:
                # Best effort: log the AWS error and stop processing.
                message = e.response['Error']['Message']
                logger.info("Bucket %s - %s" % (bucket_name, message))
                pass
            break
    return
示例#32
0
def init(profile, conf, bucket=None, path=None):
    """Init

    Initialises the module so that it can be used

    Args:
        profile (str): The name of the profile to use to connect
        conf (dict): The configuration parameters, see boto3.resource for more info
        bucket (str): Optional bucket (needed if not passed in init)
        path (str): Optional path to prepend to all keys

    Returns:
        None
    """

    # Pull in the module variable
    global _c, _r, _s3

    # Init the conf
    _s3 = {
        "profile": profile,
        "conf": BotoConfig(**conf),
        "bucket": bucket,
        "path": path
    }

    # Create a new session using the profile
    session = boto3.Session(profile_name=profile)

    # Get an S3 resource
    _r = session.resource('s3', config=_s3['conf'])

    # Get a client
    # NOTE(review): the client is built with its own hard-coded Config
    # (path-style addressing + sigv4) and does NOT reuse _s3['conf'] like
    # the resource above — confirm this asymmetry is intentional.
    _c = session.client('s3',
                        config=boto3.session.Config(
                            s3={'addressing_style': 'path'},
                            signature_version='s3v4'))
示例#33
0
def lambda_handler(event, context):
    """CloudFormation custom-resource handler that finds a free VPC CIDR.

    On Create: subtracts reserved/in-use CIDRs from the GlobalCidr pool and
    reports the first /MaskBit subnet still available — via the cfn-response
    ``send`` helper when running in Lambda, or stdout when run locally with
    ``context=None``. Other request types are acknowledged as SUCCESS.
    """
    try:
        if (event['RequestType'] == 'Create'):
            if not validate_mask(event['ResourceProperties']['GlobalCidr'],
                                 event['ResourceProperties']['MaskBit']):
                logger.debug(
                    "Mask bit length is smaller than the mask for the VPN Cidr"
                )
                if context:
                    send(
                        event, context, FAILED,
                        "Mask bit length is smaller than the mask for the VPN Cidr"
                    )
                return
            ip_network = IPNetwork(event['ResourceProperties']['GlobalCidr'])
            ipv4_addr_space = IPSet([ip_network])
            # Real Lambda invocations use the default credential chain;
            # local runs take an explicit profile from the properties.
            if context:
                session = boto3.session.Session()
            else:
                session = boto3.session.Session(
                    profile_name=event['ResourceProperties']['Profile'])
            client = session.client('ec2')
            regions = get_regions(client)

            #get reserved ip cidrs
            reserved = get_reserved_vpc_cidr(session, regions)

            #define available
            available = ipv4_addr_space

            #define unavailable
            unavailable = None

            #get reserved vpc cidrs
            if reserved:
                unavailable = IPSet(reserved)

            #merge with passed vpc cidrs
            if len(event['ResourceProperties']['Reserved']) > 0:
                print("reserved")
                if str(event['ResourceProperties']['Reserved'][0]) != '':
                    unavailable = IPSet(event['ResourceProperties']
                                        ['Reserved']) | IPSet(reserved)
            if unavailable is not None:
                # Set XOR removes the unavailable ranges from the pool.
                available = ipv4_addr_space ^ unavailable
            print(available)
            subnets = ip_network.subnet(
                int(event['ResourceProperties']['MaskBit']))
            # First candidate subnet wholly inside the available set wins.
            for subnet in subnets:
                if subnet in available:
                    if context:
                        send(event, context, SUCCESS, None, None, str(subnet))
                    else:
                        print(str(subnet))
                    return
            logger.debug("Unable to find available ip space")
            if context:
                send(
                    event, context, FAILED,
                    "Unable to find available ip space, try a bigger mask bit length"
                )
        else:
            # Update/Delete are no-ops; acknowledge so the stack proceeds.
            if context:
                send(event, context, SUCCESS)
    except Exception as e:
        logger.error(e)
        if context:
            send(event, context, FAILED, str(e))
        raise e
    return
示例#34
0
def connect(account_name, connection_type, **args):
    """Assume the SecurityMonkey role in the named account and return a
    connection object for the requested AWS technology.

    Examples of use:
    ec2 = sts_connect.connect(environment, 'ec2', region=region, validate_certs=False)
    ec2 = sts_connect.connect(environment, 'ec2', validate_certs=False, debug=1000)
    ec2 = sts_connect.connect(environment, 'ec2')
    where environment is ( test, prod, dev )
    s3  = sts_connect.connect(environment, 's3')
    ses = sts_connect.connect(environment, 'ses')

    :param account: Account to connect with (i.e. test, prod, dev)

    :raises Exception: RDS Region not valid
                       AWS Tech not supported.

    :returns: STS Connection Object for given tech

    :note: To use this method a SecurityMonkey role must be created
            in the target account with full read only privileges.
    """
    account = Account.query.filter(Account.name == account_name).first()
    sts = boto.connect_sts()
    # Default role name, overridden when the account record carries one.
    role_name = 'SecurityMonkey'
    if account.role_name and account.role_name != '':
        role_name = account.role_name
    role = sts.assume_role('arn:aws:iam::' + account.number + ':role/' + role_name, 'secmonkey')

    if connection_type == 'botocore':
        # Hand back a raw botocore session seeded with the STS credentials.
        botocore_session = botocore.session.get_session()
        botocore_session.set_credentials(
            role.credentials.access_key,
            role.credentials.secret_key,
            token=role.credentials.session_token
        )
        return botocore_session

    if connection_type == 'iam_boto3':
        session = boto3.Session(
            aws_access_key_id=role.credentials.access_key,
            aws_secret_access_key=role.credentials.secret_key,
            aws_session_token=role.credentials.session_token
        )
        return session.resource('iam')

    region = 'us-east-1'
    # BUG FIX: dict.has_key() was removed in Python 3 — use the `in`
    # operator, as the other connect() variants in this file already do.
    if 'region' in args:
        region = args.pop('region')
        # Accept either a region-name string or a boto region object.
        if hasattr(region, 'name'):
            region = region.name

    # ElasticSearch Service:
    if connection_type == 'es':
        session = boto3.Session(
            aws_access_key_id=role.credentials.access_key,
            aws_secret_access_key=role.credentials.secret_key,
            aws_session_token=role.credentials.session_token,
            region_name=region
        )
        return session.client('es')

    # Legacy boto path: dynamically import boto.<connection_type>.
    module = __import__("boto.{}".format(connection_type))
    for subm in connection_type.split('.'):
        module = getattr(module, subm)

    return module.connect_to_region(
        region,
        aws_access_key_id=role.credentials.access_key,
        aws_secret_access_key=role.credentials.secret_key,
        security_token=role.credentials.session_token
    )
示例#35
0
def _test_create_client(session, service_name):
    client = session.client(service_name)
    assert_true(hasattr(client, 'meta'))
示例#36
0
def S3KMSEncrypt(filename, S3_BUCKET):
    """Client-side-encrypt *filename* with a KMS data key, upload it to
    *S3_BUCKET*, then download and decrypt it again as a round trip.

    Relies on module-level config: PROFILE_NAME, REGION, MASTER_KEY_ARN.
    Side effects: writes ``<filename>.enc`` (deleted before returning) and
    ``dec_<filename>``; calls KMS and S3.
    """
    #
    # Upload a file to S3 using client-side encryption with a KMS data key.
    #

    # Create the KMS client.
    session = Session(profile_name=PROFILE_NAME, region_name=REGION)
    kms = session.client('kms')

    # Generate a data key under the KMS master key; KMS returns both the
    # plaintext key and its encrypted (ciphertext) form.
    datakey = kms.generate_data_key(KeyId=MASTER_KEY_ARN, KeySpec='AES_256')
    datakeyPlain = datakey['Plaintext']
    datakeyCipher = datakey['CiphertextBlob']
    print("Data Key Olusturuldu.")

    with open(filename, 'rb') as file:
        file_contents = file.read()

    # Fernet is the library used for the symmetric encryption itself
    # (its key must be 32 url-safe base64-encoded bytes).
    fernet = Fernet(base64.b64encode(datakeyPlain))
    encrypted_file_contents = fernet.encrypt(file_contents)
    outfile_name = filename + ".enc"
    with open(outfile_name, 'wb') as file_encrypted:
        file_encrypted.write(encrypted_file_contents)

    # Upload the encrypted file to S3, storing the *encrypted* data key
    # in the object metadata so it can be recovered at decryption time.
    metadata = {'key': base64.b64encode(datakeyCipher).decode('ascii')}
    s3 = session.client('s3')
    s3.upload_file(outfile_name,
                   S3_BUCKET,
                   outfile_name,
                   ExtraArgs={'Metadata': metadata})
    print("Dosya S3ye aktarildi.\n")
    # Delete the local encrypted copy now that it lives in S3.
    os.remove(outfile_name)

    ###
    # DECRYPTION PART
    ###

    # Download the object back and start the decryption flow.

    transfer = S3Transfer(s3)
    transfer.download_file(S3_BUCKET, outfile_name, outfile_name)
    print("Encrypted Dosya S3'den Indirildi.")

    # Fetch the object's metadata (holds the encrypted data key).
    obj = s3.get_object(Bucket=S3_BUCKET, Key=outfile_name)
    metadata = obj['Metadata']['key']

    # Decrypt the ciphered data key from the metadata using the KMS
    # Customer Master Key (symmetric CMK, so no KeyId needed).
    dataKey = base64.b64decode(metadata)
    key = kms.decrypt(CiphertextBlob=dataKey)
    keyPlain = key['Plaintext']
    print("Datakey bilgisi alindi!")

    # Read the encrypted file back in.
    with open(outfile_name, 'rb') as file:
        _file = file.read()

    # Hand the recovered data key to Fernet and decrypt the contents.

    f = Fernet(base64.b64encode(keyPlain))
    file_contents_decrypted = f.decrypt(_file)
    print("Dosya decrypt edildi!")

    # Write out the decrypted result.
    with open('dec_' + filename, 'wb') as file_decrypted:
        file_decrypted.write(file_contents_decrypted)
    print("Dosyaniz hazir!")
    # Remove the downloaded encrypted copy.
    os.remove(filename + '.enc')
示例#37
0
def connect(account_name, connection_type, **args):
    """Return an AWS connection for *connection_type*, authenticated via
    STS credentials for the account named *account_name*.

    Examples of use:
    ec2 = sts_connect.connect(environment, 'ec2', region=region, validate_certs=False)
    ec2 = sts_connect.connect(environment, 'ec2', validate_certs=False, debug=1000)
    ec2 = sts_connect.connect(environment, 'ec2')
    where environment is ( test, prod, dev )
    s3  = sts_connect.connect(environment, 's3')
    ses = sts_connect.connect(environment, 'ses')

    :param account: Account to connect with (i.e. test, prod, dev)

    :raises Exception: RDS Region not valid
                       AWS Tech not supported.

    :returns: STS Connection Object for given tech

    :note: To use this method a SecurityMonkey role must be created
            in the target account with full read only privileges.
    """
    # Reuse caller-supplied STS credentials when provided; otherwise
    # assume the monitoring role in the target account ourselves.
    try:
        role = args['assumed_role']
    except KeyError:
        account = Account.query.filter(Account.name == account_name).first()
        role_name = account.role_name if account.role_name and account.role_name != '' else 'SecurityMonkey'
        role = boto3.client('sts').assume_role(
            RoleArn='arn:aws:iam::' + account.number + ':role/' + role_name,
            RoleSessionName='secmonkey')

    # NOTE(review): 'app' is never referenced below — presumably imported
    # for its side effects; confirm before removing.
    from security_monkey import app

    if connection_type == 'botocore':
        botocore_session = botocore.session.get_session()
        botocore_session.set_credentials(
            role['Credentials']['AccessKeyId'],
            role['Credentials']['SecretAccessKey'],
            token=role['Credentials']['SessionToken'])
        return botocore_session

    region = args.pop('region', 'us-east-1')
    # Accept either a region-name string or a boto region object.
    region = getattr(region, 'name', region)

    if 'boto3' in connection_type:
        # Expected format: boto3.<tech>.<client|resource>
        _, tech, api = connection_type.split('.')
        session = boto3.Session(
            aws_access_key_id=role['Credentials']['AccessKeyId'],
            aws_secret_access_key=role['Credentials']['SecretAccessKey'],
            aws_session_token=role['Credentials']['SessionToken'],
            region_name=region)
        return session.resource(tech) if api == 'resource' else session.client(tech)

    # Legacy boto path: dynamically import boto.<connection_type>.
    module = __import__("boto.{}".format(connection_type))
    for subm in connection_type.split('.'):
        module = getattr(module, subm)

    return module.connect_to_region(
        region,
        aws_access_key_id=role['Credentials']['AccessKeyId'],
        aws_secret_access_key=role['Credentials']['SecretAccessKey'],
        security_token=role['Credentials']['SessionToken'])
示例#38
0
def connect(account_name, connection_type, **args):
    """Return an AWS connection for *connection_type*, authenticated via
    STS credentials for the account named *account_name*.

    Examples of use:
    ec2 = sts_connect.connect(environment, 'ec2', region=region, validate_certs=False)
    ec2 = sts_connect.connect(environment, 'ec2', validate_certs=False, debug=1000)
    ec2 = sts_connect.connect(environment, 'ec2')
    where environment is ( test, prod, dev )
    s3  = sts_connect.connect(environment, 's3')
    ses = sts_connect.connect(environment, 'ses')

    :param account: Account to connect with (i.e. test, prod, dev)

    :raises Exception: RDS Region not valid
                       AWS Tech not supported.

    :returns: STS Connection Object for given tech

    :note: To use this method a SecurityMonkey role must be created
            in the target account with full read only privileges.
    """
    # Reuse caller-supplied STS credentials when provided; otherwise
    # assume the monitoring role in the target account ourselves.
    try:
        role = args['assumed_role']
    except KeyError:
        account = Account.query.filter(Account.name == account_name).first()
        custom_role = account.getCustom("role_name")
        role_name = custom_role if custom_role and custom_role != '' else 'SecurityMonkey'
        role = boto3.client('sts').assume_role(
            RoleArn='arn:aws:iam::' + account.identifier + ':role/' + role_name,
            RoleSessionName='secmonkey')

    # NOTE(review): 'app' is never referenced below — presumably imported
    # for its side effects; confirm before removing.
    from security_monkey import app

    if connection_type == 'botocore':
        botocore_session = botocore.session.get_session()
        botocore_session.set_credentials(
            role['Credentials']['AccessKeyId'],
            role['Credentials']['SecretAccessKey'],
            token=role['Credentials']['SessionToken'])
        return botocore_session

    region = args.pop('region', 'us-east-1')
    # Accept either a region-name string or a boto region object.
    region = getattr(region, 'name', region)

    if 'boto3' in connection_type:
        # Expected format: boto3.<tech>.<client|resource>
        _, tech, api = connection_type.split('.')
        session = boto3.Session(
            aws_access_key_id=role['Credentials']['AccessKeyId'],
            aws_secret_access_key=role['Credentials']['SecretAccessKey'],
            aws_session_token=role['Credentials']['SessionToken'],
            region_name=region)
        return session.resource(tech) if api == 'resource' else session.client(tech)

    # Legacy boto path: dynamically import boto.<connection_type>.
    module = __import__("boto.{}".format(connection_type))
    for subm in connection_type.split('.'):
        module = getattr(module, subm)

    return module.connect_to_region(
        region,
        aws_access_key_id=role['Credentials']['AccessKeyId'],
        aws_secret_access_key=role['Credentials']['SecretAccessKey'],
        security_token=role['Credentials']['SessionToken'])
# SECURITY(review): hard-coded AWS secret key committed to source. This
# credential must be rotated and loaded from the environment or an AWS
# profile instead of being embedded here.
AWS_SECRET_KEY = 'uWqtrjFds4ZhTICsyId+LM1cisZHl2HIuti1Cfk0'
AWS_BUCKET_NAME = 'memrix1'
# Allowed file extensions for image uploads.
ALLOWED_EXTENSIONS = set(['jpg', 'jpeg', 'gif', 'png']) 

# SECURITY(review): hard-coded Google Maps API key — move to config/env.
GMAPS_API_KEY = "AIzaSyDEgKQ8xiwb_MHBJXT6wL_t2p_mH1REWZ4"

from boto3.session import Session
import botocore
import botocore.session

# NOTE(review): AWS_KEY_ID is not defined in this section — presumably
# assigned earlier in the file; verify it exists before this line runs.
session = Session(aws_access_key_id=AWS_KEY_ID,
                  aws_secret_access_key=AWS_SECRET_KEY,
                  region_name='us-west-2')

s3 = session.resource('s3')
s3Client = session.client('s3')
bucket = s3.Bucket('mybucket')
# Probe whether the bucket exists; head_bucket raises ClientError if not.
exists = True
try:
    s3.meta.client.head_bucket(Bucket='mybucket')

except botocore.exceptions.ClientError as e:
    # If a client error is thrown, then check that it was a 404 error.
    # If it was a 404 error, then the bucket does not exist.
    error_code = int(e.response['Error']['Code'])
    if error_code == 404:
        exists = False

# for getting image urls
# session = botocore.session.get_session()
# client = session.create_client('s3')
示例#40
0
import botocore.session

# AWS profile used when running locally (credentials come from ~/.aws).
PROFILE = "qa-admin"

# Resource identifiers for the pre-signed URL demo.
TABLE = "productManuals"
BUCKET = "product-manual-data"
KEY = "products/passport.pdf"
EXPIRATION = 3600  # In Seconds
print('Loading function')

# USE THIS WHEN DEPLOYING ON EC2
# s3 = boto3.client('s3', config=Config(signature_version='s3v4'))

# THIS IS FOR LOCAL RUN
# NOTE(review): neither boto3 nor Config is imported in this section —
# presumably imported earlier in the file; confirm before running.
session = boto3.Session(profile_name=PROFILE)
s3 = session.client('s3', config=Config(signature_version='s3v4'))


def generate_url():
    # print("Received event: " + json.dumps(event, indent=2))
    # Get the object from the event and show its content type
    bucket = BUCKET
    key = KEY

    try:
        url = s3.generate_presigned_url(ClientMethod='get_object',
                                        Params={
                                            'Bucket': bucket,
                                            'Key': key
                                        },
                                        expires_in=EXPIRATION)