Ejemplo n.º 1
0
    def get_stack_info(self, stack_arn, session=None, stack_type=None, no_logging=False):
        """Gets the StackInfo for a CloudFormation stack from its arn.

        Results are cached per arn, so repeated lookups do not call CloudFormation.

        Keyword arguments:
        stack_arn -- the arn of the stack to load
        session -- (optional) a boto3.Session to use; defaults to the manager's
            cached session, or a new boto3.Session if none has been cached yet
        stack_type -- (optional) one of the StackInfo.STACK_TYPE_* values; when
            omitted the type is read from the stack's CloudCanvasStack template
            parameter via describe_stacks
        no_logging -- (optional) when True, the CloudFormation client used for
            the describe_stacks lookup is created with logging suppressed
        """
        # Serve from the per-manager cache when possible.
        existing = self.__cache.get(stack_arn, None)
        if existing:
            return existing

        if not session:
            session = self.__session if self.__session is not None else boto3.Session()
        self.__session = session

        if stack_type is None:
            # Discover the stack type by describing the stack and reading its
            # CloudCanvasStack parameter.
            region = aws_utils.get_region_from_stack_arn(stack_arn)
            if no_logging:
                cf_client = aws_utils.ClientWrapper(session.client("cloudformation", region_name=region), log_level=aws_utils.LOG_LEVEL_NONE)
            else:
                cf_client = aws_utils.ClientWrapper(session.client("cloudformation", region_name=region))
            
            res = cf_client.describe_stacks(StackName = stack_arn)
            stack_description = res['Stacks'][0]
            parameters = stack_description.get('Parameters', [])

            stack_type = None
            for parameter in parameters:
                if parameter['ParameterKey'] == 'CloudCanvasStack':
                    stack_type = parameter['ParameterValue']
        else:
            # Caller supplied the type, so no description was fetched; None is
            # passed through to the StackInfo constructor below.
            stack_description = None

        if not stack_type:
            raise RuntimeError('The stack {} is not a Lumberyard Cloud Canvas managed stack.'.format(stack_arn))

        # Construct the concrete StackInfo subclass matching the stack type.
        if stack_type == StackInfo.STACK_TYPE_RESOURCE_GROUP:
            stack_info = ResourceGroupInfo(self, stack_arn, session=session, stack_description=stack_description)
        elif stack_type == StackInfo.STACK_TYPE_DEPLOYMENT:
            stack_info = DeploymentInfo(self, stack_arn, session=session, stack_description=stack_description)
        elif stack_type == StackInfo.STACK_TYPE_DEPLOYMENT_ACCESS:
            stack_info = DeploymentAccessInfo(self, stack_arn, session=session, stack_description=stack_description)
        elif stack_type == StackInfo.STACK_TYPE_PROJECT:
            stack_info = ProjectInfo(self, stack_arn, session=session, stack_description=stack_description)
        else:
            raise RuntimeError('The stack {} has an unexpected Lumberyard Cloud Canvas managed stack type: {}'.format(stack_arn, stack_type))

        self.__cache[stack_arn] = stack_info

        return stack_info
Ejemplo n.º 2
0
 def resource_definitions(self):
     """Lazily load and cache the resource type mapping for this stack."""
     if self.__resource_definitions is None:
         # The mapping is stored in the project's configuration bucket on S3.
         s3 = aws_utils.ClientWrapper(self.session.client("s3"))
         self.__resource_definitions = resource_type_info.load_resource_type_mapping(
             self.project_stack.configuration_bucket, self, s3)
     return self.__resource_definitions
Ejemplo n.º 3
0
def get_iot_data_client():
    '''Return a shared, lazily-created IotData client.

    The wrapped client is memoized as an attribute on this function, so every
    caller receives the same instance.
    '''
    client = getattr(get_iot_data_client, 'client', None)
    if client is None:
        client = aws_utils.ClientWrapper(
            boto3.client('iot-data', api_version='2015-05-28'))
        get_iot_data_client.client = client
    return client
Ejemplo n.º 4
0
    def deployments(self):
        """Lazily build and cache the list of DeploymentInfo objects for this project.

        Deployment settings come either from project-settings.json (settings
        version 1) or, when that yields nothing, from per-deployment
        'dstack.deployment.<name>.json' objects in the configuration bucket
        (settings version 2).
        """
        if self.__deployment_infos is None:
            deployment_infos = []

            deployments = self.project_settings.get('deployment', {})
            if (deployments == {}):
                # project-settings.json was removed in Version 2 of the cloud project settings.
                # Each deployment is stored in a separate json file with the suffix 'dstack'.
                s3_client = aws_utils.ClientWrapper(self.session.client("s3"))
                res = s3_client.list_objects_v2(Bucket = self.configuration_bucket, Prefix = "dstack.deployment.")
                cloud_deployments = res.get('Contents', {})

                for deployment in cloud_deployments:
                    name = deployment['Key']
                    # Keys have the shape 'dstack.deployment.<name>.json'.
                    prefix, group, deployment_name, ext = name.split('.')

                    # Skip over dstack.deployment.*.json file. This file contains the default resource group settings.
                    if deployment_name != '*':
                        deployment_settings = json.load(s3_client.get_object(Bucket=self.configuration_bucket, Key=name)["Body"])
                        deployments[deployment_name] = deployment_settings

            # Only deployments that actually have a stack are represented.
            for deployment_name, deployment_settings in deployments.iteritems():
                deployment_stack_arn = deployment_settings.get('DeploymentStackId')
                deployment_access_stack_arn = deployment_settings.get('DeploymentAccessStackId')
                if deployment_stack_arn:
                    deployment_infos.append(DeploymentInfo(self.stack_manager, deployment_stack_arn, deployment_access_stack_arn=deployment_access_stack_arn, session=self.session, project_info=self))
            
            self.__deployment_infos = deployment_infos
        return self.__deployment_infos
Ejemplo n.º 5
0
def _get_cgp_listener_policy(stack):
    account_id = aws_utils.ClientWrapper(
        boto3.client('sts')).get_caller_identity()['Account']

    iot_client_resource = "arn:aws:iot:{}:{}:client/${{cognito-identity.amazonaws.com:sub}}".format(
        stack.region, account_id)
    policy_doc = '''{
      "Version": "2012-10-17",
      "Statement": [
        {
          "Effect": "Allow",
          "Action": [
            "iot:Connect",
            "iot:Receive",
            "iot:Subscribe",
            "iot:Publish"
          ],
          "Resource": [
            "*"
          ]
        }
      ]
    }'''

    return policy_doc
Ejemplo n.º 6
0
 def resource_definitions(self):
     """Lazily load and cache the resource type mapping, without any logging."""
     if self.__resource_definitions is None:
         # Use a silent S3 client so the lookup produces no log output.
         quiet_s3 = aws_utils.ClientWrapper(
             self.session.client("s3"), log_level=aws_utils.LOG_LEVEL_NONE)
         self.__resource_definitions = resource_type_info.load_resource_type_mapping(
             self.project_stack.get_configuration_bucket(verbose=False), self, quiet_s3)
     return self.__resource_definitions
Ejemplo n.º 7
0
def _get_subscription_resources(stack, client_id):
    """Build the list of IoT topic arns a client may receive on.

    Only resource-group stacks contribute topics. Each gem channel yields one
    arn per requested type; PRIVATE channels are scoped to the client id.
    Channels that communicate within another channel are skipped entirely.
    """
    topic_arns = []
    if stack.stack_type != stack_info.StackInfo.STACK_TYPE_RESOURCE_GROUP:
        return topic_arns

    account_id = aws_utils.ClientWrapper(
        boto3.client('sts')).get_caller_identity()['Account']

    resource_group_name = stack.resource_group_name
    deployment_name = stack.deployment.deployment_name
    project_name = stack.deployment.parent_stack.project_name

    gem_settings = stack.deployment.get_gem_settings(resource_group_name)

    for gem_name, settings in gem_settings.iteritems():
        for channel in settings.get('Channels', []):
            channel_name = channel.get('Name')
            if not channel_name:
                continue
            if channel.get('CommunicationChannel', None) != None:
                # This channel communicates within another channel - We don't want to subscribe to it
                continue
            for channel_type in channel.get('Types', []):
                arn = 'arn:aws:iot:{}:{}:topic/{}/{}/{}'.format(
                    stack.region, account_id, project_name,
                    deployment_name, channel_name)
                if channel_type == 'PRIVATE':
                    arn += '/client/{}'.format(client_id)
                topic_arns.append(arn)

    return topic_arns
Ejemplo n.º 8
0
def get_listener_policy(stack, client_id):
    """Build the IoT listener policy document (JSON string) for a client.

    Grants iot:Connect on the client's own arn, iot:Receive on each topic
    returned by _get_subscription_resources, and iot:Subscribe on the matching
    topic filters.
    """
    sts_client = aws_sts.AWSSTSUtils(stack.region).client()
    account_id = aws_utils.ClientWrapper(sts_client).get_caller_identity()['Account']

    client_arn = "arn:aws:iot:{}:{}:client/{}".format(stack.region, account_id, client_id)

    receive_arns = _get_subscription_resources(stack, client_id)
    # Subscriptions use topicfilter arns rather than topic arns.
    subscribe_arns = [arn.replace(":topic/", ":topicfilter/") for arn in receive_arns]

    statements = [
        {
            'Effect': 'Allow',
            'Action': ['iot:Connect'],
            'Resource': client_arn
        },
        {
            'Effect': 'Allow',
            'Action': ['iot:Receive'],
            'Resource': receive_arns
        },
        {
            'Effect': 'Allow',
            'Action': ['iot:Subscribe'],
            'Resource': subscribe_arns
        },
    ]

    return json.dumps({'Version': '2012-10-17', 'Statement': statements})
Ejemplo n.º 9
0
def create_table(table_name, event):
    """Create the DynamoDB table described by a custom resource event.

    Restores from a backup when 'FromBackup' is present in the resource
    properties; otherwise issues a single CreateTable call, including the
    optional global/local secondary indexes only when the event provides them.
    Returns the service response.
    """
    dynamodb = aws_utils.ClientWrapper(boto3.client('dynamodb'))
    resource_properties = event["ResourceProperties"]

    if "FromBackup" in resource_properties:
        return create_from_backup(
            table_name, resource_properties["FromBackup"],
            resource_properties.get("BackupArn", None))

    # Build the CreateTable arguments once instead of duplicating the call for
    # every combination of optional index properties.
    create_args = {
        'AttributeDefinitions': resource_properties["AttributeDefinitions"],
        'TableName': table_name,
        'KeySchema': resource_properties["KeySchema"],
        'ProvisionedThroughput': get_throughput_from_dict(
            resource_properties['ProvisionedThroughput']),
        'StreamSpecification': get_stream_spec_from_dict(
            resource_properties.get("StreamSpecification", {}))
    }
    if "GlobalSecondaryIndexes" in resource_properties:
        create_args['GlobalSecondaryIndexes'] = sanitize_secondary_indexes(
            resource_properties["GlobalSecondaryIndexes"])
    if "LocalSecondaryIndexes" in resource_properties:
        create_args['LocalSecondaryIndexes'] = resource_properties["LocalSecondaryIndexes"]

    return dynamodb.create_table(**create_args)
def _create_role(role_name, props, tags):
    """Create an IAM role from the given properties and return its Arn."""
    client = aws_utils.ClientWrapper(boto3.client('iam'))
    assume_role_policy = json.dumps(props.AssumeRolePolicyDocument)
    result = client.create_role(
        Path=props.Path,
        RoleName=role_name,
        AssumeRolePolicyDocument=assume_role_policy,
        Tags=tags)
    return result['Role']['Arn']
Ejemplo n.º 11
0
 def resource_arn(self):
     """Return the arn for this resource, lazily creating a Lambda client.

     The Lambda client is required by aws_utils.get_resource_arn for some
     resource types; it is created once and cached on the instance.
     """
     if self.__lambda_client is None:
         # NOTE: a previous revision computed self.__stack_info.region here but
         # never used it; that dead assignment has been removed. The client is
         # still created without an explicit region, as before.
         self.__lambda_client = aws_utils.ClientWrapper(
             self.__stack_info.session.client('lambda'))
     return aws_utils.get_resource_arn(self.stack.resource_definitions,
                                       self.stack.stack_arn,
                                       self.type,
                                       self.physical_id,
                                       lambda_client=self.__lambda_client)
Ejemplo n.º 12
0
 def resource_group_settings(self):
     """Lazily fetch and cache this deployment's resource group settings from S3."""
     if self.__resource_group_settings is None:
         settings_key = '{}/{}/{}'.format(
             constant.RESOURCE_SETTINGS_FOLDER, self.deployment_name,
             constant.DEPLOYMENT_RESOURCE_GROUP_SETTINGS)
         s3 = aws_utils.ClientWrapper(self.session.client("s3"))
         response = s3.get_object(
             Bucket=self.project_stack.configuration_bucket, Key=settings_key)
         self.__resource_group_settings = json.loads(response['Body'].read())
     return self.__resource_group_settings
Ejemplo n.º 13
0
def create_from_backup(table_name, backup_name, backup_arn):
    """Restore a DynamoDB table from a backup.

    When backup_arn is None, the latest backup matching backup_name is used.
    Raises when no matching backup arn can be found.
    """
    dynamodb = aws_utils.ClientWrapper(boto3.client('dynamodb'))
    available_backups = get_backups(dynamodb)
    if backup_arn is None:
        backup_arn = get_latest_arn_by_name(available_backups, backup_name)
    if not backup_arn:
        raise Exception("Dynamodb backup arn could not be found for {}".format(
            backup_name))
    return dynamodb.restore_table_from_backup(TargetTableName=table_name,
                                              BackupArn=backup_arn)
Ejemplo n.º 14
0
def get_configuration_bucket():
    """Locate this lambda's stack and its Configuration bucket.

    Describes the stack resources owned by the current lambda and returns a
    StackS3Configuration populated with the stack name and the physical id of
    the 'Configuration' resource; fields stay empty when no match is found.
    """
    configuration = StackS3Configuration()

    cloudformation = aws_utils.ClientWrapper(
        boto3.client('cloudformation', region_name=current_region))
    described = cloudformation.describe_stack_resources(
        PhysicalResourceId=lambda_name)

    for resource in described['StackResources']:
        if resource.get('LogicalResourceId', None) == 'Configuration':
            configuration.configuration_bucket = resource['PhysicalResourceId']
            configuration.stack_name = resource['StackName']
            break
    return configuration
Ejemplo n.º 15
0
 def get_resources(self, verbose):
     """List and cache this stack's resources as a ResourceInfoList.

     verbose -- when False, use a non-logging CloudFormation client so the
         listing produces no log output.
     """
     if self.__resources is None:
         if verbose:
             client = self.client
         else:
             client = aws_utils.ClientWrapper(self.session.client(
                 'cloudformation', region_name=self.region), log_level=aws_utils.LOG_LEVEL_NONE)
         resources = ResourceInfoList(self)
         res = client.list_stack_resources(StackName=self.stack_arn)
         for resource_summary in res['StackResourceSummaries']:
             resources.append(ResourceInfo(self, resource_summary))
         # Follow pagination. BUG FIX: the previous implementation assigned a
         # lambda to res without calling it (raising a TypeError on the next
         # subscript), and paged with self.client, ignoring the verbose flag.
         while 'NextToken' in res:
             res = client.list_stack_resources(StackName=self.stack_arn, NextToken=res['NextToken'])
             for resource_summary in res['StackResourceSummaries']:
                 resources.append(ResourceInfo(self, resource_summary))
         self.__resources = resources
     return self.__resources
Ejemplo n.º 16
0
def update_table(table_name, event):
    """Apply throughput, stream, and GSI updates to an existing table.

    After each update that reports a change, waits for the table to go idle
    before proceeding. Returns the table's final description.
    """
    dynamodb = aws_utils.ClientWrapper(boto3.client('dynamodb'))
    description = dynamodb.describe_table(TableName=table_name)

    # Updates are applied serially; each mutation must finish before the next.
    if update_throughput(dynamodb, table_name, event, description):
        wait_for_idle_table(dynamodb, table_name)

    if update_stream_spec(dynamodb, table_name, event, description):
        wait_for_idle_table(dynamodb, table_name)

    update_global_secondary_indexes(dynamodb, table_name, event, description)

    final_description = dynamodb.describe_table(TableName=table_name)
    return {'TableDescription': final_description['Table']}
Ejemplo n.º 17
0
 def project_settings(self):
     """Lazily load and cache project-settings.json from the configuration bucket.

     Falls back to an empty dict when the object is unreadable because the
     bucket has already been emptied during project stack deletion.
     """
     if self.__project_settings is None:
         try:
             s3_client = aws_utils.ClientWrapper(self.session.client("s3"))
             res = s3_client.get_object(Bucket = self.configuration_bucket, Key='project-settings.json')
             json_string = res['Body'].read()
             print 'read project-settings.json contents: {}'.format(json_string)
             self.__project_settings = json.loads(json_string)
         except ClientError as e:
             # When the project stack is being deleted, the configuration bucket's contents 
             # will have been cleared before the stack is deleted. During the stack delete, 
             # custom resources such as AccessControl may attempt actions that cause project
             # settings to load, which will fail with either access denied or no such key,
             # depending on if we have list objects permissions on the bucket.
             if e.response.get('Error', {}).get('Code', '') not in [ 'AccessDenied', 'NoSuchKey' ]:
                 raise e
             print 'WARNING: could not read project-settings.json from the project configuration bucket.'
             self.__project_settings = {}
     return self.__project_settings
Ejemplo n.º 18
0
def get_listener_policy(stack, client_id):
    """Build the IoT listener policy document (JSON string) for a client.

    Grants iot:Connect on the client's own arn, iot:Receive on each topic
    returned by _get_subscription_resources, and iot:Subscribe on the matching
    topic filters.
    """
    account_id = aws_utils.ClientWrapper(
        boto3.client('sts')).get_caller_identity()['Account']

    client_arn = "arn:aws:iot:{}:{}:client/{}".format(
        stack.region, account_id, client_id)

    receive_arns = _get_subscription_resources(stack, client_id)
    # Subscriptions use topicfilter arns rather than topic arns.
    subscribe_arns = [
        arn.replace(":topic/", ":topicfilter/") for arn in receive_arns
    ]

    statements = [
        {'Effect': 'Allow', 'Action': ['iot:Connect'], 'Resource': client_arn},
        {'Effect': 'Allow', 'Action': ['iot:Receive'], 'Resource': receive_arns},
        {'Effect': 'Allow', 'Action': ['iot:Subscribe'], 'Resource': subscribe_arns},
    ]

    return json.dumps({'Version': '2012-10-17', 'Statement': statements})
Ejemplo n.º 19
0
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#

from cgf_utils import aws_utils
from resource_manager_common import stack_info
import boto3
import json

# Module-level, wrapped IoT control-plane client shared by this module's handlers.
iot_client = aws_utils.ClientWrapper(boto3.client('iot'))


def _get_subscription_resources(stack, client_id):
    # Custom policy built from resources will come from here

    policy_list = []
    if stack.stack_type == stack_info.StackInfo.STACK_TYPE_RESOURCE_GROUP:
        account_id = aws_utils.ClientWrapper(
            boto3.client('sts')).get_caller_identity()['Account']

        resource_group_name = stack.resource_group_name
        deployment_name = stack.deployment.deployment_name
        project_name = stack.deployment.parent_stack.project_name

        resource_group_settings = stack.deployment.get_gem_settings(
Ejemplo n.º 20
0
import CloudCanvas
import service
import errors
from botocore.exceptions import ClientError
from cgf_utils import custom_resource_utils, aws_utils

# import errors
#
# raise errors.ClientError(message) - results in HTTP 400 response with message
# raise errors.ForbiddenRequestError(message) - results in 403 response with message
# raise errors.NotFoundError(message) - results in HTTP 404 response with message
#
# Any other exception results in HTTP 500 with a generic internal service error message.

# Physical id of the LogDB table, resolved from this gem's 'LogDB' setting.
log_db = custom_resource_utils.get_embedded_physical_id(CloudCanvas.get_setting('LogDB'))
# Module-level DynamoDB client shared by this module's handlers.
dynamo_client = aws_utils.ClientWrapper(boto3.client('dynamodb'))

# NOTE(review): presumably a duration in seconds — confirm against the code that reads it.
buffer_time = 5


def _process_data(d):
    result = None
    type, data = list(d.items())[0]
    if type == 'L':
        result = [_process_data(item) for item in data]
    elif type == 'M':
        result = {k: _process_data(v) for k, v in data.iteritems()}
    elif type == 'N':
        result = int(data)
    elif type != 'NULL':
        result = data
Ejemplo n.º 21
0
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# $Revision: #1 $

import json
import boto3
from cgf_utils import aws_utils
from cgf_utils import properties

REFERENCE_METADATA_PATH = "reference-metadata"
REFERENCE_TYPE = "Custom::ExternalResourceReference"

s3_client = aws_utils.ClientWrapper(boto3.client('s3'))


def is_reference_type(type):
    """Return True when *type* is the external resource reference type."""
    # NOTE: the parameter shadows the builtin 'type'; the name is kept for
    # compatibility with existing callers.
    return type == REFERENCE_TYPE


def get_reference_metadata_key(project_name, reference_name):
    """Return the S3 key of a project's reference metadata document."""
    filename = reference_name + ".json"
    return aws_utils.s3_key_join(REFERENCE_METADATA_PATH, project_name, filename)


def get_reference_metadata(configuration_bucket, project_name, reference_name):
    reference_metadata_key = get_reference_metadata_key(
        project_name, reference_name)
Ejemplo n.º 22
0
from cgf_utils import properties
from cgf_utils import custom_resource_response
import boto3
import json
import time

from cgf_utils import aws_utils
from cgf_utils import role_utils
from resource_manager_common import stack_info

from botocore.exceptions import ClientError

PROPAGATION_DELAY_SECONDS = 10

iam = aws_utils.ClientWrapper(boto3.client('iam'))


class ProblemList(object):
    """Accumulates problem descriptions for later reporting.

    Truthiness and length reflect the number of recorded problems; repr joins
    them one per line, indented for display.
    """

    def __init__(self):
        # NOTE(review): __prefixes is initialized but not used by the methods
        # visible in this class body; kept for compatibility.
        self.__problems, self.__prefixes = [], []

    def __repr__(self):
        # One problem per line, indented for display under a header.
        return '\n    '.join(self.__problems)

    def __nonzero__(self):
        # Python 2 truthiness hook: truthy when any problem was recorded.
        return bool(self.__problems)

    def __len__(self):
        return len(self.__problems)
# Boto3
import boto3
import botocore.config as boto_config

# ResourceManagerCommon
from resource_manager_common import stack_info

# Utils
from cgf_utils import aws_utils
from cgf_utils import custom_resource_response
from cgf_utils import properties
from cgf_utils import role_utils

# Name of the Cloud Gem Framework gem.
CLOUD_GEM_FRAMEWORK = 'CloudGemFramework'
# Module-level wrapped IAM client shared by this module's handlers.
iam = aws_utils.ClientWrapper(boto3.client('iam'))
# Generous read/connect timeouts (seconds) for the S3 client below.
cfg = boto_config.Config(read_timeout=70, connect_timeout=70)
# 'Body' arguments are excluded from logging to avoid dumping object contents.
s3 = aws_utils.ClientWrapper(boto3.client('s3', config=cfg),
                             do_not_log_args=['Body'])


def get_default_policy(project_service_lambda_arn):
    """
    Gets the default policy to associate with a a Lambda Configuration

    To ensure least privileges we do not attach PutLogEvents, CreateLogStream permissions here. Instead
    these are added as an inline policy on the Lambda's execution role once its created so they can be correctly scoped.

    :param project_service_lambda_arn:
    :return: The default policy document for the lambda
    """
import copy
import json
import time

from botocore.exceptions import ClientError

from cgf_utils import aws_utils
from cgf_utils import custom_resource_response
from cgf_utils import properties
from cgf_utils import role_utils

from resource_manager_common import constant
from resource_manager_common import resource_type_info
from resource_manager_common import stack_info

# Module-level wrapped clients shared by this module's handlers.
s3_client = aws_utils.ClientWrapper(boto3.client("s3"))
iam_client = aws_utils.ClientWrapper(boto3.client("iam"))

# NOTE(review): presumably the name used for inline policies on created roles — confirm.
_inline_policy_name = "Default"

# Default custom resource lambda memory in MB (previous comment said 'timeout')
_default_lambda_memory = 128

# Default custom resource lambda timeout in Seconds, can be between 3-900 as per Lambda spec
_default_lambda_timeout = 10  # Seconds

# Schema for _handler_properties_configuration
# Set these values to increase memory and timeout values for a given custom resource Lambda
# These are mostly Lambda configuration properties and should follow existing names and restrictions
_handler_properties_configuration = {
    'MemorySize': properties.Integer(default=_default_lambda_memory),  # MB: Must be a multiple of 64MB as per Lambda spec
def _create_lambda_client(stack_arn):
    """Build a wrapped Lambda client in the stack's own region.

    Kept as a factory function so tests can patch client creation.
    """
    stack_region = aws_utils.get_region_from_stack_arn(stack_arn)
    return aws_utils.ClientWrapper(boto3.client("lambda", stack_region))
Ejemplo n.º 26
0
 def client(self):
     """Lazily create and cache a CloudFormation client for this stack's region."""
     if self.__client is None:
         cf = self.session.client('cloudformation', region_name=self.region)
         self.__client = aws_utils.ClientWrapper(cf)
     return self.__client
Ejemplo n.º 27
0
def get_cloud_formation_client(stack_arn):
    """Return a wrapped CloudFormation client for the stack's own region."""
    return aws_utils.ClientWrapper(
        boto3.client('cloudformation',
                     region_name=aws_utils.get_region_from_stack_arn(stack_arn)))
Ejemplo n.º 28
0
def handler(event, context):
    dynamodb = aws_utils.ClientWrapper(boto3.client('dynamodb'))
    wait_for_account_tables()

    request_type = event['RequestType']
    table_name = get_table_name(event)

    if request_type == 'Create':
        try:
            if table_name in gather_tables(dynamodb):
                raise RuntimeError(
                    "Trying to create a Custom::DynamoDB::Table custom resource, but DynamoDB table already exists!"
                )
            try:
                response = create_table(table_name, event)
            except Exception as e:
                if isinstance(e,
                              ClientError) and e.response['Error']['Code'] in [
                                  'LimitExceededException'
                              ]:
                    wait_for_account_tables()
                    response = create_table(table_name, event)
                else:
                    raise e
            table_response = _TableResponse(response)
        except RuntimeError as e:
            return custom_resource_response.failure_response(e.message)
        tag_table(dynamodb, response, event)
    elif request_type == 'Update':
        try:
            if not table_name in gather_tables(dynamodb):
                try:
                    response = create_table(table_name, event)
                except Exception as e:
                    if isinstance(
                            e,
                            ClientError) and e.response['Error']['Code'] in [
                                'LimitExceededException'
                            ]:
                        wait_for_account_tables()
                        response = create_table(table_name, event)
                    else:
                        raise e
                table_response = _TableResponse(response)
                tag_table(dynamodb, response, event)
            else:
                try:
                    response = update_table(table_name, event)
                except Exception as e:
                    if isinstance(
                            e,
                            ClientError) and e.response['Error']['Code'] in [
                                'LimitExceededException'
                            ]:
                        wait_for_account_tables()
                        response = update_table(table_name, event)
                    else:
                        raise e
                table_response = _TableResponse(response)
        except RuntimeError as e:
            return custom_resource_response.failure_response(e.message)

    elif request_type == 'Delete':
        try:
            if table_name in gather_tables(dynamodb):
                try:
                    response = dynamodb.delete_table(TableName=table_name)
                except Exception as e:
                    if isinstance(
                            e,
                            ClientError) and e.response['Error']['Code'] in [
                                'LimitExceededException'
                            ]:
                        wait_for_account_tables()
                        response = dynamodb.delete_table(TableName=table_name)
                    else:
                        raise e
                table_response = _TableResponse(response)
            else:
                print "Custom::DynamoDB::Table is trying to delete a DynamoDB table that does not exist"
                table_response = _TableResponse(
                    {'TableDescription': {
                        'TableName': table_name
                    }})
        except RuntimeError as e:
            return custom_resource_response.failure_response(e.message)
    else:
        raise RuntimeError('Invalid RequestType: {}'.format(request_type))

    return custom_resource_response.success_response(table_response.output,
                                                     table_response.table_name)
Ejemplo n.º 29
0
def wait_for_account_tables():
    """Block until no more than 7 of the account's tables are mid-update.

    Polls with an increasing backoff between checks.
    """
    dynamodb = aws_utils.ClientWrapper(boto3.client('dynamodb'))
    attempt = 0
    while True:
        busy = updates_in_progress(dynamodb, gather_tables(dynamodb))
        if len(busy) <= 7:
            break
        backoff(attempt)
        attempt += 1
Ejemplo n.º 30
0
# Python 2.7/3.7 Compatibility
from six import iteritems

from cgf_utils import custom_resource_response
from cgf_utils import properties
from resource_manager_common import constant
from resource_manager_common import stack_info
from resource_manager_common import service_interface
from cgf_utils import role_utils
from cgf_utils import patch
from cgf_utils import aws_utils
from botocore.exceptions import ClientError
from cgf_service_directory import ServiceDirectory

# Generous read/connect timeouts (seconds) shared by this module's clients.
cfg = boto_config.Config(read_timeout=70, connect_timeout=70)
s3 = aws_utils.ClientWrapper(boto3.client('s3', config=cfg))
# 'body' arguments are excluded from logging to avoid dumping large payloads.
api_gateway = aws_utils.ClientWrapper(boto3.client('apigateway', config=cfg),
                                      do_not_log_args=['body'])

API_GATEWAY_SERVICE_NAME = 'apigateway.amazonaws.com'
STAGE_NAME = 'api'

PROPERTY_SCHEMA = {
    'ConfigurationBucket':
    properties.String(),
    'ConfigurationKey':
    properties.String(),
    'CacheClusterEnabled':
    properties.Boolean(default=False),
    'CacheClusterSize':
    properties.String(default='0.5'),