Example #1
    def get_stack_info(self, stack_arn, session=None, stack_type=None, no_logging=False):
        """Gets the StackInfo for a CloudFormation stack from its arn.

        Keyword arguments:
        stack_arn -- the arn of the stack to load
        session -- (optional) a boto3.Session to use when creating clients; defaults to the cached session or a new one
        stack_type -- (optional) the stack's Cloud Canvas type, if already known; skips the describe_stacks lookup
        no_logging -- (optional) when True, suppresses logging on the CloudFormation client wrapper
        """
        existing = self.__cache.get(stack_arn, None)
        if existing:
            return existing

        if not session:
            session = self.__session if self.__session is not None else boto3.Session()
        self.__session = session

        if stack_type is None:
            region = aws_utils.get_region_from_stack_arn(stack_arn)
            if no_logging:
                cf_client = aws_utils.ClientWrapper(session.client("cloudformation", region_name=region), log_level=aws_utils.LOG_LEVEL_NONE)
            else:
                cf_client = aws_utils.ClientWrapper(session.client("cloudformation", region_name=region))
            
            res = cf_client.describe_stacks(StackName=stack_arn)
            stack_description = res['Stacks'][0]
            parameters = stack_description.get('Parameters', [])

            stack_type = None
            for parameter in parameters:
                if parameter['ParameterKey'] == 'CloudCanvasStack':
                    stack_type = parameter['ParameterValue']
        else:
            stack_description = None

        if not stack_type:
            raise RuntimeError('The stack {} is not a Lumberyard Cloud Canvas managed stack.'.format(stack_arn))

        if stack_type == StackInfo.STACK_TYPE_RESOURCE_GROUP:
            stack_info = ResourceGroupInfo(self, stack_arn, session=session, stack_description=stack_description)
        elif stack_type == StackInfo.STACK_TYPE_DEPLOYMENT:
            stack_info = DeploymentInfo(self, stack_arn, session=session, stack_description=stack_description)
        elif stack_type == StackInfo.STACK_TYPE_DEPLOYMENT_ACCESS:
            stack_info = DeploymentAccessInfo(self, stack_arn, session=session, stack_description=stack_description)
        elif stack_type == StackInfo.STACK_TYPE_PROJECT:
            stack_info = ProjectInfo(self, stack_arn, session=session, stack_description=stack_description)
        else:
            raise RuntimeError('The stack {} has an unexpected Lumberyard Cloud Canvas managed stack type: {}'.format(stack_arn, stack_type))

        self.__cache[stack_arn] = stack_info

        return stack_info
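
A minimal usage sketch (the ARN below is hypothetical; StackInfoManager comes from the surrounding module, as shown in Example #3):

    stack_manager = stack_info.StackInfoManager()
    arn = 'arn:aws:cloudformation:us-east-1:123456789012:stack/MyProject/hypothetical-uuid'  # hypothetical
    info = stack_manager.get_stack_info(arn)  # raises RuntimeError unless the stack is Cloud Canvas managed
    print(info.stack_arn)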
Example #2
    def region(self):
        """The AWS region parsed from this stack's ARN."""
        return aws_utils.get_region_from_stack_arn(self.stack_arn)
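
A CloudFormation stack ARN has the form arn:aws:cloudformation:<region>:<account-id>:stack/<stack-name>/<uuid>, so the helper only needs the fourth colon-delimited field. A minimal sketch of what aws_utils.get_region_from_stack_arn presumably does (the real implementation may differ):

    def get_region_from_stack_arn(stack_arn):
        # 'arn:aws:cloudformation:us-east-1:123456789012:stack/Foo/uuid' -> 'us-east-1'
        return stack_arn.split(':')[3]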
Example #3
def get_cloud_formation_client(stack_arn):
    region = aws_utils.get_region_from_stack_arn(stack_arn)
    return aws_utils.ClientWrapper(
        boto3.client('cloudformation', region_name=region))


def _create_lambda_client(stack_arn):
    """Create a new lambda client to use. This is to support patching while testing."""
    return aws_utils.ClientWrapper(
        boto3.client("lambda", region_name=aws_utils.get_region_from_stack_arn(stack_arn)))


def handler(event, context):
    event_type = event['RequestType']
    stack_arn = event['StackId']
    stack_manager = stack_info.StackInfoManager()
    stack = stack_manager.get_stack_info(stack_arn)
    if not stack.is_project_stack:
        raise RuntimeError("Resource Types can only be defined in the project stack.")
    configuration_bucket = stack.project_stack.configuration_bucket
    source_resource_name = event['LogicalResourceId']
    props = properties.load(event, _schema)
    definitions_src = event['ResourceProperties']['Definitions']
    lambda_client = _create_lambda_client(stack_arn)
    created_or_updated_lambdas = {}
    lambda_roles = []

    # Build the file key as "<root directory>/<project stack>/<deployment stack>/<resource_stack>/<resource_name>.json"
    path_components = [x.stack_name for x in stack.ancestry]
    path_components.insert(0, constant.RESOURCE_DEFINITIONS_PATH)
    path_components.append(source_resource_name + ".json")
    resource_file_key = aws_utils.s3_key_join(*path_components)
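    # For example (hypothetical names, assuming RESOURCE_DEFINITIONS_PATH is "resource-definitions"):
    #   "resource-definitions/Game/Game-Dev/Game-Dev-Items/CustomTypes.json"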
    path_info = resource_type_info.ResourceTypesPathInfo(resource_file_key)

    # Load information from the JSON file if it exists.
    # (It will exist on a Create event if the resource was previously deleted and recreated.)
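    # (s3_client and iam_client are module-level ClientWrapper instances defined
    # elsewhere in the original module.)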
    try:
        contents = s3_client.get_object(Bucket=configuration_bucket, Key=resource_file_key)['Body'].read()
        existing_info = json.loads(contents)
        definitions_dictionary = existing_info['Definitions']
        existing_lambdas = existing_info['Lambdas']
        if isinstance(existing_lambdas, dict):
            lambda_dictionary = existing_lambdas
        else:
            # Backwards compatibility: older files stored a list of Lambda ARNs
            # (arn:aws:lambda:<region>:<account>:function:<name>); field 6 is the function name.
            lambda_dictionary = {}
            existing_lambdas = set(x.split(":")[6] for x in existing_lambdas)
    except ClientError as e:
        error_code = e.response['Error']['Code']
        if error_code == 'NoSuchKey':
            definitions_dictionary = {}
            existing_lambdas = {}
            lambda_dictionary = {}
        else:
            raise e

    # Process the actual event
    if event_type == 'Delete':
        deleted_entries = set(definitions_dictionary.keys())

    else:
        definitions = props.Definitions
        lambda_config_src = event['ResourceProperties'].get('LambdaConfiguration', None)

        # Create lambdas for fetching the ARN and handling the resource creation/update/deletion
        lambdas_to_create = []

        for resource_type_name in definitions_src.keys():
            type_info = resource_type_info.ResourceTypeInfo(
                stack_arn, source_resource_name, resource_type_name, lambda_dictionary, False,
                definitions_src[resource_type_name])
            function_infos = [type_info.arn_function, type_info.handler_function]

            for function_info, field, tag, description in zip(function_infos, _lambda_fields, _lambda_tags,
                                                              _lambda_descriptions):
                if function_info is None:
                    continue

                function_handler = function_info.get('Function', None)
                if function_handler is None:
                    raise RuntimeError("Definition for '%s' in type '%s' requires a 'Function' field with the handler "
                                       "to execute." % (field, resource_type_name))

                # Create the role for the lambda(s) that will be servicing this resource type
                lambda_function_name = type_info.get_lambda_function_name(tag)
                role_name = role_utils.sanitize_role_name(lambda_function_name)
                role_path = "/%s/%s/" % (type_info.stack_name, type_info.source_resource_name)
                assume_role_policy_document = role_utils.get_assume_role_policy_document_for_service(
                    "lambda.amazonaws.com")

                try:
                    res = iam_client.create_role(
                        RoleName=role_name,
                        AssumeRolePolicyDocument=assume_role_policy_document,
                        Path=role_path)
                    role_arn = res['Role']['Arn']
                except ClientError as e:
                    if e.response["Error"]["Code"] != 'EntityAlreadyExists':
                        raise e
                    res = iam_client.get_role(RoleName=role_name)
                    role_arn = res['Role']['Arn']

                # Copy the base policy for the role and add any permissions that are specified by the type
                role_policy = copy.deepcopy(_lambda_base_policy)
                role_policy['Statement'].extend(function_info.get('PolicyStatement', []))
                iam_client.put_role_policy(RoleName=role_name, PolicyName=_inline_policy_name,
                                           PolicyDocument=json.dumps(role_policy))

                # Record this role and the type_info so we can create a lambda for it
                lambda_roles.append(role_name)
                lambdas_to_create.append({
                    'role_arn': role_arn,
                    'type_info': type_info,
                    'lambda_function_name': lambda_function_name,
                    'handler': "resource_types." + function_handler,
                    'description': description
                })

        # We create the lambdas in a separate pass because role-propagation to lambda takes a while, and we don't want
        # to have to delay multiple times for each role/lambda pair
        #
        # TODO: Replace delay (and all other instances of role/lambda creation) with exponential backoff
        time.sleep(role_utils.PROPAGATION_DELAY_SECONDS)

        for info in lambdas_to_create:
            # Create the lambda function
            arn, version = _create_or_update_lambda_function(
                lambda_client=lambda_client,
                timeout=props.LambdaTimeout,
                lambda_config_src=lambda_config_src,
                info=info,
                existing_lambdas=existing_lambdas
            )
            created_or_updated_lambdas[info['lambda_function_name']] = {'arn': arn, 'v': version}

        deleted_entries = set(definitions_dictionary.keys()) - set(definitions_src.keys())

    physical_resource_id = "-".join(path_components[1:])
    lambda_dictionary.update(created_or_updated_lambdas)
    definitions_dictionary.update(definitions_src)
    config_info = {
        'StackId': stack_arn,
        'Id': physical_resource_id,
        'Lambdas': lambda_dictionary,
        'Definitions': definitions_dictionary,
        'Deleted': list(deleted_entries)
    }
    data = {
        'ConfigBucket': configuration_bucket,
        'ConfigKey': resource_file_key
    }

    # Copy the resource definitions to the configuration bucket.
    s3_client.put_object(Bucket=configuration_bucket, Key=resource_file_key, Body=json.dumps(config_info, indent=2))
    custom_resource_response.succeed(event, context, data, physical_resource_id)
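
For context, custom_resource_response.succeed is an internal Cloud Canvas helper; the underlying CloudFormation custom resource protocol is to PUT a JSON status document to the pre-signed ResponseURL supplied in the event. A minimal sketch of that protocol (not the actual Cloud Canvas implementation; Python 3 standard library only):

    import json
    import urllib.request

    def send_success(event, context, data, physical_resource_id):
        # Standard CloudFormation custom resource response body.
        body = json.dumps({
            'Status': 'SUCCESS',
            'Reason': 'See CloudWatch log stream: {}'.format(context.log_stream_name),
            'PhysicalResourceId': physical_resource_id,
            'StackId': event['StackId'],
            'RequestId': event['RequestId'],
            'LogicalResourceId': event['LogicalResourceId'],
            'Data': data
        }).encode('utf-8')
        request = urllib.request.Request(event['ResponseURL'], data=body, method='PUT')
        urllib.request.urlopen(request)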