Code example #1
def test_with_too_long_name(self):
    stack_arn = 'arn:aws:cloudformation:TestRegion:TestAccount:stack/LongName9A123456789B123456789C123456789D123456789E123456789F123456789G-RandomPart/TestUUID'
    logical_role_name = 'RoleName9A123456789B123456789C123456789D123456789E123456789F123456789G'
    actual_role_name = role_utils.get_access_control_role_name(
        stack_arn, logical_role_name)
    self.assertEqual(
        actual_role_name,
        role_utils.sanitize_role_name(
            'LongName9A123456789B123456789C123456789D123456789E123456789F123456789G-RandomPart-RoleName9A123456789B123456789C123456789D123456789E123456789F123456789G'
        ))
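
IAM limits role names to 64 characters, which is what the test above exercises: get_access_control_role_name must return the same value as sanitize_role_name applied to the combined stack-name/role-name string. The implementation of role_utils.sanitize_role_name is not shown in this section; a minimal sketch, assuming the helper truncates over-long names and appends a short digest to keep them unique, might look like this:

import hashlib

# IAM's documented limit on role name length.
MAX_ROLE_NAME_LENGTH = 64


def sanitize_role_name(role_name):
    # Hypothetical sketch; the real role_utils.sanitize_role_name is not
    # shown in this section. Names within the limit pass through unchanged.
    if len(role_name) <= MAX_ROLE_NAME_LENGTH:
        return role_name
    # Truncate and append a short digest so distinct long names stay unique
    # after truncation (55 chars + '-' + 8-char digest = 64).
    digest = hashlib.sha224(role_name.encode('utf-8')).hexdigest()[:8]
    return '{}-{}'.format(role_name[:MAX_ROLE_NAME_LENGTH - 9], digest)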
Code example #2
def handler(event, context):
    event_type = event['RequestType']
    stack_arn = event['StackId']
    stack_manager = stack_info.StackInfoManager()
    stack = stack_manager.get_stack_info(stack_arn)
    if not stack.is_project_stack:
        raise RuntimeError("Resource Types can only be defined in the project stack.")
    configuration_bucket = stack.project_stack.configuration_bucket
    source_resource_name = event['LogicalResourceId']
    props = properties.load(event, _schema)
    definitions_src = event['ResourceProperties']['Definitions']
    lambda_client = _create_lambda_client(stack_arn)
    created_or_updated_lambdas = {}
    lambda_roles = []

    # Set up tags for all created resources (this is guaranteed to be the project stack).
    # Note: IAM takes a list of {'Key': ..., 'Value': ...} dicts, while Lambda takes a dict of {string: string} pairs.
    iam_tags = [
        {'Key': constant.PROJECT_NAME_TAG, 'Value': stack.stack_name},
        {'Key': constant.STACK_ID_TAG, 'Value': stack_arn}
    ]
    lambda_tags = {constant.PROJECT_NAME_TAG: stack.stack_name, constant.STACK_ID_TAG: stack_arn}

    # Build the file key as "<root directory>/<project stack>/<deployment stack>/<resource stack>/<resource name>.json"
    path_components = [x.stack_name for x in stack.ancestry]
    path_components.insert(0, constant.RESOURCE_DEFINITIONS_PATH)
    path_components.append(source_resource_name + ".json")
    resource_file_key = aws_utils.s3_key_join(*path_components)
    path_info = resource_type_info.ResourceTypesPathInfo(resource_file_key)

    # Load information from the JSON file if it exists.
    # (It will exist on a Create event if the resource was previously deleted and recreated.)
    try:
        contents = s3_client.get_object(Bucket=configuration_bucket, Key=resource_file_key)['Body'].read()
        existing_info = json.loads(contents)
        definitions_dictionary = existing_info['Definitions']
        existing_lambdas = existing_info['Lambdas']
        if isinstance(existing_lambdas, dict):
            lambda_dictionary = existing_lambdas
        else:
            # Backwards compatibility: older versions stored 'Lambdas' as a list of ARNs
            lambda_dictionary = {}
            existing_lambdas = set([x.split(":")[6] for x in existing_lambdas])  # Extract the function name from each ARN
    except ClientError as e:
        error_code = e.response['Error']['Code']
        if error_code == 'NoSuchKey':
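            # No definitions file exists yet; start with empty collections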
            definitions_dictionary = {}
            existing_lambdas = {}
            lambda_dictionary = {}
        else:
            raise e

    # Process the actual event
    if event_type == 'Delete':
        deleted_entries = set(definitions_dictionary.keys())

    else:
        definitions = props.Definitions
        lambda_config_src = event['ResourceProperties'].get('LambdaConfiguration', None)

        # Create lambdas for fetching the ARN and handling the resource creation/update/deletion
        lambdas_to_create = []

        for resource_type_name in definitions_src.keys():
            type_info = resource_type_info.ResourceTypeInfo(
                stack_arn, source_resource_name, resource_type_name, lambda_dictionary, False,
                definitions_src[resource_type_name])
            function_infos = [type_info.arn_function, type_info.handler_function]

            for function_info, field, tag, description in zip(function_infos, _lambda_fields, _lambda_tags,
                                                              _lambda_descriptions):
                if function_info is None:
                    continue

                function_handler = function_info.get('Function', None)
                if function_handler is None:
                    raise RuntimeError("Definition for '%s' in type '%s' requires a 'Function' field with the handler "
                                       "to execute." % (field, resource_type_name))

                # Create the role for the lambda(s) that will be servicing this resource type
                lambda_function_name = type_info.get_lambda_function_name(tag)
                role_name = role_utils.sanitize_role_name(lambda_function_name)
                role_path = "/%s/%s/" % (type_info.stack_name, type_info.source_resource_name)
                assume_role_policy_document = role_utils.get_assume_role_policy_document_for_service("lambda.amazonaws.com")

                try:
                    res = iam_client.create_role(
                        RoleName=role_name,
                        AssumeRolePolicyDocument=assume_role_policy_document,
                        Path=role_path,
                        Tags=iam_tags)
                    role_arn = res['Role']['Arn']
                except ClientError as e:
                    if e.response["Error"]["Code"] != 'EntityAlreadyExists':
                        raise e
                    res = iam_client.get_role(RoleName=role_name)
                    role_arn = res['Role']['Arn']

                # Copy the base policy for the role and add any permissions that are specified by the type
                role_policy = copy.deepcopy(_create_base_lambda_policy())
                role_policy['Statement'].extend(function_info.get('PolicyStatement', []))
                iam_client.put_role_policy(RoleName=role_name, PolicyName=_inline_policy_name,
                                           PolicyDocument=json.dumps(role_policy))

                # Record this role and the type_info so we can create a lambda for it
                lambda_roles.append(role_name)

                lambda_info = {
                    'role_arn': role_arn,
                    'type_info': type_info,
                    'lambda_function_name': lambda_function_name,
                    'handler': "resource_types." + function_handler,
                    'description': description,
                    'tags': lambda_tags
                }

                # Merge in any lambda-specific configuration overrides
                if 'HandlerFunctionConfiguration' in function_info:
                    lambda_override = function_info['HandlerFunctionConfiguration']
                    if lambda_override:
                        print("Found LambdaConfiguration override {}".format(lambda_override))
                        lambda_info['lambda_config_overrides'] = lambda_override

                lambdas_to_create.append(lambda_info)

        # We create the lambdas in a separate pass because IAM role propagation to Lambda takes a while, and we
        # don't want to delay once for each role/lambda pair
        #
        # TODO: Replace delay (and all other instances of role/lambda creation) with exponential backoff
        time.sleep(role_utils.PROPAGATION_DELAY_SECONDS)

        for info in lambdas_to_create:
            # Create the lambda function
            arn, version = _create_or_update_lambda_function(
                lambda_client=lambda_client,
                timeout=props.LambdaTimeout,
                lambda_config_src=lambda_config_src,
                info=info,
                existing_lambdas=existing_lambdas
            )
            created_or_updated_lambdas[info['lambda_function_name']] = {'arn': arn, 'v': version}

            # Finally, add/update a role policy granting each Lambda least-privilege permissions to log events
            policy_document = _generate_lambda_log_event_policy(arn)
            iam_client.put_role_policy(RoleName=aws_utils.get_role_name_from_role_arn(info['role_arn']),
                                       PolicyDocument=json.dumps(policy_document),
                                       PolicyName='LambdaLoggingEventsPolicy')

        deleted_entries = set(definitions_dictionary.keys()) - set(definitions_src.keys())

    physical_resource_id = "-".join(path_components[1:])
    lambda_dictionary.update(created_or_updated_lambdas)
    definitions_dictionary.update(definitions_src)
    config_info = {
        'StackId': stack_arn,
        'Id': physical_resource_id,
        'Lambdas': lambda_dictionary,
        'Definitions': definitions_dictionary,
        'Deleted': list(deleted_entries)
    }
    data = {
        'ConfigBucket': configuration_bucket,
        'ConfigKey': resource_file_key
    }

    # Copy the resource definitions to the configuration bucket.
    s3_client.put_object(Bucket=configuration_bucket, Key=resource_file_key, Body=json.dumps(config_info, indent=2))
    custom_resource_response.succeed(event, context, data, physical_resource_id)
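
Example #2's _generate_lambda_log_event_policy helper is not shown here. A plausible sketch, assuming it scopes the standard CloudWatch Logs actions to the function's own log group (the fields pulled out below are the standard positions in a Lambda function ARN), is:

def _generate_lambda_log_event_policy(lambda_arn):
    # Hypothetical sketch; the real helper is not shown in this section.
    # Scopes the standard CloudWatch Logs actions to the function's own log group.
    parts = lambda_arn.split(':')
    region, account_id, function_name = parts[3], parts[4], parts[6]
    log_group_arn = 'arn:aws:logs:{}:{}:log-group:/aws/lambda/{}:*'.format(
        region, account_id, function_name)
    return {
        'Version': '2012-10-17',
        'Statement': [{
            'Effect': 'Allow',
            'Action': [
                'logs:CreateLogGroup',
                'logs:CreateLogStream',
                'logs:PutLogEvents'
            ],
            'Resource': log_group_arn
        }]
    }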
Code example #3
def handler(event, context):
    event_type = event['RequestType']
    stack_arn = event['StackId']
    stack_manager = stack_info.StackInfoManager()
    stack = stack_manager.get_stack_info(stack_arn)
    if not stack.is_project_stack:
        raise RuntimeError(
            "Resource Types can only be defined in the project stack.")
    configuration_bucket = stack.project_stack.configuration_bucket
    source_resource_name = event['LogicalResourceId']
    props = properties.load(event, _schema)
    definitions_src = event['ResourceProperties']['Definitions']
    lambda_client = aws_utils.ClientWrapper(
        boto3.client("lambda", aws_utils.get_region_from_stack_arn(stack_arn)))
    lambda_arns = []
    lambda_roles = []

    # Build the file key as "<root directory>/<project stack>/<deployment stack>/<resource stack>/<resource name>.json"
    path_components = [x.stack_name for x in stack.ancestry]
    path_components.insert(0, constant.RESOURCE_DEFINITIONS_PATH)
    path_components.append(source_resource_name + ".json")
    resource_file_key = aws_utils.s3_key_join(*path_components)
    path_info = resource_type_info.ResourceTypesPathInfo(resource_file_key)

    # Load information from the JSON file if it exists
    if event_type != 'Create':
        contents = s3_client.get_object(Bucket=configuration_bucket,
                                        Key=resource_file_key)['Body'].read()
        existing_info = json.loads(contents)
    else:
        existing_info = None

    # Process the actual event
    if event_type == 'Delete':
        _delete_resources(existing_info['Lambdas'], existing_info['Roles'],
                          lambda_client)
        custom_resource_response.succeed(event, context, {},
                                         existing_info['Id'])
        # Nothing more to do for a Delete; return so the response isn't sent twice
        return

    else:
        existing_roles = set()
        existing_lambdas = set()

        if event_type == 'Update':
            existing_roles = set(
                [arn.split(":")[-1] for arn in existing_info['Roles']])
            existing_lambdas = set(
                [arn.split(":")[-1] for arn in existing_info['Lambdas']])

        definitions = props.Definitions
        lambda_config_src = event['ResourceProperties'].get(
            'LambdaConfiguration', None)

        # Create lambdas for fetching the ARN and handling the resource creation/update/deletion
        lambdas_to_create = []

        for resource_type_name in definitions_src.keys():
            type_info = resource_type_info.ResourceTypeInfo(
                stack_arn, source_resource_name, resource_type_name,
                definitions_src[resource_type_name])
            function_infos = [
                type_info.arn_function, type_info.handler_function
            ]

            for function_info, field, tag, description in zip(
                    function_infos, _lambda_fields, _lambda_tags,
                    _lambda_descriptions):
                if function_info is None:
                    continue

                function_handler = function_info.get('Function', None)
                if function_handler is None:
                    raise RuntimeError(
                        "Definition for '%s' in type '%s' requires a 'Function' field with the handler "
                        "to execute." % (field, resource_type_name))

                # Create the role for the lambda(s) that will be servicing this resource type
                lambda_function_name = type_info.get_lambda_function_name(tag)
                role_name = role_utils.sanitize_role_name(lambda_function_name)
                role_path = "/%s/%s/" % (type_info.stack_name,
                                         type_info.source_resource_name)
                assume_role_policy_document = role_utils.get_assume_role_policy_document_for_service(
                    "lambda.amazonaws.com")

                try:
                    res = iam_client.create_role(
                        RoleName=role_name,
                        AssumeRolePolicyDocument=assume_role_policy_document,
                        Path=role_path)
                    role_arn = res['Role']['Arn']
                except ClientError as e:
                    if e.response["Error"]["Code"] != 'EntityAlreadyExists':
                        raise e
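                    # The role is still in use; drop it from the orphan set so it isn't deleted below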
                    existing_roles.discard(role_name)
                    res = iam_client.get_role(RoleName=role_name)
                    role_arn = res['Role']['Arn']

                # Copy the base policy for the role and add any permissions that are specified by the type
                role_policy = copy.deepcopy(_lambda_base_policy)
                role_policy['Statement'].extend(
                    function_info.get('PolicyStatement', []))
                iam_client.put_role_policy(
                    RoleName=role_name,
                    PolicyName=_inline_policy_name,
                    PolicyDocument=json.dumps(role_policy))

                # Record this role and the type_info so we can create a lambda for it
                lambda_roles.append(role_name)
                lambdas_to_create.append({
                    'role_arn': role_arn,
                    'type_info': type_info,
                    'lambda_function_name': lambda_function_name,
                    'handler': "resource_types." + function_handler,
                    'description': description
                })

        # We create the lambdas in a separate pass because IAM role propagation to Lambda takes a while, and we
        # don't want to delay once for each role/lambda pair
        #
        # TODO: Replace delay (and all other instances of role/lambda creation) with exponential backoff
        time.sleep(role_utils.PROPAGATION_DELAY_SECONDS)

        for info in lambdas_to_create:
            # Create the lambda function
            arn = _create_or_update_lambda_function(
                lambda_client=lambda_client,
                timeout=props.LambdaTimeout,
                lambda_config_src=lambda_config_src,
                info=info,
                existing_lambdas=existing_lambdas)
            lambda_arns.append(arn)

        # For Update operations, delete any lambdas and roles that previously existed and now no longer do.
        _delete_resources(existing_lambdas, existing_roles, lambda_client)

    physical_resource_id = "-".join(path_components[1:])
    config_info = {
        'StackId': stack_arn,
        'Id': physical_resource_id,
        'Lambdas': lambda_arns,
        'Roles': lambda_roles,
        'Definitions': definitions_src
    }
    data = {
        'ConfigBucket': configuration_bucket,
        'ConfigKey': resource_file_key
    }

    # Copy the resource definitions to the configuration bucket.
    s3_client.put_object(Bucket=configuration_bucket,
                         Key=resource_file_key,
                         Body=json.dumps(config_info))
    custom_resource_response.succeed(event, context, data,
                                     physical_resource_id)
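
Both handlers pause for a fixed role_utils.PROPAGATION_DELAY_SECONDS and carry a TODO to replace that delay with exponential backoff. One way to discharge that TODO, sketched under the assumption that Lambda keeps raising InvalidParameterValueException until a freshly created IAM role can be assumed, is a small retry wrapper:

import time

from botocore.exceptions import ClientError


def _call_with_backoff(fn, max_attempts=6, base_delay=1.0):
    # Hypothetical sketch of the TODO in both handlers: retry with exponential
    # backoff instead of sleeping for a fixed propagation delay. Lambda raises
    # InvalidParameterValueException while a new IAM role is still propagating.
    for attempt in range(max_attempts):
        try:
            return fn()
        except ClientError as e:
            if e.response['Error']['Code'] != 'InvalidParameterValueException':
                raise
            if attempt == max_attempts - 1:
                raise
            time.sleep(base_delay * (2 ** attempt))

With a wrapper like this, the fixed time.sleep call could be dropped and each creation wrapped as, for example, _call_with_backoff(lambda: _create_or_update_lambda_function(...)).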