def test_succeed(self):
    event = {
        'StackId': 'test-stack-id',
        'RequestId': 'test-request-id',
        'LogicalResourceId': 'test-logical-resource-id',
        'ResponseURL': 'https://test-host/test-path/test-path?test-arg=test-value'
    }
    context = {}
    data = {
        'test-data-key': 'test-data-value'
    }
    physical_resource_id = 'test-physical-resource-id'

    with mock.patch('httplib.HTTPSConnection') as mock_HTTPSConnection:
        mock_connection = mock_HTTPSConnection.return_value
        mock_getresponse = mock_connection.getresponse
        mock_response = mock.MagicMock()
        mock_response.status = httplib.OK
        mock_getresponse.return_value = mock_response
        mock_request = mock_connection.request

        custom_resource_response.succeed(event, context, data, physical_resource_id)

        mock_HTTPSConnection.assert_called_once_with('test-host')
        expected_body = (
            '{"Status": "SUCCESS", '
            '"StackId": "test-stack-id", '
            '"PhysicalResourceId": "test-physical-resource-id", '
            '"RequestId": "test-request-id", '
            '"Data": {"test-data-key": "test-data-value"}, '
            '"LogicalResourceId": "test-logical-resource-id"}'
        )
        mock_request.assert_called_with('PUT', '/test-path/test-path?test-arg=test-value', expected_body)
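# A minimal sketch of what the test above implies custom_resource_response.succeed
# does: serialize a SUCCESS response body and PUT it to the pre-signed
# CloudFormation ResponseURL. The module's actual contents are an assumption;
# only the observable behavior asserted by the mocks is confirmed.
import json
import httplib
from urlparse import urlparse


def succeed(event, context, data, physical_resource_id):
    # Build the response document CloudFormation expects for a custom resource.
    body = json.dumps({
        'Status': 'SUCCESS',
        'StackId': event['StackId'],
        'PhysicalResourceId': physical_resource_id,
        'RequestId': event['RequestId'],
        'Data': data,
        'LogicalResourceId': event['LogicalResourceId']
    })

    # PUT the body to the pre-signed ResponseURL, preserving its query string.
    parsed = urlparse(event['ResponseURL'])
    path = parsed.path
    if parsed.query:
        path += '?' + parsed.query

    connection = httplib.HTTPSConnection(parsed.hostname)
    connection.request('PUT', path, body)
    response = connection.getresponse()
    if response.status != httplib.OK:
        raise RuntimeError('Unexpected CloudFormation response status {}'.format(response.status))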
def handler(event, context):
    props = properties.load(event, PROPERTIES_SCHEMA)
    request_type = event['RequestType']
    stack_arn = event['StackId']
    logical_role_name = props.FunctionName
    stack_manager = stack_info.StackInfoManager()
    id_data = aws_utils.get_data_from_custom_physical_resource_id(event.get('PhysicalResourceId', None))

    if request_type == 'Delete':
        role_utils.delete_access_control_role(id_data, logical_role_name)
        response_data = {}
    else:
        stack = stack_manager.get_stack_info(stack_arn)

        if request_type == 'Create':
            project_service_lambda_arn = _get_project_service_lambda_arn(stack)
            assume_role_service = 'lambda.amazonaws.com'
            role_arn = role_utils.create_access_control_role(
                stack_manager,
                id_data,
                stack_arn,
                logical_role_name,
                assume_role_service,
                default_policy=get_default_policy(project_service_lambda_arn))
        elif request_type == 'Update':
            role_arn = role_utils.get_access_control_role_arn(id_data, logical_role_name)
        else:
            raise RuntimeError('Unexpected request type: {}'.format(request_type))

        _add_built_in_settings(props.Settings.__dict__, stack)

        # Check if we have a folder just for this function, if not use the default
        output_key = input_key = _get_input_key(props)
        if not props.IgnoreAppendingSettingsToZip:
            output_key = _inject_settings(props.Settings.__dict__, props.Runtime, props.ConfigurationBucket,
                                          input_key, props.FunctionName)

        cc_settings = copy.deepcopy(props.Settings.__dict__)
        # Remove "Services" from settings because they get injected into the python code package during _inject_settings
        # TODO: move handling of project-level service interfaces to the same code as cross-gem interfaces
        if "Services" in cc_settings:
            del cc_settings["Services"]

        response_data = {
            'ConfigurationBucket': props.ConfigurationBucket,
            'ConfigurationKey': output_key,
            'Runtime': props.Runtime,
            'Role': role_arn,
            'RoleName': role_utils.get_access_control_role_name(stack_arn, logical_role_name),
            'ComposedLambdaConfiguration': {
                'Code': {
                    'S3Bucket': props.ConfigurationBucket,
                    'S3Key': output_key
                },
                'Environment': {
                    'Variables': cc_settings
                },
                'Role': role_arn,
                'Runtime': props.Runtime
            },
            'CCSettings': cc_settings
        }

    physical_resource_id = aws_utils.construct_custom_physical_resource_id_with_data(
        stack_arn, event['LogicalResourceId'], id_data)
    custom_resource_response.succeed(event, context, response_data, physical_resource_id)
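# A hypothetical Create event of the shape this handler consumes. The field
# names follow the property usage above; the values (and the assumption that
# properties.load reads them from event['ResourceProperties']) are
# illustrative, not taken from a real invocation.
example_event = {
    'RequestType': 'Create',
    'StackId': 'arn:aws:cloudformation:us-east-1:123456789012:stack/Example/1234-5678',
    'LogicalResourceId': 'ExampleFunctionConfiguration',
    'ResourceProperties': {
        'FunctionName': 'ExampleFunction',
        'ConfigurationBucket': 'example-configuration-bucket',
        'Runtime': 'python3.7',
        'Settings': {
            'Services': []  # stripped from CCSettings; injected into the code package instead
        },
        'IgnoreAppendingSettingsToZip': False
    }
}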
def handler(event, context):
    event_type = event['RequestType']
    stack_arn = event['StackId']
    stack_manager = stack_info.StackInfoManager()
    stack = stack_manager.get_stack_info(stack_arn)

    if not stack.is_project_stack:
        raise RuntimeError("Resource Types can only be defined in the project stack.")

    configuration_bucket = stack.project_stack.configuration_bucket
    source_resource_name = event['LogicalResourceId']
    props = properties.load(event, _schema)
    definitions_src = event['ResourceProperties']['Definitions']
    lambda_client = _create_lambda_client(stack_arn)
    created_or_updated_lambdas = {}
    lambda_roles = []

    # Set up tags for all resources created; this must be the project stack.
    # Note: IAM takes an array of [{'Key':, 'Value':}] entries, Lambda takes a dict of {string: string} pairs.
    iam_tags = [
        {'Key': constant.PROJECT_NAME_TAG, 'Value': stack.stack_name},
        {'Key': constant.STACK_ID_TAG, 'Value': stack_arn}
    ]
    lambda_tags = {constant.PROJECT_NAME_TAG: stack.stack_name, constant.STACK_ID_TAG: stack_arn}

    # Build the file key as "<root directory>/<project stack>/<deployment stack>/<resource_stack>/<resource_name>.json"
    path_components = [x.stack_name for x in stack.ancestry]
    path_components.insert(0, constant.RESOURCE_DEFINITIONS_PATH)
    path_components.append(source_resource_name + ".json")
    resource_file_key = aws_utils.s3_key_join(*path_components)
    path_info = resource_type_info.ResourceTypesPathInfo(resource_file_key)

    # Load information from the JSON file if it exists.
    # (It will exist on a Create event if the resource was previously deleted and recreated.)
    try:
        contents = s3_client.get_object(Bucket=configuration_bucket, Key=resource_file_key)['Body'].read()
        existing_info = json.loads(contents)
        definitions_dictionary = existing_info['Definitions']
        existing_lambdas = existing_info['Lambdas']
        if isinstance(existing_lambdas, dict):
            lambda_dictionary = existing_lambdas
        else:
            # Backwards compatibility
            lambda_dictionary = {}
            existing_lambdas = set([x.split(":")[6] for x in existing_lambdas])  # Convert ARN to function name
    except ClientError as e:
        error_code = e.response['Error']['Code']
        if error_code == 'NoSuchKey':
            definitions_dictionary = {}
            existing_lambdas = {}
            lambda_dictionary = {}
        else:
            raise e

    # Process the actual event
    if event_type == 'Delete':
        deleted_entries = set(definitions_dictionary.keys())
    else:
        definitions = props.Definitions
        lambda_config_src = event['ResourceProperties'].get('LambdaConfiguration', None)

        # Create lambdas for fetching the ARN and handling the resource creation/update/deletion
        lambdas_to_create = []

        for resource_type_name in definitions_src.keys():
            type_info = resource_type_info.ResourceTypeInfo(
                stack_arn, source_resource_name, resource_type_name, lambda_dictionary, False,
                definitions_src[resource_type_name])
            function_infos = [type_info.arn_function, type_info.handler_function]

            for function_info, field, tag, description in zip(function_infos, _lambda_fields, _lambda_tags,
                                                              _lambda_descriptions):
                if function_info is None:
                    continue

                function_handler = function_info.get('Function', None)
                if function_handler is None:
                    raise RuntimeError("Definition for '%s' in type '%s' requires a 'Function' field with the handler "
                                       "to execute." % (field, resource_type_name))

                # Create the role for the lambda(s) that will be servicing this resource type
                lambda_function_name = type_info.get_lambda_function_name(tag)
                role_name = role_utils.sanitize_role_name(lambda_function_name)
                role_path = "/%s/%s/" % (type_info.stack_name, type_info.source_resource_name)
                assume_role_policy_document = role_utils.get_assume_role_policy_document_for_service(
                    "lambda.amazonaws.com")

                try:
                    res = iam_client.create_role(
                        RoleName=role_name,
                        AssumeRolePolicyDocument=assume_role_policy_document,
                        Path=role_path,
                        Tags=iam_tags)
                    role_arn = res['Role']['Arn']
                except ClientError as e:
                    if e.response["Error"]["Code"] != 'EntityAlreadyExists':
                        raise e
                    res = iam_client.get_role(RoleName=role_name)
                    role_arn = res['Role']['Arn']

                # Copy the base policy for the role and add any permissions that are specified by the type
                role_policy = copy.deepcopy(_create_base_lambda_policy())
                role_policy['Statement'].extend(function_info.get('PolicyStatement', []))
                iam_client.put_role_policy(RoleName=role_name, PolicyName=_inline_policy_name,
                                           PolicyDocument=json.dumps(role_policy))

                # Record this role and the type_info so we can create a lambda for it
                lambda_roles.append(role_name)
                lambda_info = {
                    'role_arn': role_arn,
                    'type_info': type_info,
                    'lambda_function_name': lambda_function_name,
                    'handler': "resource_types." + function_handler,
                    'description': description,
                    'tags': lambda_tags
                }

                # Merge in any lambda-specific config overrides
                if 'HandlerFunctionConfiguration' in function_info:
                    lambda_override = function_info['HandlerFunctionConfiguration']
                    if lambda_override:
                        print("Found LambdaConfiguration override {}".format(lambda_override))
                        lambda_info['lambda_config_overrides'] = lambda_override

                lambdas_to_create.append(lambda_info)

        # We create the lambdas in a separate pass because role-propagation to lambda takes a while, and we don't want
        # to have to delay multiple times for each role/lambda pair
        #
        # TODO: Replace delay (and all other instances of role/lambda creation) with exponential backoff
        time.sleep(role_utils.PROPAGATION_DELAY_SECONDS)

        for info in lambdas_to_create:
            # Create the lambda function
            arn, version = _create_or_update_lambda_function(
                lambda_client=lambda_client,
                timeout=props.LambdaTimeout,
                lambda_config_src=lambda_config_src,
                info=info,
                existing_lambdas=existing_lambdas
            )
            created_or_updated_lambdas[info['lambda_function_name']] = {'arn': arn, 'v': version}

            # Finally, add/update a role policy granting the lambda least-privilege access to log events
            policy_document = _generate_lambda_log_event_policy(arn)
            iam_client.put_role_policy(RoleName=aws_utils.get_role_name_from_role_arn(info['role_arn']),
                                       PolicyDocument=json.dumps(policy_document),
                                       PolicyName='LambdaLoggingEventsPolicy')

        deleted_entries = set(definitions_dictionary.keys()) - set(definitions_src.keys())

    physical_resource_id = "-".join(path_components[1:])
    lambda_dictionary.update(created_or_updated_lambdas)
    definitions_dictionary.update(definitions_src)

    config_info = {
        'StackId': stack_arn,
        'Id': physical_resource_id,
        'Lambdas': lambda_dictionary,
        'Definitions': definitions_dictionary,
        'Deleted': list(deleted_entries)
    }
    data = {
        'ConfigBucket': configuration_bucket,
        'ConfigKey': resource_file_key
    }

    # Copy the resource definitions to the configuration bucket.
    s3_client.put_object(Bucket=configuration_bucket, Key=resource_file_key,
                         Body=json.dumps(config_info, indent=2))

    custom_resource_response.succeed(event, context, data, physical_resource_id)
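# For reference, a hypothetical example of the config_info document this
# handler persists to the configuration bucket. The key structure matches the
# dict built above; the names, ARNs, versions, and definition field names are
# illustrative only.
#
# {
#     "StackId": "arn:aws:cloudformation:us-east-1:123456789012:stack/Example/1234-5678",
#     "Id": "Example-CoreResourceTypes.json",
#     "Lambdas": {
#         "Example-CRH": {"arn": "arn:aws:lambda:us-east-1:123456789012:function:Example-CRH", "v": "3"}
#     },
#     "Definitions": {"Custom::Example": {"HandlerFunction": {"Function": "Custom_Example.handler"}}},
#     "Deleted": []
# }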
def handler(event, context):
    """Main handler for custom resources, wired in via project-template.json as the ProjectResourceHandler"""
    try:
        print('Dispatching event {} with context {}.'.format(
            json.dumps(event, cls=json_utils.SafeEncoder), context))

        resource_type = event.get('ResourceType', None)
        if resource_type is None:
            raise RuntimeError('No ResourceType specified.')

        if resource_type in _LOCAL_CUSTOM_RESOURCE_WHITELIST:
            # Old method for supporting custom resource code directly within the ProjectResourceHandler.
            # Should only be used for legacy types in the ProjectResourceHandler.
            module_name = resource_type.replace('Custom::', '') + 'ResourceHandler'
            module = sys.modules.get(module_name, None)

            if module is None:
                # First check for handler module in same directory as this module,
                # if not found, check for module in the resource group provided
                # directories.
                module_file_name = module_name + '.py'
                module_file_path = os.path.join(os.path.dirname(__file__), module_file_name)

                if os.path.isfile(module_file_path):
                    module = module_utils.load_module(module_name, os.path.dirname(module_file_path))
                elif os.path.isdir(PLUGIN_DIRECTORY_PATH):
                    plugin_directory_names = [
                        item for item in os.listdir(PLUGIN_DIRECTORY_PATH)
                        if os.path.isdir(os.path.join(PLUGIN_DIRECTORY_PATH, item))
                    ]

                    for plugin_directory_name in plugin_directory_names:
                        module_file_path = os.path.join(PLUGIN_DIRECTORY_PATH, plugin_directory_name,
                                                        module_file_name)
                        if os.path.isfile(module_file_path):
                            module = module_utils.load_module(module_name, os.path.dirname(module_file_path))
                            break

            if module is not None:
                if not hasattr(module, 'handler'):
                    raise RuntimeError('No handler function found for the {} resource type.'.format(resource_type))

                print('Using {}'.format(module))
                module.handler(event, context)
        else:
            # New way of instantiating custom resources. Load the dictionary of resource types.
            stack = stack_info.StackInfoManager().get_stack_info(event['StackId'])
            type_definition = stack.resource_definitions.get(resource_type, None)

            if type_definition is None:
                raise RuntimeError('No type definition found for the {} resource type.'.format(resource_type))

            if type_definition.handler_function is None:
                raise RuntimeError('No handler function defined for custom resource type {}.'.format(resource_type))

            request_type = event['RequestType']
            if type_definition.deleted and request_type == "Create":
                raise RuntimeError('Attempting to Create a new resource of deleted type {}.'.format(resource_type))

            create_version = type_definition.handler_function_version
            logical_id = event['LogicalResourceId']
            embedded_physical_id = None

            # Access control can take over 60s, so set custom timeouts
            config_dict = {
                'region_name': stack.region,
                'connect_timeout': LAMBDA_CONNECTION_TIMEOUT,
                'read_timeout': LAMBDA_READ_TIMEOUT
            }
            lambda_client_config = Config(**config_dict)

            lambda_client = aws_utils.ClientWrapper(boto3.client("lambda", config=lambda_client_config))
            cf_client = aws_utils.ClientWrapper(boto3.client("cloudformation", stack.region))

            if request_type != "Create":
                physical_id = event['PhysicalResourceId']
                embedded_physical_id = physical_id
                try:
                    existing_resource_info = json.loads(physical_id)
                    embedded_physical_id = existing_resource_info['id']
                    create_version = existing_resource_info['v']
                except (ValueError, TypeError, KeyError):
                    # Backwards compatibility with resources created prior to versioning support
                    create_version = None

            run_version = create_version

            # Check the metadata on the resource to see if we're coercing to a different version
            resource_info = cf_client.describe_stack_resource(StackName=event['StackId'],
                                                              LogicalResourceId=logical_id)
            metadata = aws_utils.get_cloud_canvas_metadata(resource_info['StackResourceDetail'],
                                                           custom_resource_utils.METADATA_VERSION_TAG)
            if metadata:
                run_version = metadata
                if request_type == "Create":
                    create_version = metadata

            # Configure our invocation, and invoke the handler lambda
            lambda_data = {'Handler': type_definition.handler_function}
            lambda_data.update(event)
            invoke_params = {
                'FunctionName': type_definition.get_custom_resource_lambda_function_name(),
                'Payload': json.dumps(lambda_data)
            }
            if run_version:
                invoke_params['Qualifier'] = run_version

            response = lambda_client.invoke(**invoke_params)

            if response['StatusCode'] == 200:
                response_data = json.loads(response['Payload'].read().decode())
                response_success = response_data.get('Success', None)

                if response_success is not None:
                    if response_success:
                        if create_version:
                            if request_type == "Update" and \
                                    response_data['PhysicalResourceId'] != embedded_physical_id:
                                # Physical ID changed during an update, which is *technically* illegal according to the
                                # docs, but we allow it because CloudFormation doesn't act to prevent it.
                                print(_UPDATE_CHANGED_PHYSICAL_ID_WARNING.format(
                                    logical_id, embedded_physical_id, response_data['PhysicalResourceId']))
                            out_resource_id = json.dumps({
                                'id': response_data['PhysicalResourceId'],
                                'v': create_version
                            })
                        else:
                            # Backwards compatibility with resources created prior to versioning support
                            out_resource_id = response_data['PhysicalResourceId']

                        custom_resource_response.succeed(event, context, response_data['Data'], out_resource_id)
                    else:
                        custom_resource_response.fail(event, context, response_data['Reason'])
                else:
                    raise RuntimeError("Handler lambda for resource type '%s' returned a malformed response: %s" %
                                       (resource_type, response_data))
            else:
                raise RuntimeError("Handler lambda for resource type '%s' failed to execute, returned HTTP status %d" %
                                   (resource_type, response['StatusCode']))

    except ValidationError as e:
        custom_resource_response.fail(event, context, str(e))
    except Exception as e:
        print('Unexpected error occurred when processing event {} with context {}. {}'.format(
            event, context, traceback.format_exc()))
        custom_resource_response.fail(
            event, context,
            'Unexpected {} error occurred: {}. Additional details can be found in the CloudWatch log group {} '
            'stream {}'.format(type(e).__name__, str(e), context.log_group_name, context.log_stream_name))
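# A minimal sketch of the versioned physical-resource-ID convention the
# dispatcher above uses. These helper names are illustrative (the dispatcher
# inlines this logic); only the {'id': ..., 'v': ...} JSON shape comes from
# the code.
import json


def encode_physical_id(physical_id, version):
    # Embed the handler lambda version alongside the real physical ID.
    return json.dumps({'id': physical_id, 'v': version})


def decode_physical_id(physical_id):
    # Return (embedded_id, version), falling back to the raw ID for
    # resources created before versioning support was added.
    try:
        info = json.loads(physical_id)
        return info['id'], info['v']
    except (ValueError, TypeError, KeyError):
        return physical_id, None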
def handler(event, context):
    props = properties.load(event, PROPERTIES_SCHEMA)
    request_type = event['RequestType']
    stack_arn = event['StackId']
    logical_role_name = props.FunctionName
    stack_manager = stack_info.StackInfoManager()
    id_data = aws_utils.get_data_from_custom_physical_resource_id(event.get('PhysicalResourceId', None))

    if request_type == 'Delete':
        role_utils.delete_access_control_role(id_data, logical_role_name)
        response_data = {}
    else:
        stack = stack_manager.get_stack_info(stack_arn)

        if request_type == 'Create':
            project_service_lambda_arn = _get_project_service_lambda_arn(stack)
            assume_role_service = 'lambda.amazonaws.com'
            role_arn = role_utils.create_access_control_role(
                stack_manager,
                id_data,
                stack_arn,
                logical_role_name,
                assume_role_service,
                default_policy=get_default_policy(project_service_lambda_arn))
        elif request_type == 'Update':
            role_arn = role_utils.get_access_control_role_arn(id_data, logical_role_name)
        else:
            raise RuntimeError('Unexpected request type: {}'.format(request_type))

        _add_built_in_settings(props.Settings.__dict__, stack)

        # Give access to project-level ServiceDirectory APIs.
        # Other deployment-level APIs are handled by the InterfaceDependency resolver custom resource type.
        permitted_arns = _add_services_settings(stack, props.Settings.__dict__, props.Services)
        _add_service_access_policy_to_role(role_arn, permitted_arns)

        # Check if we have a folder just for this function, if not use the default
        output_key = input_key = _get_input_key(props)
        if not props.IgnoreAppendingSettingsToZip:
            output_key = _inject_settings(props.Settings.__dict__, props.Runtime, props.ConfigurationBucket,
                                          input_key, props.FunctionName)

        response_data = {
            'ConfigurationBucket': props.ConfigurationBucket,
            'ConfigurationKey': output_key,
            'Runtime': props.Runtime,
            'Role': role_arn,
            'RoleName': role_utils.get_access_control_role_name(stack_arn, logical_role_name),
            'ComposedLambdaConfiguration': {
                'Code': {
                    'S3Bucket': props.ConfigurationBucket,
                    'S3Key': output_key
                },
                'Role': role_arn,
                'Runtime': props.Runtime
            }
        }

    physical_resource_id = aws_utils.construct_custom_physical_resource_id_with_data(
        stack_arn, event['LogicalResourceId'], id_data)
    custom_resource_response.succeed(event, context, response_data, physical_resource_id)
def handler(event, context):
    try:
        print 'Dispatching event {} with context {}.'.format(
            json.dumps(event, cls=json_utils.SafeEncoder), context)

        resource_type = event.get('ResourceType', None)
        if resource_type is None:
            raise RuntimeError('No ResourceType specified.')

        if resource_type in _LOCAL_CUSTOM_RESOURCE_WHITELIST:
            # Old method for supporting custom resource code directly within the ProjectResourceHandler.
            # Should only be used for legacy types in the ProjectResourceHandler.
            module_name = resource_type.replace('Custom::', '') + 'ResourceHandler'
            module = sys.modules.get(module_name, None)

            if module is None:
                # First check for handler module in same directory as this module,
                # if not found, check for module in the resource group provided
                # directories.
                module_file_name = module_name + '.py'
                module_file_path = os.path.join(os.path.dirname(__file__), module_file_name)

                if os.path.isfile(module_file_path):
                    module = module_utils.load_module(module_name, os.path.dirname(module_file_path))
                elif os.path.isdir(PLUGIN_DIRECTORY_PATH):
                    plugin_directory_names = [
                        item for item in os.listdir(PLUGIN_DIRECTORY_PATH)
                        if os.path.isdir(os.path.join(PLUGIN_DIRECTORY_PATH, item))
                    ]

                    for plugin_directory_name in plugin_directory_names:
                        module_file_path = os.path.join(PLUGIN_DIRECTORY_PATH, plugin_directory_name,
                                                        module_file_name)
                        if os.path.isfile(module_file_path):
                            module = module_utils.load_module(module_name, os.path.dirname(module_file_path))
                            break

            if module is not None:
                if not hasattr(module, 'handler'):
                    raise RuntimeError('No handler function found for the {} resource type.'.format(resource_type))

                print 'Using {}'.format(module)
                module.handler(event, context)
        else:
            # New way of instantiating custom resources. Load the dictionary of resource types.
            stack = stack_info.StackInfoManager().get_stack_info(event['StackId'])
            type_definition = stack.resource_definitions.get(resource_type, None)

            if type_definition is None:
                raise RuntimeError('No type definition found for the {} resource type.'.format(resource_type))

            if type_definition.handler_function is None:
                raise RuntimeError('No handler function defined for custom resource type {}.'.format(resource_type))

            lambda_client = aws_utils.ClientWrapper(boto3.client("lambda", stack.region))
            lambda_data = {'Handler': type_definition.handler_function}
            lambda_data.update(event)
            response = lambda_client.invoke(
                FunctionName=type_definition.get_custom_resource_lambda_function_name(),
                Payload=json.dumps(lambda_data))

            if response['StatusCode'] == 200:
                response_data = json.loads(response['Payload'].read().decode())
                response_success = response_data.get('Success', None)

                if response_success is not None:
                    if response_success:
                        custom_resource_response.succeed(event, context, response_data['Data'],
                                                         response_data['PhysicalResourceId'])
                    else:
                        custom_resource_response.fail(event, context, response_data['Reason'])
                else:
                    raise RuntimeError("Handler lambda for resource type '%s' returned a malformed response: %s" %
                                       (resource_type, response_data))
            else:
                raise RuntimeError("Handler lambda for resource type '%s' failed to execute, returned HTTP status %d" %
                                   (resource_type, response['StatusCode']))

    except ValidationError as e:
        custom_resource_response.fail(event, context, str(e))
    except Exception as e:
        print 'Unexpected error occurred when processing event {} with context {}. {}'.format(
            event, context, traceback.format_exc())
        custom_resource_response.fail(
            event, context,
            'Unexpected {} error occurred: {}. Additional details can be found in the CloudWatch log group {} '
            'stream {}'.format(type(e).__name__, e.message, context.log_group_name, context.log_stream_name))
def handler(event, context):
    event_type = event['RequestType']
    stack_arn = event['StackId']
    stack_manager = stack_info.StackInfoManager()
    stack = stack_manager.get_stack_info(stack_arn)

    if not stack.is_project_stack:
        raise RuntimeError("Resource Types can only be defined in the project stack.")

    configuration_bucket = stack.project_stack.configuration_bucket
    source_resource_name = event['LogicalResourceId']
    props = properties.load(event, _schema)
    definitions_src = event['ResourceProperties']['Definitions']
    lambda_client = aws_utils.ClientWrapper(
        boto3.client("lambda", aws_utils.get_region_from_stack_arn(stack_arn)))
    lambda_arns = []
    lambda_roles = []

    # Build the file key as "<root directory>/<project stack>/<deployment stack>/<resource_stack>/<resource_name>.json"
    path_components = [x.stack_name for x in stack.ancestry]
    path_components.insert(0, constant.RESOURCE_DEFINITIONS_PATH)
    path_components.append(source_resource_name + ".json")
    resource_file_key = aws_utils.s3_key_join(*path_components)
    path_info = resource_type_info.ResourceTypesPathInfo(resource_file_key)

    # Load information from the JSON file if it exists
    if event_type != 'Create':
        contents = s3_client.get_object(Bucket=configuration_bucket, Key=resource_file_key)['Body'].read()
        existing_info = json.loads(contents)
    else:
        existing_info = None

    # Process the actual event
    if event_type == 'Delete':
        _delete_resources(existing_info['Lambdas'], existing_info['Roles'], lambda_client)
        custom_resource_response.succeed(event, context, {}, existing_info['Id'])
    else:
        existing_roles = set()
        existing_lambdas = set()

        if event_type == 'Update':
            existing_roles = set([arn.split(":")[-1] for arn in existing_info['Roles']])
            existing_lambdas = set([arn.split(":")[-1] for arn in existing_info['Lambdas']])

        definitions = props.Definitions
        lambda_config_src = event['ResourceProperties'].get('LambdaConfiguration', None)

        # Create lambdas for fetching the ARN and handling the resource creation/update/deletion
        lambdas_to_create = []

        for resource_type_name in definitions_src.keys():
            type_info = resource_type_info.ResourceTypeInfo(
                stack_arn, source_resource_name, resource_type_name, definitions_src[resource_type_name])
            function_infos = [type_info.arn_function, type_info.handler_function]

            for function_info, field, tag, description in zip(function_infos, _lambda_fields, _lambda_tags,
                                                              _lambda_descriptions):
                if function_info is None:
                    continue

                function_handler = function_info.get('Function', None)
                if function_handler is None:
                    raise RuntimeError("Definition for '%s' in type '%s' requires a 'Function' field with the handler "
                                       "to execute." % (field, resource_type_name))

                # Create the role for the lambda(s) that will be servicing this resource type
                lambda_function_name = type_info.get_lambda_function_name(tag)
                role_name = role_utils.sanitize_role_name(lambda_function_name)
                role_path = "/%s/%s/" % (type_info.stack_name, type_info.source_resource_name)
                assume_role_policy_document = role_utils.get_assume_role_policy_document_for_service(
                    "lambda.amazonaws.com")

                try:
                    res = iam_client.create_role(
                        RoleName=role_name,
                        AssumeRolePolicyDocument=assume_role_policy_document,
                        Path=role_path)
                    role_arn = res['Role']['Arn']
                except ClientError as e:
                    if e.response["Error"]["Code"] != 'EntityAlreadyExists':
                        raise e
                    existing_roles.discard(role_name)
                    res = iam_client.get_role(RoleName=role_name)
                    role_arn = res['Role']['Arn']

                # Copy the base policy for the role and add any permissions that are specified by the type
                role_policy = copy.deepcopy(_lambda_base_policy)
                role_policy['Statement'].extend(function_info.get('PolicyStatement', []))
                iam_client.put_role_policy(RoleName=role_name, PolicyName=_inline_policy_name,
                                           PolicyDocument=json.dumps(role_policy))

                # Record this role and the type_info so we can create a lambda for it
                lambda_roles.append(role_name)
                lambdas_to_create.append({
                    'role_arn': role_arn,
                    'type_info': type_info,
                    'lambda_function_name': lambda_function_name,
                    'handler': "resource_types." + function_handler,
                    'description': description
                })

        # We create the lambdas in a separate pass because role-propagation to lambda takes a while, and we don't want
        # to have to delay multiple times for each role/lambda pair
        #
        # TODO: Replace delay (and all other instances of role/lambda creation) with exponential backoff
        time.sleep(role_utils.PROPAGATION_DELAY_SECONDS)

        for info in lambdas_to_create:
            # Create the lambda function
            arn = _create_or_update_lambda_function(
                lambda_client=lambda_client,
                timeout=props.LambdaTimeout,
                lambda_config_src=lambda_config_src,
                info=info,
                existing_lambdas=existing_lambdas)
            lambda_arns.append(arn)

        # For Update operations, delete any lambdas and roles that previously existed and now no longer do.
        _delete_resources(existing_lambdas, existing_roles, lambda_client)

        physical_resource_id = "-".join(path_components[1:])

        config_info = {
            'StackId': stack_arn,
            'Id': physical_resource_id,
            'Lambdas': lambda_arns,
            'Roles': lambda_roles,
            'Definitions': definitions_src
        }
        data = {
            'ConfigBucket': configuration_bucket,
            'ConfigKey': resource_file_key
        }

        # Copy the resource definitions to the configuration bucket.
        s3_client.put_object(Bucket=configuration_bucket, Key=resource_file_key, Body=json.dumps(config_info))

        custom_resource_response.succeed(event, context, data, physical_resource_id)
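# Note: this earlier revision persists flat ARN lists rather than the
# name-keyed dict used by the newer handler above, e.g. (illustrative):
#
# {
#     "StackId": "arn:aws:cloudformation:us-east-1:123456789012:stack/Example/1234-5678",
#     "Id": "Example-CoreResourceTypes.json",
#     "Lambdas": ["arn:aws:lambda:us-east-1:123456789012:function:Example-CRH"],
#     "Roles": ["Example-CRH"],
#     "Definitions": {"Custom::Example": {...}}
# }
#
# The newer handler's backwards-compatibility branch (x.split(":")[6]) exists
# to convert these persisted ARN lists back into function names.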