def tag_table(client, create_response, event): stack_manager = stack_info.StackInfoManager() stack = stack_manager.get_stack_info(event['StackId']) if stack.stack_type == stack_info.StackInfo.STACK_TYPE_RESOURCE_GROUP: deployment_name = stack.deployment.deployment_name resource_group_name = stack.resource_group_name tags = [{ 'Key': 'Deployment', 'Value': deployment_name }, { 'Key': 'Gem', 'Value': resource_group_name }] if not tags: print "Table is not part of a resource group stack, aborting tagging operation" return table_arn = create_response.get("TableDescription", {}).get("TableArn", "") if not table_arn: print "Table name was not in the response object, cannot tag resource" return table_name = create_response.get("TableDescription", {}).get("TableName", "") wait_for_idle_table(client, table_name) client.tag_resource(ResourceArn=table_arn, Tags=tags)
def handler(event, context):
    """Resolve (or create) the shared Cognito identity pool role and return its ARN."""
    # Nothing to clean up on delete; acknowledge with an empty ARN.
    if event['RequestType'] == 'Delete':
        return custom_resource_response.success_response({'Arn': ''}, '')

    resource_props = properties.load(event, {
        'ConfigurationBucket': properties.String(),
        'ConfigurationKey': properties.String(),
        'LogicalPoolName': properties.String(),
        'RoleType': properties.String(default=""),
        'Path': properties.String(),
        'AssumeRolePolicyDocument': properties.Dictionary()
    })

    manager = stack_info.StackInfoManager()
    owning_stack = manager.get_stack_info(event['StackId'])
    cognito_client = identity_pool.get_identity_client()

    pool_record = aws_utils.get_cognito_pool_from_file(
        resource_props.ConfigurationBucket,
        resource_props.ConfigurationKey,
        resource_props.LogicalPoolName,
        owning_stack)

    if pool_record:
        # A shared pool already exists; read the requested role type from it.
        roles_response = cognito_client.get_identity_pool_roles(
            IdentityPoolId=pool_record['PhysicalResourceId'])
        arn = roles_response.get("Roles", {}).get(resource_props.RoleType, "")
    else:
        # No shared pool: create a dedicated role for this resource.
        role_name = "{}{}Role".format(owning_stack.stack_name, event['LogicalResourceId'])
        arn = create_role(role_name, resource_props)

    return custom_resource_response.success_response({'Arn': arn}, arn)
def _generate_table_tags(stack_id, deployment_only=False):
    """Generate tags for project, deployment and gem.

    :param stack_id: ARN of the stack that owns the table.
    :param deployment_only: when True and the stack is not a resource group
        stack, return None (legacy behavior: caller skips tagging).
    :return: list of {'Key', 'Value'} tag dicts, or None (see above).
    """
    stack_manager = stack_info.StackInfoManager()
    stack = stack_manager.get_stack_info(stack_id)

    # Deployment/gem tags are only available on resource group stacks.
    tags = None
    if stack.stack_type == stack_info.StackInfo.STACK_TYPE_RESOURCE_GROUP:
        deployment_name = stack.deployment.deployment_name
        resource_group_name = stack.resource_group_name
        tags = [
            {'Key': constant.DEPLOYMENT_TAG, 'Value': deployment_name},
            {'Key': constant.DEPLOYMENT_GEM_TAG, 'Value': resource_group_name}
        ]

    # Maintain legacy behavior of only generating deployment tags
    if deployment_only and tags is None:
        return tags

    # BUGFIX: previously 'tags + [...]' raised
    # "TypeError: unsupported operand type(s) for +: 'NoneType' and 'list'"
    # when the stack was not a resource group and deployment_only was False.
    tags = (tags or []) + [
        {"Key": constant.PROJECT_NAME_TAG, "Value": stack.project_stack.project_name},
        {"Key": constant.STACK_ID_TAG, "Value": stack_id}
    ]
    return tags
def handler(event, context): request_type = event['RequestType'] stack_arn = event['StackId'] physical_resource_id = aws_utils.get_stack_name_from_stack_arn(stack_arn) + '-' + event['LogicalResourceId'] data = {} stack_manager = stack_info.StackInfoManager() stack = stack_manager.get_stack_info(stack_arn) if request_type == 'Delete': _clear_interface_refs(stack) return custom_resource_response.success_response(data, physical_resource_id) if not stack.is_deployment_stack: raise RuntimeError("InterfaceDependecyResolver can only be stood up on a deployment stack") resource_groups = stack.resource_groups configuration_bucket_name = stack.project.configuration_bucket if not configuration_bucket_name: raise RuntimeError('Not adding service settings because there is no project configuration bucket.') service_directory = ServiceDirectory(configuration_bucket_name) interface_deps = event["ResourceProperties"].get("InterfaceDependencies", {}) _clear_interface_refs(stack) for gem, interface_list in interface_deps.iteritems(): for interface in interface_list: print "getting url for interface {} from gem {} to use in {}:{}".format(interface["id"], interface["gem"], gem, interface["function"]) interfaces = service_directory.get_interface_services(stack.deployment_name, interface["id"]) if len(interfaces) > 0: _add_url_to_lambda(interfaces[0], gem, interface["function"], stack) else: print "Failed to lookup interface {}".format(len(interfaces)) return custom_resource_response.success_response(data, physical_resource_id)
def arn_handler(event, context):
    """Return {'Arn': ...} for the external resource reference named in the event."""
    owning_stack = stack_info.StackInfoManager().get_stack_info(event['StackId'])
    id_data = aws_utils.get_data_from_custom_physical_resource_id(event['ResourceName'])
    reference_name = id_data.get('ReferenceName')
    return {'Arn': _get_reference_arn(owning_stack, reference_name)}
def __set_stack_attributes(stack_arn, use_cache=True):
    """Resolve and cache the project/deployment/resource-group names for a stack.

    Results are memoized in module-level globals; pass use_cache=False (or
    call before all three are populated) to force a fresh lookup.

    :param stack_arn: ARN of the stack to inspect.
    :param use_cache: when True, reuse previously resolved names if all are set.
    :return: (project_name, deployment_name, resource_group_name) tuple.
    """
    global project_name, deployment_name, resource_group_name
    # Only hit the stack info APIs when the cache is disabled or incomplete.
    if not (use_cache and project_name and deployment_name and resource_group_name):
        stack_manager = stack_info.StackInfoManager()
        stack = stack_manager.get_stack_info(stack_arn)
        deployment_name = stack.deployment.deployment_name
        project_name = stack.deployment.parent_stack.project_name
        # Resource group name is a fixed constant for this gem.
        resource_group_name = c.RES_GEM_NAME
    return project_name, deployment_name, resource_group_name
def handler(event, context): '''Entry point for the Custom::AccessControl resource handler.''' # Validate RequestType request_type = event['RequestType'] if request_type not in ['Create', 'Update', 'Delete']: raise RuntimeError('Unexpected request type: {}'.format(request_type)) # Get stack_info for the AccessControl resource's stack. stack_arn = event['StackId'] stack_manager = stack_info.StackInfoManager() stack = stack_manager.get_stack_info(stack_arn) # Physical ID is always the same. physical_resource_id = aws_utils.get_stack_name_from_stack_arn( stack_arn) + '-' + event['LogicalResourceId'] # The AccessControl resource has no output values. data = {} # Accumlate problems encountered so we can give a full report. problems = ProblemList() # Apply access control as determined by the Cloud Canvas stack type. if stack.stack_type == stack.STACK_TYPE_RESOURCE_GROUP: were_changes = _apply_resource_group_access_control( request_type, stack, problems) elif stack.stack_type == stack.STACK_TYPE_DEPLOYMENT_ACCESS: were_changes = _apply_deployment_access_control( request_type, stack, event['ResourceProperties']['Gem'], problems) elif stack.stack_type == stack.STACK_TYPE_PROJECT: were_changes = _apply_project_access_control(request_type, stack, problems) else: raise RuntimeError( 'The Custom::AccessControl resource can only be used in resource group, deployment access, or project stack templates.' ) # If there were any problems, provide an error message with all the details. if problems: raise RuntimeError( 'Found invalid AccessControl metadata:\n {}'.format(problems)) # If there were changes, wait a few seconds for them to propagate if were_changes: print 'Delaying {} seconds for change propagation'.format( PROPAGATION_DELAY_SECONDS) time.sleep(PROPAGATION_DELAY_SECONDS) # Successful execution. return custom_resource_response.success_response(data, physical_resource_id)
def handler(event, context):
    """Entry point for the Custom::InterfaceDependencyResolver resource.

    Builds, per gem lambda function, the list of service interface URLs and
    the ARNs the function is permitted to call, then records that mapping via
    _put_gem_function_info. Delete requests only clear previously stored refs.
    """
    request_type = event['RequestType']
    stack_arn = event['StackId']
    # Physical ID is deterministic: <stack name>-<logical id>.
    physical_resource_id = aws_utils.get_stack_name_from_stack_arn(stack_arn) + '-' + event['LogicalResourceId']
    data = {}
    stack_manager = stack_info.StackInfoManager()
    stack = stack_manager.get_stack_info(stack_arn)

    if request_type == 'Delete':
        _clear_interface_refs(stack)
        return custom_resource_response.success_response(data, physical_resource_id)

    if not stack.is_deployment_stack:
        raise RuntimeError("InterfaceDependencyResolver can only be stood up on a deployment stack")

    configuration_bucket_name = stack.project.configuration_bucket
    if not configuration_bucket_name:
        raise RuntimeError('Not adding service settings because there is no project configuration bucket.')

    service_directory = ServiceDirectory(configuration_bucket_name)
    interface_deps = event["ResourceProperties"].get("InterfaceDependencies", {})

    # start by clearing the refs this function and role have to make sure no old permissions/interfaces linger
    _clear_interface_refs(stack)

    for gem, interface_list in iteritems(interface_deps):
        # Accumulates {function name -> {"interfaces": [...]}} for this gem.
        gem_function_info = {}
        for interface in interface_list:
            if not interface['function'] in gem_function_info:
                gem_function_info[interface['function']] = {"interfaces": []}
            # Interfaces default to being provided by the framework itself.
            interface_function_info = {"id": interface['id'],
                                       "gem": interface.get("gem", "CloudGemFramework")}
            if interface_function_info["gem"] == "CloudGemFramework":
                # Project-level interface: resolve against the project service.
                interface_function_info["url"] = _get_project_url(
                    service_directory, interface['id'])
                interface_function_info["permittedArns"] = _get_permitted_arns(
                    _get_resource_group(gem, stack),
                    _get_project_interface_description(
                        service_directory, interface["id"])
                )
            else:
                # Deployment-level interface provided by another gem.
                interface_function_info["url"] = _get_url(
                    service_directory, stack, interface['id'])
                interface_function_info["permittedArns"] = _get_permitted_arns(
                    _get_resource_group(gem, stack),
                    _get_interface_description(service_directory, stack, interface["id"])
                )
            gem_function_info[interface['function']]["interfaces"].append(
                interface_function_info)
        # Persist the accumulated mapping for this gem's functions.
        _put_gem_function_info(gem, gem_function_info, stack)

    return custom_resource_response.success_response(data, physical_resource_id)
def get_deployment_access_resource_info(request, deployment_name, resource_name):
    """Return {'PhysicalId': ...} for a resource in a deployment's access stack.

    Raises NotFoundError when the deployment or the resource cannot be found.
    """
    arn = CloudCanvas.get_setting('ProjectStackArn')
    manager = stack_info.StackInfoManager()
    project_info = stack_info.ProjectInfo(manager, arn)

    for current in project_info.deployments:
        if current.deployment_name != deployment_name:
            continue
        resource = current.deployment_access.resources.get_by_logical_id(resource_name)
        if resource.physical_id:
            return {'PhysicalId': resource.physical_id}
        raise errors.NotFoundError(
            'Resource {} not found.'.format(resource_name))

    raise errors.NotFoundError(
        'Deployment {} not found'.format(deployment_name))
def handler(event, context): """Entry point for the Custom::CognitoIdPoolSharedRole resource handler.""" stack_id = event['StackId'] if event['RequestType'] == 'Delete': return custom_resource_response.success_response({'Arn': ''}, '') props = properties.load( event, { 'ConfigurationBucket': properties.String(), 'ConfigurationKey': properties.String(), 'LogicalPoolName': properties.String(), 'RoleType': properties.String(default=""), 'Path': properties.String(), 'AssumeRolePolicyDocument': properties.Dictionary() }) stack_manager = stack_info.StackInfoManager() stack = stack_manager.get_stack_info(stack_id) identity_client = identity_pool.get_identity_client() cognito_pool_info = aws_utils.get_cognito_pool_from_file( props.ConfigurationBucket, props.ConfigurationKey, props.LogicalPoolName, stack) arn = '' if cognito_pool_info: response = identity_client.get_identity_pool_roles( IdentityPoolId=cognito_pool_info['PhysicalResourceId']) arn = response.get("Roles", {}).get(props.RoleType, "") else: # Set up resource tags for all resources created tags = [{ "Key": constant.PROJECT_NAME_TAG, "Value": stack.project_stack.project_name }, { "Key": constant.STACK_ID_TAG, "Value": stack_id }] name = "{}{}Role".format(stack.stack_name, event['LogicalResourceId']) arn = _create_role(name, props, tags) return custom_resource_response.success_response({'Arn': arn}, arn)
def handler(event, context):
    """Handle create/update/delete for an external resource reference.

    Create/Update resolve and return the referenced physical id; Delete
    returns no data. The physical resource id embeds the reference name.
    """
    stack_arn = event['StackId']
    owning_stack = stack_info.StackInfoManager().get_stack_info(stack_arn)
    props = properties.load(event, {'ReferenceName': properties.String()})

    request_type = event['RequestType']
    if request_type not in ['Create', 'Update', 'Delete']:
        raise RuntimeError('Unexpected request type: {}'.format(request_type))

    data = {}
    if request_type != 'Delete':
        data = {'PhysicalId': _get_reference_physical_id(owning_stack, props.ReferenceName)}

    physical_resource_id = aws_utils.construct_custom_physical_resource_id_with_data(
        stack_arn,
        event['LogicalResourceId'],
        {'ReferenceName': props.ReferenceName})
    return custom_resource_response.success_response(data, physical_resource_id)
def handler(event, context):
    """Handle Custom::ExternalResourceInstance events (project stack only).

    Create/Update write the reference metadata; Delete removes it.
    """
    stack_arn = event['StackId']
    owning_stack = stack_info.StackInfoManager().get_stack_info(stack_arn)

    if not owning_stack.is_project_stack:
        raise RuntimeError(
            "Custom::ExternalResourceInstance can only be defined in the project stack."
        )

    request_type = event['RequestType']
    if request_type not in ['Create', 'Update', 'Delete']:
        raise RuntimeError('Unexpected request type: {}'.format(request_type))

    # request_type is validated above, so anything that is not Delete
    # must be Create or Update.
    if request_type == 'Delete':
        _delete_reference_metadata(event['LogicalResourceId'], owning_stack)
    else:
        _create_reference_metadata(event, owning_stack)

    physical_resource_id = aws_utils.get_stack_name_from_stack_arn(
        stack_arn) + '-' + event['LogicalResourceId']
    return custom_resource_response.success_response({}, physical_resource_id)
def handler(event, context):
    """Create, update, or delete the IoT policy backing this custom resource."""
    # Validate RequestType before doing any work.
    request_type = event['RequestType']
    if request_type not in ['Create', 'Update', 'Delete']:
        raise RuntimeError('Unexpected request type: {}'.format(request_type))

    stack_arn = event['StackId']
    owning_stack = stack_info.StackInfoManager().get_stack_info(stack_arn)

    # Physical ID is always the same: <stack name>-<logical id>.
    physical_resource_id = aws_utils.get_stack_name_from_stack_arn(stack_arn) + '-' + event['LogicalResourceId']

    # Dispatch on the (already validated) request type.
    actions = {
        'Create': lambda: _create_iot_policy(physical_resource_id, owning_stack),
        'Update': lambda: _update_iot_policy(physical_resource_id, owning_stack),
        'Delete': lambda: _delete_iot_policy(physical_resource_id)
    }
    actions[request_type]()

    return custom_resource_response.success_response({}, physical_resource_id)
def handler(event, context):
    ''' Invoked when AWS Lambda service executes.

    Injects a CloudCanvasIdentityPool environment variable into the gem's
    CustomAuthFlowLambda, pointing it at the deployment access stack's
    PlayerAccessIdentityPool. Skips (with a log message) when either
    resource cannot be found.
    '''
    stack_manager = stack_info.StackInfoManager()
    access_stack_arn = event['StackId']
    access_stack = stack_manager.get_stack_info(access_stack_arn)
    if not access_stack.resources:
        print(
            'Skipping setting CloudCanvasIdentityPool: access stack not found.'
        )
    else:
        # The identity pool created by the deployment access stack.
        pool = access_stack.resources.get_by_logical_id(
            'PlayerAccessIdentityPool',
            'Custom::CognitoIdentityPool',
            optional=True)
        # The gem's custom auth flow lambda function.
        custom_auth_flow_lambda = __get_resource(
            access_stack, event['ResourceProperties'].get('GemName', ''),
            'CustomAuthFlowLambda', 'AWS::Lambda::Function')
        if not pool:
            print(
                'Skipping setting CloudCanvasIdentityPool: PlayerAccessIdentityPool not found.'
            )
        elif not custom_auth_flow_lambda:
            print(
                'Skipping setting CloudCanvasIdentityPool: CustomAuthFlowLambda not found.'
            )
        else:
            print('Adding setting CloudCanvasIdentityPool = {}'.format(
                pool.physical_id))
            cloud_canvas_identity_pool_mapping = {
                'CloudCanvasIdentityPool': pool.physical_id
            }
            __add_environment_variables(custom_auth_flow_lambda.physical_id,
                                        cloud_canvas_identity_pool_mapping)
    # Physical id '*' — this resource has no meaningful physical identity.
    return custom_resource_response.success_response({}, '*')
def list_deployment_resources(request, deployment_name):
    """List all resources in a deployment, keyed by '<resource group>.<logical id>'.

    Custom::ServiceApi resources get an expanded mapping; everything else
    reports its physical id and type. Raises NotFoundError for an unknown
    deployment.
    """
    arn = CloudCanvas.get_setting('ProjectStackArn')
    manager = stack_info.StackInfoManager()
    project_info = stack_info.ProjectInfo(manager, arn)

    for deployment in project_info.deployments:
        if deployment.deployment_name != deployment_name:
            continue
        resources = {}
        for group in deployment.resource_groups:
            for res in group.resources:
                key = '.'.join([group.resource_group_name, res.logical_id])
                if res.type == 'Custom::ServiceApi':
                    resources[key] = __get_service_api_mapping(group, res)
                else:
                    resources[key] = {
                        'PhysicalResourceId': res.physical_id,
                        'ResourceType': res.type
                    }
        return {'Resources': resources}

    raise errors.NotFoundError(
        'Deployment {} not found'.format(deployment_name))
def get_stack_resources(arn):
    """Return the resource collection for the stack identified by *arn*."""
    # One common database name so that all deployments show in the same
    # database under different tables.
    manager = stack_info.StackInfoManager()
    return manager.get_stack_info(arn).resources
def __init__(self, context):
    """Store the framework context and create a StackInfoManager.

    :param context: framework context object shared by callers.
    """
    self.__context = context
    # Single manager instance so subsequent stack lookups share state.
    self.__stack_info_manager = stack_info.StackInfoManager()
def handler(event, context):
    """Top-level dispatcher for custom resource events.

    Whitelisted legacy types are handled by a local *ResourceHandler module
    (looked up next to this file or in the plugin directory). All other
    types are dispatched to a per-type handler lambda, whose response is
    relayed back to CloudFormation via custom_resource_response.
    """
    try:
        print 'Dispatching event {} with context {}.'.format(
            json.dumps(event, cls=json_utils.SafeEncoder), context)

        resource_type = event.get('ResourceType', None)
        if resource_type is None:
            raise RuntimeError('No ResourceType specified.')

        if resource_type in _LOCAL_CUSTOM_RESOURCE_WHITELIST:
            # Old method for supporting custom resource code directly within the ProjectResourceHandler.
            # Should only be used for legacy types in the ProjectResourceHandler.
            module_name = resource_type.replace('Custom::', '') + 'ResourceHandler'
            # Reuse an already-imported module when available.
            module = sys.modules.get(module_name, None)
            if module is None:
                # First check for handler module in same directory as this module,
                # if not found, check for module in the resource group provided
                # directories.
                module_file_name = module_name + '.py'
                module_file_path = os.path.join(os.path.dirname(__file__),
                                                module_file_name)
                if os.path.isfile(module_file_path):
                    module = module_utils.load_module(
                        module_name, os.path.dirname(module_file_path))
                elif os.path.isdir(PLUGIN_DIRECTORY_PATH):
                    plugin_directory_names = [
                        item for item in os.listdir(PLUGIN_DIRECTORY_PATH)
                        if os.path.isdir(
                            os.path.join(PLUGIN_DIRECTORY_PATH, item))
                    ]
                    for plugin_directory_name in plugin_directory_names:
                        module_file_path = os.path.join(
                            PLUGIN_DIRECTORY_PATH, plugin_directory_name,
                            module_file_name)
                        if os.path.isfile(module_file_path):
                            module = module_utils.load_module(
                                module_name,
                                os.path.dirname(module_file_path))
                            break
            if module is not None:
                if not hasattr(module, 'handler'):
                    raise RuntimeError(
                        'No handler function found for the {} resource type.'.
                        format(resource_type))
                print 'Using {}'.format(module)
                module.handler(event, context)
        else:
            # New way of instantiating custom resources. Load the dictionary of resource types.
            stack = stack_info.StackInfoManager().get_stack_info(
                event['StackId'])
            type_definition = stack.resource_definitions.get(
                resource_type, None)
            if type_definition is None:
                raise RuntimeError(
                    'No type definition found for the {} resource type.'.
                    format(resource_type))
            if type_definition.handler_function is None:
                raise RuntimeError(
                    'No handler function defined for custom resource type {}.'.
                    format(resource_type))

            # Invoke the per-type handler lambda synchronously, forwarding the
            # whole CloudFormation event plus the configured handler name.
            lambda_client = aws_utils.ClientWrapper(
                boto3.client("lambda", stack.region))
            lambda_data = {'Handler': type_definition.handler_function}
            lambda_data.update(event)
            response = lambda_client.invoke(
                FunctionName=type_definition.
                get_custom_resource_lambda_function_name(),
                Payload=json.dumps(lambda_data))
            if response['StatusCode'] == 200:
                response_data = json.loads(response['Payload'].read().decode())
                response_success = response_data.get('Success', None)
                if response_success is not None:
                    if response_success:
                        custom_resource_response.succeed(
                            event, context, response_data['Data'],
                            response_data['PhysicalResourceId'])
                    else:
                        custom_resource_response.fail(event, context,
                                                      response_data['Reason'])
                else:
                    raise RuntimeError(
                        "Handler lambda for resource type '%s' returned a malformed response: %s"
                        % (resource_type, response_data))
            else:
                raise RuntimeError(
                    "Handler lambda for resource type '%s' failed to execute, returned HTTP status %d"
                    % (resource_type, response['StatusCode']))
    except ValidationError as e:
        custom_resource_response.fail(event, context, str(e))
    except Exception as e:
        # Any failure must still be reported back to CloudFormation or the
        # stack operation hangs until timeout.
        print 'Unexpected error occured when processing event {} with context {}. {}'.format(
            event, context, traceback.format_exc())
        # NOTE(review): e.message is deprecated and not defined for all
        # exception types — confirm this path is exercised under Python 2 only.
        custom_resource_response.fail(
            event, context,
            'Unexpected {} error occured: {}. Additional details can be found in the CloudWatch log group {} stream {}'
            .format(
                type(e).__name__, e.message, context.log_group_name,
                context.log_stream_name))
def get_table_name(event):
    """Build the physical table name: <owning stack name>-<logical resource id>."""
    owning = stack_info.StackInfoManager().get_stack_info(event['StackId'])
    return '-'.join([owning.stack_name, event['LogicalResourceId']])
def handler(event, context): """Entry point for the Custom::ServiceApi resource handler.""" stack_id = event['StackId'] request_type = event['RequestType'] logical_resource_id = event['LogicalResourceId'] logical_role_name = logical_resource_id stack_manager = stack_info.StackInfoManager() stack = stack_manager.get_stack_info(stack_id) rest_api_resource_name = stack.stack_name + '-' + logical_resource_id id_data = aws_utils.get_data_from_custom_physical_resource_id( event.get('PhysicalResourceId', None)) response_data = {} project_tags = { constant.PROJECT_NAME_TAG: stack.project_stack.project_name, constant.STACK_ID_TAG: stack_id } if request_type == 'Create': props = properties.load(event, PROPERTY_SCHEMA) role_arn = role_utils.create_access_control_role( stack_manager, id_data, stack.stack_arn, logical_role_name, API_GATEWAY_SERVICE_NAME) swagger_content = get_configured_swagger_content( stack, props, role_arn, rest_api_resource_name) rest_api_id = create_api_gateway(rest_api_resource_name, props, swagger_content) service_url = get_service_url(rest_api_id, stack.region) register_service_interfaces(stack, service_url, swagger_content) update_api_gateway_tags(rest_api_id, event, project_tags) response_data['Url'] = service_url id_data['RestApiId'] = rest_api_id elif request_type == 'Update': rest_api_id = id_data.get('RestApiId', None) if not rest_api_id: raise RuntimeError( 'No RestApiId found in id_data: {}'.format(id_data)) props = properties.load(event, PROPERTY_SCHEMA) role_arn = role_utils.get_access_control_role_arn( id_data, logical_role_name) swagger_content = get_configured_swagger_content( stack, props, role_arn, rest_api_resource_name) update_api_gateway(rest_api_id, props, swagger_content) service_url = get_service_url(rest_api_id, stack.region) register_service_interfaces(stack, service_url, swagger_content) update_api_gateway_tags(rest_api_id, event, project_tags) response_data['Url'] = service_url elif request_type == 'Delete': if not id_data: # 
The will be no data in the id if Cloud Formation cancels a resource creation # (due to a failure in another resource) before it processes the resource create # response. Apparently Cloud Formation has an internal temporary id for the # resource and uses it for the delete request. # # Unfortunately there isn't a good way to deal with this case. We don't have the # id data, so we can't clean up the things it identifies. At best we can allow the # stack cleanup to continue, leaving the rest API behind and role behind. print('WARNING: No id_data provided on delete.') else: rest_api_id = id_data.get('RestApiId', None) if not rest_api_id: raise RuntimeError( 'No RestApiId found in id_data: {}'.format(id_data)) delete_api_gateway(rest_api_id) service_url = get_service_url(rest_api_id, stack.region) unregister_service_interfaces(stack, service_url) del id_data['RestApiId'] role_utils.delete_access_control_role(id_data, logical_role_name) else: raise RuntimeError('Invalid RequestType: {}'.format(request_type)) physical_resource_id = aws_utils.construct_custom_physical_resource_id_with_data( event['StackId'], logical_resource_id, id_data) return custom_resource_response.success_response(response_data, physical_resource_id)
def handler(event, context): """Entry point for the Custom::CognitoIdentityPool resource handler.""" stack_id = event['StackId'] props = properties.load( event, { 'ConfigurationBucket': properties.String(), 'ConfigurationKey': properties.String( ), # this is only here to force the resource handler to execute on each update to the deployment 'IdentityPoolName': properties.String(), 'UseAuthSettingsObject': properties.String(), 'AllowUnauthenticatedIdentities': properties.String(), 'DeveloperProviderName': properties.String(default=''), 'ShareMode': properties.String( default='' ), # SHARED when the pool from the file should be used 'Roles': properties.Object(default={}, schema={'*': properties.String()}), 'RoleMappings': properties.Object( default={}, schema={ 'Cognito': properties.Object( default={}, schema={ 'Type': properties.String(''), 'AmbiguousRoleResolution': properties.String('') }) }) }) # give the identity pool a unique name per stack stack_manager = stack_info.StackInfoManager() stack = stack_manager.get_stack_info(stack_id) # Set up resource tags for all resources created tags = { constant.PROJECT_NAME_TAG: stack.project_stack.project_name, constant.STACK_ID_TAG: stack_id } shared_pool = aws_utils.get_cognito_pool_from_file( props.ConfigurationBucket, props.ConfigurationKey, event['LogicalResourceId'], stack) identity_pool_name = stack.stack_name + props.IdentityPoolName identity_pool_name = identity_pool_name.replace('-', ' ') identity_client = identity_pool.get_identity_client() identity_pool_id = custom_resource_utils.get_embedded_physical_id( event.get('PhysicalResourceId')) found_pool = identity_pool.get_identity_pool(identity_pool_id) request_type = event['RequestType'] if shared_pool and props.ShareMode == 'SHARED': data = { 'IdentityPoolName': identity_pool_name, 'IdentityPoolId': shared_pool['PhysicalResourceId'] } return custom_resource_response.success_response( data, shared_pool['PhysicalResourceId']) if request_type == 'Delete': if 
found_pool is not None: identity_client.delete_identity_pool( IdentityPoolId=identity_pool_id) data = {} else: use_auth_settings_object = props.UseAuthSettingsObject.lower( ) == 'true' supported_login_providers = {} if use_auth_settings_object: # download the auth settings from s3 player_access_key = 'player-access/' + constant.AUTH_SETTINGS_FILENAME auth_doc = json.loads( _load_doc_from_s3(props.ConfigurationBucket, player_access_key)) # if the doc has entries add them to the supported_login_providers dictionary if len(auth_doc) > 0: for key, value in six.iteritems(auth_doc): supported_login_providers[ value['provider_uri']] = value['app_id'] cognito_identity_providers = identity_pool.get_cognito_identity_providers( stack_manager, stack_id, event['LogicalResourceId']) print('Identity Providers: {}'.format(cognito_identity_providers)) allow_anonymous = props.AllowUnauthenticatedIdentities.lower( ) == 'true' # if the pool exists just update it, otherwise create a new one args = { 'IdentityPoolName': identity_pool_name, 'AllowUnauthenticatedIdentities': allow_anonymous, 'SupportedLoginProviders': supported_login_providers, 'CognitoIdentityProviders': cognito_identity_providers, 'IdentityPoolTags': tags } if props.DeveloperProviderName: args['DeveloperProviderName'] = props.DeveloperProviderName if found_pool is not None: identity_client.update_identity_pool( IdentityPoolId=identity_pool_id, **args) else: response = identity_client.create_identity_pool(**args) identity_pool_id = response['IdentityPoolId'] # update the roles for the pool role_mappings = {} if props.RoleMappings.Cognito.Type and len( cognito_identity_providers) > 0: print('Adding role mappings for Cognito {}'.format( props.RoleMappings.Cognito.__dict__)) role_mappings['{}:{}'.format( cognito_identity_providers[0]['ProviderName'], cognito_identity_providers[0] ['ClientId'])] = props.RoleMappings.Cognito.__dict__ print("Role Mappings: {}".format(role_mappings)) identity_client.set_identity_pool_roles( 
IdentityPoolId=identity_pool_id, Roles=props.Roles.__dict__, RoleMappings=role_mappings) data = { 'IdentityPoolName': identity_pool_name, 'IdentityPoolId': identity_pool_id } physical_resource_id = identity_pool_id return custom_resource_response.success_response(data, physical_resource_id)
def handler(event, context):
    """Entry point for the custom resource types registrar (project stack only).

    For each resource type definition supplied by the event, creates or
    updates an IAM role and a lambda function (for the ARN lookup and the
    create/update/delete handler), then persists the combined definitions
    and lambda mapping as a JSON document in the project configuration
    bucket. Delete requests mark all known definitions as deleted.
    """
    event_type = event['RequestType']
    stack_arn = event['StackId']
    stack_manager = stack_info.StackInfoManager()
    stack = stack_manager.get_stack_info(stack_arn)

    if not stack.is_project_stack:
        raise RuntimeError("Resource Types can only be defined in the project stack.")

    configuration_bucket = stack.project_stack.configuration_bucket
    source_resource_name = event['LogicalResourceId']
    props = properties.load(event, _schema)
    definitions_src = event['ResourceProperties']['Definitions']
    lambda_client = _create_lambda_client(stack_arn)
    created_or_updated_lambdas = {}
    lambda_roles = []

    # Set up tags for all resources created, must be project stack
    # Note: IAM takes an array of [ {'Key':, 'Value':}] format, Lambda take a dict of {string: string} pairs
    iam_tags = [
        {'Key': constant.PROJECT_NAME_TAG, 'Value': stack.stack_name},
        {'Key': constant.STACK_ID_TAG, 'Value': stack_arn}
    ]
    lambda_tags = {constant.PROJECT_NAME_TAG: stack.stack_name, constant.STACK_ID_TAG: stack_arn}

    # Build the file key as "<root directory>/<project stack>/<deployment stack>/<resource_stack>/<resource_name>.json"
    path_components = [x.stack_name for x in stack.ancestry]
    path_components.insert(0, constant.RESOURCE_DEFINITIONS_PATH)
    path_components.append(source_resource_name + ".json")
    resource_file_key = aws_utils.s3_key_join(*path_components)
    path_info = resource_type_info.ResourceTypesPathInfo(resource_file_key)

    # Load information from the JSON file if it exists.
    # (It will exist on a Create event if the resource was previously deleted and recreated.)
    try:
        contents = s3_client.get_object(Bucket=configuration_bucket,
                                        Key=resource_file_key)['Body'].read()
        existing_info = json.loads(contents)
        definitions_dictionary = existing_info['Definitions']
        existing_lambdas = existing_info['Lambdas']
        if isinstance(existing_lambdas, dict):
            lambda_dictionary = existing_lambdas
        else:
            # Backwards compatibility
            lambda_dictionary = {}
            existing_lambdas = set([x.split(":")[6] for x in existing_lambdas])  # Convert arn to function name
    except ClientError as e:
        error_code = e.response['Error']['Code']
        if error_code == 'NoSuchKey':
            # First run for this resource: start with empty records.
            definitions_dictionary = {}
            existing_lambdas = {}
            lambda_dictionary = {}
        else:
            raise e

    # Process the actual event
    if event_type == 'Delete':
        deleted_entries = set(definitions_dictionary.keys())
    else:
        definitions = props.Definitions
        lambda_config_src = event['ResourceProperties'].get('LambdaConfiguration', None)

        # Create lambdas for fetching the ARN and handling the resource creation/update/deletion
        lambdas_to_create = []

        for resource_type_name in definitions_src.keys():
            type_info = resource_type_info.ResourceTypeInfo(
                stack_arn, source_resource_name, resource_type_name,
                lambda_dictionary, False, definitions_src[resource_type_name])
            function_infos = [type_info.arn_function, type_info.handler_function]

            for function_info, field, tag, description in zip(function_infos, _lambda_fields,
                                                              _lambda_tags, _lambda_descriptions):
                if function_info is None:
                    continue

                function_handler = function_info.get('Function', None)
                if function_handler is None:
                    raise RuntimeError("Definition for '%s' in type '%s' requires a 'Function' field with the handler "
                                       "to execute." % (field, resource_type_name))

                # Create the role for the lambda(s) that will be servicing this resource type
                lambda_function_name = type_info.get_lambda_function_name(tag)
                role_name = role_utils.sanitize_role_name(lambda_function_name)
                role_path = "/%s/%s/" % (type_info.stack_name, type_info.source_resource_name)
                assume_role_policy_document = role_utils.get_assume_role_policy_document_for_service(
                    "lambda.amazonaws.com")

                try:
                    res = iam_client.create_role(
                        RoleName=role_name,
                        AssumeRolePolicyDocument=assume_role_policy_document,
                        Path=role_path,
                        Tags=iam_tags)
                    role_arn = res['Role']['Arn']
                except ClientError as e:
                    # Role already existing is expected on update; reuse it.
                    if e.response["Error"]["Code"] != 'EntityAlreadyExists':
                        raise e
                    res = iam_client.get_role(RoleName=role_name)
                    role_arn = res['Role']['Arn']

                # Copy the base policy for the role and add any permissions that are specified by the type
                role_policy = copy.deepcopy(_create_base_lambda_policy())
                role_policy['Statement'].extend(function_info.get('PolicyStatement', []))
                iam_client.put_role_policy(RoleName=role_name,
                                           PolicyName=_inline_policy_name,
                                           PolicyDocument=json.dumps(role_policy))

                # Record this role and the type_info so we can create a lambda for it
                lambda_roles.append(role_name)
                lambda_info = {
                    'role_arn': role_arn,
                    'type_info': type_info,
                    'lambda_function_name': lambda_function_name,
                    'handler': "resource_types." + function_handler,
                    'description': description,
                    'tags': lambda_tags
                }

                # Merge in any lambda specific configs overrides
                if 'HandlerFunctionConfiguration' in function_info:
                    lambda_override = function_info['HandlerFunctionConfiguration']
                    if lambda_override:
                        print("Found LambdaConfiguration override {}".format(lambda_override))
                        lambda_info['lambda_config_overrides'] = lambda_override

                lambdas_to_create.append(lambda_info)

        # We create the lambdas in a separate pass because role-propagation to lambda takes a while, and we don't want
        # to have to delay multiple times for each role/lambda pair
        #
        # TODO: Replace delay (and all other instances of role/lambda creation) with exponential backoff
        time.sleep(role_utils.PROPAGATION_DELAY_SECONDS)

        for info in lambdas_to_create:
            # Create the lambda function
            arn, version = _create_or_update_lambda_function(
                lambda_client=lambda_client,
                timeout=props.LambdaTimeout,
                lambda_config_src=lambda_config_src,
                info=info,
                existing_lambdas=existing_lambdas
            )
            created_or_updated_lambdas[info['lambda_function_name']] = {'arn': arn, 'v': version}

            # Finally add/update a role policy to give least privileges to the Lambdas to log events
            policy_document = _generate_lambda_log_event_policy(arn)
            iam_client.put_role_policy(RoleName=aws_utils.get_role_name_from_role_arn(info['role_arn']),
                                       PolicyDocument=json.dumps(policy_document),
                                       PolicyName='LambdaLoggingEventsPolicy')

        # Entries present before but absent from the new definitions are deleted.
        deleted_entries = set(definitions_dictionary.keys()) - set(definitions_src.keys())

    # Persist the merged state back to the configuration bucket.
    physical_resource_id = "-".join(path_components[1:])
    lambda_dictionary.update(created_or_updated_lambdas)
    definitions_dictionary.update(definitions_src)
    config_info = {
        'StackId': stack_arn,
        'Id': physical_resource_id,
        'Lambdas': lambda_dictionary,
        'Definitions': definitions_dictionary,
        'Deleted': list(deleted_entries)
    }
    data = {
        'ConfigBucket': configuration_bucket,
        'ConfigKey': resource_file_key
    }

    # Copy the resource definitions to the configuration bucket.
    s3_client.put_object(Bucket=configuration_bucket, Key=resource_file_key,
                         Body=json.dumps(config_info, indent=2))

    custom_resource_response.succeed(event, context, data, physical_resource_id)
def handler(event, context):
    """Entry point for the Custom::CognitoIdentityPool resource handler.

    Creates, updates or deletes the Cognito identity pool backing a stack and
    wires up its supported login providers, roles and role mappings.

    Args:
        event: CloudFormation custom resource event.
        context: Lambda context object.

    Returns:
        A custom_resource_response success response whose physical resource id
        is the identity pool id.
    """
    props = properties.load(event, {
        'ConfigurationBucket': properties.String(),
        # Only present to force the resource handler to execute on each update to the deployment.
        'ConfigurationKey': properties.String(),
        'IdentityPoolName': properties.String(),
        'UseAuthSettingsObject': properties.String(),
        'AllowUnauthenticatedIdentities': properties.String(),
        'DeveloperProviderName': properties.String(default=''),
        'Roles': properties.Object(default={}, schema={'*': properties.String()}),
        'RoleMappings': properties.Object(
            default={},
            schema={
                'Cognito': properties.Object(
                    default={},
                    schema={
                        'Type': properties.String(''),
                        'AmbiguousRoleResolution': properties.String('')
                    })
            })
    })

    # Give the identity pool a unique name per stack.
    stack_manager = stack_info.StackInfoManager()
    stack_name = aws_utils.get_stack_name_from_stack_arn(event['StackId'])
    identity_pool_name = (stack_name + props.IdentityPoolName).replace('-', ' ')

    identity_client = identity_pool.get_identity_client()
    identity_pool_id = custom_resource_utils.get_embedded_physical_id(
        event.get('PhysicalResourceId'))
    found_pool = identity_pool.get_identity_pool(identity_pool_id)

    request_type = event['RequestType']
    if request_type == 'Delete':
        if found_pool is not None:
            identity_client.delete_identity_pool(IdentityPoolId=identity_pool_id)
        data = {}
    else:
        supported_login_providers = {}

        if props.UseAuthSettingsObject.lower() == 'true':
            # Download the auth settings from S3; each entry maps a provider
            # URI to the app id registered for that provider.
            player_access_key = 'player-access/' + constant.AUTH_SETTINGS_FILENAME
            auth_doc = json.loads(
                _load_doc_from_s3(props.ConfigurationBucket, player_access_key))
            for value in auth_doc.values():
                supported_login_providers[value['provider_uri']] = value['app_id']

        cognito_identity_providers = identity_pool.get_cognito_identity_providers(
            stack_manager, event['StackId'], event['LogicalResourceId'])
        print('Identity Providers: {}'.format(cognito_identity_providers))

        allow_anonymous = props.AllowUnauthenticatedIdentities.lower() == 'true'

        # If the pool exists just update it, otherwise create a new one.
        args = {
            'IdentityPoolName': identity_pool_name,
            'AllowUnauthenticatedIdentities': allow_anonymous,
            'SupportedLoginProviders': supported_login_providers,
            'CognitoIdentityProviders': cognito_identity_providers
        }
        if props.DeveloperProviderName:
            args['DeveloperProviderName'] = props.DeveloperProviderName

        if found_pool is not None:
            identity_client.update_identity_pool(
                IdentityPoolId=identity_pool_id, **args)
        else:
            response = identity_client.create_identity_pool(**args)
            identity_pool_id = response['IdentityPoolId']

        # Update the roles for the pool. Role mappings are keyed by
        # "<ProviderName>:<ClientId>" of the first Cognito provider.
        role_mappings = {}
        if props.RoleMappings.Cognito.Type and len(cognito_identity_providers) > 0:
            print('Adding role mappings for cognito {}'.format(
                props.RoleMappings.Cognito.__dict__))
            role_mappings['{}:{}'.format(
                cognito_identity_providers[0]['ProviderName'],
                cognito_identity_providers[0]['ClientId'])] = props.RoleMappings.Cognito.__dict__

        print('Role Mappings: {}'.format(role_mappings))
        identity_client.set_identity_pool_roles(
            IdentityPoolId=identity_pool_id,
            Roles=props.Roles.__dict__,
            RoleMappings=role_mappings)

        data = {
            'IdentityPoolName': identity_pool_name,
            'IdentityPoolId': identity_pool_id
        }

    physical_resource_id = identity_pool_id
    return custom_resource_response.success_response(data, physical_resource_id)
def handler(event, context):
    """Entry point for the Custom::CognitoUserPool resource handler.

    Creates, updates or deletes the Cognito user pool backing a stack,
    reconciles the pool's groups with the resource template, manages the
    pool's client apps, and finally propagates the resulting client ids to
    the identity pool so it can list this user pool as a login provider.

    NOTE(review): statement order matters here -- the pool must exist before
    groups and client apps are touched, and identity providers are updated last.
    """
    stack_id = event['StackId']
    props = properties.load(
        event, {
            'ClientApps': properties.StringOrListOfString(),
            'ExplicitAuthFlows': properties.StringOrListOfString(default=[]),
            'RefreshTokenValidity': properties.String('30'),
            'ConfigurationKey': properties.String(
            ),  # this is only here to force the resource handler to execute on each update to the deployment
            'LambdaConfig': properties.Dictionary({}),
            'PoolName': properties.String(),
            'Groups': properties.ObjectOrListOfObject(
                default=[],
                schema={
                    'Name': properties.String(),
                    'Description': properties.String(''),
                    'Role': properties.String(),
                    'Precedence': properties.String('99')
                }),
            'AllowAdminCreateUserOnly': properties.String('')
        })

    # Give the user pool a unique name per stack (dashes become spaces).
    stack_manager = stack_info.StackInfoManager()
    stack = stack_manager.get_stack_info(stack_id)
    stack_name = stack.stack_name
    pool_name = props.PoolName.replace('-', ' ')
    pool_name = stack_name + pool_name

    cognito_idp_client = user_pool.get_idp_client()
    pool_id = custom_resource_utils.get_embedded_physical_id(
        event.get('PhysicalResourceId'))
    found_pool = user_pool.get_user_pool(pool_id)

    # Set up tags for all resources created.
    tags = {
        constant.PROJECT_NAME_TAG: stack.project_stack.project_name,
        constant.STACK_ID_TAG: stack_id
    }

    request_type = event['RequestType']
    if request_type == 'Delete':
        if found_pool is not None:
            cognito_idp_client.delete_user_pool(UserPoolId=pool_id)
        data = {}
    else:
        # If the pool exists just update it, otherwise create a new one.
        mfa_config = 'OFF'  # MFA is currently unsupported by Lumberyard

        # Users are automatically prompted to verify these things.
        # At least one auto-verified thing (email or phone) is required to allow password recovery.
        auto_verified_attributes = ['email']

        client_app_data = {}
        lambda_config = props.LambdaConfig

        user_pool.validate_identity_metadata(stack_manager, stack_id,
                                             event['LogicalResourceId'],
                                             props.ClientApps)
        admin_create_user_config = __get_admin_create_user_config(
            props.AllowAdminCreateUserOnly)
        print(json.dumps(admin_create_user_config))

        if found_pool is not None:  # Update: pool already exists.
            response = cognito_idp_client.update_user_pool(
                UserPoolId=pool_id,
                MfaConfiguration=mfa_config,
                AutoVerifiedAttributes=auto_verified_attributes,
                LambdaConfig=lambda_config,
                AdminCreateUserConfig=admin_create_user_config,
                UserPoolTags=tags)

            existing_client_apps = user_pool.get_client_apps(pool_id)
            client_app_data = update_client_apps(pool_id, props.ClientApps,
                                                 existing_client_apps, False,
                                                 props.ExplicitAuthFlows,
                                                 props.RefreshTokenValidity)

            # Reconcile actual groups against the ones requested in the template.
            response = cognito_idp_client.list_groups(UserPoolId=pool_id)
            found_groups = {}
            for actual_group in response['Groups']:
                group_name = actual_group['GroupName']
                for requested_group in props.Groups:
                    # Does the group exist in the resource template?
                    if group_name == requested_group.Name:
                        found_groups.update({group_name: True})
                        break

                # Delete the group as it is no longer in the resource template.
                if group_name not in found_groups:
                    cognito_idp_client.delete_group(
                        GroupName=actual_group['GroupName'],
                        UserPoolId=pool_id)
            print("Found groups=>{}".format(json.dumps(found_groups)))

            # Iterate the groups defined in the user pool resource template.
            for group in props.Groups:
                # Update the group as it is currently a group in the user pool.
                group_definition = __generate_group_definition(pool_id, group)
                print("Group '{}' is defined by {}".format(
                    group.Name, json.dumps(group_definition)))
                if group.Name in found_groups:
                    cognito_idp_client.update_group(**group_definition)
                else:
                    # Group is a new group on the user pool.
                    cognito_idp_client.create_group(**group_definition)
        else:  # Create: no pool with this physical id exists yet.
            response = cognito_idp_client.create_user_pool(
                PoolName=pool_name,
                MfaConfiguration=mfa_config,
                AutoVerifiedAttributes=auto_verified_attributes,
                LambdaConfig=lambda_config,
                AdminCreateUserConfig=admin_create_user_config,
                UserPoolTags=tags)
            pool_id = response['UserPool']['Id']
            print('User pool creation response: {}'.format(response))

            for group in props.Groups:
                group_definition = __generate_group_definition(pool_id, group)
                print("Group '{}' is defined by {}".format(
                    group.Name, json.dumps(group_definition)))
                cognito_idp_client.create_group(**group_definition)

            client_app_data = update_client_apps(pool_id, props.ClientApps, [],
                                                 False, props.ExplicitAuthFlows,
                                                 props.RefreshTokenValidity)

        # Share the pool id and the created/updated client app ids with the
        # identity pool so it can register this user pool as a provider.
        updated_resources = {
            stack_id: {
                event['LogicalResourceId']: {
                    'physical_id': pool_id,
                    'client_apps': {
                        client_app['ClientName']: {
                            'client_id': client_app['ClientId']
                        }
                        for client_app in
                        client_app_data['Created'] + client_app_data['Updated']
                    }
                }
            }
        }
        identity_pool.update_cognito_identity_providers(
            stack_manager, stack_id, pool_id, updated_resources)

        data = {
            'UserPoolName': pool_name,
            'UserPoolId': pool_id,
            'ClientApps': client_app_data,
        }

    physical_resource_id = pool_id
    return custom_resource_response.success_response(data, physical_resource_id)
def handler(event, context):
    """Main handler for custom resources, wired in via project-template.json as the ProjectResourceHandler.

    Dispatches the CloudFormation event either to a legacy in-process handler
    module (whitelisted types) or, for the new scheme, to a per-type handler
    lambda, taking care of handler-function versioning and version coercion.

    On any failure a CloudFormation FAILED response is sent via
    custom_resource_response.fail so the stack operation does not hang.
    """
    try:
        print('Dispatching event {} with context {}.'.format(
            json.dumps(event, cls=json_utils.SafeEncoder), context))

        resource_type = event.get('ResourceType', None)
        if resource_type is None:
            raise RuntimeError('No ResourceType specified.')

        if resource_type in _LOCAL_CUSTOM_RESOURCE_WHITELIST:
            # Old method for supporting custom resource code directly within the ProjectResourceHandler.
            # Should only be used for legacy types in the ProjectResourceHandler.
            module_name = resource_type.replace('Custom::', '') + 'ResourceHandler'
            module = sys.modules.get(module_name, None)
            if module is None:
                # First check for handler module in same directory as this module,
                # if not found, check for module in the resource group provided
                # directories.
                module_file_name = module_name + '.py'
                module_file_path = os.path.join(os.path.dirname(__file__),
                                                module_file_name)
                if os.path.isfile(module_file_path):
                    module = module_utils.load_module(
                        module_name, os.path.dirname(module_file_path))
                elif os.path.isdir(PLUGIN_DIRECTORY_PATH):
                    plugin_directory_names = [
                        item for item in os.listdir(PLUGIN_DIRECTORY_PATH)
                        if os.path.isdir(
                            os.path.join(PLUGIN_DIRECTORY_PATH, item))
                    ]
                    for plugin_directory_name in plugin_directory_names:
                        module_file_path = os.path.join(
                            PLUGIN_DIRECTORY_PATH, plugin_directory_name,
                            module_file_name)
                        if os.path.isfile(module_file_path):
                            module = module_utils.load_module(
                                module_name,
                                os.path.dirname(module_file_path))
                            break

            if module is not None:
                if not hasattr(module, 'handler'):
                    raise RuntimeError(
                        'No handler function found for the {} resource type.'.
                        format(resource_type))
                print('Using {}'.format(module))
                module.handler(event, context)
        else:
            # New way of instantiating custom resources. Load the dictionary of resource types.
            stack = stack_info.StackInfoManager().get_stack_info(
                event['StackId'])
            type_definition = stack.resource_definitions.get(
                resource_type, None)
            if type_definition is None:
                raise RuntimeError(
                    'No type definition found for the {} resource type.'.
                    format(resource_type))
            if type_definition.handler_function is None:
                raise RuntimeError(
                    'No handler function defined for custom resource type {}.'.
                    format(resource_type))

            request_type = event['RequestType']
            if type_definition.deleted and request_type == "Create":
                raise RuntimeError(
                    'Attempting to Create a new resource of deleted type {}.'.
                    format(resource_type))

            create_version = type_definition.handler_function_version
            logical_id = event['LogicalResourceId']
            embedded_physical_id = None

            # Access control can take over 60s so set custom timeouts
            config_dict = {
                'region_name': stack.region,
                'connect_timeout': LAMBDA_CONNECTION_TIMEOUT,
                'read_timeout': LAMBDA_READ_TIMEOUT
            }
            lambda_client_config = Config(**config_dict)
            lambda_client = aws_utils.ClientWrapper(
                boto3.client("lambda", config=lambda_client_config))
            cf_client = aws_utils.ClientWrapper(
                boto3.client("cloudformation", stack.region))

            if request_type != "Create":
                # The physical id may be a JSON blob embedding the real id plus
                # the handler-function version the resource was created with.
                physical_id = event['PhysicalResourceId']
                embedded_physical_id = physical_id
                try:
                    existing_resource_info = json.loads(physical_id)
                    embedded_physical_id = existing_resource_info['id']
                    create_version = existing_resource_info['v']
                except (ValueError, TypeError, KeyError):
                    # Backwards compatibility with resources created prior to versioning support
                    create_version = None

            run_version = create_version

            # Check the metadata on the resource to see if we're coercing to a different version
            resource_info = cf_client.describe_stack_resource(
                StackName=event['StackId'], LogicalResourceId=logical_id)
            metadata = aws_utils.get_cloud_canvas_metadata(
                resource_info['StackResourceDetail'],
                custom_resource_utils.METADATA_VERSION_TAG)
            if metadata:
                run_version = metadata
                if request_type == "Create":
                    create_version = metadata

            # Configure our invocation, and invoke the handler lambda
            lambda_data = {'Handler': type_definition.handler_function}
            lambda_data.update(event)
            invoke_params = {
                'FunctionName':
                type_definition.get_custom_resource_lambda_function_name(),
                'Payload': json.dumps(lambda_data)
            }
            if run_version:
                invoke_params['Qualifier'] = run_version

            response = lambda_client.invoke(**invoke_params)
            if response['StatusCode'] == 200:
                response_data = json.loads(response['Payload'].read().decode())
                response_success = response_data.get('Success', None)
                if response_success is not None:
                    if response_success:
                        if create_version:
                            if request_type == "Update" and response_data[
                                    'PhysicalResourceId'] != embedded_physical_id:
                                # Physical ID changed during an update, which is *technically* illegal according to the
                                # docs, but we allow it because CloudFormation doesn't act to prevent it.
                                print(
                                    _UPDATE_CHANGED_PHYSICAL_ID_WARNING.format(
                                        logical_id, embedded_physical_id,
                                        response_data['PhysicalResourceId']))
                            out_resource_id = json.dumps({
                                'id': response_data['PhysicalResourceId'],
                                'v': create_version
                            })
                        else:
                            # Backwards compatibility with resources created prior to versioning support
                            out_resource_id = response_data[
                                'PhysicalResourceId']
                        custom_resource_response.succeed(
                            event, context, response_data['Data'],
                            out_resource_id)
                    else:
                        custom_resource_response.fail(event, context,
                                                      response_data['Reason'])
                else:
                    raise RuntimeError(
                        "Handler lambda for resource type '%s' returned a malformed response: %s"
                        % (resource_type, response_data))
            else:
                raise RuntimeError(
                    "Handler lambda for resource type '%s' failed to execute, returned HTTP status %d"
                    % (resource_type, response['StatusCode']))

    except ValidationError as e:
        custom_resource_response.fail(event, context, str(e))
    except Exception as e:
        print(
            'Unexpected error occurred when processing event {} with context {}. {}'
            .format(event, context, traceback.format_exc()))
        custom_resource_response.fail(
            event, context,
            'Unexpected {} error occurred: {}. Additional details can be found in the CloudWatch log group {} stream {}'
            .format(
                type(e).__name__, str(e), context.log_group_name,
                context.log_stream_name))
def handler(event, context):
    """Create, update or delete the access-control role and composed Lambda
    configuration for a function resource, then report back to CloudFormation."""
    props = properties.load(event, PROPERTIES_SCHEMA)
    event_type = event['RequestType']
    arn_of_stack = event['StackId']
    role_logical_name = props.FunctionName
    manager = stack_info.StackInfoManager()
    id_data = aws_utils.get_data_from_custom_physical_resource_id(
        event.get('PhysicalResourceId', None))

    if event_type == 'Delete':
        role_utils.delete_access_control_role(id_data, role_logical_name)
        response_data = {}
    else:
        owning_stack = manager.get_stack_info(arn_of_stack)

        if event_type == 'Create':
            service_lambda_arn = _get_project_service_lambda_arn(owning_stack)
            role_arn = role_utils.create_access_control_role(
                manager,
                id_data,
                arn_of_stack,
                role_logical_name,
                'lambda.amazonaws.com',
                default_policy=get_default_policy(service_lambda_arn))
        elif event_type == 'Update':
            role_arn = role_utils.get_access_control_role_arn(
                id_data, role_logical_name)
        else:
            raise RuntimeError('Unexpected request type: {}'.format(event_type))

        _add_built_in_settings(props.Settings.__dict__, owning_stack)

        # give access to project level ServiceDirectory APIs
        # Other deployment-level APIs are handled in InterfaceDependeny resolver custom resource type
        permitted_arns = _add_services_settings(
            owning_stack, props.Settings.__dict__, props.Services)
        _add_service_access_policy_to_role(role_arn, permitted_arns)

        # Check if we have a folder just for this function, if not use the default
        input_key = _get_input_key(props)
        output_key = input_key
        if not props.IgnoreAppendingSettingsToZip:
            output_key = _inject_settings(props.Settings.__dict__,
                                          props.Runtime,
                                          props.ConfigurationBucket,
                                          input_key,
                                          props.FunctionName)

        code_section = {
            'S3Bucket': props.ConfigurationBucket,
            'S3Key': output_key
        }
        response_data = {
            'ConfigurationBucket': props.ConfigurationBucket,
            'ConfigurationKey': output_key,
            'Runtime': props.Runtime,
            'Role': role_arn,
            'RoleName': role_utils.get_access_control_role_name(
                arn_of_stack, role_logical_name),
            'ComposedLambdaConfiguration': {
                'Code': code_section,
                'Role': role_arn,
                'Runtime': props.Runtime
            }
        }

    physical_resource_id = aws_utils.construct_custom_physical_resource_id_with_data(
        arn_of_stack, event['LogicalResourceId'], id_data)
    custom_resource_response.succeed(event, context, response_data, physical_resource_id)
def handler(event, context):
    """Provision the access-control role for a Lambda function resource and
    compose its deployable configuration (code location, role, runtime and
    environment settings), then signal success to CloudFormation."""
    props = properties.load(event, PROPERTIES_SCHEMA)
    event_type = event['RequestType']
    arn_of_stack = event['StackId']
    role_logical_name = props.FunctionName
    manager = stack_info.StackInfoManager()
    id_data = aws_utils.get_data_from_custom_physical_resource_id(
        event.get('PhysicalResourceId', None))

    if event_type == 'Delete':
        role_utils.delete_access_control_role(id_data, role_logical_name)
        response_data = {}
    else:
        owning_stack = manager.get_stack_info(arn_of_stack)

        if event_type == 'Create':
            service_lambda_arn = _get_project_service_lambda_arn(owning_stack)
            role_arn = role_utils.create_access_control_role(
                manager,
                id_data,
                arn_of_stack,
                role_logical_name,
                'lambda.amazonaws.com',
                default_policy=get_default_policy(service_lambda_arn))
        elif event_type == 'Update':
            role_arn = role_utils.get_access_control_role_arn(
                id_data, role_logical_name)
        else:
            raise RuntimeError('Unexpected request type: {}'.format(event_type))

        _add_built_in_settings(props.Settings.__dict__, owning_stack)

        # Check if we have a folder just for this function, if not use the default
        input_key = _get_input_key(props)
        output_key = input_key if props.IgnoreAppendingSettingsToZip else _inject_settings(
            props.Settings.__dict__, props.Runtime, props.ConfigurationBucket,
            input_key, props.FunctionName)

        cc_settings = copy.deepcopy(props.Settings.__dict__)
        # Remove "Services" from settings because they get injected into the python code package during _inject_settings
        # TODO: move handling of project-level service interfaces to the same code as cross-gem interfaces
        cc_settings.pop("Services", None)

        response_data = {
            'ConfigurationBucket': props.ConfigurationBucket,
            'ConfigurationKey': output_key,
            'Runtime': props.Runtime,
            'Role': role_arn,
            'RoleName': role_utils.get_access_control_role_name(
                arn_of_stack, role_logical_name),
            'ComposedLambdaConfiguration': {
                'Code': {
                    'S3Bucket': props.ConfigurationBucket,
                    'S3Key': output_key
                },
                "Environment": {
                    "Variables": cc_settings
                },
                'Role': role_arn,
                'Runtime': props.Runtime
            },
            "CCSettings": cc_settings
        }

    physical_resource_id = aws_utils.construct_custom_physical_resource_id_with_data(
        arn_of_stack, event['LogicalResourceId'], id_data)
    custom_resource_response.succeed(event, context, response_data, physical_resource_id)
def manager(self):
    """Return the shared StackInfoManager, creating it on first access.

    The manager is constructed lazily and bound to this context's AWS
    session so repeated lookups reuse the same instance (and its caches).
    """
    # PEP 8: compare against None with 'is', not '=='.
    if self.__stack_info_manager is None:
        self.__stack_info_manager = stack_info.StackInfoManager(
            default_session=self.__context.aws.session)
    return self.__stack_info_manager
REST_API_ID, MockResourceGroupInfo.MOCK_REGION, Custom_ServiceApi.STAGE_NAME) CUSTOM_DOMAIN_NAME_A = 'TestCustomDomainName_A' CUSTOM_DOMAIN_NAME_B = 'TestCustomDomainName_B' EXPECTED_ALTERNATIVE_URL = 'https://{}/{}.{}.{}'.format( CUSTOM_DOMAIN_NAME_A, MockResourceGroupInfo.MOCK_REGION, Custom_ServiceApi.STAGE_NAME, REST_API_ID) SWAGGER_CONTENT = 'TestSwaggerContent' RESOURCE_GROUP_INFO = MockResourceGroupInfo() SWAGGER_DIGEST = 'TestSwaggerDigest' SWAGGER_DIGEST_A = 'TestSwaggerDigestA' SWAGGER_DIGEST_B = 'TestSwaggerDigestB' MOCK_PATCH_OPERATIONS = ['Mock Patch Operation'] EMPTY_PATCH_OPERATIONS = [] REST_API_DEPLOYMENT_ID = 'TEstRestApiDeploymentId' STACK_MANAGER = stack_info.StackInfoManager() FULL_RESOURCE_PROPERTIES = { 'ConfigurationBucket': CONFIGURATION_BUCKET, 'ConfigurationKey': CONFIGURATION_KEY, 'SwaggerSettings': { 'SwaggerSettings': '', 'DeploymentName': '', 'ResourceGroupName': '' }, 'MethodSettings': { 'MethodSettingPath': { 'MethodSettingMethod': { 'cacheDataEncrypted': True, 'cacheTtlInSeconds': 10, 'cachingEnabled': True,
def handler(event, context):
    """Handler for the custom resource that publishes project resource-type
    definitions: provisions one IAM role and one Lambda per defined function
    (ARN lookup and create/update/delete handlers), then writes the resulting
    configuration JSON to the project configuration bucket.

    Only valid on the project stack. The physical resource id is derived from
    the S3 key path components (minus the root directory).

    NOTE(review): role creation and lambda creation are deliberately two
    passes with a fixed sleep in between -- IAM role propagation to Lambda is
    eventually consistent.
    """
    event_type = event['RequestType']
    stack_arn = event['StackId']
    stack_manager = stack_info.StackInfoManager()
    stack = stack_manager.get_stack_info(stack_arn)

    if not stack.is_project_stack:
        raise RuntimeError(
            "Resource Types can only be defined in the project stack.")

    configuration_bucket = stack.project_stack.configuration_bucket
    source_resource_name = event['LogicalResourceId']
    props = properties.load(event, _schema)
    definitions_src = event['ResourceProperties']['Definitions']
    lambda_client = aws_utils.ClientWrapper(
        boto3.client("lambda", aws_utils.get_region_from_stack_arn(stack_arn)))
    lambda_arns = []
    lambda_roles = []

    # Build the file key as "<root directory>/<project stack>/<deployment stack>/<resource_stack>/<resource_name>.json"
    path_components = [x.stack_name for x in stack.ancestry]
    path_components.insert(0, constant.RESOURCE_DEFINITIONS_PATH)
    path_components.append(source_resource_name + ".json")
    resource_file_key = aws_utils.s3_key_join(*path_components)
    path_info = resource_type_info.ResourceTypesPathInfo(resource_file_key)

    # Load information from the JSON file if it exists
    if event_type != 'Create':
        contents = s3_client.get_object(
            Bucket=configuration_bucket,
            Key=resource_file_key)['Body'].read()
        existing_info = json.loads(contents)
    else:
        existing_info = None

    # Process the actual event
    if event_type == 'Delete':
        _delete_resources(existing_info['Lambdas'], existing_info['Roles'],
                          lambda_client)
        custom_resource_response.succeed(event, context, {},
                                         existing_info['Id'])
    else:
        # For updates, start with the previously-created roles/lambdas; any
        # still referenced below are discarded from these sets, leaving only
        # the orphans to delete at the end.
        existing_roles = set()
        existing_lambdas = set()
        if event_type == 'Update':
            existing_roles = set(
                [arn.split(":")[-1] for arn in existing_info['Roles']])
            existing_lambdas = set(
                [arn.split(":")[-1] for arn in existing_info['Lambdas']])

        definitions = props.Definitions
        lambda_config_src = event['ResourceProperties'].get(
            'LambdaConfiguration', None)

        # Create lambdas for fetching the ARN and handling the resource creation/update/deletion
        lambdas_to_create = []
        for resource_type_name in definitions_src.keys():
            type_info = resource_type_info.ResourceTypeInfo(
                stack_arn, source_resource_name, resource_type_name,
                definitions_src[resource_type_name])
            function_infos = [
                type_info.arn_function, type_info.handler_function
            ]

            for function_info, field, tag, description in zip(
                    function_infos, _lambda_fields, _lambda_tags,
                    _lambda_descriptions):
                if function_info is None:
                    continue

                function_handler = function_info.get('Function', None)
                if function_handler is None:
                    raise RuntimeError(
                        "Definition for '%s' in type '%s' requires a 'Function' field with the handler "
                        "to execute." % (field, resource_type_name))

                # Create the role for the lambda(s) that will be servicing this resource type
                lambda_function_name = type_info.get_lambda_function_name(tag)
                role_name = role_utils.sanitize_role_name(lambda_function_name)
                role_path = "/%s/%s/" % (type_info.stack_name,
                                         type_info.source_resource_name)
                assume_role_policy_document = role_utils.get_assume_role_policy_document_for_service(
                    "lambda.amazonaws.com")

                try:
                    res = iam_client.create_role(
                        RoleName=role_name,
                        AssumeRolePolicyDocument=assume_role_policy_document,
                        Path=role_path)
                    role_arn = res['Role']['Arn']
                except ClientError as e:
                    # Role already exists from a previous run: reuse it and
                    # remove it from the orphan set so it is not deleted.
                    if e.response["Error"]["Code"] != 'EntityAlreadyExists':
                        raise e
                    existing_roles.discard(role_name)
                    res = iam_client.get_role(RoleName=role_name)
                    role_arn = res['Role']['Arn']

                # Copy the base policy for the role and add any permissions that are specified by the type
                role_policy = copy.deepcopy(_lambda_base_policy)
                role_policy['Statement'].extend(
                    function_info.get('PolicyStatement', []))
                iam_client.put_role_policy(
                    RoleName=role_name,
                    PolicyName=_inline_policy_name,
                    PolicyDocument=json.dumps(role_policy))

                # Record this role and the type_info so we can create a lambda for it
                lambda_roles.append(role_name)
                lambdas_to_create.append({
                    'role_arn': role_arn,
                    'type_info': type_info,
                    'lambda_function_name': lambda_function_name,
                    'handler': "resource_types." + function_handler,
                    'description': description
                })

        # We create the lambdas in a separate pass because role-propagation to lambda takes a while, and we don't want
        # to have to delay multiple times for each role/lambda pair
        #
        # TODO: Replace delay (and all other instances of role/lambda creation) with exponential backoff
        time.sleep(role_utils.PROPAGATION_DELAY_SECONDS)

        for info in lambdas_to_create:
            # Create the lambda function
            arn = _create_or_update_lambda_function(
                lambda_client=lambda_client,
                timeout=props.LambdaTimeout,
                lambda_config_src=lambda_config_src,
                info=info,
                existing_lambdas=existing_lambdas)
            lambda_arns.append(arn)

        # For Update operations, delete any lambdas and roles that previously existed and now no longer do.
        _delete_resources(existing_lambdas, existing_roles, lambda_client)

        physical_resource_id = "-".join(path_components[1:])
        config_info = {
            'StackId': stack_arn,
            'Id': physical_resource_id,
            'Lambdas': lambda_arns,
            'Roles': lambda_roles,
            'Definitions': definitions_src
        }
        data = {
            'ConfigBucket': configuration_bucket,
            'ConfigKey': resource_file_key
        }

        # Copy the resource definitions to the configuration bucket.
        s3_client.put_object(Bucket=configuration_bucket,
                             Key=resource_file_key,
                             Body=json.dumps(config_info))
        custom_resource_response.succeed(event, context, data,
                                         physical_resource_id)