def add_resource(self, resource_name, resource_group, arn, download, context):
    """Import a Lambda function, auto-import its DynamoDB table
    dependencies as event sources, and download the function code into
    the resource group directory.

    Arguments follow ResourceImporter.add_resource. Raises whatever the
    underlying AWS clients raise on failure.
    """
    ResourceImporter.add_resource(self, resource_name, resource_group, arn, download, context)
    function_region = util.get_region_from_arn(arn)
    # The trailing ARN component is the original function name.
    original_name = arn.split(':')[-1]
    for table_dependency in self.table_dependencies:
        dynamodb_region = util.get_region_from_arn(table_dependency['arn'])
        dynamodb_importer = DynamoDBImporter(dynamodb_region, context)
        dynamodb_importer.function_accesses.append(
            self.get_permissions(original_name, resource_name, context, function_region))
        # Add the event source mapping resource.
        event_source = self._generate_table_event_source(resource_name, table_dependency['name'])
        dynamodb_importer.templates.append({table_dependency['name'] + 'EventSource': event_source})
        dynamodb_importer.add_resource(table_dependency['name'], resource_group, table_dependency['arn'], False, context)
        context.view.auto_added_resource(table_dependency['name'])
    # Download the code. 'code_dir' avoids shadowing the builtin 'dir',
    # and 'original_name' is reused instead of re-splitting the ARN.
    code_dir = os.path.join(context.config.base_resource_group_directory_path, resource_group)
    description = self.client.get_function(FunctionName=original_name)
    location = description['Code']['Location']
    zipcontent = urllib.urlopen(location)
    zfile = zipfile.ZipFile(StringIO.StringIO(zipcontent.read()))
    zfile.extractall(code_dir + '/lambda-function-code/')
    context.view.import_resource(resource_name)
def __set_default_section_from_project_stack_id(self, from_version):
    """Choose the default local-project-settings section.

    If the local project settings contain a (pending) project stack id,
    the default section is the region parsed from that ARN; otherwise a
    migration section named after *from_version* is selected and marked
    for lazy migration.
    """
    settings = self.__context.config.local_project_settings.raw_dict()
    if constant.PROJECT_STACK_ID in settings:
        stack_id = settings[constant.PROJECT_STACK_ID]
        # Create the region section and re-home the stack id under it.
        self.__context.config.local_project_settings.default(util.get_region_from_arn(stack_id))
        self.__context.config.local_project_settings[constant.PROJECT_STACK_ID] = stack_id
    if constant.PENDING_PROJECT_STACK_ID in settings:
        pending_stack_id = settings[constant.PENDING_PROJECT_STACK_ID]
        # Create the region section for the pending stack.
        self.__context.config.local_project_settings.default(util.get_region_from_arn(pending_stack_id))
    if not self.__context.config.local_project_settings.is_default_set_to_region():
        # Settings are not initialized: fall back to the migration
        # section defined by the version and flag the lazy migration.
        self.__context.config.local_project_settings.default(str(from_version))
        self.__context.config.local_project_settings[constant.LAZY_MIGRATION] = True
def delete(self, stack_id, pending_resource_status=None):
    """Delete the CloudFormation stack and wait for completion.

    Undeletable resources are cleaned before the delete is started and
    the associated log groups are cleaned afterwards. Raises
    HandledError if the delete cannot be started.
    """
    stack_name = util.get_stack_name_from_arn(stack_id)
    self.context.view.deleting_stack(stack_name, stack_id)
    self.__clean_undeltable_resources(stack_id, pending_resource_status=pending_resource_status)
    monitor = Monitor(self.context, stack_id, 'DELETE')
    region = util.get_region_from_arn(stack_id)
    cf = self.context.aws.client('cloudformation', region=region)
    try:
        cf.delete_stack(StackName=stack_id)
    except ClientError as e:
        raise HandledError('Could not start delete of {} stack ({}).'.format(stack_name, stack_id), e)
    monitor.wait()
    self.__clean_log_groups(stack_id, pending_resource_status=pending_resource_status)
def _get_notification_configuration(self, bucket_name, resource_name, context):
    """Build the NotificationConfiguration for an imported S3 bucket.

    Records the bucket's lambda/topic/queue dependencies on self and
    collects the permissions of the related lambda functions. Returns a
    dict suitable for the CloudFormation bucket template property.
    """
    notification = self.client.get_bucket_notification_configuration(Bucket=bucket_name)
    configuration = {}
    # Deal with LambdaFunctionConfigurations.
    detail = self._get_notification_configuration_detail('LambdaFunction', resource_name, notification)
    self.lambda_dependencies = detail['dependencies']
    # Get the permissions of the related lambda functions.
    for dependency in self.lambda_dependencies:
        region = util.get_region_from_arn(dependency['arn'])
        importer = LambdaImporter(region, context)
        original_name = dependency['arn'].split(':')[-1]
        self.function_accesses.append(importer.get_permissions(original_name, dependency['name'], context, region))
    if detail['configurations']:
        configuration['LambdaConfigurations'] = detail['configurations']
    # Deal with TopicConfigurations.
    detail = self._get_notification_configuration_detail('Topic', resource_name, notification)
    self.topic_dependencies = detail['dependencies']
    if detail['configurations']:
        configuration['TopicConfigurations'] = detail['configurations']
    # Deal with QueueConfigurations.
    detail = self._get_notification_configuration_detail('Queue', resource_name, notification)
    self.queue_dependencies = detail['dependencies']
    if detail['configurations']:
        configuration['QueueConfigurations'] = detail['configurations']
    return configuration
def __delete_custom_resource_lambdas(context, args):
    """Delete every custom-resource-handler lambda of the project, along
    with the IAM role (and its 'Default' policy) each one used."""
    context.view.deleting_custom_resource_lambdas()
    stack_id = context.config.project_stack_id
    project_name = util.get_stack_name_from_arn(stack_id)
    lambda_client = context.aws.client('lambda', region=util.get_region_from_arn(stack_id))
    iam_client = context.aws.client('iam')
    prefixes = ["{}-{}-".format(project_name, tag) for tag in resource_type_info.LAMBDA_TAGS]
    functions_to_delete = []
    roles_to_delete = []
    # Collect every lambda whose name begins with one of the prefixes
    # associated with custom resource handlers.
    for page in lambda_client.get_paginator('list_functions').paginate():
        for entry in page['Functions']:
            name = entry['FunctionName']
            if any(name.startswith(prefix) for prefix in prefixes):
                functions_to_delete.append(name)
                roles_to_delete.append(aws_utils.get_role_name_from_role_arn(entry['Role']))
    # Delete the functions and the roles related to the handlers.
    for name, role_name in zip(functions_to_delete, roles_to_delete):
        lambda_client.delete_function(FunctionName=name)
        iam_client.delete_role_policy(RoleName=role_name, PolicyName="Default")
        iam_client.delete_role(RoleName=role_name)
    context.view.deleting_lambdas_completed(len(functions_to_delete))
def update(self, stack_id, template_url, parameters=None, pending_resource_status={}, capabilities=None):
    """Start an update of the CloudFormation stack and wait for it.

    parameters -- dict of template parameter key/values (default empty).
    pending_resource_status -- passed to the pre/post cleaners. An
        explicit None skips the pre-update cleanup, so the empty-dict
        default is kept to preserve that None-vs-empty distinction (it
        is never mutated by this method).
    capabilities -- CloudFormation capabilities list (default empty).

    Raises HandledError if the update cannot be started.
    """
    # Replace the mutable default arguments that carried no
    # None-vs-empty distinction with None sentinels.
    if parameters is None:
        parameters = {}
    if capabilities is None:
        capabilities = []
    stack_name = util.get_stack_name_from_arn(stack_id)
    self.context.view.updating_stack(stack_name, template_url, parameters)
    if pending_resource_status is not None:
        self.__clean_undeltable_resources(stack_id, pending_resource_status=pending_resource_status)
    monitor = Monitor(self.context, stack_id, 'UPDATE')
    cf = self.context.aws.client('cloudformation', region=util.get_region_from_arn(stack_id))
    parameter_list = [{'ParameterKey': k, 'ParameterValue': v} for k, v in parameters.iteritems()]
    try:
        cf.update_stack(
            StackName=stack_id,
            TemplateURL=template_url,
            Capabilities=capabilities,
            Parameters=parameter_list)
    except ClientError as e:
        raise HandledError('Could not start update of {} stack ({}).'.format(stack_name, stack_id), e)
    monitor.wait()
    self.__clean_log_groups(stack_id, pending_resource_status=pending_resource_status)
def get_physical_resource_id(self, stack_id, logical_resource_id, expected_type=None, optional=False):
    '''Map a logical resource id to a physical resource id.

    When optional is True, a missing stack/resource yields None instead
    of an error. When expected_type is given, a mismatch between it and
    the resource's actual type raises HandledError.
    '''
    if stack_id is None:
        if optional:
            return None
        raise ValueError('No stack_id provided.')
    cf = self.context.aws.client('cloudformation', region=util.get_region_from_arn(stack_id))
    try:
        res = cf.describe_stack_resource(StackName=stack_id, LogicalResourceId=logical_resource_id)
    except ClientError as e:
        if optional and e.response['Error']['Code'] == 'ValidationError':
            return None
        raise HandledError('Could not get the id for the {} resource from the {} stack.'.format(logical_resource_id, stack_id), e)
    detail = res['StackResourceDetail']
    physical_id = detail.get('PhysicalResourceId', None)
    if physical_id is None:
        if not optional:
            raise HandledError('Could not get the id for the {} resource from the {} stack.'.format(logical_resource_id, stack_id))
    elif expected_type:
        if detail.get('ResourceType', None) != expected_type:
            raise HandledError('The {} resource in stack {} does not have type {} (it has type {})'.format(
                logical_resource_id, stack_id, expected_type, detail.get('ResourceType', '(unknown)')))
    return physical_id
def describe_stack(self, stack_id, optional=False):
    """Return a summary dict describing the stack.

    Returns None when optional is True and the stack does not exist;
    access-denied errors yield a stub with an UNKNOWN status. Raises
    HandledError on any other failure.
    """
    cf = self.context.aws.client('cloudformation', region=util.get_region_from_arn(stack_id))
    self.context.view.describing_stack(stack_id)
    try:
        res = cf.describe_stacks(StackName=stack_id)
    except ClientError as e:
        error_code = e.response['Error']['Code']
        if optional and error_code == 'ValidationError':
            return None
        if error_code == 'AccessDenied':
            return {'StackStatus': 'UNKNOWN', 'StackStatusReason': 'Access denied.'}
        raise HandledError('Could not get stack {} description.'.format(stack_id), e)
    stack_description = res['Stacks'][0]
    summary_keys = ('StackId', 'StackName', 'CreationTime', 'LastUpdatedTime',
                    'StackStatus', 'StackStatusReason', 'Outputs')
    return {key: stack_description.get(key, None) for key in summary_keys}
def delete(self, stack_id, pending_resource_status=None):
    """Delete a stack, retrying up to five times.

    Resources that failed to delete on the previous attempt are passed
    as RetainResources on the retry. Raises HandledError if a delete
    cannot be started.
    """
    stack_name = util.get_stack_name_from_arn(stack_id)
    self.context.view.deleting_stack(stack_name, stack_id)
    self.__clean_undeltable_resources(stack_id, pending_resource_status=pending_resource_status)
    monitor = Monitor(self.context, stack_id, 'DELETE')
    cf = self.context.aws.client('cloudformation', region=util.get_region_from_arn(stack_id))
    failed_resources = []
    for attempt in range(5):
        try:
            cf.delete_stack(StackName=stack_id, RetainResources=list(failed_resources))
        except ClientError as e:
            raise HandledError('Could not start delete of {} stack ({}).'.format(stack_name, stack_id), e)
        failed_resources = monitor.wait()
        if not failed_resources:
            break
    self.__clean_log_groups(stack_id, pending_resource_status=pending_resource_status)
def describe_resources(self, stack_id, recursive=True, optional=False):
    """Return {LogicalResourceId: description} for the stack's resources.

    Nested stacks are flattened recursively using dotted logical ids,
    and Cognito user pool resources are augmented with their client id
    details under 'UserPoolClients'.
    """
    region = util.get_region_from_arn(stack_id)
    cf = self.context.aws.client('cloudformation', region=region)
    self.context.view.describing_stack_resources(stack_id)
    try:
        res = cf.describe_stack_resources(StackName=stack_id)
    except ClientError as e:
        if optional and e.response['Error']['Code'] == 'ValidationError':
            return {}
        message = e.message
        if e.response['Error']['Code'] == 'ValidationError':
            message += ' Make sure the AWS credentials you are using have access to the project\'s resources.'
        raise HandledError('Could not get stack {} resource data. {}'.format(util.get_stack_name_from_arn(stack_id), message))
    descriptions = {}
    for entry in res['StackResources']:
        logical_id = entry['LogicalResourceId']
        descriptions[logical_id] = entry
        if recursive and entry['ResourceType'] == 'AWS::CloudFormation::Stack':
            nested_stack_id = entry.get('PhysicalResourceId', None)
            if nested_stack_id is not None:
                for k, v in self.describe_resources(nested_stack_id).iteritems():
                    descriptions[logical_id + '.' + k] = v
        elif entry['ResourceType'] == 'Custom::CognitoUserPool':
            # User Pools require extra information (client id/secret).
            descriptions[logical_id]['UserPoolClients'] = []
            idp = self.context.aws.client('cognito-idp', region=region)
            pool_id = entry.get('PhysicalResourceId', None)
            # Only look up client IDs for a valid pool ID. Valid pool ids
            # contain an underscore; CloudFormation initializes the physical
            # ID to a plain UUID before the resource is actually created, so
            # an underscore-less id means creation never happened or failed.
            if pool_id is not None and pool_id.find('_') >= 0:
                try:
                    client_list = idp.list_user_pool_clients(UserPoolId=pool_id, MaxResults=60)['UserPoolClients']
                except ClientError as e:
                    client_list = {}
                    if e.response['Error']['Code'] == 'ResourceNotFoundException':
                        continue
                    # NOTE(review): any other error code is silently swallowed
                    # here (client_list stays empty) -- confirm intentional.
                collected_details = {}
                for client in client_list:
                    client_id = client['ClientId']
                    # The describe call is kept for parity with the original
                    # even though only the client id is recorded.
                    client_description = idp.describe_user_pool_client(UserPoolId=pool_id, ClientId=client_id)['UserPoolClient']
                    collected_details[client['ClientName']] = {'ClientId': client_id}
                descriptions[logical_id]['UserPoolClients'] = collected_details
    return descriptions
def list_aws_resources(context, args):
    """List importable AWS resources of the requested type.

    The region comes from --region when given, otherwise from the
    project stack. Raises HandledError when neither is available.
    """
    # Identity comparison with None (PEP 8) instead of '!= None'.
    if args.region is not None:
        region = args.region
    elif context.config.project_stack_id:
        region = util.get_region_from_arn(context.config.project_stack_id)
    else:
        raise HandledError('Region is required.')
    resource_importer = importer_generator(args.type, region, context)
    resource_importer.list_resources(context)
def add_resource(self, resource_name, resource_group, arn, download, context):
    """Import the resource and every Lambda function it depends on."""
    ResourceImporter.add_resource(self, resource_name, resource_group, arn, download, context)
    # Import the related Lambda functions.
    for dependency in self.lambda_dependencies:
        dependency_region = util.get_region_from_arn(dependency['arn'])
        importer = LambdaImporter(dependency_region, context)
        importer.add_resource(dependency['name'], resource_group, dependency['arn'], False, context)
        context.view.auto_added_resource(dependency['name'])
    context.view.import_resource(resource_name)
def __remove_log_group(self, stack_id, logical_resource_id):
    """Delete the CloudWatch log group of the given lambda resource.

    A missing log group is ignored; any other deletion failure raises
    HandledError.
    """
    physical_id = self.get_physical_resource_id(stack_id, logical_resource_id, optional=True)
    if physical_id is None:
        return
    log_group_name = '/aws/lambda/{}'.format(physical_id)
    logs = self.context.aws.client('logs', region=util.get_region_from_arn(stack_id))
    try:
        logs.delete_log_group(logGroupName=log_group_name)
    except ClientError as e:
        if e.response['Error']['Code'] != 'ResourceNotFoundException':
            raise HandledError('Could not delete log group {}.'.format(log_group_name), e)
def get_current_template(self, stack_id):
    """Return the current template body of the stack.

    Raises HandledError when the template cannot be retrieved.
    """
    region = util.get_region_from_arn(stack_id)
    cf = self.context.aws.client('cloudformation', region=region)
    self.context.view.getting_stack_template(stack_id)
    try:
        res = cf.get_template(StackName=stack_id)
    except ClientError as e:
        raise HandledError('Could not get stack {} template.'.format(stack_id), e)
    return res['TemplateBody']
def token_exchange_handler_id(self):
    """Return the ARN of the PlayerAccessTokenExchange lambda function.

    Raises HandledError if the project stack no longer contains that
    resource.
    """
    handler_name = self.project_resources.get('PlayerAccessTokenExchange', {}).get('PhysicalResourceId', None)
    if handler_name is None:
        raise HandledError('The project stack {} is missing the required PlayerAccessTokenExchange resource. Has {} been modified to remove this resource?'.format(
            self.project_stack_id, self.project_template_aggregator.base_file_path))
    return 'arn:aws:lambda:{region}:{account_id}:function:{function_name}'.format(
        region=util.get_region_from_arn(self.project_stack_id),
        account_id=util.get_account_id_from_arn(self.project_stack_id),
        function_name=handler_name)
def generate_templates(self, arn, resource_name, resource_group, context):
    """Generate the CloudFormation template for an SNS topic.

    Lambda subscribers are recorded as dependencies (with generated
    names) so they can be auto-imported alongside the topic. Returns
    self.templates with the new entry appended.
    """
    # Check whether the resource is an SNS topic.
    if arn.find(':sns:') == -1:
        raise HandledError('Types of the ARN and the resource do not match.')
    # Check whether the topic exists.
    if {'TopicArn': arn} not in self.queue_list['Topics']:
        raise HandledError('Resource with ARN {} does not exist.'.format(arn))
    description = self.client.get_topic_attributes(TopicArn=arn)
    # Generate the template for the resource.
    output = {'Type': 'AWS::SNS::Topic', 'Properties': {}}
    # DisplayName is not required.
    if description['Attributes'].get('DisplayName'):
        output['Properties']['DisplayName'] = description['Attributes'].get('DisplayName')
    # Subscription is not required.
    subscriptions = []
    depends_on = []
    count = 0  # numbers the auto-added lambda functions
    for subscription in self.client.list_subscriptions_by_topic(TopicArn=arn)['Subscriptions']:
        subscriptions.append({'Endpoint': subscription['Endpoint'], 'Protocol': subscription['Protocol']})
        # Get the related lambda functions.
        if subscription['Protocol'] == 'lambda':
            function_name = resource_name + 'AutoAddedLambdaFunction' + str(count)
            self.lambda_dependencies.append({'arn': subscription['Endpoint'], 'name': function_name})
            depends_on.extend([function_name, function_name + 'Configuration'])
            endpoint_region = util.get_region_from_arn(subscription['Endpoint'])
            original_name = subscription['Endpoint'].split(':')[-1]
            importer = LambdaImporter(endpoint_region, context)
            self.function_accesses.append(importer.get_permissions(original_name, function_name, context, endpoint_region))
            count = count + 1
    if subscriptions:
        output['Properties']['Subscription'] = subscriptions
    if depends_on:
        output['DependsOn'] = depends_on
    self.templates.append({resource_name: output})
    return self.templates
def get_stack_status(self, stack_id):
    """Return the stack's status string, or None when it cannot be read.

    A ValidationError (stack does not exist or cannot be accessed) is
    treated as 'no status'; other client errors propagate.
    """
    if stack_id is None:
        return None
    cf = self.context.aws.client('cloudformation', region=util.get_region_from_arn(stack_id))
    try:
        res = cf.describe_stacks(StackName=stack_id)
        for summary in res['Stacks']:
            return summary['StackStatus']
    except ClientError as e:
        # ValidationError: does not exist or can't access it.
        if e.response['Error']['Code'] != 'ValidationError':
            raise e
    return None
def _import_related_resources(self, service_type, service_dependencies, resource_group, context):
    """Import dependent queue/topic/lambda resources.

    Each dependency gets the policy (or permission) that lets the
    importing resource reach it, and is then added to the resource
    group.
    """
    policy_arguments = {
        'sqs': {'policy_type': 'AWS::SQS::QueuePolicy', 'action': 'SQS:SendMessage', 'service_type': 'Queues'},
        'sns': {'policy_type': 'AWS::SNS::TopicPolicy', 'action': 'SNS:Publish', 'service_type': 'Topics'},
        'lambda': {'policy_type': 'AWS::Lambda::Permission', 'action': 'lambda:InvokeFunction', 'service_type': 'Lambda'}}
    arguments = policy_arguments[service_type]
    for dependency in service_dependencies:
        # Create an importer according to the resource type.
        region = util.get_region_from_arn(dependency['arn'])
        importer = self._generate_importer(service_type, region, context)
        # Generate the policy and add it to the template.
        policy = self._generate_policy(dependency['name'], arguments['action'], arguments['policy_type'], arguments['service_type'])
        importer.templates.append({dependency['name'] + 'Permission': policy})
        # Import the related resource to the resource group.
        importer.add_resource(dependency['name'], resource_group, dependency['arn'], False, context)
        context.view.auto_added_resource(dependency['name'])
def get_current_parameters(self, stack_id):
    """Return the stack's current parameters as a {key: value} dict.

    Raises HandledError if the stack description cannot be fetched.
    """
    cf = self.context.aws.client('cloudformation', region=util.get_region_from_arn(stack_id))
    self.context.view.describing_stack(stack_id)
    try:
        res = cf.describe_stacks(StackName=stack_id)
    except ClientError as e:
        raise HandledError('Could not get stack {} description.'.format(stack_id), e)
    parameter_list = res['Stacks'][0]['Parameters']
    return {p['ParameterKey']: p['ParameterValue'] for p in parameter_list}
def get_resource_arn(self, stack_id, logical_resource_id, optional=False):
    """Return the ARN of a resource in the given stack.

    optional -- when True, return None instead of raising when the
        stack or resource cannot be found (ValidationError).

    Bug fix: the except clause referenced an undefined name 'optional',
    so any describe_stack_resource failure raised NameError instead of
    HandledError; 'optional' is now a real (backward-compatible)
    keyword parameter.
    """
    cf = self.context.aws.client('cloudformation', region=util.get_region_from_arn(stack_id))
    try:
        res = cf.describe_stack_resource(StackName=stack_id, LogicalResourceId=logical_resource_id)
    except ClientError as e:
        if optional and e.response['Error']['Code'] == 'ValidationError':
            return None
        raise HandledError('Could not get the id for the {} resource from the {} stack.'.format(logical_resource_id, stack_id), e)
    resource_name = res['StackResourceDetail']['PhysicalResourceId']
    resource_type = res['StackResourceDetail']['ResourceType']
    type_definitions = self.context.resource_types.get_type_definitions_for_stack_id(stack_id)
    return aws_utils.get_resource_arn(type_definitions, stack_id, resource_type, resource_name, True)
def __upload_lambda_code(context, stack_id, uploader, function_name, keep):
    """Upload fresh code for one lambda function in the stack, or for
    all of them when function_name is falsy.

    Raises HandledError when a named function is absent or is not a
    lambda function resource.
    """
    # Create a client for the lambda service in the stack's region.
    resource_region = util.get_region_from_arn(stack_id)
    client = context.aws.client('lambda', region=resource_region)
    # Get the resource descriptions in the stack.
    resources = context.stack.describe_resources(stack_id)
    lambda_descriptions = []
    if function_name:
        description = resources.get(function_name, None)
        if description is not None and description['ResourceType'] == 'AWS::Lambda::Function':
            lambda_descriptions.append(description)
        else:
            raise HandledError('Lambda function {} does not exist.'.format(function_name))
    else:
        # No function name given: find every lambda function description.
        for logical_name, description in resources.iteritems():
            if description['ResourceType'] == 'AWS::Lambda::Function':
                lambda_descriptions.append(description)
    for description in lambda_descriptions:
        # Get settings content.
        settings_path, settings_content = __get_settings_content(context, client, description)
        aggregated_content = {}
        if settings_path:
            aggregated_content[settings_path] = settings_content
        # Zip and send it to s3 in preparation for lambdas. 'logical_id'
        # avoids rebinding the 'function_name' parameter inside the loop.
        logical_id = description['LogicalResourceId']
        key = uploader.upload_lambda_function_code(
            logical_id,
            function_runtime="python2.7",
            aggregated_content=aggregated_content,
            keep=keep)
        # Update the lambda function.
        client.update_function_code(
            FunctionName=description['PhysicalResourceId'],
            S3Bucket=uploader.bucket,
            S3Key=key)
def import_resource(context, args):
    """Import an existing AWS resource into a resource group.

    The resource type comes from --type or, when absent, is parsed from
    the ARN (third ':'-separated field). Raises HandledError for a
    malformed ARN or an invalid resource name.
    """
    # 'resource_type' avoids shadowing the builtin 'type'; 'is not None'
    # replaces the '!= None' comparison (PEP 8).
    if args.type is not None:
        resource_type = args.type
    else:
        try:
            resource_type = args.arn.split(':')[2]
        except IndexError:
            raise HandledError('Invalid ARN {}.'.format(args.arn))
    validation_info = context.config.validate_resource(resource_type, 'name', args.resource_name)
    if validation_info['isValid'] is False:
        raise HandledError('Invalid resource name: {}'.format(validation_info['help']))
    region = util.get_region_from_arn(args.arn)
    resource_importer = importer_generator(resource_type, region, context)
    resource_importer.add_resource(args.resource_name, args.resource_group, args.arn, args.download, context)
def __init__(self, context, stack_id, operation):
    """Monitor CloudFormation events for *operation* on *stack_id*.

    operation is 'CREATE', 'UPDATE' or 'DELETE'; for anything other
    than CREATE the events that already exist are loaded so only new
    ones are reported.
    """
    self.context = context
    self.stack_id = stack_id
    self.stack_name = util.get_stack_name_from_arn(stack_id)
    self.operation = operation
    self.events_seen = {}
    self.success_status = operation + '_COMPLETE'
    # Statuses that terminate monitoring of the main stack.
    self.finished_status = [
        self.success_status,
        operation + '_FAILED',
        operation + '_ROLLBACK_COMPLETE',
        operation + '_ROLLBACK_FAILED',
        context.stack.STATUS_ROLLBACK_COMPLETE,
        context.stack.STATUS_ROLLBACK_FAILED,
    ]
    self.client = self.context.aws.client('cloudformation', region=util.get_region_from_arn(self.stack_id))
    self.client.verbose = False
    # Statuses that mark a nested stack starting an operation.
    self.start_nested_stack_status = [
        context.stack.STATUS_UPDATE_IN_PROGRESS,
        context.stack.STATUS_CREATE_IN_PROGRESS,
        context.stack.STATUS_DELETE_IN_PROGRESS,
    ]
    # Statuses that mark a nested stack finishing an operation.
    self.end_nested_stack_status = [
        context.stack.STATUS_UPDATE_COMPLETE,
        context.stack.STATUS_UPDATE_FAILED,
        context.stack.STATUS_CREATE_COMPLETE,
        context.stack.STATUS_CREATE_FAILED,
        context.stack.STATUS_DELETE_COMPLETE,
        context.stack.STATUS_DELETE_FAILED,
        context.stack.STATUS_ROLLBACK_COMPLETE,
        context.stack.STATUS_ROLLBACK_FAILED,
    ]
    self.monitored_stacks = [stack_id]
    if operation != 'CREATE':
        self.__load_existing_events()
def get_function_log(context, args):
    """Print the events of a lambda function's CloudWatch log stream.

    Uses the most recent stream, or the first of the 50 most recent
    streams whose name contains --log-stream-name when that option is
    given. Raises HandledError when no project stack, log group, or
    matching stream exists.
    """
    # Assume role explicitly because we don't read any project config, and
    # that is what usually triggers it (project config must be read before
    # assuming the role).
    context.config.assume_role()
    project_stack_id = context.config.project_stack_id
    if not project_stack_id:
        project_stack_id = context.config.get_pending_project_stack_id()
    if not project_stack_id:
        raise HandledError('A project stack must be created first.')
    if args.deployment and args.resource_group:
        target_stack_id = context.config.get_resource_group_stack_id(args.deployment, args.resource_group)
    elif args.deployment or args.resource_group:
        raise HandledError('Both the --deployment option and --resource-group must be provided if either is provided.')
    else:
        target_stack_id = project_stack_id
    function_id = context.stack.get_physical_resource_id(target_stack_id, args.function)
    log_group_name = '/aws/lambda/{}'.format(function_id)
    logs = context.aws.client('logs', region=util.get_region_from_arn(target_stack_id))
    # Search more streams when a specific stream name was requested.
    limit = 50 if args.log_stream_name else 1
    log_stream_name = None
    try:
        res = logs.describe_log_streams(logGroupName=log_group_name, orderBy='LastEventTime', descending=True, limit=limit)
    except ClientError as e:
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            raise HandledError('No logs found.')
        raise e
    for log_stream in res['logStreams']:
        # Partial log stream name matches are ok.
        if not args.log_stream_name or args.log_stream_name in log_stream['logStreamName']:
            log_stream_name = log_stream['logStreamName']
            break
    if not log_stream_name:
        if args.log_stream_name:
            raise HandledError('No log stream name with {} found in the first {} log streams.'.format(args.log_stream_name, limit))
        raise HandledError('No log stream was found.')
    res = logs.get_log_events(logGroupName=log_group_name, logStreamName=log_stream_name, startFromHead=True)
    while res['events']:
        for event in res['events']:
            time_stamp = datetime.datetime.fromtimestamp(event['timestamp'] / 1000.0).strftime("%Y-%m-%d %H:%M:%S")
            # Drop the trailing newline from the raw message.
            context.view.log_event(time_stamp, event['message'][:-1])
        nextForwardToken = res.get('nextForwardToken', None)
        if not nextForwardToken:
            break
        res = logs.get_log_events(logGroupName=log_group_name, logStreamName=log_stream_name, startFromHead=True, nextToken=nextForwardToken)
def __get_mappings(context, deployment_name, exclusions, role, args=None):
    """Build the logical-name -> physical-resource mapping exposed for a
    deployment, restricted to resources the given role can access.

    Names listed in *exclusions* are skipped. The 'region' and
    'account_id' pseudo-entries and the Cognito identity pools from the
    access stack are appended at the end.
    """
    mappings = {}
    deployment_stack_id = context.config.get_deployment_stack_id(deployment_name)
    region = util.get_region_from_arn(deployment_stack_id)
    account_id = util.get_account_id_from_arn(deployment_stack_id)
    context.view.retrieving_mappings(deployment_name, deployment_stack_id, role)
    player_accessible_arns = __get_player_accessible_arns(context, deployment_name, role, args)
    resources = context.stack.describe_resources(deployment_stack_id, recursive=True)
    for logical_name, description in resources.iteritems():
        if logical_name in exclusions:
            continue
        physical_resource_id = description.get('PhysicalResourceId')
        if not physical_resource_id:
            continue
        if __is_user_pool_resource(description):
            mappings[logical_name] = {
                'PhysicalResourceId': physical_resource_id,
                'ResourceType': description['ResourceType'],
                'UserPoolClients': description['UserPoolClients']  # include client id / secret
            }
        else:
            resource_arn = util.get_resource_arn(
                description['StackId'], description['ResourceType'],
                physical_resource_id, optional=True, context=context)
            if resource_arn and resource_arn in player_accessible_arns:
                if __is_service_api_resource(description):
                    __add_service_api_mapping(context, logical_name, description, mappings)
                else:
                    mappings[logical_name] = {
                        'PhysicalResourceId': physical_resource_id,
                        'ResourceType': description['ResourceType']
                    }
    k_exchange_token_handler_name = 'PlayerAccessTokenExchange'
    if k_exchange_token_handler_name not in exclusions:
        login_exchange_handler = context.stack.get_physical_resource_id(context.config.project_stack_id, k_exchange_token_handler_name)
        if login_exchange_handler is not None:
            mappings[k_exchange_token_handler_name] = {
                'PhysicalResourceId': login_exchange_handler,
                'ResourceType': 'AWS::Lambda::Function'
            }
    # Now grab the player identity stuff and add it to the mappings.
    access_stack_arn = context.config.get_deployment_access_stack_id(deployment_name, True if args is not None and args.is_gui else False)
    if access_stack_arn is not None:
        access_resources = context.stack.describe_resources(access_stack_arn, recursive=True)
        for logical_name, description in access_resources.iteritems():
            if description['ResourceType'] == 'Custom::CognitoIdentityPool':
                if logical_name in exclusions:
                    continue
                mappings[logical_name] = {
                    'PhysicalResourceId': description['PhysicalResourceId'],
                    'ResourceType': description['ResourceType']
                }
    if 'region' not in exclusions:
        mappings['region'] = {'PhysicalResourceId': region, 'ResourceType': 'Configuration'}
    if 'account_id' not in exclusions:
        mappings['account_id'] = {'PhysicalResourceId': account_id, 'ResourceType': 'Configuration'}
    return mappings
def __get_mappings(context, deployment_name, exclusions, role, args=None):
    """Build the logical-name -> physical-resource mapping for a
    deployment, including the server role ARN for non-player roles.

    Bug fix: the final guard used "role is not 'AuthenticatedPlayer'",
    an identity comparison with a string literal whose result depends
    on interning; it now uses '!=' equality.
    """
    iam = context.aws.client('iam')
    mappings = {}
    deployment_stack_id = context.config.get_deployment_stack_id(deployment_name)
    region = util.get_region_from_arn(deployment_stack_id)
    account_id = util.get_account_id_from_arn(deployment_stack_id)
    # Assemble and add the iam role ARN to the server mappings.
    deployment_access_stack_id = context.config.get_deployment_access_stack_id(deployment_name, True if args is not None and args.is_gui else False)
    server_role_id = context.stack.get_physical_resource_id(deployment_access_stack_id, role, optional=True)
    # NOTE(review): server_role_id may be None (optional=True); iam.get_role
    # would then fail -- confirm the role always exists when this runs.
    server_role_arn = iam.get_role(RoleName=server_role_id).get('Role', {}).get('Arn', '')
    context.view.retrieving_mappings(deployment_name, deployment_stack_id, role)
    player_accessible_arns = __get_player_accessible_arns(context, deployment_name, role, args)
    lambda_client = context.aws.client('lambda', region=region)
    resources = context.stack.describe_resources(deployment_stack_id, recursive=True)
    for logical_name, description in resources.iteritems():
        if logical_name in exclusions:
            continue
        physical_resource_id = custom_resource_utils.get_embedded_physical_id(description.get('PhysicalResourceId'))
        if not physical_resource_id:
            continue
        if __is_user_pool_resource(description):
            mappings[logical_name] = {
                'PhysicalResourceId': physical_resource_id,
                'ResourceType': description['ResourceType'],
                'UserPoolClients': description['UserPoolClients']  # include client id / secret
            }
        else:
            stack_id = description['StackId']
            s3_client = context.aws.client('s3')
            type_definitions = context.resource_types.get_type_definitions_for_stack_id(stack_id, s3_client)
            resource_arn = aws_utils.get_resource_arn(
                type_definitions=type_definitions,
                stack_arn=stack_id,
                resource_type=description['ResourceType'],
                physical_id=physical_resource_id,
                optional=True,
                lambda_client=lambda_client
            )
            if resource_arn and resource_arn in player_accessible_arns:
                if __is_service_api_resource(description):
                    __add_service_api_mapping(context, logical_name, description, mappings)
                else:
                    mappings[logical_name] = {
                        'PhysicalResourceId': physical_resource_id,
                        'ResourceType': description['ResourceType']
                    }
    k_exchange_token_handler_name = 'PlayerAccessTokenExchange'
    if k_exchange_token_handler_name not in exclusions:
        login_exchange_handler = context.stack.get_physical_resource_id(context.config.project_stack_id, k_exchange_token_handler_name)
        if login_exchange_handler is not None:
            mappings[k_exchange_token_handler_name] = {
                'PhysicalResourceId': login_exchange_handler,
                'ResourceType': 'AWS::Lambda::Function'
            }
    # Now let's grab the player identity stuff and make sure we add it to the mappings.
    access_stack_arn = context.config.get_deployment_access_stack_id(deployment_name, True if args is not None and args.is_gui else False)
    if access_stack_arn is not None:
        access_resources = context.stack.describe_resources(access_stack_arn, recursive=True)
        for logical_name, description in access_resources.iteritems():
            if description['ResourceType'] == 'Custom::CognitoIdentityPool':
                if logical_name in exclusions:
                    continue
                mappings[logical_name] = {
                    'PhysicalResourceId': custom_resource_utils.get_embedded_physical_id(description['PhysicalResourceId']),
                    'ResourceType': description['ResourceType']
                }
    if 'region' not in exclusions:
        mappings['region'] = {'PhysicalResourceId': region, 'ResourceType': 'Configuration'}
    if 'account_id' not in exclusions:
        mappings['account_id'] = {'PhysicalResourceId': account_id, 'ResourceType': 'Configuration'}
    # Equality, not identity: 'is not' on a str literal is unreliable.
    if 'server_role_arn' not in exclusions and role != 'AuthenticatedPlayer':
        mappings['server_role_arn'] = {'PhysicalResourceId': server_role_arn, 'ResourceType': 'Configuration'}
    return mappings
def clean_custom_resource_handlers(context, args):
    """Delete unused versions of the project's Custom:: resource handler lambdas.

    Walks the project stack, every deployment stack, and every resource group to
    collect the handler-lambda versions still referenced by live custom resources,
    then deletes every older published version that is no longer referenced.

    Args:
        context: resource manager context (AWS clients, config, stack info, view).
        args: parsed command-line args (unused here, kept for the command signature).

    Raises:
        HandledError: if the project stack has not been created yet.
    """
    if context.config.project_stack_id is None:
        raise HandledError("Project stack does not exist.")

    context.view.deleting_custom_resource_lambdas()
    lambda_client = context.aws.client(
        'lambda', region=util.get_region_from_arn(context.config.project_stack_id))
    project_info = context.stack_info.manager.get_stack_info(context.config.project_stack_id)
    # Maps custom resource type name -> set of lambda version strings still in use.
    resource_types_used_versions = {}
    delete_count = 0

    def add_resource_versions(stack_info):
        # Record every handler-lambda version referenced by the stack's custom resources.
        for resource_info in stack_info.resources:
            if resource_info.type.startswith("Custom::"):
                info = custom_resource_utils.get_custom_resource_info(resource_info.physical_id)
                if info.create_version:
                    resource_types_used_versions.setdefault(
                        resource_info.type, set()).add(info.create_version)
                metadata_version = resource_info.get_cloud_canvas_metadata(
                    custom_resource_utils.METADATA_VERSION_TAG)
                if metadata_version:
                    # BUG FIX: original indexed the dict directly here, raising
                    # KeyError for a resource that has a metadata version but no
                    # create_version (the key was only created in the branch above).
                    resource_types_used_versions.setdefault(
                        resource_info.type, set()).add(metadata_version)

    # Add the resources from the project stack, the deployment stacks, and all the resource groups
    add_resource_versions(project_info)

    for deployment_info in project_info.deployments:
        add_resource_versions(deployment_info)
        for resource_group_info in deployment_info.resource_groups:
            add_resource_versions(resource_group_info)

    # Iterate over the custom resource types
    for resource_type_name, resource_type_info in project_info.resource_definitions.iteritems():
        if resource_type_info.handler_function:
            # Obtain a list of all versions of the function
            lambda_function_name = resource_type_info.get_custom_resource_lambda_function_name()
            versions = []
            for result in aws_utils.paginate(
                    lambda_client.list_versions_by_function,
                    {'FunctionName': lambda_function_name}):
                versions.extend([entry['Version'] for entry in result['Versions']])

            # Walk through all versions older than the current version, and delete
            # them if they are not in use. Lambda returns "$LATEST" first and the
            # published versions in ascending numeric order.
            assert (len(versions) >= 2)
            assert (versions[0] == "$LATEST")
            assert (int(versions[-1]) ==
                    max([int(x) for x in versions[1:]]))  # Last entry should be greatest version
            in_use_versions = resource_types_used_versions.get(resource_type_name, set())

            for version in versions[1:-1]:
                if version not in in_use_versions:
                    context.view.deleting_lambda(lambda_function_name, version)
                    lambda_client.delete_function(
                        FunctionName=lambda_function_name, Qualifier=version)
                    delete_count += 1

    context.view.deleting_lambdas_completed(delete_count)
def create_stack(context, args):
    """Create a deployment stack and its companion access stack.

    Handles partially-completed previous attempts (see the scenario notes below)
    so the command is safe to re-run after a failure.

    Args:
        context: resource manager context (config, stack helpers, view, AWS clients).
        args: parsed command-line args; reads ``deployment``, ``stack_name``,
              ``make_project_default``, ``make_release_deployment``, ``is_gui``.

    Raises:
        HandledError: when the project is uninitialized, the deployment already
        exists, or the chosen CloudFormation stack name is taken.
    """
    # Has the project been initialized?
    if not context.config.project_initialized:
        raise HandledError('The project has not been initialized.')

    # Does a deployment with that name already exist?
    if context.config.deployment_stack_exists(args.deployment):
        raise HandledError('The project already has a {} deployment.'.format(args.deployment))

    # Does deployment-template.json include resource group from a gem which isn't enabled for the project?
    for resource_group_name in context.resource_groups.keys():
        __check_resource_group_gem_status(context, resource_group_name)

    # Is the project settings file writable?
    context.config.validate_writable(context.config.local_project_settings_path)

    # Is the deployment name valid?
    util.validate_stack_name(args.deployment)

    # If there is no project default deployment, make this the project default deployment
    if context.config.project_default_deployment is None:
        args.make_project_default = True

    # If there is no release deployment, make this the release deployment
    if context.config.release_deployment is None:
        args.make_release_deployment = True

    # Need to handle situations where the deployment and/or access stack were
    # not successfully created on previous attempts.
    pending_deployment_stack_id = context.config.get_pending_deployment_stack_id(args.deployment)
    pending_deployment_access_stack_id = context.config.get_pending_deployment_access_stack_id(args.deployment)

    pending_deployment_stack_status = context.stack.get_stack_status(pending_deployment_stack_id)
    pending_deployment_access_stack_status = context.stack.get_stack_status(pending_deployment_access_stack_id)

    # Does a stack with the name already exist? It's ok if a previous attempt
    # at creation left a stack with this name behind, we'll deal with that later.
    deployment_stack_name = args.stack_name or context.config.get_default_deployment_stack_name(args.deployment)
    deployment_region = util.get_region_from_arn(context.config.project_stack_id)
    if pending_deployment_stack_id is None or deployment_stack_name != util.get_stack_name_from_arn(pending_deployment_stack_id):
        if context.stack.name_exists(deployment_stack_name, deployment_region):
            raise HandledError('An AWS Cloud Formation stack with the name {} already exists in region {}. Use the --stack-name option to provide a different name.'.format(deployment_stack_name, deployment_region))

    # Resource group (and other) file write checks
    create_and_validate_writable_list(context)

    # Is it ok to use AWS?
    pending_resource_status = __get_pending_combined_resource_status(context, args.deployment)

    capabilities = context.stack.confirm_stack_operation(
        None,  # stack id
        'deployment {}'.format(args.deployment),
        args,
        pending_resource_status,
        ignore_resource_types=['Custom::EmptyDeployment']
    )

    # We have the following scenarios to deal with:
    #
    # 1) This is the first attempt to create the deployment, or previous attempts didn't
    # get as far as creating any stacks.
    #
    # 2) The previous attempt failed to create or update the deployment stack, which was
    # left in a ROLLBACK_COMPLETED, UPDATE_ROLLBACK_FAILED, or ROLLBACK_FAILED state. This
    # stack must be deleted and a new one created.
    #
    # 3) The previous attempt created the deployment stack but failed to create the access
    # stack, leaving it in the ROLLBACK_COMPLETED state. In this case we update the deployment
    # stack (to make sure it reflects any changes that may have been made), delete the access
    # stack and attempt to create a new one.
    #
    # 4) Both the deployment and access stacks were created successfully, but the pending
    # stack id properties in the config were not replaced with the non-pending properties
    # (this could happen if someone kills the client during the access stack creation
    # process, which then runs to a successful completion). In this case we update both
    # stacks to make sure they reflect any changes, then replace the "pending" stack id
    # properties.

    project_uploader = ProjectUploader(context)
    deployment_uploader = project_uploader.get_deployment_uploader(args.deployment)

    template_url = before_update(context, deployment_uploader)

    deployment_stack_parameters = __get_deployment_stack_parameters(
        context, args.deployment, uploader=deployment_uploader)

    # wait a bit for S3 to help insure that templates can be read by cloud formation
    time.sleep(constant.STACK_UPDATE_DELAY_TIME)

    try:
        if pending_deployment_stack_status not in [None,
                                                   context.stack.STATUS_ROLLBACK_COMPLETE,
                                                   context.stack.STATUS_DELETE_COMPLETE,
                                                   context.stack.STATUS_UPDATE_ROLLBACK_FAILED,
                                                   context.stack.STATUS_ROLLBACK_FAILED]:
            # case 3 or 4 - deployment stack was previously created successfully, update it
            context.stack.update(
                pending_deployment_stack_id,
                template_url,
                deployment_stack_parameters,
                capabilities=capabilities
            )
            deployment_stack_id = pending_deployment_stack_id
        else:
            if pending_deployment_stack_status in [context.stack.STATUS_ROLLBACK_COMPLETE,
                                                   context.stack.STATUS_ROLLBACK_FAILED,
                                                   context.stack.STATUS_UPDATE_ROLLBACK_FAILED]:
                # case 2 - deployment stack failed to create previously, delete it
                context.stack.delete(pending_deployment_stack_id)

            # case 1 and 2 - deployment stack wasn't created previously or was just
            # deleted, attempt to create it
            deployment_stack_id = context.stack.create_using_url(
                deployment_stack_name,
                template_url,
                deployment_region,
                deployment_stack_parameters,
                created_callback=lambda id: context.config.set_pending_deployment_stack_id(args.deployment, id),
                capabilities=capabilities)

        # Now create or update the access stack...
        context.view.processing_template('{} deployment'.format(args.deployment))
        access_template_url = deployment_uploader.upload_content(
            constant.DEPLOYMENT_ACCESS_TEMPLATE_FILENAME,
            json.dumps(context.config.deployment_access_template_aggregator.effective_template,
                       indent=4, sort_keys=True),
            'processed deployment access template')  # BUG FIX: message said "temmplate"

        access_stack_parameters = __get_access_stack_parameters(
            context,
            args.deployment,
            deployment_stack_id=deployment_stack_id,
            uploader=deployment_uploader
        )

        if pending_deployment_access_stack_status not in [None,
                                                          context.stack.STATUS_ROLLBACK_COMPLETE,
                                                          context.stack.STATUS_DELETE_COMPLETE]:
            # case 4 - access stack was previously created successfully but the pending
            # stack id properties were not replaced. Update the stack.
            # BUG FIX: original passed deployment_stack_parameters here; the access
            # template declares its own parameter set, so the access parameters
            # must be used.
            context.stack.update(
                pending_deployment_access_stack_id,
                access_template_url,
                access_stack_parameters,
                capabilities=capabilities
            )
            deployment_access_stack_id = pending_deployment_access_stack_id
        else:
            if pending_deployment_access_stack_status == context.stack.STATUS_ROLLBACK_COMPLETE:
                # case 3 - access stack failed to create previously, delete it
                context.stack.delete(pending_deployment_access_stack_id)

            # case 1 or 3 - access stack wasn't created before, or was just deleted. Attempt
            # to create.
            deployment_access_stack_name = deployment_stack_name + '-Access'
            deployment_access_stack_id = context.stack.create_using_url(
                deployment_access_stack_name,
                access_template_url,
                deployment_region,
                parameters=access_stack_parameters,
                created_callback=lambda id: context.config.set_pending_deployment_access_stack_id(args.deployment, id),
                capabilities=capabilities)

    except:
        # Refresh the GUI even on failure so it reflects the partial state.
        context.config.force_gui_refresh()
        raise

    context.config.force_gui_refresh()

    context.config.finalize_deployment_stack_ids(args.deployment)

    context.view.deployment_stack_created(args.deployment, deployment_stack_id, deployment_access_stack_id)

    # Should the new deployment become the project default deployment or the release deployment?
    if args.make_project_default:
        context.config.set_project_default_deployment(args.deployment)
        mappings.update(context, util.Args())
        context.view.default_deployment(context.config.user_default_deployment,
                                        context.config.project_default_deployment)

    if args.make_release_deployment:
        context.config.set_release_deployment(args.deployment)
        temp_args = util.Args()
        temp_args.release = True
        mappings.update(context, temp_args)
        context.view.release_deployment(context.config.release_deployment)

    after_update(context, deployment_uploader)