def delete_bucket(self, bucket_name, *args, **kwargs):
    """Delete a bucket after clearing its notification config; a missing bucket is a no-op."""
    normalized = s3_listener.normalize_bucket_name(bucket_name)
    try:
        # drop any notification configuration before the bucket itself goes away
        s3_listener.remove_bucket_notification(normalized)
        return delete_bucket_orig(normalized, *args, **kwargs)
    except s3_exceptions.MissingBucket:
        # deleting a non-existent bucket is deliberately swallowed here
        pass
def add_defaults(resource, stack_name: str):
    """Fill in a default, normalized BucketName for an S3 bucket resource if none is set.

    The default is derived from the stack name and the resource's logical ID.
    Uses setdefault so a resource without a "Properties" dict no longer raises
    KeyError on assignment (the read path already tolerated its absence).
    """
    props = resource.setdefault("Properties", {})
    bucket_name = props.get("BucketName")
    if not bucket_name:
        props["BucketName"] = s3_listener.normalize_bucket_name(
            generate_default_name(stack_name, resource["LogicalResourceId"]))
def Bucket_create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
    """Run the original CloudFormation bucket creation, then drop the moto-side bucket."""
    result = create_from_cloudformation_json_orig(resource_name, cloudformation_json, region_name)
    # remove the bucket from the backend, as our template_deployer will take care of creating the resource
    normalized = s3_listener.normalize_bucket_name(resource_name)
    s3_models.s3_backend.buckets.pop(normalized)
    return result
def convert_s3_to_local_url(url):
    """Rewrite a real S3 URL to point at the local test S3 endpoint; pass others through."""
    if not is_real_s3_url(url):
        return url
    parsed_path = urlparse.urlparse(url).path
    # first path segment is the bucket, the remainder is the key
    bucket_name, _, key = parsed_path.lstrip('/').replace('//', '/').partition('/')
    # note: make sure to normalize the bucket name here!
    bucket_name = s3_listener.normalize_bucket_name(bucket_name)
    return '%s/%s/%s' % (config.TEST_S3_URL, bucket_name, key)
def convert_s3_to_local_url(url):
    """Rewrite an S3 URL to point at the local test S3 endpoint."""
    parsed = urlparse.urlparse(url)
    headers = CaseInsensitiveDict({"Host": parsed.netloc})
    bucket_name = s3_utils.extract_bucket_name(headers, parsed.path)
    key_name = s3_utils.extract_key_name(headers, parsed.path)
    # note: make sure to normalize the bucket name here!
    bucket_name = s3_listener.normalize_bucket_name(bucket_name)
    return "%s/%s/%s" % (config.TEST_S3_URL, bucket_name, key_name)
def convert_s3_to_local_url(url):
    """Rewrite an S3 URL to point at the locally served S3 endpoint."""
    parsed = urlparse(url)
    headers = CaseInsensitiveDict({"Host": parsed.netloc})
    bucket_name = s3_utils.extract_bucket_name(headers, parsed.path)
    key_name = s3_utils.extract_key_name(headers, parsed.path)
    # note: make sure to normalize the bucket name here!
    bucket_name = s3_listener.normalize_bucket_name(bucket_name)
    return f"{config.service_url('s3')}/{bucket_name}/{key_name}"
def get_resource_name(resource):
    """Return the name of a resource, preferring an explicit 'Name' property.

    Falls back to type-specific name attributes; logs a warning (and returns
    None) for resource types it does not know how to name.
    """
    res_type = get_resource_type(resource)
    properties = resource.get('Properties') or {}
    name = properties.get('Name')
    if name:
        return name
    # try to extract name from attributes
    if res_type == 'S3::Bucket':
        # bucket names get normalized just like everywhere else in this file
        return s3_listener.normalize_bucket_name(properties.get('BucketName'))
    # simple one-property lookups, keyed by resource type
    name_attributes = {
        'SQS::Queue': 'QueueName',
        'Cognito::UserPool': 'PoolName',
        'StepFunctions::StateMachine': 'StateMachineName',
        'IAM::Role': 'RoleName',
    }
    if res_type in name_attributes:
        return properties.get(name_attributes[res_type])
    LOG.warning('Unable to extract name for resource type "%s"' % res_type)
    return name
def delete_bucket(self, bucket_name, *args, **kwargs):
    """Remove the bucket's notification configuration, then delegate the deletion."""
    normalized = s3_listener.normalize_bucket_name(bucket_name)
    s3_listener.remove_bucket_notification(normalized)
    return delete_bucket_orig(normalized, *args, **kwargs)
def get_bucket(self, bucket_name, *args, **kwargs):
    """Look up a bucket, treating the local marker bucket as non-existent."""
    normalized = s3_listener.normalize_bucket_name(bucket_name)
    if normalized == config.BUCKET_MARKER_LOCAL:
        # the marker bucket is an internal placeholder, never a real bucket
        return None
    return get_bucket_orig(normalized, *args, **kwargs)
def create_bucket(self, bucket_name, region_name, *args, **kwargs):
    """Create a bucket under its normalized name via the original implementation."""
    normalized = s3_listener.normalize_bucket_name(bucket_name)
    return create_bucket_orig(normalized, region_name, *args, **kwargs)
def delete_bucket(self, bucket_name, *args, **kwargs):
    """Delete a bucket under its normalized name via the original implementation."""
    normalized = s3_listener.normalize_bucket_name(bucket_name)
    return delete_bucket_orig(normalized, *args, **kwargs)
def retrieve_resource_details(resource_id, resource_status, resources, stack_name):
    """Fetch the current deployed state of a stack resource from the backing service.

    Dispatches on the CloudFormation resource type and queries the matching
    AWS-compatible service. Returns the service's describe/get payload when the
    resource exists, or None when it does not (or the type is unsupported).
    Exceptions are routed through check_not_found_exception, so a "not found"
    condition also falls through to the final `return None`.
    """
    resource = resources.get(resource_id)
    # prefer the physical ID once the resource has been deployed
    resource_id = resource_status.get('PhysicalResourceId') or resource_id
    if not resource:
        resource = {}
    resource_type = get_resource_type(resource)
    resource_props = resource.get('Properties')
    try:
        if resource_type == 'Lambda::Function':
            # generate a default function name if the template did not set one
            resource_props['FunctionName'] = (resource_props.get('FunctionName') or
                '{}-lambda-{}'.format(stack_name[:45], common.short_uid()))
            resource_id = resource_props['FunctionName'] if resource else resource_id
            return aws_stack.connect_to_service('lambda').get_function(FunctionName=resource_id)
        elif resource_type == 'Lambda::Version':
            name = resource_props.get('FunctionName')
            if not name:
                return None
            func_name = aws_stack.lambda_function_name(name)
            # qualified ARNs carry the version as the 8th colon-separated field
            func_version = name.split(':')[7] if len(name.split(':')) > 7 else '$LATEST'
            versions = aws_stack.connect_to_service('lambda').list_versions_by_function(
                FunctionName=func_name)
            return ([v for v in versions['Versions'] if v['Version'] == func_version] or [None])[0]
        elif resource_type == 'Lambda::EventSourceMapping':
            resource_id = resource_props['FunctionName'] if resource else resource_id
            source_arn = resource_props.get('EventSourceArn')
            resource_id = resolve_refs_recursively(stack_name, resource_id, resources)
            source_arn = resolve_refs_recursively(stack_name, source_arn, resources)
            if not resource_id or not source_arn:
                raise Exception('ResourceNotFound')
            mappings = aws_stack.connect_to_service('lambda').list_event_source_mappings(
                FunctionName=resource_id, EventSourceArn=source_arn)
            # match on both source ARN and function ARN to pin down the exact mapping
            mapping = list(filter(lambda m: m['EventSourceArn'] == source_arn and
                m['FunctionArn'] == aws_stack.lambda_function_arn(resource_id),
                mappings['EventSourceMappings']))
            if not mapping:
                raise Exception('ResourceNotFound')
            return mapping[0]
        elif resource_type == 'IAM::Role':
            role_name = resource_props.get('RoleName') or resource_id
            role_name = resolve_refs_recursively(stack_name, role_name, resources)
            return aws_stack.connect_to_service('iam').get_role(RoleName=role_name)['Role']
        elif resource_type == 'DynamoDB::Table':
            table_name = resource_props.get('TableName') or resource_id
            table_name = resolve_refs_recursively(stack_name, table_name, resources)
            return aws_stack.connect_to_service('dynamodb').describe_table(TableName=table_name)
        elif resource_type == 'ApiGateway::RestApi':
            # REST APIs can only be looked up by listing and filtering on name
            apis = aws_stack.connect_to_service('apigateway').get_rest_apis()['items']
            api_name = resource_props['Name'] if resource else resource_id
            api_name = resolve_refs_recursively(stack_name, api_name, resources)
            result = list(filter(lambda api: api['name'] == api_name, apis))
            return result[0] if result else None
        elif resource_type == 'ApiGateway::Resource':
            api_id = resource_props['RestApiId'] if resource else resource_id
            api_id = resolve_refs_recursively(stack_name, api_id, resources)
            parent_id = resolve_refs_recursively(stack_name, resource_props['ParentId'], resources)
            if not api_id or not parent_id:
                return None
            api_resources = aws_stack.connect_to_service('apigateway').get_resources(
                restApiId=api_id)['items']
            # locate the resource by its parent + path part, then re-check via full path
            target_resource = list(filter(lambda res: res.get('parentId') == parent_id and
                res['pathPart'] == resource_props['PathPart'], api_resources))
            if not target_resource:
                return None
            path = aws_stack.get_apigateway_path_for_resource(api_id,
                target_resource[0]['id'], resources=api_resources)
            result = list(filter(lambda res: res['path'] == path, api_resources))
            return result[0] if result else None
        elif resource_type == 'ApiGateway::Deployment':
            api_id = resource_props['RestApiId'] if resource else resource_id
            api_id = resolve_refs_recursively(stack_name, api_id, resources)
            if not api_id:
                return None
            result = aws_stack.connect_to_service('apigateway').get_deployments(
                restApiId=api_id)['items']
            # TODO possibly filter results by stage name or other criteria
            return result[0] if result else None
        elif resource_type == 'ApiGateway::Method':
            api_id = resolve_refs_recursively(stack_name, resource_props['RestApiId'], resources)
            res_id = resolve_refs_recursively(stack_name, resource_props['ResourceId'], resources)
            if not api_id or not res_id:
                return None
            res_obj = aws_stack.connect_to_service('apigateway').get_resource(
                restApiId=api_id, resourceId=res_id)
            # HttpMethod may match either the mapping key or the method's httpMethod field
            match = [v for (k, v) in res_obj['resourceMethods'].items()
                if resource_props['HttpMethod'] in (v.get('httpMethod'), k)]
            int_props = resource_props.get('Integration')
            if int_props:
                # if an integration is declared, require its type/method to match as well
                match = [m for m in match if
                    m.get('methodIntegration', {}).get('type') == int_props.get('Type') and
                    m.get('methodIntegration', {}).get('httpMethod') ==
                    int_props.get('IntegrationHttpMethod')]
            # note: returns True (not the method dict) when a match exists
            return any(match) or None
        elif resource_type == 'ApiGateway::GatewayResponse':
            api_id = resolve_refs_recursively(stack_name, resource_props['RestApiId'], resources)
            client = aws_stack.connect_to_service('apigateway')
            result = client.get_gateway_response(restApiId=api_id,
                responseType=resource_props['ResponseType'])
            return result if 'responseType' in result else None
        elif resource_type == 'SQS::Queue':
            sqs_client = aws_stack.connect_to_service('sqs')
            queues = sqs_client.list_queues()
            result = list(filter(lambda item:
                # TODO possibly find a better way to compare resource_id with queue URLs
                item.endswith('/%s' % resource_id), queues.get('QueueUrls', [])))
            if not result:
                return None
            result = sqs_client.get_queue_attributes(QueueUrl=result[0],
                AttributeNames=['All'])['Attributes']
            # expose the ARN under the generic 'Arn' key expected by callers
            result['Arn'] = result['QueueArn']
            return result
        elif resource_type == 'SNS::Topic':
            topics = aws_stack.connect_to_service('sns').list_topics()
            result = list(filter(lambda item: item['TopicArn'] == resource_id,
                topics.get('Topics', [])))
            return result[0] if result else None
        elif resource_type == 'S3::Bucket':
            bucket_name = resource_props.get('BucketName') or resource_id
            bucket_name = resolve_refs_recursively(stack_name, bucket_name, resources)
            bucket_name = s3_listener.normalize_bucket_name(bucket_name)
            s3_client = aws_stack.connect_to_service('s3')
            response = s3_client.get_bucket_location(Bucket=bucket_name)
            notifs = resource_props.get('NotificationConfiguration')
            if not response or not notifs:
                return response
            # the bucket only counts as deployed once its notifications exist too
            configs = s3_client.get_bucket_notification_configuration(Bucket=bucket_name)
            has_notifs = (configs.get('TopicConfigurations') or
                configs.get('QueueConfigurations') or
                configs.get('LambdaFunctionConfigurations'))
            if notifs and not has_notifs:
                return None
            return response
        elif resource_type == 'S3::BucketPolicy':
            bucket_name = resource_props.get('Bucket') or resource_id
            bucket_name = resolve_refs_recursively(stack_name, bucket_name, resources)
            return aws_stack.connect_to_service('s3').get_bucket_policy(Bucket=bucket_name)
        elif resource_type == 'Logs::LogGroup':
            # TODO implement
            raise Exception('ResourceNotFound')
        elif resource_type == 'Kinesis::Stream':
            stream_name = resolve_refs_recursively(stack_name, resource_props['Name'], resources)
            result = aws_stack.connect_to_service('kinesis').describe_stream(
                StreamName=stream_name)
            return result
        elif resource_type == 'StepFunctions::StateMachine':
            sm_name = resource_props.get('StateMachineName') or resource_id
            sm_name = resolve_refs_recursively(stack_name, sm_name, resources)
            sfn_client = aws_stack.connect_to_service('stepfunctions')
            # list + filter by name, then describe by ARN
            state_machines = sfn_client.list_state_machines()['stateMachines']
            sm_arn = [m['stateMachineArn'] for m in state_machines if m['name'] == sm_name]
            if not sm_arn:
                return None
            result = sfn_client.describe_state_machine(stateMachineArn=sm_arn[0])
            return result
        elif resource_type == 'StepFunctions::Activity':
            act_name = resource_props.get('Name') or resource_id
            act_name = resolve_refs_recursively(stack_name, act_name, resources)
            sfn_client = aws_stack.connect_to_service('stepfunctions')
            activities = sfn_client.list_activities()['activities']
            result = [a['activityArn'] for a in activities if a['name'] == act_name]
            if not result:
                return None
            return result[0]
        # unknown type: only worth warning about if it is a deployable resource
        if is_deployable_resource(resource):
            LOG.warning('Unexpected resource type %s when resolving references of resource %s: %s' %
                (resource_type, resource_id, resource))
    except Exception as e:
        # translates "not found" errors into a None result; re-raises real failures
        check_not_found_exception(e, resource_type, resource, resource_status)
    return None