def test_get_cloudformation_template_vpc_config(security_group_ids, subnet_ids):
    """Check that VpcConfig appears on the function resource only when
    security group ids or subnet ids are actually configured.

    The sentinel string "fake_none" means "store an explicit None in the
    config"; a Python None means "leave the key untouched".
    """
    config = Config()

    def _store(setting, value):
        # Translate the test parameterization into config state.
        if value == "fake_none":
            config["aws"][setting] = None
        elif value is not None:
            config["aws"][setting] = value

    _store("security_group_ids", security_group_ids)
    _store("subnet_ids", subnet_ids)

    cloudformation_template = _get_cloudformation_template_with_test_setup(
        config=config, task_kwargs=dict())
    function_resource = cloudformation_template.resources[
        "TestsUnitTestDeployerSayHello"]

    # Any of these inputs results in an effectively-empty setting.
    absent = (None, "fake_none", [])
    if security_group_ids in absent and subnet_ids in absent:
        # Neither setting present -> no VpcConfig property at all.
        assert "VpcConfig" not in function_resource.to_dict()["Properties"]
    else:
        expected_vpc_config = awslambda.VPCConfig()
        expected_vpc_config.SecurityGroupIds = (
            list() if security_group_ids in (None, "fake_none")
            else security_group_ids)
        expected_vpc_config.SubnetIds = (
            list() if subnet_ids in (None, "fake_none")
            else subnet_ids)
        assert function_resource.VpcConfig.to_dict(
        ) == expected_vpc_config.to_dict()
def setup(self, key, name):
    """Populate this Lambda Function resource from the *key* config mapping.

    Sets Code (inline fallback if no Code key), FunctionName, Handler,
    Role, optional VpcConfig, and (unless AtEdge is configured) the
    Environment variables Env and EnvRole plus any configured Variables.
    """
    import_name = key.get('ImportName', name)
    if 'Code' not in key:
        self.Code = lbd.Code()
        try:
            self.Code.ZipFile = Join('', import_lambda(import_name))
        except Exception:
            # Best-effort fallback: ship a stub lambda that prints setup
            # instructions instead of failing template generation.
            # (Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed.)
            # NOTE(review): 'lib/lambas/' looks like a typo for 'lib/lambdas/'
            # — confirm against import_lambda before changing the message.
            self.Code.ZipFile = (
                'print("Use Code parameter in yaml '
                f'or create file lib/lambas/{import_name}.code '
                'with lambda code to execute.")')
    auto_get_props(self, key, recurse=True)
    self.FunctionName = Sub('${AWS::StackName}-${EnvRole}-%s' % name)
    if 'Handler' not in key:
        self.Handler = 'index.lambda_handler'
    self.Role = GetAtt(f'RoleLambda{name}', 'Arn')
    # VpcConfig only when both halves of the VPC settings are present.
    if all(k in key for k in ['SecurityGroupIds', 'SubnetIds']):
        self.VpcConfig = lbd.VPCConfig('')
        auto_get_props(self.VpcConfig, key, mapname=self.title)
    # Variables - skip if AtEdge (Lambda@Edge does not support env vars) -
    # always set Env, EnvRole. Membership test replaces the previous
    # try/except KeyError probe of key['AtEdge'].
    if 'AtEdge' not in key:
        self.Environment = lbd.Environment(Variables={
            'Env': Ref('EnvShort'),
            'EnvRole': Ref('EnvRole'),
        })
        if 'Variables' in key:
            self.Environment.Variables.update({
                varname: get_endvalue(f'{self.title}Variables{varname}')
                for varname in key['Variables']
            })
def vpc_config(self):
    """Return an awslambda.VPCConfig built from the "VpcConfig" variable.

    Returns NoValue when the variable is empty/unset. A comma-separated
    SubnetIds string is accepted and split into a list.
    """
    settings = self.get_variables()["VpcConfig"]
    if not settings:
        return NoValue
    subnet_ids = settings['SubnetIds']
    if isinstance(subnet_ids, str):
        # Allow "subnet-a,subnet-b" shorthand as well as a real list.
        settings['SubnetIds'] = subnet_ids.split(',')
    return awslambda.VPCConfig(**settings)
def _create_lambda_function(self, code_property, task_function, role, runtime):
    # type: (awslambda.Code, TaskFunction, iam.Role, str) -> awslambda.Function
    """Build the troposphere Lambda Function resource for *task_function*.

    Required properties (Code, Handler, Role, Runtime, Environment, Tags)
    are always set; KmsKeyArn, VpcConfig, MemorySize, Timeout and
    TracingConfig are added only when the app/task configuration provides
    them, so the rendered template omits them otherwise.

    Note: the return type comment previously said None, but the method
    clearly returns the constructed Function.
    """
    # TODO add support for versioning
    function_handler = self._get_function_handler_string(
        task_function.func)
    # Logical resource id is derived from the handler string.
    title = self._get_function_logical_id(function_handler)
    function_kwargs = {
        "Code": code_property,
        "Handler": function_handler,
        "Role": GetAtt(role, "Arn"),
        "Runtime": runtime,
        "Environment": awslambda.Environment(
            Variables=task_function.environment_variables),
        "Tags": troposphere.Tags(task_function.tags),
    }
    # Only attach a KMS key when one is configured and non-empty.
    if self._app.kms_key_arn is not None and len(
            self._app.kms_key_arn) > 0:
        function_kwargs["KmsKeyArn"] = self._app.kms_key_arn
    # VpcConfig is emitted if either subnets or security groups are set.
    if len(self._app.subnet_ids) > 0 or len(
            self._app.security_group_ids) > 0:
        function_kwargs["VpcConfig"] = awslambda.VPCConfig(
            SubnetIds=self._app.subnet_ids,
            SecurityGroupIds=self._app.security_group_ids)
    if task_function.memory is not None:
        function_kwargs["MemorySize"] = task_function.memory
    if task_function.timeout is not None:
        function_kwargs["Timeout"] = task_function.timeout
    if task_function.activate_tracing:
        # X-Ray active tracing.
        function_kwargs["TracingConfig"] = awslambda.TracingConfig(
            Mode="Active")
    # TODO specify the function name? Maybe we don't care?
    return awslambda.Function(title, **function_kwargs)
def register_resources_template(self, template):
    """Register the lambda Function into the troposphere template.

    If this function requires a custom Role, register it too. Also
    registers the s3version parameter, a LambdaVersion custom resource
    and a "current" Alias, plus an optional CLI output.
    """
    role = self.get_role()
    depends_on = []
    if isinstance(role, iam.Role):
        # Custom role: add it to the template and depend on it, then
        # reference its Arn instead of the role object itself.
        template.add_resource(role)
        depends_on.append(role.name)
        role = troposphere.GetAtt(role, 'Arn')

    # Parameter carrying the S3 object version of the uploaded code zip.
    template.add_parameter(
        troposphere.Parameter(
            utils.valid_cloudformation_name(self.name, "s3version"),
            Type="String",
        ))

    extra = {}
    if self.settings.get('vpc'):
        vpc = self.project.get_resource('vpc::{}'.format(
            self.settings.get('vpc')))
        # Mutates the referenced parameters' CloudFormation type so the
        # ids are accepted as typed lists.
        if isinstance(vpc.settings['security-groups'], troposphere.Ref):
            vpc.settings[
                'security-groups']._type = 'List<AWS::EC2::SecurityGroup::Id>'
        if isinstance(vpc.settings['subnet-ids'], troposphere.Ref):
            vpc.settings['subnet-ids']._type = 'List<AWS::EC2::Subnet::Id>'
        extra['VpcConfig'] = awslambda.VPCConfig(
            SecurityGroupIds=vpc.settings['security-groups'],
            SubnetIds=vpc.settings['subnet-ids'])

    function = template.add_resource(
        awslambda.Function(self.in_project_cf_name,
                           DependsOn=depends_on,
                           Code=awslambda.Code(
                               S3Bucket=troposphere.Ref("CodeBucket"),
                               S3Key=self.get_bucket_key(),
                               S3ObjectVersion=troposphere.Ref(
                                   utils.valid_cloudformation_name(
                                       self.name, "s3version")),
                           ),
                           Description=self.settings.get(
                               'description', ''),
                           Handler=self.get_handler(),
                           MemorySize=self.get_memory(),
                           Role=role,
                           Runtime=self.get_runtime(),
                           Timeout=self.get_timeout(),
                           **extra))

    # The versioning helper lambda: contrib lambdas use the base
    # reference; project lambdas use the ":current" alias Ref instead
    # of the GetAtt Arn.
    lambda_version = 'lambda:contrib_lambdas:version'
    lambda_ref = troposphere.GetAtt(self.project.reference(lambda_version),
                                    'Arn')
    if not self.in_project_name.startswith('lambda:contrib_lambdas:'):
        lambda_version = '{}:current'.format(lambda_version)
        lambda_ref = troposphere.Ref(
            self.project.reference(lambda_version))

    version = template.add_resource(
        LambdaVersion.create_with(
            utils.valid_cloudformation_name(self.name, "Version"),
            DependsOn=[
                self.project.reference(lambda_version), function.name
            ],
            lambda_arn=lambda_ref,
            FunctionName=troposphere.Ref(function),
            S3ObjectVersion=troposphere.Ref(
                utils.valid_cloudformation_name(self.name, "s3version")),
        ))

    # "current" alias always tracks the newly published version.
    alias = template.add_resource(
        awslambda.Alias(
            self.current_alias_cf_name,
            DependsOn=[version.name],
            FunctionName=troposphere.Ref(function),
            FunctionVersion=troposphere.GetAtt(version, "Version"),
            Name="current",
        ))

    if self._get_true_false('cli-output', 't'):
        template.add_output([
            troposphere.Output(
                utils.valid_cloudformation_name("Clioutput",
                                                self.in_project_name),
                Value=troposphere.Ref(alias),
            )
        ])
def vpc_config(self):
    """Return an awslambda.VPCConfig from the "VpcConfig" variable.

    Yields NoValue when the variable is empty or unset so the property
    is omitted from the rendered template.
    """
    settings = self.get_variables()["VpcConfig"]
    if not settings:
        return NoValue
    return awslambda.VPCConfig(**settings)
def build_template(self):
    """Build and return the troposphere template for this Lambda stack.

    Creates the Function (with optional S3 object version, VpcConfig,
    aliases) plus EventSourceMappings for DynamoDB streams and SQS
    queues, applies permissions, and adds FunctionName/FunctionArn
    outputs.

    Fix: the permission-building loop referenced an undefined name
    ``funv`` (NameError at runtime); it now passes ``func``.
    """
    deploy_alias = False
    t = self._init_template()
    self.jinja_env_vars()
    role_param = t.add_parameter(
        Parameter(self.role.output_role_arn(), Type='String'))
    bucket_ref = t.add_parameter(
        Parameter(self.s3_bucket.output_bucket_name(), Type='String'))

    if self._deploying:
        if not self.uploaded and self._bucket_name():
            # First deploy of this code: resolve versions and publish aliases.
            deploy_alias = True
            self._determine_code_versions()
            logger.info("S3 Key: {}".format(self.zip_name))

    func = t.add_resource(
        awslambda.Function(
            '{}Function'.format(self.get_stack_name()),
            FunctionName=self.get_stack_name(),
            Handler=self.handler,
            MemorySize=self.memory,
            Timeout=self.timeout,
            Runtime=self.runtime,
            Role=Ref(role_param),
            Environment=awslambda.Environment(Variables=self.vars),
            Code=awslambda.Code(S3Bucket=Ref(bucket_ref),
                                S3Key=self.zip_name)))
    if self.s3_version:
        func.Code.S3ObjectVersion = self.s3_version

    # vpc mode
    if self.vpc_stack is not None:
        if self.public_subnet:
            subnets = self.vpc_stack.output_public_subnets()
        else:
            subnets = self.vpc_stack.output_private_subnets()
        subnet_refs = [
            Ref(utils.ensure_param(t, val, 'String')) for val in subnets
        ]
        func.VpcConfig = awslambda.VPCConfig(SubnetIds=subnet_refs,
                                             SecurityGroupIds=[])
        for sg in self.security_groups:
            sg_ref = Ref(
                utils.ensure_param(t, sg.output_security_group(), 'String'))
            func.VpcConfig.SecurityGroupIds.append(sg_ref)

    if deploy_alias:
        for v in self.aliases:
            t.add_resource(
                awslambda.Alias('{}Alias'.format(v['name']),
                                FunctionName=Ref(func),
                                Name=v['name'],
                                FunctionVersion=v['version']))

    if self.event_sources:
        for s in self.event_sources:
            src = s['src']
            args = s['args']
            if isinstance(src, dynamodb.DynamoTable):
                p = t.add_parameter(
                    Parameter(src.output_stream(), Type='String'))
                t.add_resource(
                    awslambda.EventSourceMapping(
                        'LambdaDynamo{}'.format(src.name),
                        FunctionName=Ref(func),
                        EventSourceArn=Ref(p),
                        StartingPosition='LATEST'))
            if isinstance(src, sqs.Queue):
                p = t.add_parameter(
                    Parameter(src.output_queue_arn(), Type='String'))
                t.add_resource(
                    awslambda.EventSourceMapping(
                        'LambdaSQS{}'.format(src.name),
                        FunctionName=Ref(func),
                        EventSourceArn=Ref(p),
                        BatchSize=args.get('BatchSize', 1)))

    # Was: v.build(t, funv, k) -> NameError; the function resource is `func`.
    for k, v in enumerate(self.perms):
        v.build(t, func, k)

    t.add_output([
        Output('FunctionName', Value=Ref(func)),
        Output('FunctionArn', Value=GetAtt(func, "Arn"))
    ])
    return t
# Stack parameters for database credentials and runtime configuration.
t.add_parameter(Parameter('MySQLDbName', Type='String'))
t.add_parameter(Parameter('MySQLUser', Type='String'))
t.add_parameter(Parameter('MySQLPass', Type='String'))
t.add_parameter(Parameter('NodeEnv', Type='String'))
t.add_parameter(Parameter('MockExchangeService', Type='String'))

# Lambda Variables
lambdaSrcPath = '../.'
lambdaHandlerPath = 'src/lambda/'
nodeRuntime = 'nodejs8.10'

# Shared VPC configuration for the Lambdas: security groups and subnet
# are imported from the core stack's exports.
lambdaVpcConfig = awslambda.VPCConfig(
    None,
    SecurityGroupIds=[
        ImportValue(Sub('${CoreStack}-RDS-Access-SG-ID')),
        ImportValue(Sub('${CoreStack}-Redis-Access-SG-ID')),
    ],
    SubnetIds=[ImportValue(Sub('${CoreStack}-SubnetID'))],
)

importRedisAddress = ImportValue(Sub('${CoreStack}-Redis-Address'))
importRedisPort = ImportValue(Sub('${CoreStack}-Redis-Port'))

# Environment variables shared by the Lambda functions; values are a mix
# of core-stack imports and this stack's parameters.
lambdaEnvVars = {
    'DATABASE_PORT': ImportValue(Sub('${CoreStack}-MySQL-Port')),
    'DATABASE_HOST': ImportValue(Sub('${CoreStack}-MySQL-Address')),
    'DATABASE_NAME': Ref('MySQLDbName'),
    'DATABASE_USER': Ref('MySQLUser'),
    'DATABASE_PASSWORD': Ref('MySQLPass'),
    'DATABASE_POOL_MIN': 1,
    'DATABASE_POOL_MAX': 2,
    # REDIS_URL continues past this chunk (expression is truncated here).
    'REDIS_URL': Join('',
)
# Lambda function definition: code comes from the S3 bucket/key
# parameters, it runs inside the VPC (subnet_ids + dedicated security
# group) and receives its DB/SNS/DynamoDB wiring via environment
# variables. Not yet added to a template in this chunk.
lambda_handler = awslambda.Function(
    "LambdaHandler",
    Description="test lambda with access to RDS, SNS, DynamoDB. Will be triggered by SNS",
    Handler="lambda.handler",
    MemorySize=128,
    Code=awslambda.Code(
        S3Bucket=Ref(source_code_s3_bucket),
        S3Key=Ref(source_code_s3_bucket_key),
    ),
    Role=GetAtt(lambda_handler_role, "Arn"),
    Runtime="python2.7",
    Timeout=3,
    VpcConfig=awslambda.VPCConfig(
        SubnetIds=Ref(subnet_ids),
        SecurityGroupIds=[GetAtt(lambda_security_group, "GroupId")],
    ),
    Environment=awslambda.Environment(
        Variables={
            # RDS endpoint details are resolved from the instance resource.
            "DB_HOST": GetAtt(rds_instance, "Endpoint.Address"),
            "DB_PORT": GetAtt(rds_instance, "Endpoint.Port"),
            "DB_NAME": Ref(rds_db_name),
            "DB_USER": Ref(rds_master_username),
            "DB_PASSWORD": Ref(rds_master_password),
            "SNS_TOPIC_ARN": emails_sns_topic,
            "DYNAMO_DB_TABLE_NAME": Ref(dynamodb_table),
        }
    ),
)
# Tail of a Lambda Function definition (call opened before this chunk):
# the init-db lambda runs in the DB subnets with the host security group
# and gets the Ghost DB endpoint via environment variables.
        S3Key="init-db-lambda.zip"),
    Handler="init-db-lambda.handler",
    Role=GetAtt("LambdaExecutionRole", "Arn"),
    Runtime="python2.7",
    MemorySize="128",
    Timeout="180",
    Environment=awslambda.Environment(
        Variables={
            'dbuser': '******',
            'awsregion': Ref("AWS::Region"),
            'dbname': 'ghost',
            'dbhost': GetAtt(ghost_db, 'Endpoint.Address'),
            'dbid': Ref(ghost_db)
        }),
    VpcConfig=awslambda.VPCConfig(
        SecurityGroupIds=[Ref(ghost_host_security_group)],
        SubnetIds=[Ref(db_subnet), Ref(db_subnet2)]),
    DependsOn=LambdaExecutionPolicy))

# Add the application ELB
GhostALB = t.add_resource(
    elasticloadbalancingv2.LoadBalancer(
        "GhostALB",
        Scheme="internet-facing",
        Subnets=[Ref(alb_subnet), Ref(alb_subnet2)],
        SecurityGroups=[Ref(alb_security_group)]))

# Target group for the ALB; definition continues past this chunk.
GhostTargetGroup = t.add_resource(
    elasticloadbalancingv2.TargetGroup(
        "GhostTargetGroup",
        HealthCheckIntervalSeconds="30",