def __init__(self, scope: core.Construct, id: str, downstream: _lambda.Function, **kwargs):
    """Hit-counter construct: a DynamoDB table plus a counting Lambda proxy.

    The handler increments a per-path counter in the table and then invokes
    ``downstream``.

    Args:
        scope: Parent construct.
        id: Construct identifier.
        downstream: Function the hit-counter handler forwards requests to.
        **kwargs: Forwarded to the base construct.
    """
    super().__init__(scope, id, **kwargs)

    # One item per distinct request path, keyed by 'path'.
    self._table = ddb.Table(
        self, 'Hits',
        partition_key={'name': 'path', 'type': ddb.AttributeType.STRING},
    )

    self._handler = _lambda.Function(
        self, 'HitCounterHandler',
        runtime=_lambda.Runtime.PYTHON_3_7,
        handler='hitcount.handler',
        code=_lambda.Code.asset('lambda'),
        environment={
            'DOWNSTREAM_FUNCTION_NAME': downstream.function_name,
            'HITS_TABLE_NAME': self._table.table_name,
        },
    )

    # BUG FIX: the original granted via ``self.handler``, which is only
    # defined if the class exposes a ``handler`` property elsewhere; grant
    # via the attribute assigned above so this works unconditionally.
    self._table.grant_read_write_data(self._handler)
    downstream.grant_invoke(self._handler)
def __init__(self, scope: cdk.Construct, construct_id: str, stage: str, explain_bot_lambda: _lambda.Function, add_meaning_lambda: _lambda.Function, **kwargs) -> None:
    """Provision the denied-acronyms DynamoDB table and wire it to both bot lambdas."""
    super().__init__(scope, construct_id, **kwargs)

    # Table of acronyms the bot must not explain; soft-deleted rows are kept
    # distinguishable through the Deleted_at sort key.
    self.table = _dynamo.Table(
        self,
        id="explainDeniedAcronymTable",
        table_name="explaindeniedacronymstable" + stage.lower(),
        partition_key=_dynamo.Attribute(name="Acronym", type=_dynamo.AttributeType.STRING),
        sort_key=_dynamo.Attribute(name="Deleted_at", type=_dynamo.AttributeType.STRING),
        removal_policy=cdk.RemovalPolicy.DESTROY,
    )

    # Both consumers learn the table name via the environment and get full
    # read/write access to it.
    for consumer in (explain_bot_lambda, add_meaning_lambda):
        consumer.add_environment("TABLE_DENIED_NAME", self.table.table_name)
        self.table.grant_full_access(consumer)
def add_policy(self, lambda_function: _lambda.Function) -> None:
    """Attach the database-access permissions the lambda function needs.

    Grants read access to the DB credentials secret and use of the RDS
    Data API against the cluster. If your functions need more permissions,
    extend the statements below.

    Args:
        lambda_function: The lambda function.

    Returns:
        None.
    """
    # Allow fetching the database credentials from Secrets Manager.
    lambda_function.add_to_role_policy(_iam.PolicyStatement(
        resources=[self.db.secret.secret_arn],
        actions=["secretsmanager:GetSecretValue"],
    ))

    # Allow executing SQL through the RDS Data API on this cluster.
    cluster_arn = f"arn:aws:rds:{self.region}:{self.account}:cluster:{self.db.db.ref}"
    lambda_function.add_to_role_policy(_iam.PolicyStatement(
        resources=[cluster_arn],
        actions=[
            "rds-data:ExecuteStatement",
            "rds-data:BatchExecuteStatement",
            "rds-data:BeginTransaction",
            "rds-data:CommitTransaction",
            "rds-data:ExecuteSql",
            "rds-data:RollbackTransaction",
            "rds:DescribeDBClusters",
        ],
    ))
def add_sns_subscription(self, lambda_function: _lambda.Function, squid_alarm_topic: sns.Topic):
    """Subscribe the lambda to the Squid alarm topic and let SNS invoke it."""
    # Expose the topic ARN to the handler at runtime.
    lambda_function.add_environment(key="TOPIC_ARN", value=squid_alarm_topic.topic_arn)

    # Resource policy: allow the SNS service to invoke this function for
    # messages originating from the alarm topic.
    lambda_function.add_permission(
        "squid-lambda-permission",
        principal=iam.ServicePrincipal("sns.amazonaws.com"),
        action='lambda:InvokeFunction',
        source_arn=squid_alarm_topic.topic_arn,
    )

    # Deliver the topic's notifications to the function.
    subscription = sns_subscriptions.LambdaSubscription(lambda_function)
    squid_alarm_topic.add_subscription(subscription)
class Stack(core.Stack):
    """Stack wiring a source Lambda to a target Lambda via a custom EventBridge bus."""

    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self.event_bus = EventBus(
            scope=self,
            id='CustomEventBus',
            event_bus_name='CustomEventBus')

        # Producer function, allowed to put events onto the custom bus.
        # (The original used f-strings with no placeholders here.)
        self.source = Function(
            scope=self,
            id='SourceFunction',
            function_name='SourceFunction',
            code=Code.from_asset(path='./code_source/'),
            handler='index.handler',
            runtime=Runtime.PYTHON_3_6,
        )
        self.source.add_to_role_policy(statement=PolicyStatement(
            actions=['events:PutEvents'],
            resources=[self.event_bus.event_bus_arn]))

        # Rule matching events the source function emits.
        self.rule = Rule(
            scope=self,
            id='EventBusRule',
            description='Sample description.',
            enabled=True,
            event_bus=self.event_bus,
            event_pattern=EventPattern(detail={
                'Domain': ["MedInfo"],
                'Reason': ["InvokeTarget"]
            }),
            rule_name='EventBusRule',
        )

        # BUG FIX: the original stored the Function in ``self.target`` and then
        # immediately overwrote it with the LambdaFunction rule target, losing
        # the Function reference. Keep both; ``self.target`` still ends up as
        # the rule target, as callers saw before.
        self.target_function = Function(
            scope=self,
            id='TargetFunction',
            function_name='TargetFunction',
            code=Code.from_asset(path='./code_target/'),
            handler='index.handler',
            runtime=Runtime.PYTHON_3_6,
        )
        self.target: Union[IRuleTarget, LambdaFunction] = LambdaFunction(
            handler=self.target_function)
        self.rule.add_target(target=self.target)
def deploy_aws_ecs_public_dns(self):
    """Deploy the Node.js lambda that keeps public DNS in sync with ECS tasks."""
    # Pre-built deployment package produced by the build step.
    zip_path = join(dirname(dirname(__file__)), 'build', 'aws-ecs-public-dns.zip')

    dns_updater = Function(
        self._stack, 'public_dns',
        runtime=Runtime.NODEJS_12_X,
        handler='src/update-task-dns.handler',
        memory_size=128,
        code=Code.from_asset(path=zip_path),
    )

    self._tag_it(dns_updater)
    dns_updater.add_to_role_policy(statement=self.get_public_dns_policy_statement())
    self.create_event_rule(dns_updater)
def create_function(self, other_stack: Stack, id, *, code: AssetCode, handler: str, runtime: Runtime) -> IVersion: func = Function( self, id, code=code, handler=handler, runtime=runtime, role=self._role, ) # If code/runtime changes, CDK doesn't re-evaluate the version. In # result, we store an old version, and things don't work. But we also # don't want to generate a new version every run. The compromise: use # the sha256 hash of the index file. with open(f"{code.path}/index.js", "rb") as f: sha256 = hashlib.sha256(f.read()).hexdigest() version = func.add_version(f"Version-{sha256}") # Create an entry in the parameter-store that tells the arn of this lambda parameter_name = parameter_store.get_parameter_name( f"/LambdaEdge/{id}") StringParameter( self, parameter_name, string_value=Fn.join( ":", [ func.function_arn, version.version, ], ), parameter_name=parameter_name, ) other_stack.add_dependency(self) # Create a custom resource that fetches the arn of the lambda cross_region_func = LambdaEdgeFunction( other_stack, f"LambdaEdgeFunction-{sha256}", parameter_name=parameter_name, policy=AwsCustomResourcePolicy.from_sdk_calls( resources=AwsCustomResourcePolicy.ANY_RESOURCE), ) # Create the lambda function based on this arn return Version.from_version_arn(other_stack, id, cross_region_func.get_arn())
def __init__(self, scope: Construct, construct_id: str, env: Environment) -> None:
    """Stack hosting the smol API lambda (container image) inside the core VPC."""
    super().__init__(scope, construct_id, env=env)

    smol_table = SmolTable(self, "SmolTable", table_name=TABLE_NAME)

    # Run inside the existing core VPC, one private subnet per AZ.
    vpc = Vpc.from_lookup(self, "CoreVPC", vpc_name=VPC_NAME)
    subnets = SubnetSelection(
        one_per_az=True,
        subnet_type=SubnetType.PRIVATE,
    )

    api_function = Function(
        self,
        "SmolAPI",
        code=Code.from_asset_image(directory=abspath("./")),
        environment={
            "CAPTCHA_KEY": environ["CAPTCHA_KEY"],
            "SAFE_BROWSING_KEY": environ["SAFE_BROWSING_KEY"],
        },
        function_name=FUNCTION_NAME,
        handler=Handler.FROM_IMAGE,
        log_retention=RetentionDays.ONE_WEEK,
        memory_size=MEMORY_ALLOCATION,
        reserved_concurrent_executions=RESERVED_CONCURRENCY,
        runtime=Runtime.FROM_IMAGE,
        timeout=Duration.seconds(TIMEOUT_SEC),
        tracing=Tracing.ACTIVE,
        vpc=vpc,
        vpc_subnets=subnets,
    )

    # Least-privilege table access: describe plus single-item get/put.
    smol_table.table.grant(api_function, "dynamodb:DescribeTable")
    smol_table.table.grant(api_function, "dynamodb:GetItem")
    smol_table.table.grant(api_function, "dynamodb:PutItem")

    SmolTarget(self, "SmolTarget", api_function, API_HOST)
def __init__(self, scope: cdk.Construct, construct_id: str, lambda_context: str, **kwargs) -> None:
    """Build a Lambda function (plus alias) from settings stored in CDK context.

    Args:
        scope: Parent construct.
        construct_id: Construct identifier.
        lambda_context: Context key whose value is a dict with fn_name,
            fn_handler, fn_path, fn_retry_attempts, fn_timeout,
            fn_reserved_concurrency and fn_alias entries.
        **kwargs: Forwarded to the base construct.
    """
    # BUG FIX: **kwargs were accepted but never forwarded, so construct
    # options passed by callers were silently dropped.
    super().__init__(scope, construct_id, **kwargs)

    fn = dict(self.node.try_get_context(lambda_context))

    lambda_fn = Function(
        self,
        fn["fn_name"],
        function_name=fn["fn_name"],
        runtime=Runtime.PYTHON_3_8,
        handler=fn["fn_handler"],
        code=Code.from_asset(fn["fn_path"]),
        tracing=Tracing.ACTIVE,
        # Retain old versions so in-flight aliases keep working on rollback.
        current_version_options={
            "removal_policy": cdk.RemovalPolicy.RETAIN
        },
        retry_attempts=fn["fn_retry_attempts"],
        timeout=Duration.seconds(fn["fn_timeout"]),
        reserved_concurrent_executions=fn["fn_reserved_concurrency"])

    lambda_fn_alias = lambda_fn.current_version.add_alias(fn["fn_alias"])

    #
    # Outputs
    cdk.CfnOutput(self, fn["fn_name"] + 'Arn', value=lambda_fn.function_arn)

    self._function = lambda_fn
    self._function_alias = lambda_fn_alias
def __init__(
        self,
        app: App,
        id: str,
        code_txt: str,
        runtime: str,
        handler: str,
        env: dict,
        policy) -> None:
    """Construct wrapping an inline-code Lambda with a dedicated role.

    Args:
        app: Parent app/construct.
        id: Construct identifier.
        code_txt: Inline Python source for the function.
        runtime: NOTE(review): currently ignored — the runtime is hard-coded
            to python3.7 below; confirm whether it should be honoured.
        handler: NOTE(review): currently ignored — the handler is hard-coded
            to 'index.create' below; confirm whether it should be honoured.
        env: Environment variables for the function.
        policy: An IAM policy statement added to the function's initial
            policy. (The original ': str' annotation was incorrect —
            the value is placed directly into ``initial_policy``.)
    """
    super().__init__(app, id)

    function_role = Role(
        self,
        'NonLazyRole',
        assumed_by=ServicePrincipal('lambda.amazonaws.com'))

    self.function = Function(
        self,
        # BUG FIX: the original used 'Function'.format('{}'.format(id)),
        # a no-op since 'Function' contains no placeholder — the id was
        # silently discarded. Use the literal id the no-op produced to
        # keep CloudFormation logical IDs stable.
        'Function',
        code=Code.inline(code_txt),
        runtime=Runtime('python3.7', supports_inline_code=True),
        handler='index.create',
        environment=env,
        initial_policy=[policy],
        tracing=Tracing.ACTIVE,
        role=function_role
    )
def init_lambda(self):
    """Build the Lambda@Edge function and return a pinned Version of it."""
    # Bundle the handler together with its installed requirements,
    # excluding local development cruft.
    bundle_dir = install_lambda_code_requirements()
    bundled_code = Code.from_asset(str(bundle_dir), exclude=[
        ".env",
        "__main*",
        "*.dist-info",
        "bin",
        "requirements.txt",
    ])

    edge_fn = Function(self, "lambda",
                       code=bundled_code,
                       handler="main.handler",
                       runtime=Runtime.PYTHON_3_8)

    # Lambda@Edge additionally requires the execution role to be assumable
    # by the edgelambda service principal.
    edge_trust = PolicyStatement(
        actions=["sts:AssumeRole"],
        principals=[ServicePrincipal("edgelambda.amazonaws.com")])
    edge_fn.role.assume_role_policy.add_statements(edge_trust)

    fn_version = Version(self, "version", lambda_=edge_fn)
    apply_removal_policy(edge_fn, fn_version, edge_fn.role)
    return fn_version
async def create_site_function(self, id: str, domain: str, cdn_name: str) -> Function:
    """Create the site-serving Lambda from the 'site_function' asset.

    Args:
        id: Prefix for the construct ids created here.
        domain: Public domain of the site (exposed as SITE_DOMAIN).
        cdn_name: Domain of the static-assets CDN (exposed as STATIC_DOMAIN).

    Returns:
        The configured Function.
    """
    # BUG FIX: the original dict literal listed 'PROD': 'True' twice;
    # the duplicate (same value, so harmless at runtime) is removed.
    env = {
        'PROD': 'True',
        'SITE_DOMAIN': domain,
        'APP_VERSION': '0.02',
        'STATIC_DOMAIN': cdn_name,
    }

    # Stage the handler directory as an S3 asset and reference it directly.
    site_code_asset = Asset(
        self, '{}FunctionAsset'.format(id),
        path='site_function')
    site_code = S3Code(
        bucket=site_code_asset.bucket,
        key=site_code_asset.s3_object_key)

    return Function(
        self, '{}Function'.format(id),
        timeout=Duration.seconds(3),
        code=site_code,
        handler='site_function.handler',
        environment=env,
        tracing=Tracing.ACTIVE,
        initial_policy=[DDB_FUNCTION_POLICY_STATEMENT],
        runtime=Runtime(
            name='python3.7',
            supports_inline_code=True,
        )
    )
def create_lambda(self, envs: EnvSettings):
    """Create the image-resize lambda and its REST API front-end."""
    # App-only deployments ship the packaged zip directly; otherwise the
    # code is injected later via CloudFormation parameters.
    if self.node.try_get_context("is_app_only") == "true":
        code = Code.from_asset(path="../backend/functions/image_resize/.serverless/main.zip")
    else:
        code = Code.from_cfn_parameters()

    resize_fn = Function(
        self,
        "image-resize-lambda",
        function_name=f"{envs.project_name}-image-resize",
        code=code,
        handler="index.handler",
        runtime=Runtime.NODEJS_12_X,
        memory_size=512,
        timeout=Duration.seconds(30),
        tracing=Tracing.ACTIVE,
    )

    api_gateway = LambdaRestApi(
        self,
        "ImageResizeLambdaApi",
        rest_api_name=f"{envs.project_name}-image-resize",
        handler=resize_fn)

    return resize_fn, code, api_gateway
async def create_canary_function(self, id: str) -> Function:
    """Create an inline canary Lambda and a cron rule that triggers it.

    Args:
        id: Prefix for the construct ids created here.

    Returns:
        The canary Function.
    """
    # Read the canary source once; the file handle is closed before any
    # CDK resources are created. (The dead `function = None` initializer
    # from the original is removed.)
    with open('canary/canary.py', 'r') as code:
        canary_code = code.read()

    function = Function(
        self, '{}CanaryFunction'.format(id),
        timeout=Duration.seconds(3),
        code=InlineCode(canary_code),
        handler='index.handler',
        tracing=Tracing.ACTIVE,
        initial_policy=[MINIMAL_FUNCTION_POLICY_STATEMENT],
        runtime=Runtime(
            name='python3.7',
            supports_inline_code=True,
        )
    )

    # Scheduled trigger; cron() without fields uses the CDK defaults.
    Rule(self, '{}CanaryRule'.format(id),
         enabled=True,
         schedule=Schedule.cron(),
         targets=[LambdaFunction(handler=function)])

    return function
def __configure_tda_auth(self, function: lambda_.Function) -> None:
    """ Fetches the OAuth2 values from SSM """
    # env var -> (construct id, SSM parameter name); insertion order is
    # preserved so resources are created in the same order as before.
    oauth_parameters = {
        'TDA_REDIRECT_URI': ('TDA-Redirect-Parameter', '/app-FinSurf/tdameritrade/redirect_uri'),
        'TDA_CLIENT_ID': ('TDA_CLIENT_ID', '/app-FinSurf/tdameritrade/client_id'),
    }
    for env_key, (construct_id, parameter_name) in oauth_parameters.items():
        parameter = ssm.StringParameter.from_string_parameter_name(
            self, construct_id, string_parameter_name=parameter_name)
        function.add_environment(key=env_key, value=parameter.string_value)
def __init__(self, scope: Construct):
    """Testing construct: two inline lambdas exercising the common lambda layer.

    The first function imports several b_lambda_layer_common modules plus
    boto3/botocore (pinned through the layer) and returns their versions;
    the second uses the layer without pinned dependencies.
    """
    super().__init__(scope=scope)

    # Handler reports the boto3/botocore versions it actually imports,
    # which verifies the layer's pinned packages take effect.
    function = Function(
        scope=self,
        id=f'{self.global_prefix()}TestingFunction',
        code=Code.from_inline(
            'from b_lambda_layer_common import api_gateway\n'
            'from b_lambda_layer_common import exceptions\n'
            'from b_lambda_layer_common import ssm\n'
            'from b_lambda_layer_common import util\n'
            'import boto3\n'
            'import botocore\n'
            '\n\n'
            'def handler(*args, **kwargs):\n'
            '    return dict(\n'
            '        Boto3Version=boto3.__version__,\n'
            '        BotocoreVersion=botocore.__version__,\n'
            '    )'
            '\n'),
        handler='index.handler',
        runtime=Runtime.PYTHON_3_6,
        layers=[
            # Layer pinning specific boto3/botocore versions.
            Layer(scope=self,
                  name=f'{self.global_prefix()}TestingLayer',
                  dependencies={
                      'boto3': PackageVersion.from_string_version('1.16.35'),
                      'botocore': PackageVersion.from_string_version('1.19.35'),
                  })
        ])

    self.add_output(self.LAMBDA_FUNCTION_NAME_KEY, value=function.function_name)

    # Create another function that is not using boto3.
    Function(
        scope=self,
        id=f'{self.global_prefix()}TestingFunction2',
        code=Code.from_inline('def handler(*args, **kwargs): return 200'),
        handler='index.handler',
        runtime=Runtime.PYTHON_3_6,
        layers=[
            Layer(scope=self,
                  name=f'{self.global_prefix()}TestingLayer2')
        ])
def _create_lambdas(self):
    """Discover lambda packages under LAMBDAS_DIR and create one Function each.

    Every directory containing an ``__init__.py`` becomes a lambda named
    ``<parent>-<folder>``; its layers, policies, env vars and sizing come
    from ``self.lambdas_config``. Also wires the SQS event source and the
    two cron rules for the updater lambdas.
    """
    clean_pycache()
    for root, dirs, files in os.walk(LAMBDAS_DIR):
        for f in files:
            # Only directories that are python packages define a handler.
            if f != "__init__.py":
                continue
            # Lambda name convention: "<parent-folder>-<lambda-folder>".
            parent_folder = os.path.basename(os.path.dirname(root))
            lambda_folder = os.path.basename(root)
            name = f"{parent_folder}-{lambda_folder}"
            lambda_config = self.lambdas_config[name]

            # Resolve configured layer names to the layer objects built earlier.
            layers = []
            for layer_name in lambda_config["layers"]:
                layers.append(self.layers[layer_name])

            # Dedicated role per lambda: configured policies + basic execution.
            lambda_role = Role(
                self,
                f"{name}_role",
                assumed_by=ServicePrincipal(service="lambda.amazonaws.com")
            )
            for policy in lambda_config["policies"]:
                lambda_role.add_to_policy(policy)
            lambda_role.add_managed_policy(
                ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"))

            lambda_args = {
                "code": Code.from_asset(root),
                "handler": "__init__.handle",
                "runtime": Runtime.PYTHON_3_8,
                "layers": layers,
                "function_name": name,
                "environment": lambda_config["variables"],
                "role": lambda_role,
                "timeout": Duration.seconds(lambda_config["timeout"]),
                "memory_size": lambda_config["memory"],
            }
            # Reserved concurrency is optional per-lambda.
            if "concurrent_executions" in lambda_config:
                lambda_args["reserved_concurrent_executions"] = lambda_config["concurrent_executions"]

            self.lambdas[name] = Function(self, name, **lambda_args)

    # Queue consumer and scheduled updaters (names must exist in config).
    self.lambdas["sqs_handlers-post_anime"].add_event_source(SqsEventSource(self.post_anime_queue))

    Rule(
        self,
        "titles_updater",
        schedule=Schedule.cron(hour="2", minute="10"),
        targets=[LambdaFunction(self.lambdas["crons-titles_updater"])]
    )
    Rule(
        self,
        "episodes_updater",
        schedule=Schedule.cron(hour="4", minute="10"),
        targets=[LambdaFunction(self.lambdas["crons-episodes_updater"])]
    )
def _create_lambda_fn(self, envs: EnvSettings, memory_size: int, queue: Queue):
    """Create one data-processing worker lambda fed by *queue*.

    A separate worker is created per memory size (the size is baked into
    the function name and construct id).

    Args:
        envs: Project-level settings (project name used in naming).
        memory_size: Memory allocation in MB; also distinguishes workers.
        queue: SQS queue the worker consumes, one message at a time.

    Returns:
        Tuple of (Function, Code) — the code handle is returned so the
        deploy pipeline can fill in CFN parameters when not app-only.
    """
    is_app_only = self.node.try_get_context("is_app_only")
    # App-only deployments use the packaged zip; otherwise code arrives
    # later through CloudFormation parameters.
    if is_app_only == "true":
        code = Code.from_asset(path="../backend/functions/worker/.serverless/main.zip")
    else:
        code = Code.from_cfn_parameters()

    function = Function(
        self,
        f"data-processing-worker-{memory_size}",
        function_name=f"{envs.project_name}-data-processing-{memory_size}",
        code=code,
        runtime=Runtime.PYTHON_3_8,
        handler="handler.main",
        environment={
            "AWS_STORAGE_BUCKET_NAME": self.app_bucket.bucket_name,
            "IMAGE_SCRAPING_FETCH_TIMEOUT": "15",
            "AWS_IMAGE_STORAGE_BUCKET_NAME": self.resize_lambda_image_bucket.bucket_name,
            "AWS_IMAGE_STATIC_URL": self.resize_lambda_image_bucket.bucket_website_url,
            "BACKEND_URL": self.backend_url,
            "LAMBDA_AUTH_TOKEN": self.lambda_auth_token.secret_value.to_string(),
        },
        memory_size=memory_size,
        timeout=Duration.seconds(300),
        tracing=Tracing.ACTIVE,
    )

    # batch_size=1: each invocation processes exactly one message.
    function.add_event_source(SqsEventSource(queue, batch_size=1))

    # The worker reads and writes both the app bucket and the image bucket.
    self.app_bucket.grant_read_write(function.role)
    self.resize_lambda_image_bucket.grant_read_write(function.role)

    return function, code
def __init__(self, scope: Stack):
    """Testing stack containing one inline lambda that uses the Twilio layer."""
    # FIX: the original used f-strings with no placeholders (ruff F541);
    # plain literals are equivalent.
    super().__init__(scope=scope, id='TestingStack', stack_name='TestingStack')

    Function(scope=self,
             id='TestingFunction',
             code=Code.from_inline('def handler(): return "Hello World!"'),
             handler='index.handler',
             runtime=Runtime.PYTHON_3_6,
             layers=[TwilioLayer(self, 'TestingTwilioLayer')])
def api_lambda_function(scope, name, handler, apigw, path, method, layer, tables, code="./backend"):
    """Create a backend lambda wired to a table and an HTTP API route.

    Args:
        scope: Parent construct.
        name: Construct id for the function.
        handler: Lambda handler string.
        apigw: HTTP API to attach the route to.
        path: Route path.
        method: HTTP method for the route.
        layer: List of layers for the function.
        tables: Sequence whose first element is the poll table.
        code: Directory containing the lambda source (default "./backend").

    Returns:
        The created Function.
    """
    _lambda = Function(
        scope,
        name,
        handler=handler,
        runtime=Runtime.PYTHON_3_8,
        # FIX: Code.asset is the deprecated alias; from_asset is the
        # supported spelling and behaves identically.
        code=Code.from_asset(code),
        tracing=Tracing.ACTIVE,
        layers=layer,
    )

    _lambda.add_environment("POLL_TABLE", tables[0].table_name)
    _lambda.add_environment("MAIN_PAGE_GSI", "main_page_gsi")

    apigw.add_routes(
        path=path,
        methods=[method],
        integration=LambdaProxyIntegration(handler=_lambda),
    )
    return _lambda
def create_other_lambda(self, function_name):
    """Create a small step-function worker lambda from the local source tree."""
    # Source lives at <lambda_path_base>/<function_name>.
    source_dir = str(self.lambda_path_base.joinpath(function_name))
    return Function(
        self,
        f'id_{function_name}',
        code=AssetCode(source_dir),
        handler='lambda_function.lambda_handler',
        runtime=Runtime.PYTHON_3_7,
        function_name=f'sfn_{function_name}_lambda',
        memory_size=128,
        timeout=core.Duration.seconds(10),
    )
def __init__(self, scope: cdk.Construct, construct_id: str, _fn1: _lambda.Function, _fn2: _lambda.Function, _db: _ddb.ITable, **kwargs) -> None:
    """Wire two lambdas to a shared DynamoDB table via env vars and grants.

    Args:
        scope: Parent construct.
        construct_id: Construct identifier.
        _fn1: Function that also receives DOMAIN_URL and EXPIRY_TIME.
        _fn2: Function that only needs the table name.
        _db: Table both functions read and write.
        **kwargs: Forwarded to the base construct.
    """
    # FIX: **kwargs were accepted but not forwarded to the base construct.
    super().__init__(scope, construct_id, **kwargs)

    _fn1.add_environment("TABLE_NAME", _db.table_name)
    _fn1.add_environment("DOMAIN_URL", "manuchandrasekhar.com/")
    _fn1.add_environment("EXPIRY_TIME", "86400")  # seconds (24h)
    _fn2.add_environment("TABLE_NAME", _db.table_name)

    _db.grant_read_write_data(_fn1)
    _db.grant_read_write_data(_fn2)
    # BUG FIX: the original ended with the bare expression `_fn1.grant_invoke`,
    # which only references the bound method and grants nothing. Removed; if an
    # invoke grant is needed, call `_fn1.grant_invoke(<grantee>)` here.
def create_ecs_lambda(self, cluster: ICluster, auto_scaling_group: AutoScalingGroup):
    """Create the lambda that reacts to ECS task state changes in *cluster*."""
    handler_fn = Function(
        self,
        "LambdaECS",
        code=Code.from_asset("./lambdas/nlb-ecs"),
        handler="index.lambda_handler",
        runtime=Runtime.PYTHON_3_8,
        timeout=Duration.seconds(30),
        environment={
            "AUTO_SCALING_GROUP_NAME": auto_scaling_group.auto_scaling_group_name,
        },
    )

    # The handler inspects the ASG and drives SSM Run Command on its instances.
    handler_permissions = PolicyStatement(
        actions=[
            "autoscaling:DescribeAutoScalingGroups",
            "ssm:SendCommand",
            "ssm:GetCommandInvocation",
        ],
        resources=["*"],
    )
    handler_fn.add_to_role_policy(handler_permissions)

    # Invoke the handler for every ECS task state change in this cluster.
    Rule(
        self,
        "ECS",
        event_pattern=EventPattern(
            detail_type=["ECS Task State Change"],
            detail={"clusterArn": [cluster.cluster_arn]},
            source=["aws.ecs"],
        ),
        targets=[LambdaFunction(handler_fn)],
    )
def __build(self, function: lambda_.Function, context: InfraContext) -> None:
    """Configure the Alexa skill lambda: API endpoints plus Alexa invoke permission."""
    # Downstream service endpoints consumed by the handler at runtime.
    function.add_environment('EARNINGS_API', context.earnings_api.url)
    function.add_environment('FRIENDLY_NAME_API', context.fnapi.url)

    # Allow the Alexa skill (matched by its skill-id token) to invoke us.
    function.add_permission(
        id='Alexa-Trigger',
        action='lambda:InvokeFunction',
        principal=iam.ServicePrincipal(service="alexa-appkit.amazon.com"),
        event_source_token='amzn1.ask.skill.9f4cb90e-4c57-41c2-a942-c2e6685888ba',
    )
def _create_lambdas(self):
    """Discover lambda packages under LAMBDAS_DIR and create one Function each.

    Every directory containing an ``__init__.py`` becomes a lambda named
    ``<parent>-<folder>``; layers, policies, env vars and sizing come from
    ``self.lambdas_config``. Also schedules the cron-update_eps lambda.
    """
    for root, dirs, files in os.walk(LAMBDAS_DIR):
        for f in files:
            # Only python packages (with __init__.py) define a handler.
            if f != "__init__.py":
                continue
            # Lambda name convention: "<parent-folder>-<lambda-folder>".
            parent_folder = os.path.basename(os.path.dirname(root))
            lambda_folder = os.path.basename(root)
            name = f"{parent_folder}-{lambda_folder}"
            lambda_config = self.lambdas_config[name]

            # Resolve configured layer names to the layer objects built earlier.
            layers = []
            for layer_name in lambda_config["layers"]:
                layers.append(self.layers[layer_name])

            # Dedicated role per lambda: configured policies + basic execution.
            lambda_role = Role(self, f"{name}_role",
                               assumed_by=ServicePrincipal(service="lambda.amazonaws.com"))
            for policy in lambda_config["policies"]:
                lambda_role.add_to_policy(policy)
            lambda_role.add_managed_policy(
                ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole"))

            self.lambdas[name] = Function(
                self,
                name,
                code=Code.from_asset(root),
                handler="__init__.handle",
                runtime=Runtime.PYTHON_3_8,
                layers=layers,
                function_name=name,
                environment=lambda_config["variables"],
                role=lambda_role,
                timeout=Duration.seconds(lambda_config["timeout"]),
                memory_size=lambda_config["memory"],
            )

    # Daily schedule for the episode updater (02:10 UTC).
    Rule(self, "update_eps",
         schedule=Schedule.cron(hour="2", minute="10"),
         targets=[LambdaFunction(self.lambdas["cron-update_eps"])])
def functions_for(
    self,
    name,
    base,
    handlers,
    libs=None,
    timeout=Duration.minutes(5),
    runtime=Runtime.PYTHON_3_8,
    layers=None,
) -> Dict[str, Function]:
    """Build one Function per handler, sharing a code bundle, role and layers.

    Args:
        name: Prefix for the generated function names and the role name.
        base: Base directory used to compute the bundling options.
        handlers: Handler string or list of handler strings.
        libs: Optional library name or list of names to bundle.
        timeout: Per-function timeout (default 5 minutes).
        runtime: Lambda runtime (default Python 3.8).
        layers: Optional layer or list of layers.

    Returns:
        Mapping of derived function name -> Function.

    Raises:
        ValueError: If handlers or libs is neither a string nor a list.
    """
    # Normalise scalar arguments to lists, then validate.
    if isinstance(handlers, str):
        handlers = [handlers]
    if not isinstance(handlers, list):
        raise ValueError("handlers must be a string or a list of handlers")
    if isinstance(libs, str):
        libs = [libs]
    if isinstance(layers, str):
        layers = [layers]
    if libs and not isinstance(libs, list):
        raise ValueError("libs must be a string or a list of libraries")

    # One bundled asset and one IAM role are shared by every function here.
    bundling = self._get_bundling(base, libs=libs)
    shared_code = Code.from_asset(str(self.source_path), bundling=bundling)
    shared_role = self.build_lambda_role(name)

    functions: Dict[str, Function] = {}
    for handler in handlers:
        # "my_thing_handler.main" -> "MyThing": snake_case module stem is
        # camel-cased and a trailing "Handler" is dropped.
        stem = handler.split(".")[0]
        camel = stem.replace("_", " ").title().replace(" ", "").replace("Handler", "")
        func_name = name + camel
        functions[func_name] = Function(
            self,
            func_name,
            handler=handler,
            code=shared_code,
            runtime=runtime,
            timeout=timeout,
            role=shared_role,
            layers=layers,
            environment={"LOG_LEVEL": self.log_level},
        )
    return functions
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Workshop stack: an S3 bucket that triggers a lambda on object creation."""
    super().__init__(scope, construct_id, **kwargs)

    workshop_bucket = Bucket(
        scope=self,
        id='WorkshopBucketWithCDK',
        bucket_name='cloudvisor-workshop-bucket-with-cdk'
    )

    notify_fn = Function(
        scope=self,
        id='WorkshopFunctionWithCDK',
        function_name='WorkshopFunctionWithCDK',
        runtime=Runtime.PYTHON_3_6,
        handler='index.handler',
        code=Code.from_inline('def handler(*args, **kwargs): print(args); return 200')
    )

    # Invoke the function for every object created in the bucket.
    workshop_bucket.add_object_created_notification(LambdaDestination(notify_fn))
def __init__(self, scope: core.Construct, construct_id: str, lambda_context: str, **kwargs) -> None:
    """Build a Lambda (plus alias and dead-letter queue) from CDK context settings.

    Args:
        scope: Parent construct.
        construct_id: Construct identifier.
        lambda_context: Context key whose value is a dict with fn_name,
            fn_handler, fn_path, fn_dlq_name, fn_retry_attempts, fn_timeout,
            fn_reserved_concurrency and fn_alias entries.
        **kwargs: Forwarded to the base construct.
    """
    super().__init__(scope, construct_id, **kwargs)

    fn = dict(self.node.try_get_context(lambda_context))

    # lambda dlq
    lambda_fn_dlq = _sqs.Queue(self, fn["fn_dlq_name"], queue_name=fn["fn_dlq_name"])

    lambda_fn = Function(
        self,
        fn["fn_name"],
        function_name=fn["fn_name"],
        runtime=Runtime.PYTHON_3_8,
        handler=fn["fn_handler"],
        code=Code.from_asset(fn["fn_path"]),
        tracing=Tracing.ACTIVE,
        # Retain old versions so aliases keep working across rollbacks.
        current_version_options={
            "removal_policy": core.RemovalPolicy.RETAIN
        },
        environment={
            "ENVIRONMENT_VALUE": "DUMMY_VALUE",
        },
        dead_letter_queue=lambda_fn_dlq,
        retry_attempts=fn["fn_retry_attempts"],
        timeout=Duration.seconds(fn["fn_timeout"]),
        reserved_concurrent_executions=fn["fn_reserved_concurrency"])

    lambda_fn_alias = lambda_fn.current_version.add_alias(fn["fn_alias"])
    # The function must be able to push failed events onto its DLQ.
    lambda_fn_dlq.grant_send_messages(lambda_fn)

    #
    # Outputs
    core.CfnOutput(self, fn["fn_name"] + 'Arn', value=(lambda_fn.function_arn))

    self._function = lambda_fn
    self._function_alias = lambda_fn_alias
    self._function_dlq = lambda_fn_dlq
def __init__(self, scope):
    """Repro stack: a pipeline whose second stage invokes an ARN-imported lambda."""
    super().__init__(scope, "bug")

    # Shared infrastructure imported from other stacks' CloudFormation exports.
    artifact_bucket = Bucket.from_bucket_name(
        self, "artifacts", core.Fn.import_value("CodeArtifactsBucket")
    )
    pipeline_role = Role.from_role_arn(
        self, "pipeline", core.Fn.import_value("CodePipelineRole")
    )

    source_stage = StageProps(
        stage_name="Source",
        actions=[
            GitHubSourceAction(
                action_name="Source",
                run_order=1,
                oauth_token=core.SecretValue("something"),
                output=Artifact(artifact_name="SourceArtifact"),
                owner="me",
                repo="repo",
                branch="master",
            )
        ],
    )

    pipeline = Pipeline(
        self,
        "Pipeline",
        artifact_bucket=artifact_bucket,
        role=pipeline_role,
        stages=[source_stage],
    )

    # Second stage added imperatively; its action invokes a lambda that is
    # only known by its exported ARN.
    imported_fn = Function.from_function_arn(
        self, "function", core.Fn.import_value("SomeFunction")
    )
    pipeline.add_stage(
        stage_name="Fails",
        actions=[
            LambdaInvokeAction(
                action_name="LambdaInvokeAction",
                run_order=1,
                lambda_=imported_fn,
            )
        ],
    )
def test_FUNC_hash_WITH_valid_parameters_EXPECT_hash_created():
    """
    Test that hashing is consistent and works as expected.

    :return: No return.
    """
    stack = Stack(App(), 'TestStack')
    # Minimal integration: a throwaway API plus an inline lambda, enough for
    # the integration to compute its hash.
    integration = LambdaIntegration(
        scope=stack,
        integration_name='TestIntegration',
        api=CfnApi(stack, 'TestApi'),
        lambda_function=Function(
            stack,
            'TestLambdaFunction',
            code=Code.from_inline('def handler(*args, **kwargs): return 123'),
            handler='index.handler',
            runtime=Runtime.PYTHON_3_6))
    # Golden value: the hash must be stable across synths; update only when
    # the integration's hashed inputs change intentionally.
    assert integration.hash == 'ab93cecc508e529c3791ba48a1275deec88cdd6b43a7e1d443906df48fa300e4'