def test_kinesis_lambda_forward_chain():
    """End-to-end chain test: Kinesis stream1 -> Lambda1 -> Kinesis stream2 -> Lambda2 -> S3.

    Publishes a record to the first stream whose body instructs the test Lambda
    to forward it to the second stream; the final Lambda persists the payload,
    and the assertion looks for it among all S3 objects.
    """
    kinesis = aws_stack.connect_to_service('kinesis')
    s3 = aws_stack.connect_to_service('s3')
    aws_stack.create_kinesis_stream(TEST_CHAIN_STREAM1_NAME, delete=True)
    aws_stack.create_kinesis_stream(TEST_CHAIN_STREAM2_NAME, delete=True)
    s3.create_bucket(Bucket=TEST_BUCKET_NAME)

    # deploy test lambdas connected to Kinesis streams
    zip_file = testutil.create_lambda_archive(
        load_file(TEST_LAMBDA_PYTHON), get_content=True,
        libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(
        func_name=TEST_CHAIN_LAMBDA1_NAME, zip_file=zip_file,
        event_source_arn=get_event_source_arn(TEST_CHAIN_STREAM1_NAME),
        runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(
        func_name=TEST_CHAIN_LAMBDA2_NAME, zip_file=zip_file,
        event_source_arn=get_event_source_arn(TEST_CHAIN_STREAM2_NAME),
        runtime=LAMBDA_RUNTIME_PYTHON27)

    # publish test record, instructing the Lambda to forward it to stream 2
    test_data = {'test_data': 'forward_chain_data_%s' % short_uid()}
    data = clone(test_data)
    data[lambda_integration.MSG_BODY_MESSAGE_TARGET] = 'kinesis:%s' % TEST_CHAIN_STREAM2_NAME
    kinesis.put_record(Data=to_bytes(json.dumps(data)), PartitionKey='testId',
                       StreamName=TEST_CHAIN_STREAM1_NAME)

    try:
        # check results
        time.sleep(5)
        all_objects = testutil.list_all_s3_objects()
        testutil.assert_objects(test_data, all_objects)
    finally:
        # fix: delete the streams afterwards so repeated runs don't leak resources
        # (the other variants of this test clean up; this one didn't)
        kinesis.delete_stream(StreamName=TEST_CHAIN_STREAM1_NAME)
        kinesis.delete_stream(StreamName=TEST_CHAIN_STREAM2_NAME)
def add_authorizer(path, data):
    """Register a new API Gateway authorizer built from *data* and return it.

    A fresh id is generated, a HAL-style self link is attached, and the
    record is stored in the module-level AUTHORIZERS registry.
    """
    api_id = get_api_id_from_path(path)
    authorizer = common.clone(data)
    authorizer['id'] = common.short_uid()
    links = authorizer.setdefault('_links', {})
    links['self'] = {
        'href': '/restapis/%s/authorizers/%s' % (api_id, authorizer['id'])
    }
    AUTHORIZERS[authorizer['id']] = authorizer
    return authorizer
def forward_request(self, method, path, data, headers):
    """Intercept Kinesis API calls related to stream consumers.

    Handles Register/Deregister/List/DescribeStreamConsumer against the
    in-memory STREAM_CONSUMERS list, optionally injects random errors for
    Put* actions (per KINESIS_ERROR_PROBABILITY), and returns True to
    forward any other request to the real backend.
    """
    global STREAM_CONSUMERS
    data = json.loads(to_str(data or '{}'))
    # the target operation is carried in the X-Amz-Target header
    action = headers.get('X-Amz-Target')
    if action == '%s.RegisterStreamConsumer' % ACTION_PREFIX:
        consumer = clone(data)
        consumer['ConsumerStatus'] = 'ACTIVE'
        consumer['ConsumerARN'] = '%s/consumer/%s' % (data['StreamARN'], data['ConsumerName'])
        consumer['ConsumerCreationTimestamp'] = timestamp_millis()
        consumer = json_safe(consumer)
        STREAM_CONSUMERS.append(consumer)
        return {'Consumer': consumer}
    elif action == '%s.DeregisterStreamConsumer' % ACTION_PREFIX:
        def consumer_matches(c):
            # a consumer is identified either by its ARN, or by the
            # (stream ARN, consumer name) pair
            stream_arn = data.get('StreamARN')
            cons_name = data.get('ConsumerName')
            cons_arn = data.get('ConsumerARN')
            return (c.get('ConsumerARN') == cons_arn
                    or (c.get('StreamARN') == stream_arn and c.get('ConsumerName') == cons_name))
        STREAM_CONSUMERS = [c for c in STREAM_CONSUMERS if not consumer_matches(c)]
        return {}
    elif action == '%s.ListStreamConsumers' % ACTION_PREFIX:
        result = {
            'Consumers': [c for c in STREAM_CONSUMERS if c.get('StreamARN') == data.get('StreamARN')]
        }
        return result
    elif action == '%s.DescribeStreamConsumer' % ACTION_PREFIX:
        # NOTE(review): each field falls back to the *other* request
        # attribute when missing — presumably so either identifier can be
        # used to look up a consumer; verify against callers
        consumer_arn = data.get('ConsumerARN') or data['ConsumerName']
        consumer_name = data.get('ConsumerName') or data['ConsumerARN']
        result = {
            'ConsumerDescription': {
                'ConsumerARN': consumer_arn,
                # 'ConsumerCreationTimestamp': number,
                'ConsumerName': consumer_name,
                'ConsumerStatus': 'ACTIVE',
                'StreamARN': data.get('StreamARN')
            }
        }
        return result
    # randomly inject errors for Put* calls to exercise client retry logic
    if random.random() < config.KINESIS_ERROR_PROBABILITY:
        action = headers.get('X-Amz-Target')
        if action in [ACTION_PUT_RECORD, ACTION_PUT_RECORDS]:
            return kinesis_error_response(data, action)
    # True means: forward the request to the backend unchanged
    return True
def test_create_run_map_state_machine(self):
    """Create a Map-state machine, run it, and verify the mapped output."""
    names = ['Bob', 'Meg', 'Joe']
    test_input = [{'map': name} for name in names]
    test_output = [{'Hello': name} for name in names]
    machines_before = self.sfn_client.list_state_machines()['stateMachines']

    # create state machine with the iterator step wired to the test Lambda
    role_arn = aws_stack.role_arn('sfn_role')
    sm_def = clone(MAP_STATE_MACHINE_DEF)
    lambda_arn_3 = aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_3)
    iterator_states = sm_def['States']['ExampleMapState']['Iterator']['States']
    iterator_states['CallLambda']['Resource'] = lambda_arn_3
    self.sfn_client.create_state_machine(
        name=MAP_STATE_MACHINE_NAME, definition=json.dumps(sm_def), roleArn=role_arn)

    # assert that the SM has been created
    machines_after = self.sfn_client.list_state_machines()['stateMachines']
    self.assertEqual(len(machines_after), len(machines_before) + 1)

    # run state machine
    sm_arn = [m['stateMachineArn'] for m in machines_after
              if m['name'] == MAP_STATE_MACHINE_NAME][0]
    start_result = self.sfn_client.start_execution(
        stateMachineArn=sm_arn, input=json.dumps(test_input))
    self.assertTrue(start_result.get('executionArn'))

    def check_invocations():
        self.assertIn(lambda_arn_3, lambda_api.LAMBDA_EXECUTOR.function_invoke_times)
        # assert that the result is correct
        result = self._get_execution_results(sm_arn)
        self.assertEqual(result, test_output)

    # assert that the lambda has been invoked by the SM execution
    retry(check_invocations, sleep=1, retries=10)

    # clean up
    self.sfn_client.delete_state_machine(stateMachineArn=sm_arn)
def test_kinesis_lambda_forward_chain(self):
    """Record published to stream 1 is forwarded via Lambda to stream 2 and lands in S3."""
    kinesis = aws_stack.connect_to_service('kinesis')
    s3 = aws_stack.connect_to_service('s3')
    aws_stack.create_kinesis_stream(TEST_CHAIN_STREAM1_NAME, delete=True)
    aws_stack.create_kinesis_stream(TEST_CHAIN_STREAM2_NAME, delete=True)
    s3.create_bucket(Bucket=TEST_BUCKET_NAME)

    # deploy test lambdas connected to Kinesis streams
    zip_file = testutil.create_lambda_archive(
        load_file(TEST_LAMBDA_PYTHON), get_content=True,
        libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
    for func_name, stream_name in (
            (TEST_CHAIN_LAMBDA1_NAME, TEST_CHAIN_STREAM1_NAME),
            (TEST_CHAIN_LAMBDA2_NAME, TEST_CHAIN_STREAM2_NAME)):
        testutil.create_lambda_function(
            func_name=func_name, zip_file=zip_file,
            event_source_arn=get_event_source_arn(stream_name),
            runtime=LAMBDA_RUNTIME_PYTHON27)

    # publish test record, instructing the Lambda to forward it to stream 2
    test_data = {
        'test_data': 'forward_chain_data_%s with \'quotes\\"' % short_uid()
    }
    payload = clone(test_data)
    payload[lambda_integration.MSG_BODY_MESSAGE_TARGET] = 'kinesis:%s' % TEST_CHAIN_STREAM2_NAME
    kinesis.put_record(Data=to_bytes(json.dumps(payload)),
                       PartitionKey='testId',
                       StreamName=TEST_CHAIN_STREAM1_NAME)

    # check results
    time.sleep(5)
    testutil.assert_objects(test_data, testutil.list_all_s3_objects())

    # clean up
    kinesis.delete_stream(StreamName=TEST_CHAIN_STREAM1_NAME)
    kinesis.delete_stream(StreamName=TEST_CHAIN_STREAM2_NAME)
def to_response_json(model_type, data, api_id=None, self_link=None):
    """Wrap *data* in a HAL-style JSON response for the given model type.

    Single-element lists are unwrapped first; a default self link is
    derived from the model type and id unless *self_link* is given.
    """
    if isinstance(data, list) and len(data) == 1:
        data = data[0]
    result = common.clone(data)
    if not self_link:
        self_link = '/%ss/%s' % (model_type, data['id'])
        if api_id:
            self_link = '/restapis/%s/%s' % (api_id, self_link)
    links = result.setdefault('_links', {})
    links['self'] = {'href': self_link}
    links['curies'] = {
        'href': 'https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-authorizer-latest.html',
        'name': model_type,
        'templated': True
    }
    links['%s:delete' % model_type] = {'href': self_link}
    return result
def test_create_run_state_machine(self):
    """Create a two-step state machine, execute it, and verify the Lambda results."""
    machines_before = self.sfn_client.list_state_machines()['stateMachines']

    # create state machine with both steps wired to the test Lambdas
    role_arn = aws_stack.role_arn('sfn_role')
    sm_def = clone(STATE_MACHINE_DEF)
    lambda_arn_1 = aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_1)
    lambda_arn_2 = aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_2)
    sm_def['States']['step1']['Resource'] = lambda_arn_1
    sm_def['States']['step2']['Resource'] = lambda_arn_2
    sm_name = 'sm-%s' % short_uid()
    self.sfn_client.create_state_machine(
        name=sm_name, definition=json.dumps(sm_def), roleArn=role_arn)

    # assert that the SM has been created
    self.assert_machine_created(machines_before)

    # run state machine
    machines = self.sfn_client.list_state_machines()['stateMachines']
    sm_arn = [m['stateMachineArn'] for m in machines if m['name'] == sm_name][0]
    start_result = self.sfn_client.start_execution(stateMachineArn=sm_arn)
    self.assertTrue(start_result.get('executionArn'))

    def check_invocations():
        invoke_times = lambda_api.LAMBDA_EXECUTOR.function_invoke_times
        self.assertIn(lambda_arn_1, invoke_times)
        self.assertIn(lambda_arn_2, invoke_times)
        # assert that the result is correct
        result = self._get_execution_results(sm_arn)
        self.assertEqual(result['result_value'], {'Hello': TEST_RESULT_VALUE})

    # assert that the lambda has been invoked by the SM execution
    retry(check_invocations, sleep=0.7, retries=25)

    # clean up
    self.cleanup(sm_arn, machines_before)
def test_intrinsic_functions(self):
    """Run a state machine that uses intrinsic functions and check its output."""
    machines_before = self.sfn_client.list_state_machines()["stateMachines"]

    # create state machine
    role_arn = aws_stack.role_arn("sfn_role")
    sm_def = clone(STATE_MACHINE_INTRINSIC_FUNCS)
    lambda_arn_1 = aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_5)
    lambda_arn_2 = aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_5)
    state1_params = sm_def["States"]["state1"].get("Parameters")
    if isinstance(state1_params, dict):
        state1_params["lambda_params"]["FunctionName"] = lambda_arn_1
    sm_def["States"]["state3"]["Resource"] = lambda_arn_2
    sm_name = "intrinsic-%s" % short_uid()
    self.sfn_client.create_state_machine(
        name=sm_name, definition=json.dumps(sm_def), roleArn=role_arn)

    # run state machine with an empty input document
    sm_arn = self.get_machine_arn(sm_name)
    lambda_api.LAMBDA_EXECUTOR.function_invoke_times.clear()
    start_result = self.sfn_client.start_execution(
        stateMachineArn=sm_arn, input=json.dumps({}))
    self.assertTrue(start_result.get("executionArn"))

    def check_invocations():
        invoke_times = lambda_api.LAMBDA_EXECUTOR.function_invoke_times
        self.assertIn(lambda_arn_1, invoke_times)
        self.assertIn(lambda_arn_2, invoke_times)
        # assert that the result is correct
        result = self._get_execution_results(sm_arn)
        self.assertEqual({"payload": {"values": [1, "v2"]}},
                         result.get("result_value"))

    # assert that the lambda has been invoked by the SM execution
    retry(check_invocations, sleep=1, retries=10)

    # clean up
    self.cleanup(sm_arn, machines_before)
def test_try_catch_state_machine(self):
    """Run a state machine with a Catch handler and verify the handled result."""
    if os.environ.get("AWS_DEFAULT_REGION") != "us-east-1":
        pytest.skip("skipping non us-east-1 temporarily")
    machines_before = self.sfn_client.list_state_machines()["stateMachines"]

    # create state machine
    role_arn = aws_stack.role_arn("sfn_role")
    sm_def = clone(STATE_MACHINE_CATCH)
    lambda_arn_1 = aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_1)
    lambda_arn_2 = aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_2)
    sm_def["States"]["Start"]["Parameters"]["FunctionName"] = lambda_arn_1
    sm_def["States"]["ErrorHandler"]["Resource"] = lambda_arn_2
    sm_def["States"]["Final"]["Resource"] = lambda_arn_2
    sm_name = "catch-%s" % short_uid()
    self.sfn_client.create_state_machine(
        name=sm_name, definition=json.dumps(sm_def), roleArn=role_arn)

    # run state machine
    sm_arn = self.get_machine_arn(sm_name)
    lambda_api.LAMBDA_EXECUTOR.function_invoke_times.clear()
    start_result = self.sfn_client.start_execution(stateMachineArn=sm_arn)
    self.assertTrue(start_result.get("executionArn"))

    def check_invocations():
        invoke_times = lambda_api.LAMBDA_EXECUTOR.function_invoke_times
        self.assertIn(lambda_arn_1, invoke_times)
        self.assertIn(lambda_arn_2, invoke_times)
        # assert that the result is correct
        result = self._get_execution_results(sm_arn)
        self.assertEqual({"Hello": TEST_RESULT_VALUE}, result.get("handled"))

    # assert that the lambda has been invoked by the SM execution
    retry(check_invocations, sleep=1, retries=10)

    # clean up
    self.cleanup(sm_arn, machines_before)
def test_kinesis_lambda_forward_chain(self):
    """Record published to stream 1 is forwarded via Lambda to stream 2 and lands in S3."""
    kinesis = aws_stack.connect_to_service("kinesis")
    s3 = aws_stack.connect_to_service("s3")
    aws_stack.create_kinesis_stream(TEST_CHAIN_STREAM1_NAME, delete=True)
    aws_stack.create_kinesis_stream(TEST_CHAIN_STREAM2_NAME, delete=True)
    s3.create_bucket(Bucket=TEST_BUCKET_NAME)

    # deploy test lambdas connected to Kinesis streams
    zip_file = testutil.create_lambda_archive(
        load_file(TEST_LAMBDA_PYTHON), get_content=True, libs=TEST_LAMBDA_LIBS
    )
    for func_name, stream_name in (
        (TEST_CHAIN_LAMBDA1_NAME, TEST_CHAIN_STREAM1_NAME),
        (TEST_CHAIN_LAMBDA2_NAME, TEST_CHAIN_STREAM2_NAME),
    ):
        testutil.create_lambda_function(
            func_name=func_name,
            zip_file=zip_file,
            event_source_arn=get_event_source_arn(stream_name),
        )

    # publish test record, instructing the Lambda to forward it to stream 2
    test_data = {"test_data": "forward_chain_data_%s with 'quotes\\\"" % short_uid()}
    payload = clone(test_data)
    payload[lambda_integration.MSG_BODY_MESSAGE_TARGET] = "kinesis:%s" % TEST_CHAIN_STREAM2_NAME
    kinesis.put_record(
        Data=to_bytes(json.dumps(payload)),
        PartitionKey="testId",
        StreamName=TEST_CHAIN_STREAM1_NAME,
    )

    # check results (delivery is eventually consistent, hence the retry)
    def check_results():
        testutil.assert_objects(test_data, testutil.list_all_s3_objects())

    retry(check_results, retries=5, sleep=3)

    # clean up
    kinesis.delete_stream(StreamName=TEST_CHAIN_STREAM1_NAME)
    kinesis.delete_stream(StreamName=TEST_CHAIN_STREAM2_NAME)
def test_events_state_machine(self, stepfunctions_client):
    """State machine publishes an event to a custom bus; verify it arrives."""
    events = aws_stack.create_external_boto_client("events")
    machines_before = stepfunctions_client.list_state_machines()["stateMachines"]

    # create event bus
    bus_name = f"bus-{short_uid()}"
    events.create_event_bus(Name=bus_name)

    # create state machine that targets the new bus
    sm_def = clone(STATE_MACHINE_EVENTS)
    sm_def["States"]["step1"]["Parameters"]["Entries"][0]["EventBusName"] = bus_name
    sm_name = f"events-{short_uid()}"
    role_arn = aws_stack.role_arn("sfn_role")
    stepfunctions_client.create_state_machine(
        name=sm_name, definition=json.dumps(sm_def), roleArn=role_arn)

    # run state machine
    events_before = len(TEST_EVENTS_CACHE)
    sm_arn = get_machine_arn(sm_name, stepfunctions_client)
    start_result = stepfunctions_client.start_execution(stateMachineArn=sm_arn)
    assert start_result.get("executionArn")

    def check_invocations():
        # assert that the event is received
        assert events_before + 1 == len(TEST_EVENTS_CACHE)
        last_event = TEST_EVENTS_CACHE[-1]
        assert bus_name == last_event["EventBusName"]
        assert "TestSource" == last_event["Source"]
        assert "TestMessage" == last_event["DetailType"]
        assert {"Message": "Hello from Step Functions!"} == json.loads(last_event["Detail"])

    # assert that the event bus has received an event from the SM execution
    retry(check_invocations, sleep=1, retries=10)

    # clean up
    cleanup(sm_arn, machines_before, stepfunctions_client)
    events.delete_event_bus(Name=bus_name)
def to_response_json(model_type, data, api_id=None, self_link=None, id_attr=None):
    """Wrap *data* in a HAL-style JSON response for the given model type.

    Single-element lists are unwrapped first; a default self link is
    derived from the model type and the configured id attribute unless an
    explicit *self_link* is given.
    """
    if isinstance(data, list) and len(data) == 1:
        data = data[0]
    id_attr = id_attr or "id"
    result = common.clone(data)
    if not self_link:
        self_link = "/%ss/%s" % (model_type, data[id_attr])
        if api_id:
            self_link = "/restapis/%s/%s" % (api_id, self_link)
    links = result.setdefault("_links", {})
    links["self"] = {"href": self_link}
    links["curies"] = {
        "href": "https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-authorizer-latest.html",
        "name": model_type,
        "templated": True,
    }
    links["%s:delete" % model_type] = {"href": self_link}
    return result
def create_change_set(req_params):
    """Create a CloudFormation change set from the given request parameters.

    If no stack with the requested name exists yet, an empty stack is
    created on the fly (status CREATE_COMPLETE) so the change set has a
    stack to attach to. Returns the new change set's stack id and id.
    """
    stack_name = req_params.get('StackName')
    cloudformation_listener.prepare_template_body(req_params)
    template = template_deployer.parse_template(req_params['TemplateBody'])
    template['StackName'] = stack_name
    template['ChangeSetName'] = req_params.get('ChangeSetName')
    stack = existing = find_stack(stack_name)
    if not existing:
        # automatically create (empty) stack if none exists yet
        state = RegionState.get()
        # shallow copy: the empty stack shares top-level template keys but
        # gets its own (empty) Resources section
        empty_stack_template = dict(template)
        empty_stack_template['Resources'] = {}
        stack = Stack(clone(req_params), empty_stack_template)
        state.stacks[stack.stack_id] = stack
        stack.set_stack_status('CREATE_COMPLETE')
    change_set = StackChangeSet(req_params, template)
    stack.change_sets.append(change_set)
    change_set.metadata['Status'] = 'CREATE_COMPLETE'
    return {'StackId': change_set.stack_id, 'Id': change_set.change_set_id}
def test_create_choice_state_machine(self):
    """Create a Choice-state machine, run it, and verify the 'added' result."""
    machines_before = self.sfn_client.list_state_machines()['stateMachines']

    # create state machine with the Add step wired to the test Lambda
    role_arn = aws_stack.role_arn('sfn_role')
    sm_def = clone(CHOICE_STATE_MACHINE_DEF)
    lambda_arn_4 = aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_4)
    sm_def['States']['Add']['Resource'] = lambda_arn_4
    self.sfn_client.create_state_machine(
        name=CHOICE_STATE_MACHINE_NAME, definition=json.dumps(sm_def), roleArn=role_arn)

    # assert that the SM has been created
    machines_after = self.sfn_client.list_state_machines()['stateMachines']
    self.assertEqual(len(machines_after), len(machines_before) + 1)

    # run state machine
    machines = self.sfn_client.list_state_machines()['stateMachines']
    sm_arn = [m['stateMachineArn'] for m in machines
              if m['name'] == CHOICE_STATE_MACHINE_NAME][0]
    sm_input = {'x': '1', 'y': '2'}
    start_result = self.sfn_client.start_execution(
        stateMachineArn=sm_arn, input=json.dumps(sm_input))
    self.assertTrue(start_result.get('executionArn'))

    # define expected output
    test_output = {**sm_input, 'added': {'Hello': TEST_RESULT_VALUE}}

    def check_result():
        result = self._get_execution_results(sm_arn)
        self.assertEqual(result, test_output)

    # assert that the result is correct
    retry(check_result, sleep=2, retries=10)

    # clean up
    self.sfn_client.delete_state_machine(stateMachineArn=sm_arn)
def test_intrinsic_functions(self):
    """Run a state machine that uses intrinsic functions and check its output."""
    machines_before = self.sfn_client.list_state_machines()['stateMachines']

    # create state machine
    role_arn = aws_stack.role_arn('sfn_role')
    sm_def = clone(STATE_MACHINE_INTRINSIC_FUNCS)
    lambda_arn_1 = aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_5)
    lambda_arn_2 = aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_5)
    state1_params = sm_def['States']['state1'].get('Parameters')
    if isinstance(state1_params, dict):
        state1_params['lambda_params']['FunctionName'] = lambda_arn_1
    sm_def['States']['state3']['Resource'] = lambda_arn_2
    sm_name = 'intrinsic-%s' % short_uid()
    self.sfn_client.create_state_machine(
        name=sm_name, definition=json.dumps(sm_def), roleArn=role_arn)

    # run state machine with an empty input document
    sm_arn = self.get_machine_arn(sm_name)
    lambda_api.LAMBDA_EXECUTOR.function_invoke_times.clear()
    start_result = self.sfn_client.start_execution(
        stateMachineArn=sm_arn, input=json.dumps({}))
    self.assertTrue(start_result.get('executionArn'))

    def check_invocations():
        invoke_times = lambda_api.LAMBDA_EXECUTOR.function_invoke_times
        self.assertIn(lambda_arn_1, invoke_times)
        self.assertIn(lambda_arn_2, invoke_times)
        # assert that the result is correct
        result = self._get_execution_results(sm_arn)
        self.assertEqual({'payload': {'values': [1, 'v2']}},
                         result.get('result_value'))

    # assert that the lambda has been invoked by the SM execution
    retry(check_invocations, sleep=1, retries=10)

    # clean up
    self.cleanup(sm_arn, machines_before)
def handle_associate_vpc_request(method, path, data):
    """Handle Route53 (dis)associate-VPC-with-hosted-zone requests.

    Parses the XML request body, updates the in-memory association list
    for the zone in the local Route53Backend, and returns the response
    payload dict — or 404 when a disassociation target is not found.
    """
    is_associate = path.endswith("/associatevpc")
    region_details = Route53Backend.get()
    zone_id = extract_zone_id(path)
    req_data = clone(xmltodict.parse(to_str(data)))
    zone_details = region_details.vpc_hosted_zone_associations.get(zone_id) or []
    if is_associate:
        assoc_id = short_uid()
        zone_data = req_data.get("AssociateVPCWithHostedZoneRequest", {})
        zone_data["Id"] = assoc_id
        zone_data["HostedZoneId"] = zone_id
        zone_details.append(zone_data)
        response_entry = {
            "ChangeInfo": {
                "Id": assoc_id,
                "Status": "INSYNC",
                "SubmittedAt": timestamp_millis(),
            }
        }
        # update VPC info in hosted zone moto object - fixes required after https://github.com/spulec/moto/pull/4786
        hosted_zone = route53_backend.zones.get(zone_id)
        if not getattr(hosted_zone, "vpcid", None):
            hosted_zone.vpcid = zone_data["VPC"].get("VPCId")
        if not getattr(hosted_zone, "vpcregion", None):
            hosted_zone.vpcregion = aws_stack.get_region()
    else:
        def _match(z):
            # NOTE: zone_data (the disassociate request) is bound below,
            # before _match is first called by the comprehensions
            return z["HostedZoneId"] == zone_id and z["VPC"]["VPCId"] == zone_data["VPC"]["VPCId"]

        zone_data = req_data.get("DisassociateVPCFromHostedZoneRequest", {})
        # the matching association is both returned and removed from the list
        response_entry = [z for z in zone_details if _match(z)]
        zone_details = [z for z in zone_details if not _match(z)]
        if not response_entry:
            return 404
        response_entry = response_entry[0]
    region_details.vpc_hosted_zone_associations[zone_id] = zone_details
    response_tag = "%sVPCWithHostedZoneResponse" % ("Associate" if is_associate else "Disassociate")
    return {response_tag: response_entry}
def test_events_state_machine(self):
    """State machine publishes an event to a custom bus; verify it arrives."""
    events = aws_stack.connect_to_service("events")
    machines_before = self.sfn_client.list_state_machines()["stateMachines"]

    # create event bus
    bus_name = f"bus-{short_uid()}"
    events.create_event_bus(Name=bus_name)

    # create state machine that targets the new bus
    sm_def = clone(STATE_MACHINE_EVENTS)
    sm_def["States"]["step1"]["Parameters"]["Entries"][0]["EventBusName"] = bus_name
    sm_name = "events-%s" % short_uid()
    role_arn = aws_stack.role_arn("sfn_role")
    self.sfn_client.create_state_machine(
        name=sm_name, definition=json.dumps(sm_def), roleArn=role_arn)

    # run state machine
    events_before = len(TEST_EVENTS_CACHE)
    sm_arn = self.get_machine_arn(sm_name)
    start_result = self.sfn_client.start_execution(stateMachineArn=sm_arn)
    self.assertTrue(start_result.get("executionArn"))

    def check_invocations():
        # assert that the event is received
        self.assertEqual(events_before + 1, len(TEST_EVENTS_CACHE))
        last_event = TEST_EVENTS_CACHE[-1]
        self.assertEqual(bus_name, last_event["EventBusName"])
        self.assertEqual("TestSource", last_event["Source"])
        self.assertEqual("TestMessage", last_event["DetailType"])
        self.assertEqual({"Message": "Hello from Step Functions!"},
                         json.loads(last_event["Detail"]))

    # assert that the event bus has received an event from the SM execution
    retry(check_invocations, sleep=1, retries=10)

    # clean up
    self.cleanup(sm_arn, machines_before)
    events.delete_event_bus(Name=bus_name)
def test_create_run_state_machine(self):
    """Create a two-step state machine, execute it, and verify the Lambda results."""
    machines_before = self.sfn_client.list_state_machines()["stateMachines"]

    # create state machine with both steps wired to the test Lambdas
    role_arn = aws_stack.role_arn("sfn_role")
    sm_def = clone(STATE_MACHINE_BASIC)
    lambda_arn_1 = aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_1)
    lambda_arn_2 = aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_2)
    sm_def["States"]["step1"]["Resource"] = lambda_arn_1
    sm_def["States"]["step2"]["Resource"] = lambda_arn_2
    sm_name = "basic-%s" % short_uid()
    self.sfn_client.create_state_machine(
        name=sm_name, definition=json.dumps(sm_def), roleArn=role_arn)

    # assert that the SM has been created
    self.assert_machine_created(machines_before)

    # run state machine
    sm_arn = self.get_machine_arn(sm_name)
    lambda_api.LAMBDA_EXECUTOR.function_invoke_times.clear()
    start_result = self.sfn_client.start_execution(stateMachineArn=sm_arn)
    self.assertTrue(start_result.get("executionArn"))

    def check_invocations():
        invoke_times = lambda_api.LAMBDA_EXECUTOR.function_invoke_times
        self.assertIn(lambda_arn_1, invoke_times)
        self.assertIn(lambda_arn_2, invoke_times)
        # assert that the result is correct
        result = self._get_execution_results(sm_arn)
        self.assertEqual({"Hello": TEST_RESULT_VALUE}, result["result_value"])

    # assert that the lambda has been invoked by the SM execution
    retry(check_invocations, sleep=0.7, retries=25)

    # clean up
    self.cleanup(sm_arn, machines_before)
def test_api_gateway_authorizer_crud(self):
    """Exercise create/read/update/delete for API Gateway authorizers."""
    apig = aws_stack.connect_to_service('apigateway')

    # create, then read back and compare
    authorizer = apig.create_authorizer(
        restApiId=self.TEST_API_GATEWAY_ID, **self.TEST_API_GATEWAY_AUTHORIZER)
    authorizer_id = authorizer.get('id')
    create_result = apig.get_authorizer(
        restApiId=self.TEST_API_GATEWAY_ID, authorizerId=authorizer_id)
    # ignore boto3 stuff
    del create_result['ResponseMetadata']
    create_expected = clone(self.TEST_API_GATEWAY_AUTHORIZER)
    create_expected['id'] = authorizer_id
    self.assertDictEqual(create_expected, create_result)

    # apply patch operations, then read back and compare
    apig.update_authorizer(
        restApiId=self.TEST_API_GATEWAY_ID,
        authorizerId=authorizer_id,
        patchOperations=self.TEST_API_GATEWAY_AUTHORIZER_OPS)
    update_result = apig.get_authorizer(
        restApiId=self.TEST_API_GATEWAY_ID, authorizerId=authorizer_id)
    # ignore boto3 stuff
    del update_result['ResponseMetadata']
    update_expected = apply_patch(create_expected, self.TEST_API_GATEWAY_AUTHORIZER_OPS)
    self.assertDictEqual(update_expected, update_result)

    # delete; a subsequent read must fail
    apig.delete_authorizer(
        restApiId=self.TEST_API_GATEWAY_ID, authorizerId=authorizer_id)
    self.assertRaises(Exception, apig.get_authorizer,
                      self.TEST_API_GATEWAY_ID, authorizer_id)
def add_vpc_info_to_response(path: str, response: Response):
    """Enrich Get/CreateHostedZone XML responses with associated VPC info.

    The VPC associations maintained in the local Route53Backend are
    injected as a <VPCs> element next to each <HostedZone> found in the
    response body; other responses pass through untouched.
    """
    content = to_str(response.content or "")
    # fast-path exits for responses that cannot contain a hosted zone
    if "<HostedZone>" not in content:
        return
    if "GetHostedZoneResponse" not in content and "CreateHostedZoneResponse" not in content:
        return
    content = clone(xmltodict.parse(content))
    region_details = Route53Backend.get()

    def _insert(obj, **_):
        # only patch dicts that wrap a HostedZone and don't already carry VPCs
        if not isinstance(obj, dict) or "HostedZone" not in obj or "VPCs" in obj:
            return obj
        zone_id = obj["HostedZone"].get("Id", "").replace("/hostedzone/", "")
        zone_details = region_details.vpc_hosted_zone_associations.get(zone_id) or []
        vpcs = [zone["VPC"] for zone in zone_details if zone.get("VPC")]
        if vpcs:
            obj["VPCs"] = [{"VPC": vpc} for vpc in vpcs]
        return obj

    recurse_object(content, _insert)
    set_response_content(response, xmltodict.unparse(content))
def test_create_run_state_machine(self):
    """Create a two-step state machine, execute it, and verify the Lambda results."""
    machines_before = self.sfn_client.list_state_machines()['stateMachines']

    # create state machine with both steps wired to the test Lambdas
    role_arn = aws_stack.role_arn('sfn_role')
    sm_def = clone(STATE_MACHINE_DEF)
    lambda_arn_1 = aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_1)
    lambda_arn_2 = aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_2)
    sm_def['States']['step1']['Resource'] = lambda_arn_1
    sm_def['States']['step2']['Resource'] = lambda_arn_2
    self.sfn_client.create_state_machine(
        name=STATE_MACHINE_NAME, definition=json.dumps(sm_def), roleArn=role_arn)

    # assert that the SM has been created
    machines_after = self.sfn_client.list_state_machines()['stateMachines']
    self.assertEqual(len(machines_after), len(machines_before) + 1)

    # run state machine
    machines = self.sfn_client.list_state_machines()['stateMachines']
    sm_arn = [m['stateMachineArn'] for m in machines
              if m['name'] == STATE_MACHINE_NAME][0]
    start_result = self.sfn_client.start_execution(stateMachineArn=sm_arn)
    self.assertTrue(start_result.get('executionArn'))

    def check_invocations():
        invoke_times = lambda_api.LAMBDA_EXECUTOR.function_invoke_times
        self.assertIn(lambda_arn_1, invoke_times)
        self.assertIn(lambda_arn_2, invoke_times)
        # assert that the result is correct: inspect the success output of
        # the last event in the execution history
        executions = self.sfn_client.list_executions(stateMachineArn=sm_arn)
        execution = executions['executions'][0]
        history = self.sfn_client.get_execution_history(
            executionArn=execution['executionArn'])
        events = sorted(history['events'], key=lambda event: event['id'])
        output = json.loads(events[-1]['executionSucceededEventDetails']['output'])
        self.assertEqual(output['result_value'], {'Hello': TEST_RESULT_VALUE})

    # assert that the lambda has been invoked by the SM execution
    retry(check_invocations, sleep=0.7, retries=25)

    # clean up
    self.sfn_client.delete_state_machine(stateMachineArn=sm_arn)
def test_create_run_map_state_machine(self):
    """Create a Map-state machine, run it, and verify the mapped output."""
    names = ["Bob", "Meg", "Joe"]
    test_input = [{"map": name} for name in names]
    test_output = [{"Hello": name} for name in names]
    machines_before = self.sfn_client.list_state_machines()["stateMachines"]

    # create state machine with the iterator step wired to the test Lambda
    role_arn = aws_stack.role_arn("sfn_role")
    sm_def = clone(STATE_MACHINE_MAP)
    lambda_arn_3 = aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_3)
    iterator_states = sm_def["States"]["ExampleMapState"]["Iterator"]["States"]
    iterator_states["CallLambda"]["Resource"] = lambda_arn_3
    sm_name = "map-%s" % short_uid()
    self.sfn_client.create_state_machine(
        name=sm_name, definition=json.dumps(sm_def), roleArn=role_arn)

    # assert that the SM has been created
    self.assert_machine_created(machines_before)

    # run state machine
    sm_arn = self.get_machine_arn(sm_name)
    lambda_api.LAMBDA_EXECUTOR.function_invoke_times.clear()
    start_result = self.sfn_client.start_execution(
        stateMachineArn=sm_arn, input=json.dumps(test_input))
    self.assertTrue(start_result.get("executionArn"))

    def check_invocations():
        self.assertIn(lambda_arn_3, lambda_api.LAMBDA_EXECUTOR.function_invoke_times)
        # assert that the result is correct
        result = self._get_execution_results(sm_arn)
        self.assertEqual(test_output, result)

    # assert that the lambda has been invoked by the SM execution
    retry(check_invocations, sleep=1, retries=10)

    # clean up
    self.cleanup(sm_arn, machines_before)
def test_try_catch_state_machine(self):
    """Run a state machine with a Catch handler and verify the handled result."""
    machines_before = self.sfn_client.list_state_machines()['stateMachines']

    # create state machine
    role_arn = aws_stack.role_arn('sfn_role')
    sm_def = clone(STATE_MACHINE_CATCH)
    lambda_arn_1 = aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_1)
    lambda_arn_2 = aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_2)
    sm_def['States']['Start']['Parameters']['FunctionName'] = lambda_arn_1
    sm_def['States']['ErrorHandler']['Resource'] = lambda_arn_2
    sm_def['States']['Final']['Resource'] = lambda_arn_2
    sm_name = 'catch-%s' % short_uid()
    self.sfn_client.create_state_machine(
        name=sm_name, definition=json.dumps(sm_def), roleArn=role_arn)

    # run state machine
    sm_arn = self.get_machine_arn(sm_name)
    lambda_api.LAMBDA_EXECUTOR.function_invoke_times.clear()
    start_result = self.sfn_client.start_execution(stateMachineArn=sm_arn)
    self.assertTrue(start_result.get('executionArn'))

    def check_invocations():
        invoke_times = lambda_api.LAMBDA_EXECUTOR.function_invoke_times
        self.assertIn(lambda_arn_1, invoke_times)
        self.assertIn(lambda_arn_2, invoke_times)
        # assert that the result is correct
        result = self._get_execution_results(sm_arn)
        self.assertEqual(result.get('handled'), {'Hello': TEST_RESULT_VALUE})

    # assert that the lambda has been invoked by the SM execution
    retry(check_invocations, sleep=1, retries=10)

    # clean up
    self.cleanup(sm_arn, machines_before)
def to_authorizer_response_json(api_id, data):
    """Wrap an authorizer record in a HAL-style JSON response.

    Adds 'self', 'curies' and 'authorizer:delete' links for the
    authorizer identified by data['id'] under the given REST API.
    """
    result = common.clone(data)
    self_link = '/restapis/%s/authorizers/%s' % (api_id, data['id'])
    if '_links' not in result:
        result['_links'] = {}
    result['_links']['self'] = {'href': self_link}
    result['_links']['curies'] = {
        'href': 'https://docs.aws.amazon.com/apigateway/latest/developerguide/restapi-authorizer-latest.html',
        'name': 'authorizer',
        'templated': True
    }
    # fix: the 'authorizer:delete' link was assigned twice; the redundant
    # duplicate statement has been removed (same resulting dict)
    result['_links']['authorizer:delete'] = {'href': self_link}
    return result
def test_create_run_map_state_machine(self):
    """Create a Map-state machine, run it, and verify the mapped output."""
    machines_before = self.sfn_client.list_state_machines()['stateMachines']

    # create state machine with the iterator step wired to the test Lambda
    role_arn = aws_stack.role_arn('sfn_role')
    sm_def = clone(MAP_STATE_MACHINE_DEF)
    lambda_arn_3 = aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_3)
    iterator_states = sm_def['States']['ExampleMapState']['Iterator']['States']
    iterator_states['CallLambda']['Resource'] = lambda_arn_3
    self.sfn_client.create_state_machine(
        name=MAP_STATE_MACHINE_NAME, definition=json.dumps(sm_def), roleArn=role_arn)

    # assert that the SM has been created
    machines_after = self.sfn_client.list_state_machines()['stateMachines']
    self.assertEqual(len(machines_after), len(machines_before) + 1)

    # run state machine
    machines = self.sfn_client.list_state_machines()['stateMachines']
    sm_arn = [m['stateMachineArn'] for m in machines
              if m['name'] == MAP_STATE_MACHINE_NAME][0]
    start_result = self.sfn_client.start_execution(
        stateMachineArn=sm_arn, input=json.dumps(MAP_STATE_MACHINE_INPUT))
    self.assertTrue(start_result.get('executionArn'))

    def check_invocations():
        self.assertIn(lambda_arn_3, lambda_api.LAMBDA_EXECUTOR.function_invoke_times)
        # assert that the result is correct: inspect the success output of
        # the last event in the execution history
        executions = self.sfn_client.list_executions(stateMachineArn=sm_arn)
        execution = executions['executions'][0]
        history = self.sfn_client.get_execution_history(
            executionArn=execution['executionArn'])
        events = sorted(history['events'], key=lambda event: event['id'])
        output = json.loads(events[-1]['executionSucceededEventDetails']['output'])
        self.assertEqual(output, MAP_STATE_MACHINE_OUTPUT)

    # assert that the lambda has been invoked by the SM execution
    retry(check_invocations, sleep=0.7, retries=2)

    # clean up
    self.sfn_client.delete_state_machine(stateMachineArn=sm_arn)
def test_create_choice_state_machine(self, stepfunctions_client):
    """Create a state machine with a Choice state, run it, and check the result."""
    machines_before = stepfunctions_client.list_state_machines()["stateMachines"]
    role_arn = aws_stack.role_arn("sfn_role")

    # wire the 'Add' task to the test lambda
    lambda_arn_4 = aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_4)
    sm_def = clone(STATE_MACHINE_CHOICE)
    sm_def["States"]["Add"]["Resource"] = lambda_arn_4

    sm_name = f"choice-{short_uid()}"
    stepfunctions_client.create_state_machine(
        name=sm_name, definition=json.dumps(sm_def), roleArn=role_arn)

    # assert that the SM has been created
    assert_machine_created(machines_before, stepfunctions_client)

    # run state machine
    sm_arn = get_machine_arn(sm_name, stepfunctions_client)
    execution_input = {"x": "1", "y": "2"}
    execution = stepfunctions_client.start_execution(
        stateMachineArn=sm_arn, input=json.dumps(execution_input))
    assert execution.get("executionArn")

    # define expected output
    expected_output = {**execution_input, "added": {"Hello": TEST_RESULT_VALUE}}

    def check_result():
        assert expected_output == _get_execution_results(sm_arn, stepfunctions_client)

    # assert that the result is correct
    retry(check_result, sleep=2, retries=10)

    # clean up
    cleanup(sm_arn, machines_before, sfn_client=stepfunctions_client)
def get_message_attributes_md5(self, req_data):
    """Compute the MD5 hash of the message attributes in the given SQS request.

    Works around an issue in moto where extended data types like
    'Number.java.lang.Integer' are not supported: each long type name is
    temporarily replaced with its short form (e.g., 'Number') for parsing,
    then restored on the parsed attributes before the hash is computed.
    """
    req_data = clone(req_data)
    original_types = {}
    # iterate over a copy, since req_data entries are rewritten in the loop
    for key, entry in dict(req_data).items():
        if not key.endswith('DataType'):
            continue
        type_parts = entry[0].split('.')
        if len(type_parts) <= 2:
            continue
        short_name = type_parts[0]
        long_name = req_data[key][0]
        attr_index = key.split('.')[1]
        attr_name = req_data['MessageAttribute.%s.Name' % attr_index][0]
        # remember the original type, and shorten it for moto's parser
        original_types[attr_name] = long_name
        req_data[key] = [short_name]
        # register a transport encoding for the long type name, if missing
        if long_name not in TRANSPORT_TYPE_ENCODINGS:
            TRANSPORT_TYPE_ENCODINGS[long_name] = TRANSPORT_TYPE_ENCODINGS[short_name]
    message = Message('dummy_msg_id', 'dummy_body')
    message.message_attributes = parse_message_attributes(req_data)
    # restore the original (long) type names before hashing
    for attr_name, data_type in original_types.items():
        message.message_attributes[attr_name]['data_type'] = data_type
    return message.attribute_md5
def test_create_choice_state_machine(self):
    """Create a state machine with a Choice state, run it, and check the result."""
    machines_before = self.sfn_client.list_state_machines()['stateMachines']
    role_arn = aws_stack.role_arn('sfn_role')

    # wire the 'Add' task to the test lambda
    lambda_arn_4 = aws_stack.lambda_function_arn(TEST_LAMBDA_NAME_4)
    sm_def = clone(STATE_MACHINE_CHOICE)
    sm_def['States']['Add']['Resource'] = lambda_arn_4

    sm_name = 'choice-%s' % short_uid()
    self.sfn_client.create_state_machine(
        name=sm_name, definition=json.dumps(sm_def), roleArn=role_arn)

    # assert that the SM has been created
    self.assert_machine_created(machines_before)

    # run state machine
    sm_arn = self.get_machine_arn(sm_name)
    execution_input = {'x': '1', 'y': '2'}
    execution = self.sfn_client.start_execution(
        stateMachineArn=sm_arn, input=json.dumps(execution_input))
    self.assertTrue(execution.get('executionArn'))

    # define expected output
    expected_output = {**execution_input, 'added': {'Hello': TEST_RESULT_VALUE}}

    def check_result():
        self.assertEqual(expected_output, self._get_execution_results(sm_arn))

    # assert that the result is correct
    retry(check_result, sleep=2, retries=10)

    # clean up
    self.cleanup(sm_arn, machines_before)
def clone_stack_params(stack_params):
    """Return a deep copy of the given stack parameters.

    Best-effort: if cloning fails for any reason, the original
    object is returned unchanged (and the error is logged).
    """
    try:
        result = clone(stack_params)
    except Exception as e:
        LOG.info("Unable to clone stack parameters: %s", e)
        return stack_params
    return result
def get_services_health(reload=False):
    """Return a copy of the current service health status map.

    :param reload: when truthy, refresh the cached statuses first
    :return: copy of the STATUSES dict, without the internal 'edge' entry
    """
    if reload:
        reload_services_health()
    status_copy = clone(dict(STATUSES))
    # the internal 'edge' proxy service is not exposed to callers
    status_copy.get('services', {}).pop('edge', None)
    return status_copy
def forward_request(self, method, path, data, headers):
    """Intercept S3 API requests before they are forwarded to the backend.

    Handles notification/cors/lifecycle sub-resource requests locally and
    returns a Response for them; otherwise returns True (forward unchanged)
    or a Request object with modified payload data.
    """
    modified_data = None

    # If this request contains streaming v4 authentication signatures, strip them from the message
    # Related issue: https://github.com/localstack/localstack/issues/98
    # TODO we should evaluate whether to replace moto s3 with scality/S3:
    # https://github.com/scality/S3/issues/237
    if headers.get('x-amz-content-sha256') == 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD':
        modified_data = strip_chunk_signatures(data)

    # POST requests to S3 may include a "$(unknown)" placeholder in the
    # key, which should be replaced with an actual file name before storing.
    if method == 'POST':
        original_data = modified_data or data
        expanded_data = multipart_content.expand_multipart_filename(original_data, headers)
        # identity comparison: the helper returns the same object if nothing changed
        if expanded_data is not original_data:
            modified_data = expanded_data

    # If no content-type is provided, 'binary/octet-stream' should be used
    # src: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
    if method == 'PUT' and not headers.get('content-type'):
        headers['content-type'] = 'binary/octet-stream'

    # persist this API call to disk
    persistence.record('s3', method, path, data, headers)

    # extract the bucket name from the request path (first path segment)
    parsed = urlparse.urlparse(path)
    query = parsed.query
    path = parsed.path
    bucket = path.split('/')[1]
    query_map = urlparse.parse_qs(query)

    if query == 'notification' or 'notification' in query_map:
        # handle ?notification requests locally instead of forwarding to moto
        response = Response()
        response.status_code = 200
        if method == 'GET':
            # TODO check if bucket exists
            result = '<NotificationConfiguration xmlns="%s">' % XMLNS_S3
            if bucket in S3_NOTIFICATIONS:
                notif = S3_NOTIFICATIONS[bucket]
                # emit one XML configuration element per destination type (e.g. Queue/Topic/CloudFunction)
                for dest in NOTIFICATION_DESTINATION_TYPES:
                    if dest in notif:
                        dest_dict = {
                            '%sConfiguration' % dest: {
                                'Id': uuid.uuid4(),
                                dest: notif[dest],
                                'Event': notif['Event'],
                                'Filter': notif['Filter']
                            }
                        }
                        result += xmltodict.unparse(dest_dict, full_document=False)
            result += '</NotificationConfiguration>'
            response._content = result

        if method == 'PUT':
            parsed = xmltodict.parse(data)
            notif_config = parsed.get('NotificationConfiguration')
            # replace any existing notification config for this bucket
            S3_NOTIFICATIONS.pop(bucket, None)
            for dest in NOTIFICATION_DESTINATION_TYPES:
                config = notif_config.get('%sConfiguration' % (dest))
                if config:
                    events = config.get('Event')
                    if isinstance(events, six.string_types):
                        events = [events]
                    event_filter = config.get('Filter', {})
                    # make sure FilterRule is an array
                    s3_filter = _get_s3_filter(event_filter)
                    if s3_filter and not isinstance(s3_filter.get('FilterRule', []), list):
                        s3_filter['FilterRule'] = [s3_filter['FilterRule']]
                    # create final details dict
                    notification_details = {
                        'Id': config.get('Id'),
                        'Event': events,
                        dest: config.get(dest),
                        'Filter': event_filter
                    }
                    # TODO: what if we have multiple destinations - would we overwrite the config?
                    S3_NOTIFICATIONS[bucket] = clone(notification_details)

        # return response for ?notification request
        return response

    if query == 'cors' or 'cors' in query_map:
        if method == 'GET':
            return get_cors(bucket)
        if method == 'PUT':
            return set_cors(bucket, data)
        if method == 'DELETE':
            return delete_cors(bucket)

    if query == 'lifecycle' or 'lifecycle' in query_map:
        if method == 'GET':
            return get_lifecycle(bucket)
        if method == 'PUT':
            return set_lifecycle(bucket, data)

    # forward a modified request if the payload was rewritten above
    if modified_data:
        return Request(data=modified_data, headers=headers, method=method)
    return True
def return_response(self, method, path, data, headers, response):
    """Post-process DynamoDB responses and emit stream/lambda event records.

    Fixes region names in returned ARNs, tracks table definitions, and
    builds DynamoDB stream records for mutating operations, which are then
    forwarded to lambda event source mappings and DynamoDB streams.
    """
    data = json.loads(to_str(data))

    # update table definitions
    if data and 'TableName' in data and 'KeySchema' in data:
        TABLE_DEFINITIONS[data['TableName']] = data

    if response._content:
        # fix the table ARN (DynamoDBLocal hardcodes "ddblocal" as the region)
        content_replaced = re.sub(r'"TableArn"\s*:\s*"arn:aws:dynamodb:ddblocal:([^"]+)"',
            r'"TableArn": "arn:aws:dynamodb:%s:\1"' % aws_stack.get_local_region(),
            to_str(response._content))
        if content_replaced != response._content:
            response._content = content_replaced
            # content length changed, so headers must be recomputed
            fix_headers_for_updated_response(response)

    action = headers.get('X-Amz-Target')
    if not action:
        return

    # base skeleton for the stream record(s) emitted below;
    # SizeBytes=-1 is a placeholder, overwritten where the item is known
    record = {
        'eventID': '1',
        'eventVersion': '1.0',
        'dynamodb': {
            'StreamViewType': 'NEW_AND_OLD_IMAGES',
            'SizeBytes': -1
        },
        'awsRegion': DEFAULT_REGION,
        'eventSource': 'aws:dynamodb'
    }
    records = [record]

    if action == '%s.UpdateItem' % ACTION_PREFIX:
        updated_item = find_existing_item(data)
        if not updated_item:
            return
        record['eventName'] = 'MODIFY'
        record['dynamodb']['Keys'] = data['Key']
        # thread_local.existing_item is expected to have been captured
        # before the request was forwarded (in forward_request)
        record['dynamodb']['OldImage'] = ProxyListenerDynamoDB.thread_local.existing_item
        record['dynamodb']['NewImage'] = updated_item
        record['dynamodb']['SizeBytes'] = len(json.dumps(updated_item))
    elif action == '%s.BatchWriteItem' % ACTION_PREFIX:
        # emit one INSERT record per PutRequest, across all tables in the batch
        records = []
        for table_name, requests in data['RequestItems'].items():
            for request in requests:
                put_request = request.get('PutRequest')
                if put_request:
                    keys = dynamodb_extract_keys(item=put_request['Item'], table_name=table_name)
                    if isinstance(keys, Response):
                        # key extraction failed - propagate the error response
                        return keys
                    new_record = clone(record)
                    new_record['eventName'] = 'INSERT'
                    new_record['dynamodb']['Keys'] = keys
                    new_record['dynamodb']['NewImage'] = put_request['Item']
                    new_record['eventSourceARN'] = aws_stack.dynamodb_table_arn(table_name)
                    records.append(new_record)
    elif action == '%s.PutItem' % ACTION_PREFIX:
        # consume (and reset) the pre-request snapshot to decide INSERT vs MODIFY
        existing_item = ProxyListenerDynamoDB.thread_local.existing_item
        ProxyListenerDynamoDB.thread_local.existing_item = None
        record['eventName'] = 'INSERT' if not existing_item else 'MODIFY'
        keys = dynamodb_extract_keys(item=data['Item'], table_name=data['TableName'])
        if isinstance(keys, Response):
            return keys
        record['dynamodb']['Keys'] = keys
        record['dynamodb']['NewImage'] = data['Item']
        record['dynamodb']['SizeBytes'] = len(json.dumps(data['Item']))
    elif action == '%s.GetItem' % ACTION_PREFIX:
        if response.status_code == 200:
            content = json.loads(to_str(response.content))
            # make sure we append 'ConsumedCapacity', which is properly
            # returned by dynalite, but not by AWS's DynamoDBLocal
            if 'ConsumedCapacity' not in content and data.get('ReturnConsumedCapacity') in ('TOTAL', 'INDEXES'):
                content['ConsumedCapacity'] = {
                    'CapacityUnits': 0.5,  # TODO hardcoded
                    'TableName': data['TableName']
                }
                response._content = json.dumps(content)
                fix_headers_for_updated_response(response)
    elif action == '%s.DeleteItem' % ACTION_PREFIX:
        old_item = ProxyListenerDynamoDB.thread_local.existing_item
        record['eventName'] = 'REMOVE'
        record['dynamodb']['Keys'] = data['Key']
        record['dynamodb']['OldImage'] = old_item
    elif action == '%s.CreateTable' % ACTION_PREFIX:
        if 'StreamSpecification' in data:
            create_dynamodb_stream(data)
        event_publisher.fire_event(event_publisher.EVENT_DYNAMODB_CREATE_TABLE,
            payload={'n': event_publisher.get_hash(data['TableName'])})
        return
    elif action == '%s.DeleteTable' % ACTION_PREFIX:
        event_publisher.fire_event(event_publisher.EVENT_DYNAMODB_DELETE_TABLE,
            payload={'n': event_publisher.get_hash(data['TableName'])})
        return
    elif action == '%s.UpdateTable' % ACTION_PREFIX:
        if 'StreamSpecification' in data:
            create_dynamodb_stream(data)
        return
    else:
        # nothing to do
        return

    # forward records to subscribers (lambda event source mappings, DDB streams)
    if len(records) > 0 and 'eventName' in records[0]:
        if 'TableName' in data:
            records[0]['eventSourceARN'] = aws_stack.dynamodb_table_arn(data['TableName'])
        forward_to_lambda(records)
        forward_to_ddb_stream(records)
def normalize_authorizer(data):
    """Return a normalized copy of the given authorizer definition."""
    result = common.clone(data)
    # terraform sends the TTL as a string in patch requests, so coerce it
    # to int, defaulting to 300 seconds when absent/falsy
    ttl = result.get('authorizerResultTtlInSeconds') or 300
    result['authorizerResultTtlInSeconds'] = int(ttl)
    return result
def _parse_and_create_resource(logical_id, resource_json, resources_map, region_name, update=False):
    """Parse a CloudFormation resource definition and create/update it.

    Bridges moto's CloudFormation resource parsing with LocalStack's own
    template deployer: the resource is first registered with moto, then
    (if needed) actually deployed/updated in LocalStack, and finally the
    moto resource's ID fields are synchronized with the deployed backend.

    :param logical_id: logical resource ID from the template
    :param resource_json: raw resource JSON from the template
    :param resources_map: moto resource map for the stack
    :param region_name: target AWS region
    :param update: True when handling a stack update (vs. creation)
    :return: the (possibly updated) moto resource object, or None
    """
    stack_name = resources_map.get('AWS::StackName')
    resource_hash_key = (stack_name, logical_id)

    # If the current stack is being updated, avoid infinite recursion
    updating = CURRENTLY_UPDATING_RESOURCES.get(resource_hash_key)
    LOG.debug('Currently updating stack resource %s/%s: %s' % (stack_name, logical_id, updating))
    if updating:
        return None

    # parse and get final resource JSON
    resource_tuple = parsing.parse_resource(logical_id, resource_json, resources_map)
    if not resource_tuple:
        return None
    _, resource_json, _ = resource_tuple

    # add some missing default props which otherwise cause deployments to fail
    props = resource_json['Properties'] = resource_json.get('Properties') or {}
    if resource_json['Type'] == 'AWS::Lambda::EventSourceMapping' and not props.get('StartingPosition'):
        props['StartingPosition'] = 'LATEST'

    # check if this resource already exists in the resource map
    resource = resources_map._parsed_resources.get(logical_id)
    if resource and not update:
        return resource

    # check whether this resource needs to be deployed
    resource_wrapped = {logical_id: resource_json}
    should_be_created = template_deployer.should_be_deployed(logical_id, resource_wrapped, stack_name)

    # fix resource ARNs, make sure to convert account IDs 000000000000 to 123456789012
    resource_json_arns_fixed = clone(json_safe(convert_objs_to_ids(resource_json)))
    set_moto_account_ids(resource_json_arns_fixed)

    # create resource definition and store CloudFormation metadata in moto
    if resource or update:
        parse_and_update_resource_orig(logical_id, resource_json_arns_fixed, resources_map, region_name)
    elif not resource:
        try:
            resource = parse_and_create_resource_orig(logical_id,
                resource_json_arns_fixed, resources_map, region_name)
        except Exception as e:
            if should_be_created:
                # LocalStack will deploy this resource itself, so a moto
                # creation failure is fatal only in that case
                raise
            else:
                LOG.info('Error on moto CF resource creation. Ignoring, as should_be_created=%s: %s' % (
                    should_be_created, e))

    # Fix for moto which sometimes hard-codes region name as 'us-east-1'
    if hasattr(resource, 'region_name') and resource.region_name != region_name:
        LOG.debug('Updating incorrect region from %s to %s' % (resource.region_name, region_name))
        resource.region_name = region_name

    # check whether this resource needs to be deployed
    is_updateable = False
    if not should_be_created:
        # This resource is either not deployable or already exists. Check if it can be updated
        is_updateable = template_deployer.is_updateable(logical_id, resource_wrapped, stack_name)
        if not update or not is_updateable:
            LOG.debug('Resource %s need not be deployed: %s %s' % (logical_id, resource_json, bool(resource)))
            # Return if this resource already exists and can/need not be updated
            return resource

    # Apply some fixes/patches to the resource names, then deploy resource in LocalStack
    update_resource_name(resource, resource_json)
    LOG.debug('Deploying CloudFormation resource (update=%s, exists=%s, updateable=%s): %s' % (
        update, not should_be_created, is_updateable, resource_json))

    try:
        # mark this resource as in-flight to break recursive re-entry
        CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = True
        deploy_func = template_deployer.update_resource if update else template_deployer.deploy_resource
        result = deploy_func(logical_id, resource_wrapped, stack_name=stack_name)
    finally:
        CURRENTLY_UPDATING_RESOURCES[resource_hash_key] = False

    if not should_be_created:
        # skip the parts below for update requests
        return resource

    def find_id(resource):
        """ Find ID of the given resource. """
        if not resource:
            return
        for id_attr in ('Id', 'id', 'ResourceId', 'RestApiId', 'DeploymentId'):
            if id_attr in resource:
                return resource[id_attr]

    # update resource IDs to avoid mismatch between CF moto and LocalStack backend resources
    if hasattr(resource, 'id') or (isinstance(resource, dict) and resource.get('id')):
        existing_id = resource.id if hasattr(resource, 'id') else resource['id']
        new_res_id = find_id(result)
        LOG.debug('Updating resource id: %s - %s, %s - %s' % (existing_id, new_res_id, resource, resource_json))
        if new_res_id:
            LOG.info('Updating resource ID from %s to %s (%s)' % (existing_id, new_res_id, region_name))
            update_resource_id(resource, new_res_id, props, region_name)
        else:
            LOG.warning('Unable to extract id for resource %s: %s' % (logical_id, result))

    # update physical_resource_id field
    update_physical_resource_id(resource)
    return resource