def expand_multipart_filename(data, headers):
    """ Replace any instance of '${filename}' in the key with the given file name.

    Data is given as multipart form submission bytes, and the file name is
    replaced according to the Amazon S3 documentation for POST uploads:
    http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html
    """
    _, params = cgi.parse_header(headers.get('Content-Type', ''))

    if 'boundary' not in params:
        return data

    boundary = params['boundary'].encode('ascii')
    data_bytes = to_bytes(data)

    filename = None
    for (disposition, _) in _iter_multipart_parts(data_bytes, boundary):
        if disposition.get('name') == 'file' and 'filename' in disposition:
            filename = disposition['filename']
            break

    if filename is None:
        # found nothing, return unaltered
        return data

    for (disposition, part) in _iter_multipart_parts(data_bytes, boundary):
        if disposition.get('name') == 'key' and b'${filename}' in part:
            search = boundary + part
            replace = boundary + part.replace(b'${filename}', filename.encode('utf8'))
            if search in data_bytes:
                return data_bytes.replace(search, replace)

    return data
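
# Hypothetical usage sketch for expand_multipart_filename (not part of the original
# module): the boundary and field values below are invented, and whether the final
# assertion holds depends on the module's _iter_multipart_parts helper, which is
# not shown here.
def _demo_expand_multipart_filename():
    headers = {'Content-Type': 'multipart/form-data; boundary=XYZ'}
    data = (b'--XYZ\r\n'
            b'Content-Disposition: form-data; name="key"\r\n\r\n'
            b'uploads/${filename}\r\n'
            b'--XYZ\r\n'
            b'Content-Disposition: form-data; name="file"; filename="report.pdf"\r\n\r\n'
            b'<file bytes>\r\n'
            b'--XYZ--\r\n')
    expanded = expand_multipart_filename(data, headers)
    # the 'key' field should now read 'uploads/report.pdf'
    assert b'uploads/report.pdf' in expanded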
def test_firehose_s3():
    s3_resource = aws_stack.connect_to_resource('s3')
    firehose = aws_stack.connect_to_service('firehose')

    s3_prefix = '/testdata'
    test_data = '{"test": "firehose_data_%s"}' % short_uid()

    # create Firehose stream
    stream = firehose.create_delivery_stream(
        DeliveryStreamName=TEST_FIREHOSE_NAME,
        S3DestinationConfiguration={
            'RoleARN': aws_stack.iam_resource_arn('firehose'),
            'BucketARN': aws_stack.s3_bucket_arn(TEST_BUCKET_NAME),
            'Prefix': s3_prefix
        }
    )
    assert stream
    assert TEST_FIREHOSE_NAME in firehose.list_delivery_streams()['DeliveryStreamNames']

    # create target S3 bucket
    s3_resource.create_bucket(Bucket=TEST_BUCKET_NAME)

    # put records
    firehose.put_record(
        DeliveryStreamName=TEST_FIREHOSE_NAME,
        Record={'Data': to_bytes(test_data)}
    )

    # check records in target bucket
    all_objects = testutil.list_all_s3_objects()
    testutil.assert_objects(json.loads(to_str(test_data)), all_objects)
def find_multipart_redirect_url(data, headers):
    """ Return the object key and redirect URL, if they can be found.

    Data is given as multipart form submission bytes, and the redirect is found
    in the success_action_redirect field according to the Amazon S3
    documentation for POST uploads:
    http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html
    """
    _, params = cgi.parse_header(headers.get('Content-Type', ''))
    key, redirect_url = None, None

    if 'boundary' not in params:
        return key, redirect_url

    boundary = params['boundary'].encode('ascii')
    data_bytes = to_bytes(data)

    for (disposition, part) in _iter_multipart_parts(data_bytes, boundary):
        if disposition.get('name') == 'key':
            _, value = part.split(b'\r\n\r\n', 1)
            key = value.rstrip(b'\r\n--').decode('utf8')

    if key:
        for (disposition, part) in _iter_multipart_parts(data_bytes, boundary):
            if disposition.get('name') == 'success_action_redirect':
                _, value = part.split(b'\r\n\r\n', 1)
                redirect_url = value.rstrip(b'\r\n--').decode('utf8')

    return key, redirect_url
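
# A similar hypothetical sketch for find_multipart_redirect_url (again with an
# invented boundary and field values; the result depends on the unshown
# _iter_multipart_parts helper):
def _demo_find_multipart_redirect_url():
    headers = {'Content-Type': 'multipart/form-data; boundary=XYZ'}
    data = (b'--XYZ\r\n'
            b'Content-Disposition: form-data; name="key"\r\n\r\n'
            b'uploads/report.pdf\r\n'
            b'--XYZ\r\n'
            b'Content-Disposition: form-data; name="success_action_redirect"\r\n\r\n'
            b'https://example.com/done\r\n'
            b'--XYZ--\r\n')
    key, redirect_url = find_multipart_redirect_url(data, headers)
    # expected result: ('uploads/report.pdf', 'https://example.com/done')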
def test_kinesis_lambda_forward_chain():
    kinesis = aws_stack.connect_to_service('kinesis')
    s3 = aws_stack.connect_to_service('s3')

    aws_stack.create_kinesis_stream(TEST_CHAIN_STREAM1_NAME, delete=True)
    aws_stack.create_kinesis_stream(TEST_CHAIN_STREAM2_NAME, delete=True)
    s3.create_bucket(Bucket=TEST_BUCKET_NAME)

    # deploy test lambdas connected to Kinesis streams
    zip_file = testutil.create_lambda_archive(load_file(TEST_LAMBDA_PYTHON), get_content=True,
        libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_CHAIN_LAMBDA1_NAME, zip_file=zip_file,
        event_source_arn=get_event_source_arn(TEST_CHAIN_STREAM1_NAME), runtime=LAMBDA_RUNTIME_PYTHON27)
    testutil.create_lambda_function(func_name=TEST_CHAIN_LAMBDA2_NAME, zip_file=zip_file,
        event_source_arn=get_event_source_arn(TEST_CHAIN_STREAM2_NAME), runtime=LAMBDA_RUNTIME_PYTHON27)

    # publish test record
    test_data = {'test_data': 'forward_chain_data_%s' % short_uid()}
    data = clone(test_data)
    data[lambda_integration.MSG_BODY_MESSAGE_TARGET] = 'kinesis:%s' % TEST_CHAIN_STREAM2_NAME
    kinesis.put_record(Data=to_bytes(json.dumps(data)), PartitionKey='testId',
        StreamName=TEST_CHAIN_STREAM1_NAME)

    # check results
    time.sleep(5)
    all_objects = testutil.list_all_s3_objects()
    testutil.assert_objects(test_data, all_objects)
def return_response(self, method, path, data, headers, response):
    try:
        response_data = json.loads(response.content)
        # Fix an upstream issue in Moto API Gateway, where it returns `createdDate` as a string
        # instead of as an integer timestamp:
        # see https://github.com/localstack/localstack/issues/511
        if 'createdDate' in response_data and not isinstance(response_data['createdDate'], int):
            response_data['createdDate'] = int(
                dateutil.parser.parse(response_data['createdDate']).strftime('%s'))
            response._content = to_bytes(json.dumps(response_data))
            response.headers['Content-Length'] = len(response.content)
    except Exception:
        pass
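
# Note: strftime('%s') relies on a platform-specific (glibc) extension and is not
# part of the portable strftime spec. A standalone sketch of the equivalent,
# portable conversion using datetime.timestamp() (the sample date is invented):
def _demo_parse_created_date():
    import dateutil.parser
    created = dateutil.parser.parse('2018-01-15T12:00:00+00:00')
    return int(created.timestamp())  # 1516017600 (epoch seconds)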
def handler(event, context):
    """ Generic event forwarder Lambda. """

    if 'httpMethod' in event:
        # looks like this is a call from an AWS_PROXY API Gateway
        body = json.loads(event['body'])
        body['pathParameters'] = event.get('pathParameters')
        return {
            'body': body,
            'statusCode': body.get('return_status_code', 200),
            'headers': body.get('return_headers', {})
        }

    if 'Records' not in event:
        return event

    raw_event_messages = []
    for record in event['Records']:
        # Deserialize into Python dictionary and extract the
        # "NewImage" (the new version of the full DDB document)
        ddb_new_image = deserialize_event(record)

        if MSG_BODY_RAISE_ERROR_FLAG in ddb_new_image.get('data', {}):
            raise Exception('Test exception (this is intentional)')

        # Place the raw event message document into the Kinesis message format
        kinesis_record = {
            'PartitionKey': 'key123',
            'Data': json.dumps(ddb_new_image)
        }

        if MSG_BODY_MESSAGE_TARGET in ddb_new_image.get('data', {}):
            forwarding_target = ddb_new_image['data'][MSG_BODY_MESSAGE_TARGET]
            target_name = forwarding_target.split(':')[-1]
            if forwarding_target.startswith('kinesis:'):
                ddb_new_image['data'][MSG_BODY_MESSAGE_TARGET] = 's3:/test_chain_result'
                kinesis_record['Data'] = json.dumps(ddb_new_image['data'])
                forward_event_to_target_stream(kinesis_record, target_name)
            elif forwarding_target.startswith('s3:'):
                s3_client = aws_stack.connect_to_service('s3')
                test_data = to_bytes(json.dumps({'test_data': ddb_new_image['data']['test_data']}))
                s3_client.upload_fileobj(BytesIO(test_data), TEST_BUCKET_NAME, target_name)
        else:
            raw_event_messages.append(kinesis_record)

    # forward messages to Kinesis
    forward_events(raw_event_messages)
def fix_headers_for_updated_response(response):
    response.headers['content-length'] = len(to_bytes(response.content))
    response.headers['x-amz-crc32'] = calculate_crc32(response)
def forward(self, method):
    data = self.data_bytes
    forward_headers = CaseInsensitiveDict(self.headers)

    # force close connection
    connection_header = forward_headers.get('Connection') or ''
    if connection_header.lower() not in ['keep-alive', '']:
        self.close_connection = 1

    def is_full_url(url):
        return re.match(r'[a-zA-Z]+://.+', url)

    path = self.path
    if is_full_url(path):
        path = path.split('://', 1)[1]
        path = '/%s' % (path.split('/', 1)[1] if '/' in path else '')
    forward_base_url = self.proxy.forward_base_url
    proxy_url = '%s%s' % (forward_base_url, path)

    for listener in self._listeners():
        if listener:
            proxy_url = listener.get_forward_url(method, path, data, forward_headers) or proxy_url

    target_url = self.path
    if not is_full_url(target_url):
        target_url = '%s%s' % (forward_base_url, target_url)

    # update original "Host" header (moto s3 relies on this behavior)
    if not forward_headers.get('Host'):
        forward_headers['host'] = urlparse(target_url).netloc
    if 'localhost.atlassian.io' in forward_headers.get('Host'):
        forward_headers['host'] = 'localhost'
    forward_headers['X-Forwarded-For'] = self.build_x_forwarded_for(forward_headers)

    try:
        response = None
        modified_request = None

        # update listener (pre-invocation)
        for listener in self._listeners():
            if not listener:
                continue
            listener_result = listener.forward_request(
                method=method, path=path, data=data, headers=forward_headers)
            if isinstance(listener_result, Response):
                response = listener_result
                break
            if isinstance(listener_result, LambdaResponse):
                response = listener_result
                break
            if isinstance(listener_result, dict):
                response = Response()
                response._content = json.dumps(listener_result)
                response.headers['Content-Type'] = APPLICATION_JSON
                response.status_code = 200
                break
            elif isinstance(listener_result, Request):
                modified_request = listener_result
                data = modified_request.data
                forward_headers = modified_request.headers
                break
            elif listener_result is not True:
                # get status code from listener result, or fall back to 503 (service unavailable)
                code = listener_result if isinstance(listener_result, int) else 503
                self.send_response(code)
                self.send_header('Content-Length', '0')
                # allow pre-flight CORS headers by default
                self._send_cors_headers()
                self.end_headers()
                return

        # perform the actual invocation of the backend service
        if response is None:
            forward_headers['Connection'] = connection_header or 'close'
            data_to_send = self.data_bytes
            request_url = proxy_url
            if modified_request:
                if modified_request.url:
                    request_url = '%s%s' % (forward_base_url, modified_request.url)
                data_to_send = modified_request.data
            response = self.method(request_url, data=data_to_send,
                headers=forward_headers, stream=True)
            # prevent requests from processing the response body
            if not response._content_consumed and response.raw:
                response._content = response.raw.read()

        # update listener (post-invocation)
        if self.proxy.update_listener:
            kwargs = {
                'method': method,
                'path': path,
                'data': self.data_bytes,
                'headers': forward_headers,
                'response': response
            }
            if 'request_handler' in inspect.getargspec(self.proxy.update_listener.return_response)[0]:
                # some listeners (e.g., sqs_listener.py) require additional details like the original
                # request port, hence we pass in a reference to this request handler as well.
                kwargs['request_handler'] = self
            updated_response = self.proxy.update_listener.return_response(**kwargs)
            if isinstance(updated_response, Response):
                response = updated_response

        # copy headers and return response
        self.send_response(response.status_code)

        content_length_sent = False
        for header_key, header_value in iteritems(response.headers):
            # filter out certain headers that we don't want to transmit
            if header_key.lower() not in ('transfer-encoding', 'date', 'server'):
                self.send_header(header_key, header_value)
                content_length_sent = content_length_sent or header_key.lower() == 'content-length'
        if not content_length_sent:
            self.send_header('Content-Length',
                '%s' % len(response.content) if response.content else 0)

        if isinstance(response, LambdaResponse):
            self.send_multi_value_headers(response.multi_value_headers)

        # allow pre-flight CORS headers by default
        self._send_cors_headers(response)

        self.end_headers()
        if response.content and len(response.content):
            self.wfile.write(to_bytes(response.content))
    except Exception as e:
        trace = str(traceback.format_exc())
        conn_errors = ('ConnectionRefusedError', 'NewConnectionError', 'Connection aborted',
            'Unexpected EOF', 'Connection reset by peer', 'cannot read from timed out object')
        conn_error = any(e in trace for e in conn_errors)
        error_msg = 'Error forwarding request: %s %s' % (e, trace)
        if 'Broken pipe' in trace:
            LOG.warn('Connection prematurely closed by client (broken pipe).')
        elif not self.proxy.quiet or not conn_error:
            LOG.error(error_msg)
            if os.environ.get(ENV_INTERNAL_TEST_RUN):
                # During a test run, we also want to print error messages, because
                # log messages are delayed until the entire test run is over, and
                # hence we would be missing messages if the test hangs for some reason.
                print('ERROR: %s' % error_msg)
        self.send_response(502)  # bad gateway
        self.end_headers()
        # force close connection
        self.close_connection = 1
    finally:
        try:
            self.wfile.flush()
        except Exception as e:
            LOG.warning('Unable to flush write file: %s' % e)
def invoke_rest_api_integration_backend(invocation_context: ApiInvocationContext):
    # define local aliases from invocation context
    invocation_path = invocation_context.path_with_query_string
    method = invocation_context.method
    data = invocation_context.data
    headers = invocation_context.headers
    api_id = invocation_context.api_id
    stage = invocation_context.stage
    resource_path = invocation_context.resource_path
    response_templates = invocation_context.response_templates
    integration = invocation_context.integration

    # extract integration type and path parameters
    relative_path, query_string_params = extract_query_string_params(path=invocation_path)
    integration_type_orig = integration.get("type") or integration.get("integrationType") or ""
    integration_type = integration_type_orig.upper()
    uri = integration.get("uri") or integration.get("integrationUri") or ""

    # XXX we need to replace the internal Authorization header with an Authorization header
    # set by the customer, even if it's empty - that's what's expected in the integration.
    custom_auth_header = invocation_context.headers.pop(HEADER_LOCALSTACK_AUTHORIZATION, "")
    invocation_context.headers["Authorization"] = custom_auth_header

    try:
        path_params = extract_path_params(path=relative_path, extracted_path=resource_path)
        invocation_context.path_params = path_params
    except Exception:
        path_params = {}

    if (uri.startswith("arn:aws:apigateway:") and ":lambda:path" in uri) or uri.startswith(
            "arn:aws:lambda"):
        if integration_type in ["AWS", "AWS_PROXY"]:
            func_arn = uri
            if ":lambda:path" in uri:
                func_arn = uri.split(":lambda:path")[1].split("functions/")[1].split("/invocations")[0]

            invocation_context.context = get_event_request_context(invocation_context)
            invocation_context.stage_variables = helpers.get_stage_variables(invocation_context)
            if invocation_context.authorizer_type:
                authorizer_context = {
                    invocation_context.authorizer_type: invocation_context.auth_context
                }
                invocation_context.context["authorizer"] = authorizer_context

            request_templates = RequestTemplates()
            payload = request_templates.render(invocation_context)

            # TODO: change this signature to InvocationContext as well!
            result = lambda_api.process_apigateway_invocation(
                func_arn,
                relative_path,
                payload,
                stage,
                api_id,
                headers,
                is_base64_encoded=invocation_context.is_data_base64_encoded,
                path_params=path_params,
                query_string_params=query_string_params,
                method=method,
                resource_path=resource_path,
                request_context=invocation_context.context,
                stage_variables=invocation_context.stage_variables,
            )

            if isinstance(result, FlaskResponse):
                response = flask_to_requests_response(result)
            elif isinstance(result, Response):
                response = result
            else:
                response = LambdaResponse()
                parsed_result = result if isinstance(result, dict) else json.loads(str(result or "{}"))
                parsed_result = common.json_safe(parsed_result)
                parsed_result = {} if parsed_result is None else parsed_result
                response.status_code = int(parsed_result.get("statusCode", 200))
                parsed_headers = parsed_result.get("headers", {})
                if parsed_headers is not None:
                    response.headers.update(parsed_headers)
                try:
                    result_body = parsed_result.get("body")
                    if isinstance(result_body, dict):
                        response._content = json.dumps(result_body)
                    else:
                        body_bytes = to_bytes(to_str(result_body or ""))
                        if parsed_result.get("isBase64Encoded", False):
                            body_bytes = base64.b64decode(body_bytes)
                        response._content = body_bytes
                except Exception as e:
                    LOG.warning("Couldn't set Lambda response content: %s", e)
                    response._content = "{}"
                update_content_length(response)
                response.multi_value_headers = parsed_result.get("multiValueHeaders") or {}

            # apply custom response template
            invocation_context.response = response
            response_templates = ResponseTemplates()
            response_templates.render(invocation_context)
            invocation_context.response.headers["Content-Length"] = str(len(response.content or ""))
            return invocation_context.response

        raise Exception(
            f'API Gateway integration type "{integration_type}", action "{uri}", '
            f'method "{method}" not yet implemented'
        )

    elif integration_type == "AWS":
        if "kinesis:action/" in uri:
            if uri.endswith("kinesis:action/PutRecord"):
                target = kinesis_listener.ACTION_PUT_RECORD
            elif uri.endswith("kinesis:action/PutRecords"):
                target = kinesis_listener.ACTION_PUT_RECORDS
            elif uri.endswith("kinesis:action/ListStreams"):
                target = kinesis_listener.ACTION_LIST_STREAMS
            else:
                LOG.info(
                    f"Unexpected API Gateway integration URI '{uri}' for integration type {integration_type}")
                target = ""

            try:
                invocation_context.context = get_event_request_context(invocation_context)
                invocation_context.stage_variables = helpers.get_stage_variables(invocation_context)
                request_templates = RequestTemplates()
                payload = request_templates.render(invocation_context)
            except Exception as e:
                LOG.warning("Unable to convert API Gateway payload to str: %s", e)
                raise

            # forward records to target kinesis stream
            headers = aws_stack.mock_aws_request_headers(
                service="kinesis", region_name=invocation_context.region_name)
            headers["X-Amz-Target"] = target

            result = common.make_http_request(
                url=config.service_url("kinesis"), data=payload, headers=headers, method="POST")

            # apply response template
            invocation_context.response = result
            response_templates = ResponseTemplates()
            response_templates.render(invocation_context)
            return invocation_context.response

        elif "states:action/" in uri:
            action = uri.split("/")[-1]

            if APPLICATION_JSON in integration.get("requestTemplates", {}):
                request_templates = RequestTemplates()
                payload = request_templates.render(invocation_context)
                payload = json.loads(payload)
            else:
                # XXX decoding in py3 sounds wrong, this actually might break
                payload = json.loads(data.decode("utf-8"))
            client = aws_stack.connect_to_service("stepfunctions")

            if isinstance(payload.get("input"), dict):
                payload["input"] = json.dumps(payload["input"])

            # hot fix: the Step Functions Local package responds with
            # "Unsupported Operation: 'StartSyncExecution'"
            method_name = (camel_to_snake_case(action)
                if action != "StartSyncExecution" else "start_execution")

            try:
                method = getattr(client, method_name)
            except AttributeError:
                msg = "Invalid step function action: %s" % method_name
                LOG.error(msg)
                return make_error_response(msg, 400)

            result = method(**payload)
            result = json_safe({k: result[k] for k in result if k not in "ResponseMetadata"})
            response = requests_response(
                content=result,
                headers=aws_stack.mock_aws_request_headers(),
            )

            if action == "StartSyncExecution":
                # poll for the execution result and return it
                result = await_sfn_execution_result(result["executionArn"])
                result_status = result.get("status")
                if result_status != "SUCCEEDED":
                    return make_error_response(
                        "StepFunctions execution %s failed with status '%s'"
                        % (result["executionArn"], result_status),
                        500,
                    )
                result = json_safe(result)
                response = requests_response(content=result)

            # apply response templates
            invocation_context.response = response
            response_templates = ResponseTemplates()
            response_templates.render(invocation_context)
            # response = apply_request_response_templates(
            #     response, response_templates, content_type=APPLICATION_JSON
            # )
            return response

        # https://docs.aws.amazon.com/apigateway/api-reference/resource/integration/
        elif ("s3:path/" in uri or "s3:action/" in uri) and method == "GET":
            s3 = aws_stack.connect_to_service("s3")
            uri = apply_request_parameters(
                uri,
                integration=integration,
                path_params=path_params,
                query_params=query_string_params,
            )
            uri_match = re.match(TARGET_REGEX_PATH_S3_URI, uri) or re.match(
                TARGET_REGEX_ACTION_S3_URI, uri)
            if uri_match:
                bucket, object_key = uri_match.group("bucket", "object")
                LOG.debug("Getting request for bucket %s object %s", bucket, object_key)
                try:
                    object = s3.get_object(Bucket=bucket, Key=object_key)
                except s3.exceptions.NoSuchKey:
                    msg = "Object %s not found" % object_key
                    LOG.debug(msg)
                    return make_error_response(msg, 404)

                headers = aws_stack.mock_aws_request_headers(service="s3")
                if object.get("ContentType"):
                    headers["Content-Type"] = object["ContentType"]

                # stream used so that large files do not fill up memory
                response = request_response_stream(stream=object["Body"], headers=headers)
                return response
            else:
                msg = "Request URI does not match s3 specifications"
                LOG.warning(msg)
                return make_error_response(msg, 400)

        if method == "POST":
            if uri.startswith("arn:aws:apigateway:") and ":sqs:path" in uri:
                template = integration["requestTemplates"][APPLICATION_JSON]
                account_id, queue = uri.split("/")[-2:]
                region_name = uri.split(":")[3]
                if "GetQueueUrl" in template or "CreateQueue" in template:
                    request_templates = RequestTemplates()
                    payload = request_templates.render(invocation_context)
                    new_request = f"{payload}&QueueName={queue}"
                else:
                    request_templates = RequestTemplates()
                    payload = request_templates.render(invocation_context)
                    queue_url = f"{config.get_edge_url()}/{account_id}/{queue}"
                    new_request = f"{payload}&QueueUrl={queue_url}"
                headers = aws_stack.mock_aws_request_headers(service="sqs", region_name=region_name)

                url = urljoin(config.service_url("sqs"), f"{TEST_AWS_ACCOUNT_ID}/{queue}")
                result = common.make_http_request(url, method="POST", headers=headers, data=new_request)
                return result
            elif uri.startswith("arn:aws:apigateway:") and ":sns:path" in uri:
                invocation_context.context = get_event_request_context(invocation_context)
                invocation_context.stage_variables = helpers.get_stage_variables(invocation_context)
                integration_response = SnsIntegration().invoke(invocation_context)
                return apply_request_response_templates(
                    integration_response, response_templates, content_type=APPLICATION_JSON)

        raise Exception(
            'API Gateway AWS integration action URI "%s", method "%s" not yet implemented'
            % (uri, method))

    elif integration_type == "AWS_PROXY":
        if uri.startswith("arn:aws:apigateway:") and ":dynamodb:action" in uri:
            # arn:aws:apigateway:us-east-1:dynamodb:action/PutItem&Table=MusicCollection
            table_name = uri.split(":dynamodb:action")[1].split("&Table=")[1]
            action = uri.split(":dynamodb:action")[1].split("&Table=")[0]

            if "PutItem" in action and method == "PUT":
                response_template = response_templates.get("application/json")
                if response_template is None:
                    msg = "Invalid response template defined in integration response."
                    LOG.info("%s Existing: %s", msg, response_templates)
                    return make_error_response(msg, 404)

                response_template = json.loads(response_template)
                if response_template["TableName"] != table_name:
                    msg = "Invalid table name specified in integration response template."
                    return make_error_response(msg, 404)

                dynamo_client = aws_stack.connect_to_resource("dynamodb")
                table = dynamo_client.Table(table_name)

                event_data = {}
                data_dict = json.loads(data)
                for key, _ in response_template["Item"].items():
                    event_data[key] = data_dict[key]

                table.put_item(Item=event_data)
                response = requests_response(event_data)
                return response
        else:
            raise Exception(
                'API Gateway action uri "%s", integration type %s not yet implemented'
                % (uri, integration_type))

    elif integration_type in ["HTTP_PROXY", "HTTP"]:
        if ":servicediscovery:" in uri:
            # check if this is a servicediscovery integration URI
            client = aws_stack.connect_to_service("servicediscovery")
            service_id = uri.split("/")[-1]
            instances = client.list_instances(ServiceId=service_id)["Instances"]
            instance = (instances or [None])[0]
            if instance and instance.get("Id"):
                uri = "http://%s/%s" % (instance["Id"], invocation_path.lstrip("/"))

        # apply custom request template
        invocation_context.context = get_event_request_context(invocation_context)
        invocation_context.stage_variables = helpers.get_stage_variables(invocation_context)
        request_templates = RequestTemplates()
        payload = request_templates.render(invocation_context)

        if isinstance(payload, dict):
            payload = json.dumps(payload)

        uri = apply_request_parameters(uri, integration=integration,
            path_params=path_params, query_params=query_string_params)
        result = requests.request(method=method, url=uri, data=payload, headers=headers)

        # apply custom response template
        invocation_context.response = result
        response_templates = ResponseTemplates()
        response_templates.render(invocation_context)
        return invocation_context.response

    elif integration_type == "MOCK":
        # TODO: apply the tell-don't-ask principle inside ResponseTemplates or InvocationContext
        invocation_context.stage_variables = helpers.get_stage_variables(invocation_context)
        invocation_context.response = requests_response({})
        response_templates = ResponseTemplates()
        response_templates.render(invocation_context)
        return invocation_context.response

    if method == "OPTIONS":
        # fall back to returning CORS headers if this is an OPTIONS request
        return get_cors_response(headers)

    raise Exception(
        'API Gateway integration type "%s", method "%s", URI "%s" not yet implemented'
        % (integration_type, method, uri))
def return_response(self, method, path, data, headers, response):
    bucket_name = get_bucket_name(path, headers)

    # no path-based bucket name? try host-based
    hostname_parts = headers['host'].split('.')
    if (not bucket_name or len(bucket_name) == 0) and len(hostname_parts) > 1:
        bucket_name = hostname_parts[0]

    # POST requests to S3 may include a success_action_redirect field,
    # which should be used to redirect a client to a new location.
    key = None
    if method == 'POST':
        key, redirect_url = multipart_content.find_multipart_redirect_url(data, headers)
        if key and redirect_url:
            response.status_code = 303
            response.headers['Location'] = expand_redirect_url(redirect_url, key, bucket_name)
            LOGGER.debug('S3 POST {} to {}'.format(response.status_code, response.headers['Location']))

    parsed = urlparse.urlparse(path)
    bucket_name_in_host = headers['host'].startswith(bucket_name)

    should_send_notifications = all([
        method in ('PUT', 'POST', 'DELETE'),
        '/' in path[1:] or bucket_name_in_host,
        # check if this is an actual put object request, because it could also be
        # a put bucket request with a path like this: /bucket_name/
        bucket_name_in_host or (len(path[1:].split('/')) > 1 and len(path[1:].split('/')[1]) > 0),
        # don't send notification if url has a query part (some/path/with?query)
        # (query can be one of 'notification', 'lifecycle', 'tagging', etc)
        not parsed.query
    ])

    # get subscribers and send bucket notifications
    if should_send_notifications:
        # if we already have a good key, use it, otherwise examine the path
        if key:
            object_path = '/' + key
        elif bucket_name_in_host:
            object_path = parsed.path
        else:
            parts = parsed.path[1:].split('/', 1)
            object_path = parts[1] if parts[1][0] == '/' else '/%s' % parts[1]

        send_notifications(method, bucket_name, object_path)

    # publish event for creation/deletion of buckets:
    if method in ('PUT', 'DELETE') and ('/' not in path[1:] or len(path[1:].split('/')[1]) <= 0):
        event_type = (event_publisher.EVENT_S3_CREATE_BUCKET
            if method == 'PUT' else event_publisher.EVENT_S3_DELETE_BUCKET)
        event_publisher.fire_event(event_type, payload={'n': event_publisher.get_hash(bucket_name)})

    # fix an upstream issue in moto S3 (see https://github.com/localstack/localstack/issues/382)
    if method == 'PUT' and parsed.query == 'policy':
        response._content = ''
        response.status_code = 204
        return response

    if response:
        # append CORS headers to response
        append_cors_headers(bucket_name, request_method=method, request_headers=headers,
            response=response)

        response_content_str = None
        try:
            response_content_str = to_str(response._content)
        except Exception:
            pass

        # we need to un-pretty-print the XML, otherwise we run into this issue with Spark:
        # https://github.com/jserver/mock-s3/pull/9/files
        # https://github.com/localstack/localstack/issues/183
        # Note: we still need to make sure we have a newline after the first line: <?xml ...>\n
        if response_content_str and response_content_str.startswith('<'):
            is_bytes = isinstance(response._content, six.binary_type)
            response._content = re.sub(r'([^\?])>\n\s*<', r'\1><', response_content_str,
                flags=re.MULTILINE)
            if is_bytes:
                response._content = to_bytes(response._content)
            # fix content-type: https://github.com/localstack/localstack/issues/618
            # https://github.com/localstack/localstack/issues/549
            if 'text/html' in response.headers.get('Content-Type', ''):
                response.headers['Content-Type'] = 'application/xml; charset=utf-8'
            response.headers['content-length'] = len(response._content)

        # update content-length headers (fix https://github.com/localstack/localstack/issues/541)
        if method == 'DELETE':
            response.headers['content-length'] = len(response._content)
def calculate_crc32(response):
    return crc32(to_bytes(response.content)) & 0xffffffff
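
# The '& 0xffffffff' mask exists because zlib.crc32 can return a signed (negative)
# value on Python 2; on Python 3 the result is already unsigned, so the mask is a
# no-op there. A small standalone check:
def _demo_crc32_mask():
    from zlib import crc32
    checksum = crc32(to_bytes('example payload')) & 0xffffffff
    assert 0 <= checksum <= 0xffffffff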
def forward(self, method):
    path = self.path
    if '://' in path:
        path = '/' + path.split('://', 1)[1].split('/', 1)[1]
    proxy_url = '%s%s' % (self.proxy.forward_url, path)
    target_url = self.path
    if '://' not in target_url:
        target_url = '%s%s' % (self.proxy.forward_url, target_url)

    data = self.data_bytes
    forward_headers = CaseInsensitiveDict(self.headers)

    # update original "Host" header (moto s3 relies on this behavior)
    if not forward_headers.get('Host'):
        forward_headers['host'] = urlparse(target_url).netloc
    if 'localhost.atlassian.io' in forward_headers.get('Host'):
        forward_headers['host'] = 'localhost'

    try:
        response = None
        modified_request = None

        # update listener (pre-invocation)
        if self.proxy.update_listener:
            listener_result = self.proxy.update_listener.forward_request(
                method=method, path=path, data=data, headers=forward_headers)
            if isinstance(listener_result, Response):
                response = listener_result
            elif isinstance(listener_result, Request):
                modified_request = listener_result
                data = modified_request.data
                forward_headers = modified_request.headers
            elif listener_result is not True:
                # get status code from listener result, or fall back to 503 (service unavailable)
                code = listener_result if isinstance(listener_result, int) else 503
                self.send_response(code)
                self.end_headers()
                return

        # perform the actual invocation of the backend service
        if response is None:
            if modified_request:
                response = self.method(proxy_url, data=modified_request.data,
                    headers=modified_request.headers)
            else:
                response = self.method(proxy_url, data=self.data_bytes,
                    headers=forward_headers)

        # update listener (post-invocation)
        if self.proxy.update_listener:
            kwargs = {
                'method': method,
                'path': path,
                'data': data,
                'headers': forward_headers,
                'response': response
            }
            if 'request_handler' in inspect.getargspec(self.proxy.update_listener.return_response)[0]:
                # some listeners (e.g., sqs_listener.py) require additional details like the original
                # request port, hence we pass in a reference to this request handler as well.
                kwargs['request_handler'] = self
            updated_response = self.proxy.update_listener.return_response(**kwargs)
            if isinstance(updated_response, Response):
                response = updated_response

        # copy headers and return response
        self.send_response(response.status_code)

        content_length_sent = False
        for header_key, header_value in iteritems(response.headers):
            self.send_header(header_key, header_value)
            content_length_sent = content_length_sent or header_key.lower() == 'content-length'
        if not content_length_sent:
            self.send_header('Content-Length',
                '%s' % len(response.content) if response.content else 0)

        # allow pre-flight CORS headers by default
        if 'Access-Control-Allow-Origin' not in response.headers:
            self.send_header('Access-Control-Allow-Origin', '*')
        if 'Access-Control-Allow-Methods' not in response.headers:
            self.send_header('Access-Control-Allow-Methods', ','.join(CORS_ALLOWED_METHODS))
        if 'Access-Control-Allow-Headers' not in response.headers:
            self.send_header('Access-Control-Allow-Headers', ','.join(CORS_ALLOWED_HEADERS))

        self.end_headers()
        if response.content and len(response.content):
            self.wfile.write(to_bytes(response.content))
        self.wfile.flush()
    except Exception as e:
        trace = str(traceback.format_exc())
        conn_error = 'ConnectionRefusedError' in trace or 'NewConnectionError' in trace
        error_msg = 'Error forwarding request: %s %s' % (e, trace)
        if not self.proxy.quiet or not conn_error:
            LOGGER.error(error_msg)
            if os.environ.get(ENV_INTERNAL_TEST_RUN):
                # During a test run, we also want to print error messages, because
                # log messages are delayed until the entire test run is over, and
                # hence we would be missing messages if the test hangs for some reason.
                print('ERROR: %s' % error_msg)
        self.send_response(502)  # bad gateway
        self.end_headers()
def invoke_rest_api(api_id, stage, method, invocation_path, data, headers, path=None):
    path = path or invocation_path
    relative_path, query_string_params = extract_query_string_params(path=invocation_path)

    path_map = helpers.get_rest_api_paths(rest_api_id=api_id)
    try:
        extracted_path, resource = get_resource_for_path(path=relative_path, path_map=path_map)
    except Exception:
        return make_error_response('Unable to find path %s' % path, 404)

    integrations = resource.get('resourceMethods', {})
    integration = integrations.get(method, {})
    if not integration:
        integration = integrations.get('ANY', {})
    integration = integration.get('methodIntegration')
    if not integration:
        if method == 'OPTIONS' and 'Origin' in headers:
            # default to returning CORS headers if this is an OPTIONS request
            return get_cors_response(headers)
        return make_error_response('Unable to find integration for path %s' % path, 404)

    uri = integration.get('uri')
    if integration['type'] == 'AWS':
        if 'kinesis:action/' in uri:
            if uri.endswith('kinesis:action/PutRecords'):
                target = kinesis_listener.ACTION_PUT_RECORDS
            if uri.endswith('kinesis:action/ListStreams'):
                target = kinesis_listener.ACTION_LIST_STREAMS

            template = integration['requestTemplates'][APPLICATION_JSON]
            new_request = aws_stack.render_velocity_template(template, data)

            # forward records to target kinesis stream
            headers = aws_stack.mock_aws_request_headers(service='kinesis')
            headers['X-Amz-Target'] = target
            result = common.make_http_request(url=TEST_KINESIS_URL,
                method='POST', data=new_request, headers=headers)
            return result

        if method == 'POST':
            if uri.startswith('arn:aws:apigateway:') and ':sqs:path' in uri:
                template = integration['requestTemplates'][APPLICATION_JSON]
                account_id, queue = uri.split('/')[-2:]
                region_name = uri.split(':')[3]
                new_request = aws_stack.render_velocity_template(template, data) + \
                    '&QueueName=%s' % queue
                headers = aws_stack.mock_aws_request_headers(service='sqs', region_name=region_name)

                url = urljoin(TEST_SQS_URL, '%s/%s' % (account_id, queue))
                result = common.make_http_request(url, method='POST', headers=headers,
                    data=new_request)
                return result

        msg = 'API Gateway AWS integration action URI "%s", method "%s" not yet implemented' % (
            uri, method)
        LOGGER.warning(msg)
        return make_error_response(msg, 404)

    elif integration['type'] == 'AWS_PROXY':
        if uri.startswith('arn:aws:apigateway:') and ':lambda:path' in uri:
            func_arn = uri.split(':lambda:path')[1].split('functions/')[1].split('/invocations')[0]
            data_str = json.dumps(data) if isinstance(data, (dict, list)) else data
            account_id = uri.split(':lambda:path')[1].split(':function:')[0].split(':')[-1]

            source_ip = headers['X-Forwarded-For'].split(',')[-2]

            # Sample request context:
            # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-create-api-as-simple-proxy-for-lambda.html#api-gateway-create-api-as-simple-proxy-for-lambda-test
            request_context = {
                'path': relative_path,
                'accountId': account_id,
                'resourceId': resource.get('id'),
                'stage': stage,
                'identity': {
                    'accountId': account_id,
                    'sourceIp': source_ip,
                    'userAgent': headers['User-Agent'],
                }
            }

            try:
                path_params = extract_path_params(path=relative_path, extracted_path=extracted_path)
            except Exception:
                path_params = {}

            result = lambda_api.process_apigateway_invocation(func_arn, relative_path, data_str,
                headers, path_params=path_params, query_string_params=query_string_params,
                method=method, resource_path=path, request_context=request_context)

            if isinstance(result, FlaskResponse):
                return flask_to_requests_response(result)
            if isinstance(result, Response):
                return result

            response = Response()
            parsed_result = result if isinstance(result, dict) else json.loads(result)
            parsed_result = common.json_safe(parsed_result)
            parsed_result = {} if parsed_result is None else parsed_result
            response.status_code = int(parsed_result.get('statusCode', 200))
            response.headers.update(parsed_result.get('headers', {}))
            try:
                if isinstance(parsed_result['body'], dict):
                    response._content = json.dumps(parsed_result['body'])
                else:
                    response._content = to_bytes(parsed_result['body'])
            except Exception:
                response._content = '{}'
            response.headers['Content-Length'] = len(response._content)
            return response
        else:
            msg = 'API Gateway action uri "%s" not yet implemented' % uri
            LOGGER.warning(msg)
            return make_error_response(msg, 404)

    elif integration['type'] == 'HTTP':
        function = getattr(requests, method.lower())
        if isinstance(data, dict):
            data = json.dumps(data)
        result = function(integration['uri'], data=data, headers=headers)
        return result

    else:
        msg = ('API Gateway integration type "%s" for method "%s" not yet implemented'
            % (integration['type'], method))
        LOGGER.warning(msg)
        return make_error_response(msg, 404)

    return 200
def return_response(self, method, path, data, headers, response):
    bucket_name = get_bucket_name(path, headers)

    # no path-based bucket name? try host-based
    hostname_parts = headers['host'].split('.')
    if (not bucket_name or len(bucket_name) == 0) and len(hostname_parts) > 1:
        bucket_name = hostname_parts[0]

    # POST requests to S3 may include a success_action_redirect field,
    # which should be used to redirect a client to a new location.
    key = None
    if method == 'POST':
        key, redirect_url = multipart_content.find_multipart_redirect_url(data, headers)
        if key and redirect_url:
            response.status_code = 303
            response.headers['Location'] = expand_redirect_url(redirect_url, key, bucket_name)
            LOGGER.debug('S3 POST {} to {}'.format(response.status_code, response.headers['Location']))

    parsed = urlparse.urlparse(path)
    bucket_name_in_host = headers['host'].startswith(bucket_name)

    should_send_notifications = all([
        method in ('PUT', 'POST', 'DELETE'),
        '/' in path[1:] or bucket_name_in_host,
        # check if this is an actual put object request, because it could also be
        # a put bucket request with a path like this: /bucket_name/
        bucket_name_in_host or (len(path[1:].split('/')) > 1 and len(path[1:].split('/')[1]) > 0),
        self.is_query_allowable(method, parsed.query)
    ])

    # get subscribers and send bucket notifications
    if should_send_notifications:
        # if we already have a good key, use it, otherwise examine the path
        if key:
            object_path = '/' + key
        elif bucket_name_in_host:
            object_path = parsed.path
        else:
            parts = parsed.path[1:].split('/', 1)
            object_path = parts[1] if parts[1][0] == '/' else '/%s' % parts[1]
        version_id = response.headers.get('x-amz-version-id', None)

        send_notifications(method, bucket_name, object_path, version_id)

    # publish event for creation/deletion of buckets:
    if method in ('PUT', 'DELETE') and ('/' not in path[1:] or len(path[1:].split('/')[1]) <= 0):
        event_type = (event_publisher.EVENT_S3_CREATE_BUCKET
            if method == 'PUT' else event_publisher.EVENT_S3_DELETE_BUCKET)
        event_publisher.fire_event(event_type, payload={'n': event_publisher.get_hash(bucket_name)})

    # fix an upstream issue in moto S3 (see https://github.com/localstack/localstack/issues/382)
    if method == 'PUT' and parsed.query == 'policy':
        response._content = ''
        response.status_code = 204
        return response

    if response:
        # append CORS headers to response
        append_cors_headers(bucket_name, request_method=method, request_headers=headers,
            response=response)

        response_content_str = None
        try:
            response_content_str = to_str(response._content)
        except Exception:
            pass

        # We need to un-pretty-print the XML, otherwise we run into this issue with Spark:
        # https://github.com/jserver/mock-s3/pull/9/files
        # https://github.com/localstack/localstack/issues/183
        # Note: we still need to make sure we have a newline after the first line: <?xml ...>\n
        if response_content_str and response_content_str.startswith('<'):
            is_bytes = isinstance(response._content, six.binary_type)

            # un-pretty-print the XML
            response._content = re.sub(r'([^\?])>\n\s*<', r'\1><', response_content_str,
                flags=re.MULTILINE)

            # update Location information in response payload
            response._content = self._update_location(response._content, bucket_name)

            # convert back to bytes
            if is_bytes:
                response._content = to_bytes(response._content)

            # fix content-type: https://github.com/localstack/localstack/issues/618
            # https://github.com/localstack/localstack/issues/549
            if 'text/html' in response.headers.get('Content-Type', ''):
                response.headers['Content-Type'] = 'application/xml; charset=utf-8'
            response.headers['content-length'] = len(response._content)

        # update content-length headers (fix https://github.com/localstack/localstack/issues/541)
        if method == 'DELETE':
            response.headers['content-length'] = len(response._content)
def _assert_record():
    received_record = records[0]["records"][0]
    received_record_data = to_str(base64.b64decode(to_bytes(received_record["data"])))
    assert received_record_data == f"{msg_text}-processed"
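
# For context, Kinesis/Firehose record data is base64-encoded on the wire, which is
# why the assertion above decodes it first. A standalone round-trip sketch:
def _demo_record_data_round_trip():
    import base64
    original = b"my-message-processed"
    on_the_wire = base64.b64encode(original)
    assert base64.b64decode(on_the_wire) == original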
def send_event_to_firehose(event, arn):
    delivery_stream_name = aws_stack.firehose_name(arn)
    firehose_client = aws_stack.connect_to_service('firehose')
    firehose_client.put_record(
        DeliveryStreamName=delivery_stream_name,
        Record={'Data': to_bytes(json.dumps(event))})
def forward_request(self, method, path, data, headers):
    # Make sure we use 'localhost' as the forward host, to ensure moto uses path-style addressing.
    # Note that all S3 clients using LocalStack need to enable path-style addressing.
    if 's3.amazonaws.com' not in headers.get('host', ''):
        headers['host'] = 'localhost'

    # check content md5 hash integrity
    if 'Content-MD5' in headers:
        response = check_content_md5(data, headers)
        if response is not None:
            return response

    modified_data = None

    # TODO: For some reason, moto doesn't allow us to put a location constraint on us-east-1
    to_find = to_bytes('<LocationConstraint>us-east-1</LocationConstraint>')
    if data and data.startswith(to_bytes('<')) and to_find in data:
        modified_data = data.replace(to_find, to_bytes(''))

    # If this request contains streaming v4 authentication signatures, strip them from the message.
    # Related issue: https://github.com/localstack/localstack/issues/98
    # TODO: we should evaluate whether to replace moto s3 with scality/S3:
    # https://github.com/scality/S3/issues/237
    if headers.get('x-amz-content-sha256') == 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD':
        modified_data = strip_chunk_signatures(modified_data or data)
        headers['content-length'] = headers.get('x-amz-decoded-content-length')

    # POST requests to S3 may include a "${filename}" placeholder in the
    # key, which should be replaced with an actual file name before storing.
    if method == 'POST':
        original_data = modified_data or data
        expanded_data = multipart_content.expand_multipart_filename(original_data, headers)
        if expanded_data is not original_data:
            modified_data = expanded_data

    # If no content-type is provided, 'binary/octet-stream' should be used
    # src: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
    if method == 'PUT' and not headers.get('content-type'):
        headers['content-type'] = 'binary/octet-stream'

    # persist this API call to disk
    persistence.record('s3', method, path, data, headers)

    # parse query params
    parsed = urlparse.urlparse(path)
    query = parsed.query
    path = parsed.path
    bucket = path.split('/')[1]
    query_map = urlparse.parse_qs(query, keep_blank_values=True)

    if query == 'notification' or 'notification' in query_map:
        # handle and return response for ?notification request
        response = handle_notification_request(bucket, method, data)
        return response

    if query == 'cors' or 'cors' in query_map:
        if method == 'GET':
            return get_cors(bucket)
        if method == 'PUT':
            return set_cors(bucket, data)
        if method == 'DELETE':
            return delete_cors(bucket)

    if query == 'lifecycle' or 'lifecycle' in query_map:
        if method == 'GET':
            return get_lifecycle(bucket)
        if method == 'PUT':
            return set_lifecycle(bucket, data)

    if modified_data is not None:
        return Request(data=modified_data, headers=headers, method=method)
    return True
def calculate_crc32(content):
    return crc32(to_bytes(content)) & 0xFFFFFFFF
def invoke_rest_api_integration_backend(api_id, stage, integration, method, path,
        invocation_path, data, headers, resource_path, context={},
        resource_id=None, response_templates={}):

    relative_path, query_string_params = extract_query_string_params(path=invocation_path)
    integration_type_orig = integration.get('type') or integration.get('integrationType') or ''
    integration_type = integration_type_orig.upper()
    uri = integration.get('uri') or integration.get('integrationUri') or ''

    try:
        path_params = extract_path_params(path=relative_path, extracted_path=resource_path)
    except Exception:
        path_params = {}

    if (uri.startswith('arn:aws:apigateway:') and ':lambda:path' in uri) or \
            uri.startswith('arn:aws:lambda'):
        if integration_type in ['AWS', 'AWS_PROXY']:
            func_arn = uri
            if ':lambda:path' in uri:
                func_arn = uri.split(':lambda:path')[1].split('functions/')[1].split('/invocations')[0]

            # apply custom request template
            data_str = data
            try:
                data_str = json.dumps(data) if isinstance(data, (dict, list)) else to_str(data)
                data_str = apply_template(integration, 'request', data_str,
                    path_params=path_params, query_params=query_string_params, headers=headers)
            except Exception:
                pass

            # Sample request context:
            # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-create-api-as-simple-proxy-for-lambda.html#api-gateway-create-api-as-simple-proxy-for-lambda-test
            request_context = get_lambda_event_request_context(method, path, data, headers,
                integration_uri=uri, resource_id=resource_id, resource_path=resource_path)
            stage_variables = get_stage_variables(api_id, stage)

            result = lambda_api.process_apigateway_invocation(func_arn, relative_path, data_str,
                stage, api_id, headers, path_params=path_params,
                query_string_params=query_string_params, method=method,
                resource_path=resource_path, request_context=request_context,
                event_context=context, stage_variables=stage_variables)

            if isinstance(result, FlaskResponse):
                response = flask_to_requests_response(result)
            elif isinstance(result, Response):
                response = result
            else:
                response = LambdaResponse()
                parsed_result = result if isinstance(result, dict) else json.loads(str(result or '{}'))
                parsed_result = common.json_safe(parsed_result)
                parsed_result = {} if parsed_result is None else parsed_result
                response.status_code = int(parsed_result.get('statusCode', 200))
                parsed_headers = parsed_result.get('headers', {})
                if parsed_headers is not None:
                    response.headers.update(parsed_headers)
                try:
                    if isinstance(parsed_result['body'], dict):
                        response._content = json.dumps(parsed_result['body'])
                    else:
                        body_bytes = to_bytes(parsed_result.get('body') or '')
                        if parsed_result.get('isBase64Encoded', False):
                            body_bytes = base64.b64decode(body_bytes)
                        response._content = body_bytes
                except Exception as e:
                    LOG.warning("Couldn't set lambda response content: %s" % e)
                    response._content = '{}'
                update_content_length(response)
                response.multi_value_headers = parsed_result.get('multiValueHeaders') or {}

            # apply custom response template
            response._content = apply_template(integration, 'response', response._content)
            response.headers['Content-Length'] = str(len(response.content or ''))

            return response

        raise Exception('API Gateway %s integration action "%s", method "%s" not yet implemented' %
            (integration_type, uri, method))

    elif integration_type == 'AWS':
        if 'kinesis:action/' in uri:
            if uri.endswith('kinesis:action/PutRecord'):
                target = kinesis_listener.ACTION_PUT_RECORD
            if uri.endswith('kinesis:action/PutRecords'):
                target = kinesis_listener.ACTION_PUT_RECORDS
            if uri.endswith('kinesis:action/ListStreams'):
                target = kinesis_listener.ACTION_LIST_STREAMS

            template = integration['requestTemplates'][APPLICATION_JSON]
            new_request = aws_stack.render_velocity_template(template, data)

            # forward records to target kinesis stream
            headers = aws_stack.mock_aws_request_headers(service='kinesis')
            headers['X-Amz-Target'] = target
            result = common.make_http_request(url=TEST_KINESIS_URL,
                method='POST', data=new_request, headers=headers)
            # TODO apply response template..?
            return result

        elif 'states:action/' in uri:
            if uri.endswith('states:action/StartExecution'):
                action = 'StartExecution'
            decoded_data = data.decode()
            payload = {}
            if 'stateMachineArn' in decoded_data and 'input' in decoded_data:
                payload = json.loads(decoded_data)
            elif APPLICATION_JSON in integration.get('requestTemplates', {}):
                template = integration['requestTemplates'][APPLICATION_JSON]
                payload = aws_stack.render_velocity_template(template, data, as_json=True)
            client = aws_stack.connect_to_service('stepfunctions')

            kwargs = {'name': payload['name']} if 'name' in payload else {}
            result = client.start_execution(stateMachineArn=payload['stateMachineArn'],
                input=payload['input'], **kwargs)
            response = requests_response(
                content={
                    'executionArn': result['executionArn'],
                    'startDate': str(result['startDate'])
                },
                headers=aws_stack.mock_aws_request_headers())
            response.headers['content-type'] = APPLICATION_JSON
            return response

        elif 's3:path/' in uri and method == 'GET':
            s3 = aws_stack.connect_to_service('s3')
            uri_match = re.match(TARGET_REGEX_S3_URI, uri)
            if uri_match:
                bucket, object_key = uri_match.group('bucket', 'object')
                LOG.debug('Getting request for bucket %s object %s', bucket, object_key)
                try:
                    object = s3.get_object(Bucket=bucket, Key=object_key)
                except s3.exceptions.NoSuchKey:
                    msg = 'Object %s not found' % object_key
                    LOG.debug(msg)
                    return make_error_response(msg, 404)

                headers = aws_stack.mock_aws_request_headers(service='s3')
                if object.get('ContentType'):
                    headers['Content-Type'] = object['ContentType']

                # stream used so that large files do not fill up memory
                response = request_response_stream(stream=object['Body'], headers=headers)
                return response
            else:
                msg = 'Request URI does not match s3 specifications'
                LOG.warning(msg)
                return make_error_response(msg, 400)

        if method == 'POST':
            if uri.startswith('arn:aws:apigateway:') and ':sqs:path' in uri:
                template = integration['requestTemplates'][APPLICATION_JSON]
                account_id, queue = uri.split('/')[-2:]
                region_name = uri.split(':')[3]
                new_request = '%s&QueueName=%s' % (
                    aws_stack.render_velocity_template(template, data), queue)
                headers = aws_stack.mock_aws_request_headers(service='sqs', region_name=region_name)

                url = urljoin(TEST_SQS_URL, '%s/%s' % (TEST_AWS_ACCOUNT_ID, queue))
                result = common.make_http_request(url, method='POST', headers=headers,
                    data=new_request)
                return result

        raise Exception(
            'API Gateway AWS integration action URI "%s", method "%s" not yet implemented'
            % (uri, method))

    elif integration_type == 'AWS_PROXY':
        if uri.startswith('arn:aws:apigateway:') and ':dynamodb:action' in uri:
            # arn:aws:apigateway:us-east-1:dynamodb:action/PutItem&Table=MusicCollection
            table_name = uri.split(':dynamodb:action')[1].split('&Table=')[1]
            action = uri.split(':dynamodb:action')[1].split('&Table=')[0]

            if 'PutItem' in action and method == 'PUT':
                response_template = response_templates.get('application/json')
                if response_template is None:
                    msg = 'Invalid response template defined in integration response.'
                    LOG.info('%s Existing: %s' % (msg, response_templates))
                    return make_error_response(msg, 404)

                response_template = json.loads(response_template)
                if response_template['TableName'] != table_name:
                    msg = 'Invalid table name specified in integration response template.'
                    return make_error_response(msg, 404)

                dynamo_client = aws_stack.connect_to_resource('dynamodb')
                table = dynamo_client.Table(table_name)

                event_data = {}
                data_dict = json.loads(data)
                for key, _ in response_template['Item'].items():
                    event_data[key] = data_dict[key]

                table.put_item(Item=event_data)
                response = requests_response(event_data,
                    headers=aws_stack.mock_aws_request_headers())
                return response
        else:
            raise Exception(
                'API Gateway action uri "%s", integration type %s not yet implemented'
                % (uri, integration_type))

    elif integration_type in ['HTTP_PROXY', 'HTTP']:
        if ':servicediscovery:' in uri:
            # check if this is a servicediscovery integration URI
            client = aws_stack.connect_to_service('servicediscovery')
            service_id = uri.split('/')[-1]
            instances = client.list_instances(ServiceId=service_id)['Instances']
            instance = (instances or [None])[0]
            if instance and instance.get('Id'):
                uri = 'http://%s/%s' % (instance['Id'], invocation_path.lstrip('/'))

        # apply custom request template
        data = apply_template(integration, 'request', data)
        if isinstance(data, dict):
            data = json.dumps(data)

        uri = apply_request_parameter(integration=integration, path_params=path_params)
        function = getattr(requests, method.lower())
        result = function(uri, data=data, headers=headers)
        # apply custom response template
        data = apply_template(integration, 'response', data)
        return result

    elif integration_type == 'MOCK':
        # return an empty response - details filled in via responseParameters above...
        return requests_response({})

    if method == 'OPTIONS':
        # fall back to returning CORS headers if this is an OPTIONS request
        return get_cors_response(headers)

    raise Exception(
        'API Gateway integration type "%s", method "%s", URI "%s" not yet implemented'
        % (integration_type, method, uri))
def handler(event, context):
    """ Generic event forwarder Lambda. """

    # print test messages (to test CloudWatch Logs integration)
    LOGGER.info('Lambda log message - logging module')
    print('Lambda log message - print function')

    if MSG_BODY_RAISE_ERROR_FLAG in event:
        raise Exception('Test exception (this is intentional)')

    if 'httpMethod' in event:
        # looks like this is a call from an AWS_PROXY API Gateway
        try:
            body = json.loads(event['body'])
        except Exception:
            body = {}

        body['path'] = event.get('path')
        body['resource'] = event.get('resource')
        body['pathParameters'] = event.get('pathParameters')
        body['requestContext'] = event.get('requestContext')
        body['queryStringParameters'] = event.get('queryStringParameters')
        body['httpMethod'] = event.get('httpMethod')
        body['body'] = event.get('body')

        if body['httpMethod'] == 'DELETE':
            return {'statusCode': 204}

        status_code = body.get('return_status_code', 200)
        headers = body.get('return_headers', {})
        body = body.get('return_raw_body') or body

        return {
            'body': body,
            'statusCode': status_code,
            'headers': headers,
            'multiValueHeaders': {'set-cookie': ['language=en-US', 'theme=blue moon']},
        }

    if 'Records' not in event:
        result_map = {'event': event, 'context': {}}
        result_map['context']['invoked_function_arn'] = context.invoked_function_arn
        result_map['context']['function_version'] = context.function_version
        result_map['context']['function_name'] = context.function_name
        result_map['context']['memory_limit_in_mb'] = context.memory_limit_in_mb
        result_map['context']['aws_request_id'] = context.aws_request_id
        result_map['context']['log_group_name'] = context.log_group_name
        result_map['context']['log_stream_name'] = context.log_stream_name
        if hasattr(context, 'client_context'):
            result_map['context']['client_context'] = context.client_context
        return result_map

    raw_event_messages = []
    for record in event['Records']:
        # Deserialize into Python dictionary and extract the
        # "NewImage" (the new version of the full DDB document)
        ddb_new_image = deserialize_event(record)

        if MSG_BODY_RAISE_ERROR_FLAG in ddb_new_image.get('data', {}):
            raise Exception('Test exception (this is intentional)')

        # Place the raw event message document into the Kinesis message format
        kinesis_record = {
            'PartitionKey': 'key123',
            'Data': json.dumps(ddb_new_image)
        }

        if MSG_BODY_MESSAGE_TARGET in ddb_new_image.get('data', {}):
            forwarding_target = ddb_new_image['data'][MSG_BODY_MESSAGE_TARGET]
            target_name = forwarding_target.split(':')[-1]
            if forwarding_target.startswith('kinesis:'):
                ddb_new_image['data'][MSG_BODY_MESSAGE_TARGET] = 's3:test_chain_result'
                kinesis_record['Data'] = json.dumps(ddb_new_image['data'])
                forward_event_to_target_stream(kinesis_record, target_name)
            elif forwarding_target.startswith('s3:'):
                s3_client = aws_stack.connect_to_service('s3')
                test_data = to_bytes(json.dumps({'test_data': ddb_new_image['data']['test_data']}))
                s3_client.upload_fileobj(BytesIO(test_data), TEST_BUCKET_NAME, target_name)
        else:
            raw_event_messages.append(kinesis_record)

    # forward messages to Kinesis
    forward_events(raw_event_messages)
def is_s3_form_data(data_bytes):
    if to_bytes('key=') in data_bytes:
        return True
    if to_bytes('Content-Disposition: form-data') in data_bytes and \
            to_bytes('name="key"') in data_bytes:
        return True
    return False
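
# Hypothetical checks of the is_s3_form_data heuristic (the payloads below are invented):
def _demo_is_s3_form_data():
    assert is_s3_form_data(to_bytes('key=uploads/test.txt&acl=private'))
    assert is_s3_form_data(to_bytes('Content-Disposition: form-data; name="key"\r\n\r\nuploads/test.txt'))
    assert not is_s3_form_data(to_bytes('{"some": "json body"}'))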
def invoke_rest_api(api_id, stage, method, invocation_path, data, headers, path=None):
    path = path or invocation_path
    relative_path, query_string_params = extract_query_string_params(path=invocation_path)

    # run gateway authorizers for this request
    authorize_invocation(api_id, headers)
    path_map = helpers.get_rest_api_paths(rest_api_id=api_id)
    try:
        extracted_path, resource = get_resource_for_path(path=relative_path, path_map=path_map)
    except Exception:
        return make_error_response('Unable to find path %s' % path, 404)

    api_key_required = resource.get('resourceMethods', {}).get(method, {}).get('apiKeyRequired')
    if not is_api_key_valid(api_key_required, headers, stage):
        return make_error_response('Access denied - invalid API key', 403)

    integrations = resource.get('resourceMethods', {})
    integration = integrations.get(method, {})
    if not integration:
        integration = integrations.get('ANY', {})
    integration = integration.get('methodIntegration')
    if not integration:
        if method == 'OPTIONS' and 'Origin' in headers:
            # default to returning CORS headers if this is an OPTIONS request
            return get_cors_response(headers)
        return make_error_response('Unable to find integration for path %s' % path, 404)

    uri = integration.get('uri')
    if integration['type'] == 'AWS':
        if 'kinesis:action/' in uri:
            if uri.endswith('kinesis:action/PutRecords'):
                target = kinesis_listener.ACTION_PUT_RECORDS
            if uri.endswith('kinesis:action/ListStreams'):
                target = kinesis_listener.ACTION_LIST_STREAMS
            template = integration['requestTemplates'][APPLICATION_JSON]
            new_request = aws_stack.render_velocity_template(template, data)

            # forward records to target kinesis stream
            headers = aws_stack.mock_aws_request_headers(service='kinesis')
            headers['X-Amz-Target'] = target
            result = common.make_http_request(url=TEST_KINESIS_URL,
                method='POST', data=new_request, headers=headers)
            return result

        if method == 'POST':
            if uri.startswith('arn:aws:apigateway:') and ':sqs:path' in uri:
                template = integration['requestTemplates'][APPLICATION_JSON]
                account_id, queue = uri.split('/')[-2:]
                region_name = uri.split(':')[3]
                new_request = '%s&QueueName=%s' % (aws_stack.render_velocity_template(template, data), queue)
                headers = aws_stack.mock_aws_request_headers(service='sqs', region_name=region_name)
                url = urljoin(TEST_SQS_URL, '%s/%s' % (TEST_AWS_ACCOUNT_ID, queue))
                result = common.make_http_request(url, method='POST', headers=headers, data=new_request)
                return result

        msg = 'API Gateway AWS integration action URI "%s", method "%s" not yet implemented' % (uri, method)
        LOGGER.warning(msg)
        return make_error_response(msg, 404)

    elif integration['type'] == 'AWS_PROXY':
        if uri.startswith('arn:aws:apigateway:') and ':lambda:path' in uri:
            func_arn = uri.split(':lambda:path')[1].split('functions/')[1].split('/invocations')[0]
            data_str = json.dumps(data) if isinstance(data, (dict, list)) else to_str(data)
            account_id = uri.split(':lambda:path')[1].split(':function:')[0].split(':')[-1]
            source_ip = headers['X-Forwarded-For'].split(',')[-2]

            # Sample request context:
            # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-create-api-as-simple-proxy-for-lambda.html#api-gateway-create-api-as-simple-proxy-for-lambda-test
            request_context = {
                # add the stage to the request context path -
                # https://github.com/localstack/localstack/issues/2210
                'path': '/' + stage + relative_path,
                'accountId': account_id,
                'resourceId': resource.get('id'),
                'stage': stage,
                'identity': {
                    'accountId': account_id,
                    'sourceIp': source_ip,
                    'userAgent': headers['User-Agent'],
                },
                'httpMethod': method,
                'protocol': 'HTTP/1.1',
                # note: the request context must be JSON-serializable, hence str(..)
                'requestTime': str(datetime.datetime.utcnow()),
                'requestTimeEpoch': int(time.time() * 1000),
            }

            try:
                path_params = extract_path_params(path=relative_path, extracted_path=extracted_path)
            except Exception:
                path_params = {}

            result = lambda_api.process_apigateway_invocation(func_arn, relative_path, data_str,
                stage, api_id, headers, path_params=path_params,
                query_string_params=query_string_params, method=method,
                resource_path=path, request_context=request_context)

            if isinstance(result, FlaskResponse):
                return flask_to_requests_response(result)
            if isinstance(result, Response):
                return result

            response = LambdaResponse()
            parsed_result = result if isinstance(result, dict) else json.loads(str(result))
            parsed_result = common.json_safe(parsed_result)
            parsed_result = {} if parsed_result is None else parsed_result
            response.status_code = int(parsed_result.get('statusCode', 200))
            parsed_headers = parsed_result.get('headers', {})
            if parsed_headers is not None:
                response.headers.update(parsed_headers)
            try:
                # note: requests' Response.content is a read-only property, so write to _content
                if isinstance(parsed_result['body'], dict):
                    response._content = json.dumps(parsed_result['body'])
                else:
                    response._content = to_bytes(parsed_result['body'])
            except Exception:
                response._content = '{}'
            update_content_length(response)
            response.multi_value_headers = parsed_result.get('multiValueHeaders') or {}
            return response

        elif uri.startswith('arn:aws:apigateway:') and ':dynamodb:action' in uri:
            # arn:aws:apigateway:us-east-1:dynamodb:action/PutItem&Table=MusicCollection
            table_name = uri.split(':dynamodb:action')[1].split('&Table=')[1]
            action = uri.split(':dynamodb:action')[1].split('&Table=')[0]

            if 'PutItem' in action and method == 'PUT':
                response_template = path_map.get(relative_path, {}).get('resourceMethods', {}) \
                    .get(method, {}).get('methodIntegration', {}) \
                    .get('integrationResponses', {}).get('200', {}).get('responseTemplates', {}) \
                    .get('application/json', None)

                if response_template is None:
                    msg = 'Invalid response template defined in integration response.'
                    return make_error_response(msg, 404)

                response_template = json.loads(response_template)
                if response_template['TableName'] != table_name:
                    msg = 'Invalid table name specified in integration response template.'
                    return make_error_response(msg, 404)

                dynamo_client = aws_stack.connect_to_resource('dynamodb')
                table = dynamo_client.Table(table_name)

                event_data = {}
                data_dict = json.loads(data)
                for key, _ in response_template['Item'].items():
                    event_data[key] = data_dict[key]

                table.put_item(Item=event_data)
                response = requests_response(event_data, headers=aws_stack.mock_aws_request_headers())
                return response
        else:
            msg = 'API Gateway action uri "%s" not yet implemented' % uri
            LOGGER.warning(msg)
            return make_error_response(msg, 404)

    elif integration['type'] in ['HTTP_PROXY', 'HTTP']:
        function = getattr(requests, method.lower())
        if integration['type'] == 'HTTP':
            # apply custom request template
            template = integration.get('requestTemplates', {}).get(APPLICATION_JSON)
            if template:
                data = aws_stack.render_velocity_template(template, data)
        if isinstance(data, dict):
            data = json.dumps(data)
        result = function(integration['uri'], data=data, headers=headers)
        if integration['type'] == 'HTTP':
            # apply custom response template
            template = integration.get('responseTemplates', {}).get(APPLICATION_JSON)
            if template and result.content:
                result._content = aws_stack.render_velocity_template(template, result.content)
                update_content_length(result)
        return result

    else:
        msg = ('API Gateway integration type "%s" for method "%s" not yet implemented' %
            (integration['type'], method))
        LOGGER.warning(msg)
        return make_error_response(msg, 404)
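# Example (illustrative): the Lambda-proxy response contract that the AWS_PROXY
# branch above parses -- a JSON document with statusCode/headers/body and
# optional multiValueHeaders. Field names follow the AWS_PROXY convention;
# the sample values are made up.
import json

raw_result = json.dumps({
    'statusCode': 201,
    'headers': {'Content-Type': 'application/json'},
    'multiValueHeaders': {'set-cookie': ['language=en-US']},
    'body': {'created': True},
})
parsed = json.loads(raw_result)
status_code = int(parsed.get('statusCode', 200))
parsed_headers = parsed.get('headers') or {}
body = parsed['body']
content = json.dumps(body) if isinstance(body, dict) else str(body)
assert status_code == 201 and 'set-cookie' in parsed['multiValueHeaders']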
def return_response(self, method, path, data, headers, response, request_handler=None):
    if headers.get('Accept-Encoding') == 'gzip' and response._content:
        response._content = gzip.compress(to_bytes(response._content))
        response.headers['Content-Length'] = str(len(response._content))
        response.headers['Content-Encoding'] = 'gzip'
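# Example (illustrative): the gzip round trip performed by the listener above,
# shown with the standard library only. The payload is made up.
import gzip

payload = b'{"metric": "cpu", "value": 42}'
compressed = gzip.compress(payload)
response_headers = {'Content-Encoding': 'gzip', 'Content-Length': str(len(compressed))}
assert gzip.decompress(compressed) == payload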
def send_event_to_target(target_arn: str, event: Dict, target_attributes: Dict = None, asynchronous: bool = True):
    region = target_arn.split(":")[3]

    if ":lambda:" in target_arn:
        from localstack.services.awslambda import lambda_api
        lambda_api.run_lambda(func_arn=target_arn, event=event, context={}, asynchronous=asynchronous)

    elif ":sns:" in target_arn:
        sns_client = connect_to_service("sns", region_name=region)
        sns_client.publish(TopicArn=target_arn, Message=json.dumps(event))

    elif ":sqs:" in target_arn:
        sqs_client = connect_to_service("sqs", region_name=region)
        queue_url = get_sqs_queue_url(target_arn)
        msg_group_id = dict_utils.get_safe(target_attributes, "$.SqsParameters.MessageGroupId")
        kwargs = {"MessageGroupId": msg_group_id} if msg_group_id else {}
        sqs_client.send_message(QueueUrl=queue_url, MessageBody=json.dumps(event), **kwargs)

    elif ":states:" in target_arn:
        stepfunctions_client = connect_to_service("stepfunctions", region_name=region)
        stepfunctions_client.start_execution(stateMachineArn=target_arn, input=json.dumps(event))

    elif ":firehose:" in target_arn:
        delivery_stream_name = firehose_name(target_arn)
        firehose_client = connect_to_service("firehose", region_name=region)
        firehose_client.put_record(
            DeliveryStreamName=delivery_stream_name,
            Record={"Data": to_bytes(json.dumps(event))},
        )

    elif ":events:" in target_arn:
        if ":api-destination/" in target_arn or ":destination/" in target_arn:
            send_event_to_api_destination(target_arn, event)
        else:
            events_client = connect_to_service("events", region_name=region)
            eventbus_name = target_arn.split(":")[-1].split("/")[-1]
            events_client.put_events(
                Entries=[{
                    "EventBusName": eventbus_name,
                    "Source": event.get("source"),
                    "DetailType": event.get("detail-type"),
                    "Detail": event.get("detail"),
                }]
            )

    elif ":kinesis:" in target_arn:
        partition_key_path = dict_utils.get_safe(
            target_attributes,
            "$.KinesisParameters.PartitionKeyPath",
            default_value="$.id",
        )
        stream_name = target_arn.split("/")[-1]
        partition_key = dict_utils.get_safe(event, partition_key_path, event["id"])
        kinesis_client = connect_to_service("kinesis", region_name=region)
        kinesis_client.put_record(
            StreamName=stream_name,
            Data=to_bytes(json.dumps(event)),
            PartitionKey=partition_key,
        )

    elif ":logs:" in target_arn:
        log_group_name = target_arn.split(":")[-1]
        logs_client = connect_to_service("logs", region_name=region)
        log_stream_name = str(uuid.uuid4())
        logs_client.create_log_stream(logGroupName=log_group_name, logStreamName=log_stream_name)
        logs_client.put_log_events(
            logGroupName=log_group_name,
            logStreamName=log_stream_name,
            logEvents=[{
                "timestamp": now_utc(millis=True),
                "message": json.dumps(event),
            }],
        )

    else:
        LOG.warning('Unsupported Events rule target ARN: "%s"' % target_arn)
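# Example (illustrative): how the dispatcher above derives routing details from
# a target ARN with plain string splitting. The ARNs below are made-up samples.
target_arn = "arn:aws:kinesis:us-east-1:000000000000:stream/test-stream"
region = target_arn.split(":")[3]          # -> "us-east-1"
stream_name = target_arn.split("/")[-1]    # -> "test-stream"
assert (region, stream_name) == ("us-east-1", "test-stream")

bus_arn = "arn:aws:events:us-east-1:000000000000:event-bus/custom-bus"
eventbus_name = bus_arn.split(":")[-1].split("/")[-1]  # -> "custom-bus"
assert eventbus_name == "custom-bus"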
def forward_request(self, method, path, data, headers):
    # parse path and query params
    parsed_path = urlparse.urlparse(path)

    # Make sure we use 'localhost' as forward host, to ensure moto uses path style addressing.
    # Note that all S3 clients using LocalStack need to enable path style addressing.
    if 's3.amazonaws.com' not in headers.get('host', ''):
        headers['host'] = 'localhost'

    # check content md5 hash integrity if not a copy request
    if 'Content-MD5' in headers and not self.is_s3_copy_request(headers, path):
        response = check_content_md5(data, headers)
        if response is not None:
            return response

    modified_data = None

    # check bucket name
    bucket_name = get_bucket_name(path, headers)
    if method == 'PUT' and not re.match(BUCKET_NAME_REGEX, bucket_name):
        if len(parsed_path.path) <= 1:
            return error_response('Unable to extract valid bucket name. Please ensure that your AWS SDK is ' +
                'configured to use path style addressing, or send a valid <Bucket>.s3.amazonaws.com "Host" header',
                'InvalidBucketName', status_code=400)
        return error_response('The specified bucket is not valid.', 'InvalidBucketName', status_code=400)

    # TODO: For some reason, moto doesn't allow us to put a location constraint on us-east-1
    to_find = to_bytes('<LocationConstraint>us-east-1</LocationConstraint>')
    if data and data.startswith(to_bytes('<')) and to_find in data:
        modified_data = data.replace(to_find, to_bytes(''))

    # If this request contains streaming v4 authentication signatures, strip them from the message
    # Related issue: https://github.com/localstack/localstack/issues/98
    # TODO we should evaluate whether to replace moto s3 with scality/S3:
    # https://github.com/scality/S3/issues/237
    if headers.get('x-amz-content-sha256') == 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD':
        modified_data = strip_chunk_signatures(modified_data or data)
        headers['content-length'] = headers.get('x-amz-decoded-content-length')

    # POST requests to S3 may include a "${filename}" placeholder in the
    # key, which should be replaced with an actual file name before storing.
    if method == 'POST':
        original_data = modified_data or data
        expanded_data = multipart_content.expand_multipart_filename(original_data, headers)
        if expanded_data is not original_data:
            modified_data = expanded_data

    # If no content-type is provided, 'binary/octet-stream' should be used
    # src: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
    if method == 'PUT' and not headers.get('content-type'):
        headers['content-type'] = 'binary/octet-stream'

    # persist this API call to disk
    persistence.record('s3', method, path, data, headers)

    # parse query params
    query = parsed_path.query
    path = parsed_path.path
    bucket = path.split('/')[1]
    query_map = urlparse.parse_qs(query, keep_blank_values=True)

    # remap metadata query params (not supported in moto) to request headers
    append_metadata_headers(method, query_map, headers)

    if query == 'notification' or 'notification' in query_map:
        # handle and return response for ?notification request
        response = handle_notification_request(bucket, method, data)
        return response

    if query == 'cors' or 'cors' in query_map:
        if method == 'GET':
            return get_cors(bucket)
        if method == 'PUT':
            return set_cors(bucket, data)
        if method == 'DELETE':
            return delete_cors(bucket)

    if query == 'lifecycle' or 'lifecycle' in query_map:
        if method == 'GET':
            return get_lifecycle(bucket)
        if method == 'PUT':
            return set_lifecycle(bucket, data)

    if query == 'replication' or 'replication' in query_map:
        if method == 'GET':
            return get_replication(bucket)
        if method == 'PUT':
            return set_replication(bucket, data)

    if query == 'encryption' or 'encryption' in query_map:
        if method == 'GET':
            return get_encryption(bucket)
        if method == 'PUT':
            return set_encryption(bucket, data)

    if query == 'object-lock' or 'object-lock' in query_map:
        if method == 'GET':
            return get_object_lock(bucket)
        if method == 'PUT':
            return set_object_lock(bucket, data)

    if modified_data is not None:
        return Request(data=modified_data, headers=headers, method=method)
    return True
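# Example (illustrative): the byte-level rewrite applied above to CreateBucket
# payloads that pin the us-east-1 location constraint. The payload is made up.
body = (b'<CreateBucketConfiguration>'
        b'<LocationConstraint>us-east-1</LocationConstraint>'
        b'</CreateBucketConfiguration>')
to_find = b'<LocationConstraint>us-east-1</LocationConstraint>'
if body.startswith(b'<') and to_find in body:
    body = body.replace(to_find, b'')
assert body == b'<CreateBucketConfiguration></CreateBucketConfiguration>'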
def forward(self, method):
    path = self.path
    if '://' in path:
        path = '/' + path.split('://', 1)[1].split('/', 1)[1]
    proxy_url = '%s%s' % (self.proxy.forward_url, path)
    target_url = self.path
    if '://' not in target_url:
        target_url = '%s%s' % (self.proxy.forward_url, target_url)

    data = self.data_bytes
    forward_headers = CaseInsensitiveDict(self.headers)

    # update original "Host" header (moto s3 relies on this behavior)
    if not forward_headers.get('Host'):
        forward_headers['host'] = urlparse(target_url).netloc
    if 'localhost.atlassian.io' in forward_headers.get('Host'):
        forward_headers['host'] = 'localhost'

    try:
        response = None
        modified_request = None

        # update listener (pre-invocation)
        if self.proxy.update_listener:
            listener_result = self.proxy.update_listener.forward_request(
                method=method, path=path, data=data, headers=forward_headers)
            if isinstance(listener_result, Response):
                response = listener_result
            elif isinstance(listener_result, Request):
                modified_request = listener_result
                data = modified_request.data
                forward_headers = modified_request.headers
            elif listener_result is not True:
                # get status code from listener result, or fall back to 503 (Service Unavailable)
                code = listener_result if isinstance(listener_result, int) else 503
                self.send_response(code)
                self.end_headers()
                return

        # perform the actual invocation of the backend service
        if response is None:
            if modified_request:
                response = self.method(proxy_url, data=modified_request.data,
                    headers=modified_request.headers)
            else:
                response = self.method(proxy_url, data=self.data_bytes, headers=forward_headers)

        # update listener (post-invocation)
        if self.proxy.update_listener:
            kwargs = {
                'method': method,
                'path': path,
                'data': data,
                'headers': forward_headers,
                'response': response
            }
            # note: use getfullargspec here - getargspec has been removed in recent Python versions
            if 'request_handler' in inspect.getfullargspec(self.proxy.update_listener.return_response)[0]:
                # some listeners (e.g., sqs_listener.py) require additional details like the original
                # request port, hence we pass in a reference to this request handler as well.
                kwargs['request_handler'] = self
            updated_response = self.proxy.update_listener.return_response(**kwargs)
            if isinstance(updated_response, Response):
                response = updated_response

        # copy headers and return response
        self.send_response(response.status_code)

        content_length_sent = False
        for header_key, header_value in iteritems(response.headers):
            # filter out certain headers that we don't want to transmit
            if header_key.lower() not in ('transfer-encoding', 'date', 'server'):
                self.send_header(header_key, header_value)
                content_length_sent = content_length_sent or header_key.lower() == 'content-length'
        if not content_length_sent:
            self.send_header('Content-Length', '%s' % (len(response.content) if response.content else 0))

        # allow pre-flight CORS headers by default
        if 'Access-Control-Allow-Origin' not in response.headers:
            self.send_header('Access-Control-Allow-Origin', '*')
        if 'Access-Control-Allow-Methods' not in response.headers:
            self.send_header('Access-Control-Allow-Methods', ','.join(CORS_ALLOWED_METHODS))
        if 'Access-Control-Allow-Headers' not in response.headers:
            self.send_header('Access-Control-Allow-Headers', ','.join(CORS_ALLOWED_HEADERS))

        self.end_headers()
        if response.content and len(response.content):
            self.wfile.write(to_bytes(response.content))
        self.wfile.flush()
    except Exception as e:
        trace = str(traceback.format_exc())
        conn_errors = ('ConnectionRefusedError', 'NewConnectionError')
        conn_error = any(error_str in trace for error_str in conn_errors)
        error_msg = 'Error forwarding request: %s %s' % (e, trace)
        if 'Broken pipe' in trace:
            LOGGER.warning('Connection prematurely closed by client (broken pipe).')
        elif not self.proxy.quiet or not conn_error:
            LOGGER.error(error_msg)
            if os.environ.get(ENV_INTERNAL_TEST_RUN):
                # During a test run, we also want to print error messages, because
                # log messages are delayed until the entire test run is over, and
                # hence we are missing messages if the test hangs for some reason.
                print('ERROR: %s' % error_msg)
        self.send_response(502)  # bad gateway
        self.end_headers()
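# Example (illustrative): the listener contract the proxy handler above relies
# on. forward_request may return True (pass through), an int (status code to
# short-circuit with), a Request (modified request), or a Response (short-circuit
# response). The class below is a minimal sketch; EchoListener and its behavior
# are assumptions, not part of the original code.
class EchoListener(object):
    def forward_request(self, method, path, data, headers):
        if path.startswith('/blocked'):
            return 403          # short-circuit with a status code
        return True             # pass the request through unchanged

    def return_response(self, method, path, data, headers, response):
        response.headers['X-Proxied-By'] = 'EchoListener'
        return response         # returning a Response replaces the original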
def forward(self, method):
    data = self.data_bytes
    path = self.path
    forward_headers = CaseInsensitiveDict(self.headers)

    # force close connection
    connection_header = forward_headers.get('Connection') or ''
    if connection_header.lower() not in ['keep-alive', '']:
        self.close_connection = True

    client_address = self.client_address[0]
    server_address = ':'.join(map(str, self.server.server_address))

    try:
        # run the actual response forwarding
        response = modify_and_forward(
            method=method,
            path=path,
            data_bytes=data,
            headers=forward_headers,
            forward_base_url=self.proxy.forward_base_url,
            listeners=self._listeners(),
            request_handler=self,
            client_address=client_address,
            server_address=server_address)

        # copy headers and return response
        self.send_response(response.status_code)

        # set content for chunked encoding
        is_chunked = uses_chunked_encoding(response)
        if is_chunked:
            response._content = create_chunked_data(response._content)

        # send headers
        content_length_sent = False
        for header_key, header_value in iteritems(response.headers):
            # filter out certain headers that we don't want to transmit
            if header_key.lower() not in ('transfer-encoding', 'date', 'server'):
                self.send_header(header_key, header_value)
                content_length_sent = content_length_sent or header_key.lower() == 'content-length'

        # fix content-length header if needed
        if not content_length_sent and not is_chunked:
            self.send_header('Content-Length', '%s' % (len(response.content) if response.content else 0))

        if isinstance(response, LambdaResponse):
            self.send_multi_value_headers(response.multi_value_headers)

        self.end_headers()
        if response.content and len(response.content):
            self.wfile.write(to_bytes(response.content))
    except Exception as e:
        trace = str(traceback.format_exc())
        conn_errors = ('ConnectionRefusedError', 'NewConnectionError', 'Connection aborted',
            'Unexpected EOF', 'Connection reset by peer', 'cannot read from timed out object')
        conn_error = any(error_str in trace for error_str in conn_errors)
        error_msg = 'Error forwarding request: %s %s' % (e, trace)
        if 'Broken pipe' in trace:
            LOG.warning('Connection prematurely closed by client (broken pipe).')
        elif not self.proxy.quiet or not conn_error:
            LOG.error(error_msg)
            if os.environ.get(ENV_INTERNAL_TEST_RUN):
                # During a test run, we also want to print error messages, because
                # log messages are delayed until the entire test run is over, and
                # hence we are missing messages if the test hangs for some reason.
                print('ERROR: %s' % error_msg)
        self.send_response(502)  # bad gateway
        self.end_headers()
        # force close connection
        self.close_connection = True
    finally:
        try:
            self.wfile.flush()
        except Exception as e:
            LOG.warning('Unable to flush write file: %s' % e)
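# Example (illustrative): what chunked transfer encoding of a payload looks
# like -- each chunk is prefixed with its hex length, and the stream ends with
# a zero-length chunk. This is a sketch of what a helper like
# create_chunked_data might emit; the actual helper is not shown in this document.
def chunk_data(payload: bytes, chunk_size: int = 8) -> bytes:
    out = b''
    for i in range(0, len(payload), chunk_size):
        chunk = payload[i:i + chunk_size]
        out += b'%x\r\n%s\r\n' % (len(chunk), chunk)
    return out + b'0\r\n\r\n'

assert chunk_data(b'hello world!') == b'8\r\nhello wo\r\n4\r\nrld!\r\n0\r\n\r\n'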
def _execute(self, func_arn, func_details, event, context=None, version=None):
    lambda_cwd = func_details.cwd
    runtime = func_details.runtime
    handler = func_details.handler
    environment = self._prepare_environment(func_details)

    # configure USE_SSL in environment
    if config.USE_SSL:
        environment['USE_SSL'] = '1'

    # prepare event body
    if not event:
        LOG.warning('Empty event body specified for invocation of Lambda "%s"' % func_arn)
        event = {}
    event_body = json.dumps(json_safe(event))
    stdin = self.prepare_event(environment, event_body)

    main_endpoint = get_main_endpoint_from_container()

    environment['LOCALSTACK_HOSTNAME'] = main_endpoint
    environment['EDGE_PORT'] = str(config.EDGE_PORT)
    environment['_HANDLER'] = handler
    if os.environ.get('HTTP_PROXY'):
        environment['HTTP_PROXY'] = os.environ['HTTP_PROXY']
    if func_details.timeout:
        environment['AWS_LAMBDA_FUNCTION_TIMEOUT'] = str(func_details.timeout)
    if context:
        environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name
        environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version
        environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn
        environment['AWS_LAMBDA_COGNITO_IDENTITY'] = json.dumps(context.cognito_identity or {})
        if context.client_context is not None:
            environment['AWS_LAMBDA_CLIENT_CONTEXT'] = json.dumps(
                to_str(base64.b64decode(to_bytes(context.client_context))))

    # custom command to execute in the container
    command = ''
    events_file = ''

    if USE_CUSTOM_JAVA_EXECUTOR and is_java_lambda(runtime):
        # if running a Java Lambda with our custom executor, set up classpath arguments
        java_opts = Util.get_java_opts()
        stdin = None
        # copy executor jar into temp directory
        target_file = os.path.join(lambda_cwd, os.path.basename(LAMBDA_EXECUTOR_JAR))
        if not os.path.exists(target_file):
            cp_r(LAMBDA_EXECUTOR_JAR, target_file)
        # TODO cleanup once we have custom Java Docker image
        taskdir = '/var/task'
        events_file = '_lambda.events.%s.json' % short_uid()
        save_file(os.path.join(lambda_cwd, events_file), event_body)
        classpath = Util.get_java_classpath(target_file)
        command = ("bash -c 'cd %s; java %s -cp \"%s\" \"%s\" \"%s\" \"%s\"'" %
            (taskdir, java_opts, classpath, LAMBDA_EXECUTOR_CLASS, handler, events_file))

    # accept any self-signed certificates for outgoing calls from the Lambda
    if is_nodejs_runtime(runtime):
        environment['NODE_TLS_REJECT_UNAUTHORIZED'] = '0'

    # determine the command to be executed (implemented by subclasses)
    cmd = self.prepare_execution(func_arn, environment, runtime, command, handler, lambda_cwd)

    # lambci writes the Lambda result to stdout and logs to stderr, fetch it from there!
    LOG.info('Running lambda cmd: %s' % cmd)
    result = self.run_lambda_executor(cmd, stdin, env_vars=environment, func_details=func_details)

    # clean up events file
    if events_file and os.path.exists(events_file):
        rm_rf(events_file)
    return result
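# Example (illustrative): the AWS_LAMBDA_CLIENT_CONTEXT round trip performed
# above -- the client context arrives base64-encoded and is decoded back into a
# JSON string before being stored in the environment. The sample data is made up.
import base64
import json

client_context = base64.b64encode(json.dumps({'custom': {'foo': 'bar'}}).encode('utf-8'))
decoded = base64.b64decode(client_context).decode('utf-8')
env_value = json.dumps(decoded)  # stored as a JSON string, as in the code above
assert json.loads(json.loads(env_value))['custom']['foo'] == 'bar'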
def get_api_from_custom_rules(method, path, data, headers):
    """Determine backend port based on custom rules."""

    # detect S3 presigned URLs
    if "AWSAccessKeyId=" in path or "Signature=" in path:
        return "s3", config.PORT_S3

    # heuristic for SQS queue URLs
    if is_sqs_queue_url(path):
        return "sqs", config.PORT_SQS

    # DynamoDB shell URLs
    if path.startswith("/shell") or path.startswith("/dynamodb/shell"):
        return "dynamodb", config.PORT_DYNAMODB

    # API Gateway invocation URLs
    if ("/%s/" % PATH_USER_REQUEST) in path:
        return "apigateway", config.PORT_APIGATEWAY

    data_bytes = to_bytes(data or "")
    version, action = extract_version_and_action(path, data_bytes)

    def _in_path_or_payload(search_str):
        return to_str(search_str) in path or to_bytes(search_str) in data_bytes

    if path == "/" and b"QueueName=" in data_bytes:
        return "sqs", config.PORT_SQS

    if "Action=ConfirmSubscription" in path:
        return "sns", config.PORT_SNS

    if path.startswith("/2015-03-31/functions/"):
        return "lambda", config.PORT_LAMBDA

    if _in_path_or_payload("Action=AssumeRoleWithWebIdentity"):
        return "sts", config.PORT_STS

    if _in_path_or_payload("Action=AssumeRoleWithSAML"):
        return "sts", config.PORT_STS

    # CloudWatch backdoor API to retrieve raw metrics
    if path.startswith(PATH_GET_RAW_METRICS):
        return "cloudwatch", config.PORT_CLOUDWATCH

    # SQS queue requests
    if _in_path_or_payload("QueueUrl=") and _in_path_or_payload("Action="):
        return "sqs", config.PORT_SQS
    if matches_service_action("sqs", action, version=version):
        return "sqs", config.PORT_SQS

    # SNS topic requests
    if matches_service_action("sns", action, version=version):
        return "sns", config.PORT_SNS

    # TODO: move S3 public URLs to a separate port/endpoint, OR check ACLs here first
    stripped = path.strip("/")
    if method in ["GET", "HEAD"] and "/" in stripped:
        # assume that this is an S3 GET request with URL path `/<bucket>/<key ...>`
        return "s3", config.PORT_S3

    # detect S3 URLs
    if stripped and "/" not in stripped:
        if method == "HEAD":
            # assume that this is an S3 HEAD bucket request with URL path `/<bucket>`
            return "s3", config.PORT_S3
        if method == "PUT":
            # assume that this is an S3 PUT bucket request with URL path `/<bucket>`
            return "s3", config.PORT_S3
        if method == "POST" and is_s3_form_data(data_bytes):
            # assume that this is an S3 POST request with form parameters or multipart form in the body
            return "s3", config.PORT_S3

    # detect S3 requests sent from aws-cli using --no-sign-request option
    if "aws-cli/" in headers.get("User-Agent", ""):
        return "s3", config.PORT_S3

    # S3 delete object requests
    if (method == "POST" and "delete=" in path
            and b"<Delete" in data_bytes and b"<Key>" in data_bytes):
        return "s3", config.PORT_S3

    # Put Object API can have multiple keys
    if stripped.count("/") >= 1 and method == "PUT":
        # assume that this is an S3 PUT bucket object request with URL path `/<bucket>/object`
        # or `/<bucket>/object/object1/+`
        return "s3", config.PORT_S3

    # detect S3 requests with "AWS id:key" Auth headers
    auth_header = headers.get("Authorization") or ""
    if auth_header.startswith("AWS "):
        return "s3", config.PORT_S3

    # certain EC2 requests from Java SDK contain no Auth headers (issue #3805)
    if b"Version=2016-11-15" in data_bytes:
        return "ec2", config.PORT_EC2
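# Example (illustrative): extracting the `Action` and `Version` fields that the
# router above matches on from a form-encoded AWS query API payload. This is a
# sketch of what a helper like extract_version_and_action might do; the real
# helper is not shown in this document, and the payload is a made-up sample.
from urllib.parse import parse_qs

payload = b"Action=SendMessage&Version=2012-11-05&QueueUrl=http://localhost:4566/queue/test"
params = parse_qs(payload.decode("utf-8"))
action = (params.get("Action") or [None])[0]
version = (params.get("Version") or [None])[0]
assert (action, version) == ("SendMessage", "2012-11-05")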
def return_response(self, method, path, data, headers, response):
    path = to_str(path)
    method = to_str(method)
    bucket_name = get_bucket_name(path, headers)

    # No path-name based bucket name? Try host-based
    hostname_parts = headers['host'].split('.')
    if (not bucket_name or len(bucket_name) == 0) and len(hostname_parts) > 1:
        bucket_name = hostname_parts[0]

    # POST requests to S3 may include a success_action_redirect or
    # success_action_status field, which should be used to redirect a
    # client to a new location.
    key = None
    if method == 'POST':
        key, redirect_url = multipart_content.find_multipart_key_value(data, headers)
        if key and redirect_url:
            response.status_code = 303
            response.headers['Location'] = expand_redirect_url(redirect_url, key, bucket_name)
            LOGGER.debug('S3 POST {} to {}'.format(response.status_code, response.headers['Location']))

        key, status_code = multipart_content.find_multipart_key_value(
            data, headers, 'success_action_status')
        if response.status_code == 200 and status_code == '201' and key:
            response.status_code = 201
            response._content = self.get_201_reponse(key, bucket_name)
            response.headers['Content-Length'] = str(len(response._content))
            response.headers['Content-Type'] = 'application/xml; charset=utf-8'
            return response

    parsed = urlparse.urlparse(path)
    bucket_name_in_host = headers['host'].startswith(bucket_name)
    should_send_notifications = all([
        method in ('PUT', 'POST', 'DELETE'),
        '/' in path[1:] or bucket_name_in_host,
        # check if this is an actual put object request, because it could also be
        # a put bucket request with a path like this: /bucket_name/
        bucket_name_in_host or (len(path[1:].split('/')) > 1 and len(path[1:].split('/')[1]) > 0),
        self.is_query_allowable(method, parsed.query),
    ])

    # get subscribers and send bucket notifications
    if should_send_notifications:
        # if we already have a good key, use it, otherwise examine the path
        if key:
            object_path = '/' + key
        elif bucket_name_in_host:
            object_path = parsed.path
        else:
            parts = parsed.path[1:].split('/', 1)
            object_path = parts[1] if parts[1][0] == '/' else '/%s' % parts[1]
        version_id = response.headers.get('x-amz-version-id', None)
        send_notifications(method, bucket_name, object_path, version_id)

    # publish event for creation/deletion of buckets:
    if method in ('PUT', 'DELETE') and ('/' not in path[1:] or len(path[1:].split('/')[1]) <= 0):
        event_type = (event_publisher.EVENT_S3_CREATE_BUCKET
            if method == 'PUT' else event_publisher.EVENT_S3_DELETE_BUCKET)
        event_publisher.fire_event(event_type, payload={'n': event_publisher.get_hash(bucket_name)})

    # fix an upstream issue in moto S3 (see https://github.com/localstack/localstack/issues/382)
    if method == 'PUT' and parsed.query == 'policy':
        response._content = ''
        response.status_code = 204
        return response

    # emulate ErrorDocument functionality if a website is configured
    if method == 'GET' and response.status_code == 404 and parsed.query != 'website':
        s3_client = aws_stack.connect_to_service('s3')
        try:
            # Verify the bucket exists in the first place--if not, we want normal processing of the 404
            s3_client.head_bucket(Bucket=bucket_name)
            website_config = s3_client.get_bucket_website(Bucket=bucket_name)
            error_doc_key = website_config.get('ErrorDocument', {}).get('Key')
            if error_doc_key:
                error_object = s3_client.get_object(Bucket=bucket_name, Key=error_doc_key)
                response.status_code = 200
                response._content = error_object['Body'].read()
                response.headers['content-length'] = str(len(response._content))
        except ClientError:
            # Pass on the 404 as usual
            pass

    if response:
        reset_content_length = False

        # append CORS headers and other annotations/patches to response
        append_cors_headers(bucket_name, request_method=method, request_headers=headers, response=response)
        append_last_modified_headers(response=response)
        append_list_objects_marker(method, path, data, response)
        fix_location_constraint(response)

        # Remove body from PUT response on presigned URL
        # https://github.com/localstack/localstack/issues/1317
        if method == 'PUT' and ('X-Amz-Security-Token=' in path or
                'X-Amz-Credential=' in path or 'AWSAccessKeyId=' in path):
            response._content = ''
            reset_content_length = True

        response_content_str = None
        try:
            response_content_str = to_str(response._content)
        except Exception:
            pass

        # Honor response header overrides
        # https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
        if method == 'GET':
            query_map = urlparse.parse_qs(parsed.query, keep_blank_values=True)
            for param_name, header_name in ALLOWED_HEADER_OVERRIDES.items():
                if param_name in query_map:
                    response.headers[header_name] = query_map[param_name][0]

        if response_content_str and response_content_str.startswith('<'):
            is_bytes = isinstance(response._content, six.binary_type)
            response._content = response_content_str

            append_last_modified_headers(response=response, content=response_content_str)

            # We need to un-pretty-print the XML, otherwise we run into this issue with Spark:
            # https://github.com/jserver/mock-s3/pull/9/files
            # https://github.com/localstack/localstack/issues/183
            # Note: yet, we need to make sure we have a newline after the first line: <?xml ...>\n
            # Note: make sure to return XML docs verbatim: https://github.com/localstack/localstack/issues/1037
            if method != 'GET' or not is_object_specific_request(path, headers):
                response._content = re.sub(r'([^\?])>\n\s*<', r'\1><',
                    response_content_str, flags=re.MULTILINE)

            # update Location information in response payload
            response._content = self._update_location(response._content, bucket_name)

            # convert back to bytes
            if is_bytes:
                response._content = to_bytes(response._content)

        # fix content-type: https://github.com/localstack/localstack/issues/618
        #   https://github.com/localstack/localstack/issues/549
        #   https://github.com/localstack/localstack/issues/854
        if 'text/html' in response.headers.get('Content-Type', '') \
                and not (response_content_str or '').lower().startswith('<!doctype html'):
            response.headers['Content-Type'] = 'application/xml; charset=utf-8'
            reset_content_length = True

        # update content-length headers (fix https://github.com/localstack/localstack/issues/541)
        if method == 'DELETE':
            reset_content_length = True
        if reset_content_length:
            response.headers['content-length'] = str(len(response._content))
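# Example (illustrative): the un-pretty-printing regex used above, applied to a
# small XML document. Note how the `[^\?]` group keeps the newline after the
# XML declaration (`?>\n`) intact while collapsing all other inter-tag whitespace.
import re

pretty = '<?xml version="1.0"?>\n<ListBucketResult>\n  <Name>test</Name>\n</ListBucketResult>'
compact = re.sub(r'([^\?])>\n\s*<', r'\1><', pretty, flags=re.MULTILINE)
assert compact == '<?xml version="1.0"?>\n<ListBucketResult><Name>test</Name></ListBucketResult>'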
def invoke_rest_api_integration(api_id, stage, integration, method, path, invocation_path, data,
        headers, resource_path, context=None, resource_id=None, response_templates=None):
    # note: use None defaults instead of mutable `{}` defaults, which would be shared across calls
    context = context or {}
    response_templates = response_templates or {}

    relative_path, query_string_params = extract_query_string_params(path=invocation_path)
    integration_type_orig = integration.get('type') or integration.get('integrationType') or ''
    integration_type = integration_type_orig.upper()
    uri = integration.get('uri') or integration.get('integrationUri') or ''

    if (uri.startswith('arn:aws:apigateway:') and ':lambda:path' in uri) or uri.startswith('arn:aws:lambda'):
        if integration_type in ['AWS', 'AWS_PROXY']:
            func_arn = uri
            if ':lambda:path' in uri:
                func_arn = uri.split(':lambda:path')[1].split('functions/')[1].split('/invocations')[0]

            try:
                path_params = extract_path_params(path=relative_path, extracted_path=resource_path)
            except Exception:
                path_params = {}

            # apply custom request template
            data_str = data
            try:
                data_str = json.dumps(data) if isinstance(data, (dict, list)) else to_str(data)
                data_str = apply_template(integration, 'request', data_str,
                    path_params=path_params, query_params=query_string_params, headers=headers)
            except Exception:
                pass

            # Sample request context:
            # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-create-api-as-simple-proxy-for-lambda.html#api-gateway-create-api-as-simple-proxy-for-lambda-test
            request_context = get_lambda_event_request_context(method, path, data, headers,
                integration_uri=uri, resource_id=resource_id)
            stage_variables = get_stage_variables(api_id, stage)

            result = lambda_api.process_apigateway_invocation(func_arn, relative_path, data_str,
                stage, api_id, headers, path_params=path_params,
                query_string_params=query_string_params, method=method,
                resource_path=resource_path, request_context=request_context,
                event_context=context, stage_variables=stage_variables)

            if isinstance(result, FlaskResponse):
                response = flask_to_requests_response(result)
            elif isinstance(result, Response):
                response = result
            else:
                response = LambdaResponse()
                parsed_result = result if isinstance(result, dict) else json.loads(str(result or '{}'))
                parsed_result = common.json_safe(parsed_result)
                parsed_result = {} if parsed_result is None else parsed_result
                response.status_code = int(parsed_result.get('statusCode', 200))
                parsed_headers = parsed_result.get('headers', {})
                if parsed_headers is not None:
                    response.headers.update(parsed_headers)
                try:
                    if isinstance(parsed_result['body'], dict):
                        response._content = json.dumps(parsed_result['body'])
                    else:
                        response._content = to_bytes(parsed_result['body'])
                except Exception:
                    response._content = '{}'
                update_content_length(response)
                response.multi_value_headers = parsed_result.get('multiValueHeaders') or {}

            # apply custom response template
            response._content = apply_template(integration, 'response', response._content)
            response.headers['Content-Length'] = str(len(response.content or ''))
            return response

        msg = 'API Gateway %s integration action "%s", method "%s" not yet implemented' % (
            integration_type, uri, method)
        LOGGER.warning(msg)
        return make_error_response(msg, 404)

    elif integration_type == 'AWS':
        if 'kinesis:action/' in uri:
            if uri.endswith('kinesis:action/PutRecord'):
                target = kinesis_listener.ACTION_PUT_RECORD
            if uri.endswith('kinesis:action/PutRecords'):
                target = kinesis_listener.ACTION_PUT_RECORDS
            if uri.endswith('kinesis:action/ListStreams'):
                target = kinesis_listener.ACTION_LIST_STREAMS

            template = integration['requestTemplates'][APPLICATION_JSON]
            new_request = aws_stack.render_velocity_template(template, data)

            # forward records to target kinesis stream
            headers = aws_stack.mock_aws_request_headers(service='kinesis')
            headers['X-Amz-Target'] = target
            result = common.make_http_request(url=TEST_KINESIS_URL,
                method='POST', data=new_request, headers=headers)
            # TODO apply response template..?
            return result

        elif 'states:action/' in uri:
            if uri.endswith('states:action/StartExecution'):
                action = 'StartExecution'
            decoded_data = data.decode()
            payload = {}
            if 'stateMachineArn' in decoded_data and 'input' in decoded_data:
                payload = json.loads(decoded_data)
            elif APPLICATION_JSON in integration.get('requestTemplates', {}):
                template = integration['requestTemplates'][APPLICATION_JSON]
                payload = aws_stack.render_velocity_template(template, data, as_json=True)
            client = aws_stack.connect_to_service('stepfunctions')

            kwargs = {'name': payload['name']} if 'name' in payload else {}
            result = client.start_execution(stateMachineArn=payload['stateMachineArn'],
                input=payload['input'], **kwargs)
            response = requests_response(
                content={
                    'executionArn': result['executionArn'],
                    'startDate': str(result['startDate']),
                },
                headers=aws_stack.mock_aws_request_headers())
            return response

        if method == 'POST':
            if uri.startswith('arn:aws:apigateway:') and ':sqs:path' in uri:
                template = integration['requestTemplates'][APPLICATION_JSON]
                account_id, queue = uri.split('/')[-2:]
                region_name = uri.split(':')[3]
                new_request = '%s&QueueName=%s' % (aws_stack.render_velocity_template(template, data), queue)
                headers = aws_stack.mock_aws_request_headers(service='sqs', region_name=region_name)
                url = urljoin(TEST_SQS_URL, '%s/%s' % (TEST_AWS_ACCOUNT_ID, queue))
                result = common.make_http_request(url, method='POST', headers=headers, data=new_request)
                return result

        msg = 'API Gateway AWS integration action URI "%s", method "%s" not yet implemented' % (uri, method)
        LOGGER.warning(msg)
        return make_error_response(msg, 404)

    elif integration_type == 'AWS_PROXY':
        if uri.startswith('arn:aws:apigateway:') and ':dynamodb:action' in uri:
            # arn:aws:apigateway:us-east-1:dynamodb:action/PutItem&Table=MusicCollection
            table_name = uri.split(':dynamodb:action')[1].split('&Table=')[1]
            action = uri.split(':dynamodb:action')[1].split('&Table=')[0]

            if 'PutItem' in action and method == 'PUT':
                response_template = response_templates.get('application/json', None)

                if response_template is None:
                    msg = 'Invalid response template defined in integration response.'
                    return make_error_response(msg, 404)

                response_template = json.loads(response_template)
                if response_template['TableName'] != table_name:
                    msg = 'Invalid table name specified in integration response template.'
                    return make_error_response(msg, 404)

                dynamo_client = aws_stack.connect_to_resource('dynamodb')
                table = dynamo_client.Table(table_name)

                event_data = {}
                data_dict = json.loads(data)
                for key, _ in response_template['Item'].items():
                    event_data[key] = data_dict[key]

                table.put_item(Item=event_data)
                response = requests_response(event_data, headers=aws_stack.mock_aws_request_headers())
                return response
        else:
            msg = 'API Gateway action uri "%s", integration type %s not yet implemented' % (
                uri, integration_type)
            LOGGER.warning(msg)
            return make_error_response(msg, 404)

    elif integration_type in ['HTTP_PROXY', 'HTTP']:
        function = getattr(requests, method.lower())

        # apply custom request template
        data = apply_template(integration, 'request', data)
        if isinstance(data, dict):
            data = json.dumps(data)
        result = function(uri, data=data, headers=headers)

        # apply custom response template to the backend response (not to the request data)
        result._content = apply_template(integration, 'response', result.content)
        update_content_length(result)
        return result

    elif integration_type == 'MOCK':
        # TODO: add logic for MOCK responses
        pass

    if method == 'OPTIONS':
        # fall back to returning CORS headers if this is an OPTIONS request
        return get_cors_response(headers)

    msg = ('API Gateway integration type "%s", method "%s", URI "%s" not yet implemented' %
        (integration_type, method, uri))
    LOGGER.warning(msg)
    return make_error_response(msg, 404)
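# Example (illustrative): how the SQS integration above rewrites the rendered
# request and derives the queue endpoint from the integration URI. The account
# id, queue name, rendered template, and endpoint below are made-up sample values.
from urllib.parse import urljoin

uri = 'arn:aws:apigateway:us-east-1:sqs:path/000000000000/test-queue'
account_id, queue = uri.split('/')[-2:]
region_name = uri.split(':')[3]
rendered_template = 'Action=SendMessage&MessageBody=hello'
new_request = '%s&QueueName=%s' % (rendered_template, queue)
url = urljoin('http://localhost:4566/', '%s/%s' % (account_id, queue))
assert url == 'http://localhost:4566/000000000000/test-queue'
assert region_name == 'us-east-1'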