def test_send_email_can_retrospect(self, ses_client):
    """Verify a sent email is persisted to disk and retrievable via the internal emails API."""
    data_dir = config.dirs.data or config.dirs.tmp
    sender = f"user-{short_uid()}@example.com"
    ses_client.verify_email_address(EmailAddress=sender)
    send_result = ses_client.send_email(
        Source=sender,
        Message={
            "Subject": {"Data": "A_SUBJECT"},
            "Body": {
                "Text": {"Data": "A_MESSAGE"},
                "Html": {"Data": "A_HTML"},
            },
        },
        Destination={"ToAddresses": ["*****@*****.**"]},
    )
    message_id = send_result["MessageId"]

    # the message must have been saved as <data_dir>/ses/<message_id>.json
    with open(os.path.join(data_dir, "ses", message_id + ".json"), "r") as f:
        contents = json.loads(f.read())
    assert contents["Source"] == sender
    assert contents["Subject"] == "A_SUBJECT"
    assert contents["Body"] == {"text_part": "A_MESSAGE", "html_part": "A_HTML"}
    assert contents["Destination"]["ToAddresses"] == ["*****@*****.**"]

    # the same message must be returned by the internal emails endpoint
    emails_url = config.get_edge_url() + INTERNAL_RESOURCE_PATH + EMAILS_ENDPOINT
    api_contents = {msg["Id"]: msg for msg in requests.get(emails_url).json()["messages"]}
    assert len(api_contents) >= 1
    assert message_id in api_contents
    assert api_contents[message_id] == contents

    # Ensure messages can be filtered by email source
    filtered_url = (
        config.get_edge_url() + INTERNAL_RESOURCE_PATH + EMAILS_ENDPOINT + "[email protected]"
    )
    assert len(requests.get(filtered_url).json()["messages"]) == 0
    filtered_url = (
        config.get_edge_url() + INTERNAL_RESOURCE_PATH + EMAILS_ENDPOINT + f"?email={sender}"
    )
    assert len(requests.get(filtered_url).json()["messages"]) == 1
def test_get_records(self, kinesis_client, kinesis_create_stream, wait_for_stream_ready):
    """GetRecords must return the same record data for JSON and CBOR wire encodings."""
    stream_name = "test-%s" % short_uid()
    kinesis_create_stream(StreamName=stream_name, ShardCount=1)
    wait_for_stream_ready(stream_name)
    kinesis_client.put_records(
        StreamName=stream_name,
        Records=[{"Data": "SGVsbG8gd29ybGQ=", "PartitionKey": "1"}],
    )

    # fetch the record using the default JSON protocol
    shard_iterator = self._get_shard_iterator(stream_name, kinesis_client)
    json_records = kinesis_client.get_records(ShardIterator=shard_iterator).get("Records")
    assert len(json_records) == 1
    assert "Data" in json_records[0]

    # fetch the same record again, this time over the CBOR protocol
    shard_iterator = self._get_shard_iterator(stream_name, kinesis_client)
    headers = aws_stack.mock_aws_request_headers("kinesis")
    headers["Content-Type"] = constants.APPLICATION_AMZ_CBOR_1_1
    headers["X-Amz-Target"] = "Kinesis_20131202.GetRecords"
    payload = cbor2.dumps({"ShardIterator": shard_iterator})
    response = requests.post(config.get_edge_url(), payload, headers=headers)
    assert response.status_code == 200
    cbor_response = cbor2.loads(response.content)

    # both encodings must agree on the record attributes
    attrs = ("Data", "EncryptionType", "PartitionKey", "SequenceNumber")
    assert select_attributes(json_records[0], attrs) == select_attributes(
        cbor_response["Records"][0], attrs
    )
def test_response_content_type(self):
    """STS responses default to XML but switch to JSON when the Accept header asks for it."""
    url = config.get_edge_url()
    data = {"Action": "GetCallerIdentity", "Version": "2011-06-15"}

    # default behavior: XML payload, which must not parse as JSON
    headers = aws_stack.mock_aws_request_headers("sts")
    response = requests.post(url, data=data, headers=headers)
    assert response
    raw_xml = to_str(response.content)
    with pytest.raises(json.decoder.JSONDecodeError):
        json.loads(raw_xml)
    content1 = xmltodict.parse(raw_xml)
    content1_result = content1["GetCallerIdentityResponse"]["GetCallerIdentityResult"]
    assert content1_result["Account"] == TEST_AWS_ACCOUNT_ID

    # JSON payload when "Accept: application/json" is sent
    headers = aws_stack.mock_aws_request_headers("sts")
    headers["Accept"] = APPLICATION_JSON
    response = requests.post(url, data=data, headers=headers)
    assert response
    content2 = json.loads(to_str(response.content))
    content2_result = content2["GetCallerIdentityResponse"]["GetCallerIdentityResult"]
    assert content2_result["Account"] == TEST_AWS_ACCOUNT_ID

    # apart from the response metadata, both representations must be equivalent
    content1.get("GetCallerIdentityResponse", {}).pop("ResponseMetadata", None)
    content2.get("GetCallerIdentityResponse", {}).pop("ResponseMetadata", None)
    assert strip_xmlns(content1) == content2
def test_put_metric_data_gzip(self):
    """PutMetricData must accept a gzip-compressed, form-encoded request body."""
    metric_name = "test-metric"
    namespace = "namespace"
    raw_body = (
        "Action=PutMetricData&MetricData.member.1."
        "MetricName=%s&MetricData.member.1.Value=1&"
        "Namespace=%s&Version=2010-08-01" % (metric_name, namespace)
    )
    encoded_data = gzip.compress(bytes(raw_body, encoding="utf-8"))
    url = config.get_edge_url()
    headers = aws_stack.mock_aws_request_headers("cloudwatch")
    # hard-coded SigV4 header captured from a recorded SDK request
    authorization = (
        "AWS4-HMAC-SHA256 Credential=test/20201230/"
        "us-east-1/monitoring/aws4_request, "
        "SignedHeaders=content-encoding;host;"
        "x-amz-content-sha256;x-amz-date, Signature="
        "bb31fc5f4e58040ede9ed751133fe"
        "839668b27290bc1406b6ffadc4945c705dc"
    )
    headers.update(
        {
            "Content-Type": "application/x-www-form-urlencoded; charset=utf-8",
            "Content-Length": len(encoded_data),
            "Content-Encoding": "GZIP",
            "User-Agent": "aws-sdk-nodejs/2.819.0 linux/v12.18.2 callback",
            "Authorization": authorization,
        }
    )
    urlopen(Request(url, encoded_data, headers, method="POST"))

    # the metric must now be listed under the given namespace
    client = aws_stack.connect_to_service("cloudwatch")
    rs = client.list_metrics(Namespace=namespace, MetricName=metric_name)
    self.assertEqual(1, len(rs["Metrics"]))
    self.assertEqual(namespace, rs["Metrics"][0]["Namespace"])
def run_process_as_sudo(component, port, asynchronous=False, env_vars=None):
    """Run this module as a sudo subprocess for the given component and privileged port.

    :param component: component name, passed as CLI argument to this file
    :param port: privileged port the started service should bind to
    :param asynchronous: if True, run the command in a background thread and return the thread
    :param env_vars: optional additional environment variables (the dict is not mutated)
    :return: thread handle if asynchronous, command result otherwise; None if sudo is unavailable
    """
    # make sure we can run sudo commands
    try:
        ensure_can_use_sudo()
    except Exception as e:
        LOG.error("cannot start service on privileged port %s: %s", port, str(e))
        return

    # prepare environment - copy the dict, to avoid mutating the caller's argument
    env_vars = dict(env_vars or {})
    env_vars["PYTHONPATH"] = f".:{LOCALSTACK_ROOT_FOLDER}"
    env_vars["EDGE_FORWARD_URL"] = config.get_edge_url()
    env_vars["EDGE_BIND_HOST"] = config.EDGE_BIND_HOST
    env_vars_str = env_vars_to_string(env_vars)

    # start the process as sudo
    sudo_cmd = "sudo -n"
    python_cmd = sys.executable
    cmd = [
        sudo_cmd,
        env_vars_str,
        python_cmd,
        __file__,
        component,
        str(port),
    ]
    shell_cmd = " ".join(cmd)

    def run_command(*_):
        run(shell_cmd, outfile=subprocess.PIPE, print_error=False, env_vars=env_vars)

    LOG.debug("Running command as sudo: %s", shell_cmd)
    result = start_thread(run_command, quiet=True) if asynchronous else run_command()
    return result
def test_put_metric_data_gzip(self, cloudwatch_client):
    """PutMetricData must accept a gzip-compressed, form-encoded request body."""
    metric_name = "test-metric"
    namespace = "namespace"
    raw_body = (
        "Action=PutMetricData&MetricData.member.1."
        "MetricName=%s&MetricData.member.1.Value=1&"
        "Namespace=%s&Version=2010-08-01" % (metric_name, namespace)
    )
    encoded_data = gzip.compress(bytes(raw_body, encoding="utf-8"))
    url = config.get_edge_url()
    headers = aws_stack.mock_aws_request_headers("cloudwatch")
    authorization = aws_stack.mock_aws_request_headers("monitoring")["Authorization"]
    headers.update(
        {
            "Content-Type": "application/x-www-form-urlencoded; charset=utf-8",
            "Content-Length": len(encoded_data),
            "Content-Encoding": "GZIP",
            "User-Agent": "aws-sdk-nodejs/2.819.0 linux/v12.18.2 callback",
            "Authorization": authorization,
        }
    )
    urlopen(Request(url, encoded_data, headers, method="POST"))

    # the metric must now be listed under the given namespace
    rs = cloudwatch_client.list_metrics(Namespace=namespace, MetricName=metric_name)
    assert len(rs["Metrics"]) == 1
    assert rs["Metrics"][0]["Namespace"] == namespace
def test_put_metric_data_gzip(self):
    """PutMetricData must accept a gzip-compressed, form-encoded request body."""
    metric_name = 'test-metric'
    namespace = 'namespace'
    raw_body = (
        'Action=PutMetricData&MetricData.member.1.'
        'MetricName=%s&MetricData.member.1.Value=1&'
        'Namespace=%s&Version=2010-08-01' % (metric_name, namespace)
    )
    encoded_data = gzip.compress(bytes(raw_body, encoding='utf-8'))
    url = config.get_edge_url()
    headers = aws_stack.mock_aws_request_headers('cloudwatch')
    # hard-coded SigV4 header captured from a recorded SDK request
    authorization = (
        'AWS4-HMAC-SHA256 Credential=test/20201230/'
        'us-east-1/monitoring/aws4_request, '
        'SignedHeaders=content-encoding;host;'
        'x-amz-content-sha256;x-amz-date, Signature='
        'bb31fc5f4e58040ede9ed751133fe'
        '839668b27290bc1406b6ffadc4945c705dc'
    )
    headers.update({
        'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
        'Content-Length': len(encoded_data),
        'Content-Encoding': 'GZIP',
        'User-Agent': 'aws-sdk-nodejs/2.819.0 linux/v12.18.2 callback',
        'Authorization': authorization,
    })
    urlopen(Request(url, encoded_data, headers, method='POST'))

    # the metric must now be listed under the given namespace
    client = aws_stack.connect_to_service('cloudwatch')
    rs = client.list_metrics(Namespace=namespace, MetricName=metric_name)
    self.assertEqual(len(rs['Metrics']), 1)
    self.assertEqual(rs['Metrics'][0]['Namespace'], namespace)
def run_process_as_sudo(component, port, asynchronous=False):
    """Start this module under sudo so it can bind the given privileged port."""
    # bail out early if passwordless sudo is not available
    try:
        ensure_can_use_sudo()
    except Exception as e:
        LOG.error("cannot start service on privileged port %s: %s", port, str(e))
        return

    # assemble the sudo command line, forwarding edge configuration via env variables
    cmd = "%sPYTHONPATH=.:%s EDGE_FORWARD_URL=%s EDGE_BIND_HOST=%s %s %s %s %s" % (
        "sudo -n ",
        LOCALSTACK_ROOT_FOLDER,
        config.get_edge_url(),
        config.EDGE_BIND_HOST,
        sys.executable,
        __file__,
        component,
        port,
    )

    def run_command(*_):
        run(cmd, outfile=subprocess.PIPE, print_error=False)

    # either fire-and-forget in a background thread, or block until the command returns
    return start_thread(run_command, quiet=True) if asynchronous else run_command()
def cmd_status_services(format):
    """Print the status of all LocalStack services in the requested output format.

    :param format: one of "table", "plain", "dict", "json"
    """
    import requests

    from localstack import config

    url = config.get_edge_url()
    try:
        health = requests.get(f"{url}/health", timeout=2)
        doc = health.json()
        # fix: default must be a dict - the "plain" branch below iterates services.items()
        services = doc.get("services", {})
        if format == "table":
            print_service_table(services)
        elif format == "plain":
            for service, status in services.items():
                console.print(f"{service}={status}")
        elif format == "dict":
            console.print(services)
        elif format == "json":
            console.print(json.dumps(services))
    except requests.ConnectionError:
        error = f"could not connect to LocalStack health endpoint at {url}"
        print_error(format, error)
        if config.DEBUG:
            console.print_exception()
        sys.exit(1)
def test_cdk_bootstrap_redeploy(self, is_change_set_finished, cleanup_stacks, cleanup_changesets):
    """Test that simulates a sequence of commands executed by CDK when running 'cdk bootstrap' twice"""
    base_folder = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")
    requests_file = os.path.join(base_folder, "files", "cdk-bootstrap-requests.json")
    operations = json.loads(load_file(requests_file))
    change_set_name = "cdk-deploy-change-set-a4b98b18"
    stack_name = "CDKToolkit-a4b98b18"
    try:
        headers = aws_stack.mock_aws_request_headers("cloudformation")
        base_url = config.get_edge_url()
        # replay every recorded request against the edge endpoint
        for operation in operations:
            payload = operation["data"]
            requests.request(
                method=operation["method"],
                url=f"{base_url}{operation['path']}",
                headers=headers,
                data=payload,
            )
            # whenever a change set gets executed, wait until it has finished
            if "Action=ExecuteChangeSet" in payload:
                assert wait_until(
                    is_change_set_finished(change_set_name), _max_wait=20, strategy="linear"
                )
    finally:
        # clean up
        cleanup_changesets([change_set_name])
        cleanup_stacks([stack_name])
def record_service_health(api, status):
    """Report the given API's health status to the local edge /health endpoint."""
    health_url = '%s/health' % config.get_edge_url()
    payload = json.dumps({api: status})
    try:
        requests.put(health_url, data=payload, verify=False)
    except Exception:
        # ignore for now, if the service is not running
        pass
def record_service_health(api, status):
    """Report the given API's health status to the local edge /health endpoint."""
    # TODO: consider making in-memory calls here, to optimize performance
    health_url = '%s/health' % config.get_edge_url()
    payload = json.dumps({api: status})
    try:
        requests.put(health_url, data=payload, verify=False)
    except Exception:
        # ignore for now, if the service is not running
        pass
def test_start_wait_stop(self, runner, container_client):
    """The CLI can start LocalStack detached, wait for readiness, and stop it again."""
    result = runner.invoke(cli, ["start", "-d"])
    assert result.exit_code == 0
    assert "starting LocalStack" in result.output

    result = runner.invoke(cli, ["wait", "-t", "60"])
    assert result.exit_code == 0
    assert container_client.is_container_running(
        config.MAIN_CONTAINER_NAME
    ), "container name was not running after wait"

    # the health endpoint must be reachable while the container is up
    health = requests.get(get_edge_url() + "/health")
    assert health.ok, "health request did not return OK: %s" % health.text

    result = runner.invoke(cli, ["stop"])
    assert result.exit_code == 0

    # ... and unreachable once the container has been stopped
    with pytest.raises(requests.ConnectionError):
        requests.get(get_edge_url() + "/health")
def test_expiration_date_format(self):
    """The Expiration field of GetSessionToken must be numeric in JSON responses."""
    payload = {"Action": "GetSessionToken", "Version": "2011-06-15"}
    headers = aws_stack.mock_aws_request_headers("sts")
    headers["Accept"] = APPLICATION_JSON
    response = requests.post(config.get_edge_url(), data=payload, headers=headers)
    assert response
    content = json.loads(to_str(response.content))
    # Expiration field should be numeric (tested against AWS)
    result = content["GetSessionTokenResponse"]["GetSessionTokenResult"]
    assert is_number(result["Credentials"]["Expiration"])
def test_request_with_custom_host_header(self):
    """Lambda requests must succeed regardless of the hostname/port in the Host header."""
    url = config.get_edge_url()
    headers = aws_stack.mock_aws_request_headers("lambda")
    # using a simple for-loop here (instead of pytest parametrization), for simplicity
    for host in ["localhost", "example.com"]:
        for port in ["", ":123", f":{config.EDGE_PORT}"]:
            headers["Host"] = f"{host}{port}"
            response = requests.get(f"{url}/2015-03-31/functions", headers=headers)
            assert response
            assert "Functions" in json.loads(to_str(response.content))
def test_invoke_apis_via_edge(self):
    """Smoke-test each enabled service API through the edge endpoint."""
    edge_url = config.get_edge_url()

    if is_api_enabled("s3"):
        self._invoke_s3_via_edge(edge_url)
        self._invoke_s3_via_edge_multipart_form(edge_url)
    if is_api_enabled("kinesis"):
        self._invoke_kinesis_via_edge(edge_url)
    # fix: the DynamoDB invocation was guarded by is_api_enabled("dynamodbstreams")
    # (duplicated from the next check) instead of the "dynamodb" flag
    if is_api_enabled("dynamodb"):
        self._invoke_dynamodb_via_edge_go_sdk(edge_url)
    if is_api_enabled("dynamodbstreams"):
        self._invoke_dynamodbstreams_via_edge(edge_url)
    if is_api_enabled("firehose"):
        self._invoke_firehose_via_edge(edge_url)
    if is_api_enabled("stepfunctions"):
        self._invoke_stepfunctions_via_edge(edge_url)
def cmd_status_services():
    """Print a table with the status of all LocalStack services."""
    import requests

    from localstack import config

    url = config.get_edge_url()
    try:
        doc = requests.get(f"{url}/health").json()
        print_service_table(doc.get("services", []))
    except requests.ConnectionError:
        # health endpoint unreachable - print an error and exit non-zero
        err = "[bold][red]:heavy_multiplication_x: ERROR[/red][/bold]"
        console.print(f"{err}: could not connect to LocalStack health endpoint at {url}")
        if config.DEBUG:
            console.print_exception()
        sys.exit(1)
def test_get_records(self):
    """GetRecords must return the same record data for JSON and CBOR wire encodings."""
    client = aws_stack.create_external_boto_client("kinesis")
    stream_name = "test-%s" % short_uid()
    client.create_stream(StreamName=stream_name, ShardCount=1)
    sleep(1.5)
    client.put_records(
        StreamName=stream_name,
        Records=[{"Data": "SGVsbG8gd29ybGQ=", "PartitionKey": "1"}],
    )

    # fetch the record using the default JSON protocol
    iterator = self._get_shard_iterator(stream_name)
    json_records = client.get_records(ShardIterator=iterator).get("Records")
    self.assertEqual(1, len(json_records))
    self.assertIn("Data", json_records[0])

    # fetch the same record again, this time over the CBOR protocol
    iterator = self._get_shard_iterator(stream_name)
    headers = aws_stack.mock_aws_request_headers("kinesis")
    headers["Content-Type"] = constants.APPLICATION_AMZ_CBOR_1_1
    headers["X-Amz-Target"] = "Kinesis_20131202.GetRecords"
    payload = cbor2.dumps({"ShardIterator": iterator})
    result = requests.post(config.get_edge_url(), payload, headers=headers)
    self.assertEqual(200, result.status_code)
    decoded = cbor2.loads(result.content)

    # both encodings must agree on the record attributes
    attrs = ("Data", "EncryptionType", "PartitionKey", "SequenceNumber")
    self.assertEqual(
        select_attributes(json_records[0], attrs),
        select_attributes(decoded["Records"][0], attrs),
    )

    # clean up
    client.delete_stream(StreamName=stream_name)
def test_disable_cors_headers(self, monkeypatch):
    """Test DISABLE_CORS_CHECKS=1 (most restrictive setting, not sending any CORS headers)"""
    headers = aws_stack.mock_aws_request_headers("sns")
    headers["Origin"] = "https://app.localstack.cloud"
    url = config.get_edge_url()
    data = {"Action": "ListTopics", "Version": "2010-03-31"}

    # by default, the allowed origin is echoed back with the CORS headers
    response = requests.post(url, headers=headers, data=data)
    assert response.status_code == 200
    assert response.headers["access-control-allow-origin"] == headers["Origin"]
    assert "authorization" in response.headers["access-control-allow-headers"].lower()
    assert "GET" in response.headers["access-control-allow-methods"].split(",")
    assert "<ListTopicsResponse" in to_str(response.content)

    # with DISABLE_CORS_HEADERS set, the request still succeeds but carries no CORS headers
    monkeypatch.setattr(config, "DISABLE_CORS_HEADERS", True)
    response = requests.post(url, headers=headers, data=data)
    assert response.status_code == 200
    assert "<ListTopicsResponse" in to_str(response.content)
    for header_name in (
        "access-control-allow-headers",
        "access-control-allow-methods",
        "access-control-allow-origin",
        "access-control-allow-credentials",
    ):
        assert not response.headers.get(header_name)
def authenticate_presign_url(method, path, headers, data=None):
    """Validate the signature (v2 or v4) of a presigned S3 URL request.

    :param method: HTTP method of the incoming request
    :param path: request path including the query string
    :param headers: incoming request headers
    :param data: optional request body
    :return: an error response object if signature validation fails, else None
    """
    url = "{}{}".format(config.get_edge_url(), path)
    parsed = urlparse.urlparse(url)
    query_params = parse_qs(parsed.query)
    forwarded_for = get_forwarded_for_host(headers)
    if forwarded_for:
        url = re.sub("://[^/]+", "://%s" % forwarded_for, url)
    LOGGER.debug("Received presign S3 URL: %s", url)
    sign_headers = {}
    query_string = {}
    # fix: initialize response - previously a request carrying neither v2 nor v4
    # signature parameters would hit the "response is not None" check unbound
    response = None

    is_v2 = all(p in query_params for p in SIGNATURE_V2_PARAMS)
    is_v4 = all(p in query_params for p in SIGNATURE_V4_PARAMS)

    # Add overrided headers to the query string params
    for param_name, header_name in ALLOWED_HEADER_OVERRIDES.items():
        if param_name in query_params:
            query_string[param_name] = query_params[param_name][0]

    # Request's headers are more essentials than the query parameters in the request.
    # Different values of header in the header of the request and in the query parameter of the
    # request URL will fail the signature calulation. As per the AWS behaviour
    # Add valid headers into the sign_header. Skip the overrided headers
    # and the headers which have been sent in the query string param
    presign_params_lower = (
        [p.lower() for p in SIGNATURE_V4_PARAMS]
        if is_v4
        else [p.lower() for p in SIGNATURE_V2_PARAMS]
    )
    params_header_override = [
        param_name for param_name, header_name in ALLOWED_HEADER_OVERRIDES.items()
    ]
    if len(query_params) > 2:
        for key in query_params:
            key_lower = key.lower()
            if key_lower not in presign_params_lower:
                if (
                    key_lower not in (header[0].lower() for header in headers)
                    and key_lower not in params_header_override
                ):
                    if key_lower in (
                        allowed_param.lower() for allowed_param in ALLOWED_QUERY_PARAMS
                    ):
                        query_string[key] = query_params[key][0]
                    elif key_lower in (
                        blacklisted_header.lower() for blacklisted_header in BLACKLISTED_HEADERS
                    ):
                        pass
                    else:
                        query_string[key] = query_params[key][0]

    for header_name, header_value in headers.items():
        header_name_lower = header_name.lower()
        if header_name_lower.startswith("x-amz-") or header_name_lower.startswith("content-"):
            if is_v2 and header_name_lower in query_params:
                sign_headers[header_name] = header_value
            if is_v4 and header_name_lower in query_params["X-Amz-SignedHeaders"][0]:
                sign_headers[header_name] = header_value

    # Preparnig dictionary of request to build AWSRequest's object of the botocore
    request_url = "{}://{}{}".format(parsed.scheme, parsed.netloc, parsed.path)
    # Fix https://github.com/localstack/localstack/issues/3912
    # urlencode method replaces white spaces with plus sign cause signature calculation to fail
    query_string_encoded = (
        urlencode(query_string, quote_via=urlparse.quote, safe=" ") if query_string else None
    )
    request_url = "%s?%s" % (request_url, query_string_encoded) if query_string else request_url
    if forwarded_for:
        request_url = re.sub("://[^/]+", "://%s" % forwarded_for, request_url)

    bucket_name = extract_bucket_name(headers, parsed.path)

    request_dict = {
        "url_path": parsed.path,
        "query_string": query_string,
        "method": method,
        "headers": sign_headers,
        "body": b"",
        "url": request_url,
        "context": {
            "is_presign_request": True,
            "use_global_endpoint": True,
            "signing": {"bucket": bucket_name},
        },
    }

    # Support for virtual host addressing style in signature version 2
    # We don't need to do this in v4 as we already concerting it to the virtual addressing style.
    # v2 require path base styled request_dict and v4 require virtual styled request_dict
    if uses_host_addressing(headers) and is_v2:
        request_dict["url_path"] = "/{}{}".format(bucket_name, request_dict["url_path"])
        parsed_url = urlparse.urlparse(request_url)
        request_dict["url"] = "{}://{}:{}{}".format(
            parsed_url.scheme,
            S3_VIRTUAL_HOSTNAME,
            config.EDGE_PORT,
            request_dict["url_path"],
        )
        request_dict["url"] = (
            "%s?%s" % (request_dict["url"], query_string_encoded)
            if query_string
            else request_dict["url"]
        )

    # dispatch to the matching signature version; partial parameter sets are rejected
    if not is_v2 and any(p in query_params for p in SIGNATURE_V2_PARAMS):
        response = requests_error_response_xml_signature_calculation(
            code=403,
            message="Query-string authentication requires the Signature, Expires and AWSAccessKeyId parameters",
            code_string="AccessDenied",
        )
    elif is_v2 and not is_v4:
        response = authenticate_presign_url_signv2(
            method, path, headers, data, url, query_params, request_dict
        )

    if not is_v4 and any(p in query_params for p in SIGNATURE_V4_PARAMS):
        response = requests_error_response_xml_signature_calculation(
            code=403,
            message="Query-string authentication requires the X-Amz-Algorithm, \
X-Amz-Credential, X-Amz-Date, X-Amz-Expires, \
X-Amz-SignedHeaders and X-Amz-Signature parameters.",
            code_string="AccessDenied",
        )
    elif is_v4 and not is_v2:
        response = authenticate_presign_url_signv4(
            method, path, headers, data, url, query_params, request_dict
        )

    if response is not None:
        LOGGER.info("Presign signature calculation failed: %s", response)
        return response
    LOGGER.debug("Valid presign url.")
def test_get_metric_data(self, cloudwatch_client):
    """get_metric_data must aggregate values within, and only within, the queried time range."""
    cloudwatch_client.put_metric_data(
        Namespace="some/thing", MetricData=[dict(MetricName="someMetric", Value=23)]
    )
    cloudwatch_client.put_metric_data(
        Namespace="some/thing", MetricData=[dict(MetricName="someMetric", Value=18)]
    )
    cloudwatch_client.put_metric_data(
        Namespace="ug/thing", MetricData=[dict(MetricName="ug", Value=23)]
    )

    queries = [
        {
            "Id": "some",
            "MetricStat": {
                "Metric": {"Namespace": "some/thing", "MetricName": "someMetric"},
                "Period": 60,
                "Stat": "Sum",
            },
        },
        {
            "Id": "part",
            "MetricStat": {
                "Metric": {"Namespace": "ug/thing", "MetricName": "ug"},
                "Period": 60,
                "Stat": "Sum",
            },
        },
    ]

    # querying the last hour must return the sums of the values recorded above
    response = cloudwatch_client.get_metric_data(
        MetricDataQueries=queries,
        StartTime=datetime.utcnow() - timedelta(hours=1),
        EndTime=datetime.utcnow(),
    )
    assert len(response["MetricDataResults"]) == 2
    for data_metric in response["MetricDataResults"]:
        if data_metric["Id"] == "some":
            assert data_metric["Values"][0] == 41.0
        if data_metric["Id"] == "part":
            assert data_metric["Values"][0] == 23.0

    # a future time interval must contain no data points
    response = cloudwatch_client.get_metric_data(
        MetricDataQueries=queries,
        StartTime=datetime.utcnow() + timedelta(hours=1),
        EndTime=datetime.utcnow() + timedelta(hours=2),
    )
    for data_metric in response["MetricDataResults"]:
        if data_metric["Id"] == "some":
            assert len(data_metric["Values"]) == 0
        if data_metric["Id"] == "part":
            assert len(data_metric["Values"]) == 0

    # the internal raw-metrics endpoint must expose at least the three data points
    url = "%s%s" % (config.get_edge_url(), PATH_GET_RAW_METRICS)
    result = requests.get(url)
    assert result.status_code == 200
    result = json.loads(to_str(result.content))
    assert len(result["metrics"]) >= 3
def test_get_metric_data(self):
    """get_metric_data must aggregate values within, and only within, the queried time range."""
    conn = aws_stack.connect_to_service("cloudwatch")
    conn.put_metric_data(
        Namespace="some/thing", MetricData=[dict(MetricName="someMetric", Value=23)]
    )
    conn.put_metric_data(
        Namespace="some/thing", MetricData=[dict(MetricName="someMetric", Value=18)]
    )
    conn.put_metric_data(Namespace="ug/thing", MetricData=[dict(MetricName="ug", Value=23)])

    queries = [
        {
            "Id": "some",
            "MetricStat": {
                "Metric": {"Namespace": "some/thing", "MetricName": "someMetric"},
                "Period": 60,
                "Stat": "Sum",
            },
        },
        {
            "Id": "part",
            "MetricStat": {
                "Metric": {"Namespace": "ug/thing", "MetricName": "ug"},
                "Period": 60,
                "Stat": "Sum",
            },
        },
    ]

    # querying the last hour must return the sums of the values recorded above
    response = conn.get_metric_data(
        MetricDataQueries=queries,
        StartTime=datetime.utcnow() - timedelta(hours=1),
        EndTime=datetime.utcnow(),
    )
    self.assertEqual(2, len(response["MetricDataResults"]))
    for data_metric in response["MetricDataResults"]:
        if data_metric["Id"] == "some":
            self.assertEqual(41.0, data_metric["Values"][0])
        if data_metric["Id"] == "part":
            self.assertEqual(23.0, data_metric["Values"][0])

    # a future time interval must contain no data points
    response = conn.get_metric_data(
        MetricDataQueries=queries,
        StartTime=datetime.utcnow() + timedelta(hours=1),
        EndTime=datetime.utcnow() + timedelta(hours=2),
    )
    for data_metric in response["MetricDataResults"]:
        if data_metric["Id"] == "some":
            self.assertEqual(0, len(data_metric["Values"]))
        if data_metric["Id"] == "part":
            self.assertEqual(0, len(data_metric["Values"]))

    # the internal raw-metrics endpoint must expose at least the three data points
    url = "%s%s" % (config.get_edge_url(), PATH_GET_RAW_METRICS)
    result = requests.get(url)
    self.assertEqual(200, result.status_code)
    result = json.loads(to_str(result.content))
    self.assertGreaterEqual(len(result["metrics"]), 3)
def test_invoke_kinesis(self):
    """Invoke the Kinesis API through the edge endpoint."""
    self._invoke_kinesis_via_edge(config.get_edge_url())
def test_invoke_s3_multipart_request(self):
    """Invoke an S3 multipart-form request through the edge endpoint."""
    self._invoke_s3_via_edge_multipart_form(config.get_edge_url())
def test_invoke_s3(self):
    """Invoke the S3 API through the edge endpoint."""
    self._invoke_s3_via_edge(config.get_edge_url())
def test_invoke_stepfunctions(self):
    """Invoke the Step Functions API through the edge endpoint."""
    self._invoke_stepfunctions_via_edge(config.get_edge_url())
def test_invoke_firehose(self):
    """Invoke the Firehose API through the edge endpoint."""
    self._invoke_firehose_via_edge(config.get_edge_url())
def test_invoke_dynamodb(self):
    """Invoke the DynamoDB API (Go SDK client) through the edge endpoint."""
    self._invoke_dynamodb_via_edge_go_sdk(config.get_edge_url())
def authenticate_presign_url(method, path, headers, data=None):
    """Validate the signature (v2 or v4) of a presigned S3 URL request.

    Returns an error response object if signature validation fails, or None
    (implicitly) if the URL is valid.
    NOTE(review): if the request carries neither v2 nor v4 signature parameters,
    `response` is never assigned and the final check raises NameError — presumably
    callers only invoke this for requests that do carry presign parameters; verify.
    """
    url = '{}{}'.format(config.get_edge_url(), path)
    parsed = urlparse.urlparse(url)
    query_params = parse_qs(parsed.query)
    forwarded_for = get_forwarded_for_host(headers)
    if forwarded_for:
        # rewrite the URL host to the originally requested (forwarded) host
        url = re.sub('://[^/]+', '://%s' % forwarded_for, url)
    LOGGER.debug('Received presign S3 URL: %s' % url)
    sign_headers = {}
    query_string = {}

    # a URL is v2/v4-signed if ALL parameters of the respective scheme are present
    is_v2 = all([p in query_params for p in SIGNATURE_V2_PARAMS])
    is_v4 = all([p in query_params for p in SIGNATURE_V4_PARAMS])

    # Add overrided headers to the query string params
    for param_name, header_name in ALLOWED_HEADER_OVERRIDES.items():
        if param_name in query_params:
            query_string[param_name] = query_params[param_name][0]

    # Request's headers are more essentials than the query parameters in the request.
    # Different values of header in the header of the request and in the query parameter of the
    # request URL will fail the signature calulation. As per the AWS behaviour
    # Add valid headers into the sign_header. Skip the overrided headers
    # and the headers which have been sent in the query string param
    presign_params_lower = \
        [p.lower() for p in SIGNATURE_V4_PARAMS] if is_v4 else [p.lower() for p in SIGNATURE_V2_PARAMS]
    params_header_override = [param_name for param_name, header_name in ALLOWED_HEADER_OVERRIDES.items()]
    if len(query_params) > 2:
        # distribute the remaining query parameters between the query string
        # (sub-resource params) and the headers used for signature calculation
        for key in query_params:
            key_lower = key.lower()
            if key_lower not in presign_params_lower:
                if (key_lower not in (header[0].lower() for header in headers) and
                        key_lower not in params_header_override):
                    if key_lower in ['versionid', 'uploadid', 'partnumber']:
                        query_string[key] = query_params[key][0]
                    else:
                        sign_headers[key] = query_params[key][0]

    # pick up x-amz-*/content-* request headers that take part in the signature
    for header_name, header_value in headers.items():
        header_name_lower = header_name.lower()
        if header_name_lower.startswith('x-amz-') or header_name_lower.startswith('content-'):
            if is_v2 and header_name_lower in query_params:
                sign_headers[header_name] = header_value
            if is_v4 and header_name_lower in query_params['X-Amz-SignedHeaders'][0]:
                sign_headers[header_name] = header_value

    # Preparnig dictionary of request to build AWSRequest's object of the botocore
    request_url = '{}://{}{}'.format(parsed.scheme, parsed.netloc, urlparse.quote(parsed.path))
    request_url = \
        ('%s?%s' % (request_url, urlencode(query_string)) if query_string else request_url)
    if forwarded_for:
        request_url = re.sub('://[^/]+', '://%s' % forwarded_for, request_url)

    bucket_name = extract_bucket_name(headers, parsed.path)

    request_dict = {
        'url_path': urlparse.quote(parsed.path),
        'query_string': query_string,
        'method': method,
        'headers': sign_headers,
        'body': b'',
        'url': request_url,
        'context': {
            'is_presign_request': True,
            'use_global_endpoint': True,
            'signing': {'bucket': bucket_name}
        }
    }

    # Support for virtual host addressing style in signature version 2
    # We don't need to do this in v4 as we already concerting it to the virtual addressing style.
    # v2 require path base styled request_dict and v4 require virtual styled request_dict
    if uses_host_addressing(headers) and is_v2:
        request_dict['url_path'] = '/{}{}'.format(bucket_name, request_dict['url_path'])
        parsed_url = urlparse.urlparse(request_url)
        request_dict['url'] = '{}://{}:{}{}'.format(
            parsed_url.scheme, S3_VIRTUAL_HOSTNAME, config.EDGE_PORT, request_dict['url_path'])
        request_dict['url'] = \
            ('%s?%s' % (request_dict['url'], urlencode(query_string)) if query_string else request_dict['url'])

    # dispatch to the matching signature version; partial parameter sets are rejected
    if not is_v2 and any([p in query_params for p in SIGNATURE_V2_PARAMS]):
        response = requests_error_response_xml_signature_calculation(
            code=403,
            message='Query-string authentication requires the Signature, Expires and AWSAccessKeyId parameters',
            code_string='AccessDenied'
        )
    elif is_v2 and not is_v4:
        response = authenticate_presign_url_signv2(method, path, headers, data, url, query_params, request_dict)

    if not is_v4 and any([p in query_params for p in SIGNATURE_V4_PARAMS]):
        response = requests_error_response_xml_signature_calculation(
            code=403,
            message='Query-string authentication requires the X-Amz-Algorithm, \
X-Amz-Credential, X-Amz-Date, X-Amz-Expires, \
X-Amz-SignedHeaders and X-Amz-Signature parameters.',
            code_string='AccessDenied'
        )
    elif is_v4 and not is_v2:
        response = authenticate_presign_url_signv4(method, path, headers, data, url, query_params, request_dict)

    if response is not None:
        LOGGER.error('Presign signature calculation failed: %s' % response)
        return response
    LOGGER.debug('Valid presign url.')
def test_invoke_dynamodbstreams(self):
    """Invoke the DynamoDB Streams API through the edge endpoint."""
    self._invoke_dynamodbstreams_via_edge(config.get_edge_url())