def test_get_container_instances(fetcher, ecs, ec2):
    cluster_name = "test_ecs_cluster"
    response = ecs.client.create_cluster(clusterName=cluster_name)
    cluster_arn = response["cluster"]["clusterArn"]

    test_instances = ec2.resource.create_instances(
        ImageId="ami-bb9a6bc2", MinCount=3, MaxCount=3
    )

    instance_id_document = json.dumps(
        ec2_utils.generate_instance_identity_document(test_instances[0])
    )
    response = ecs.client.register_container_instance(
        cluster=cluster_name, instanceIdentityDocument=instance_id_document
    )
    toolbox.pstruct(response, "register_container_instance")

    container_instances = fetcher.get_container_instances(cluster_arn)
    assert len(container_instances) == 1
    assert (
        response["containerInstance"]["containerInstanceArn"]
        in container_instances.keys()
    )

    container_instances = fetcher.get_container_instances(
        cluster_arn, arns=fetcher.get_container_instance_arns(cluster_arn)
    )
    assert len(container_instances) == 1
    assert (
        response["containerInstance"]["containerInstanceArn"]
        in container_instances.keys()
    )
def get_arns(self, method: str, key: str, **aws_api_parameters) -> list:
    """Gets the ARNs with a given method and key.

    Args:
        method: AWS API method to use.
        key: Key to extract from the response(s).
        **aws_api_parameters: Additional parameters passed to the paginator.

    Returns:
        list: List of ARNs.
    """
    arns = []
    total_start_time = default_timer()
    start_time = total_start_time

    for page in self.ecs.get_paginator(method).paginate(**aws_api_parameters):
        DURATION.labels(method).observe(max(default_timer() - start_time, 0))
        arns += page.get(key, [])
        if self.should_throttle:
            time.sleep(self.throttle_interval_seconds)
        start_time = default_timer()

    if s.DEBUG:
        logger.bind(**aws_api_parameters).debug("{} {}.", method, key)
    if s.PRINT_STRUCTS:
        toolbox.pstruct(arns, f"{key} {method}")

    return arns
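# Hedged usage sketch (not part of the original source): assuming `fetcher` is
# an instance of the class that owns `get_arns` and wraps a boto3 ECS client,
# the method can drive any paginated list_* call. The method names, response
# keys, and the `cluster` parameter below follow the boto3 ECS API.
#
#     cluster_arns = fetcher.get_arns("list_clusters", "clusterArns")
#     task_arns = fetcher.get_arns(
#         "list_tasks", "taskArns", cluster=cluster_arns[0]
#     )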
def uncached_fetch(arns: list) -> dict: logger.bind(arns=arns).debug( "Fetch container instances from AWS with describe_container_instances." ) if s.DEBUG else None lst = [] arns_chunks = toolbox.chunk_list(arns, 100) for arns_chunk in arns_chunks: start_time = default_timer() lst += self.ecs.describe_container_instances( cluster=cluster_arn, containerInstances=arns_chunk )["containerInstances"] DURATION.labels("describe_container_instances").observe( max(default_timer() - start_time, 0) ) if self.should_throttle: time.sleep(self.throttle_interval_seconds) dct = toolbox.list_to_dict(lst, "containerInstanceArn") if s.PRINT_STRUCTS: toolbox.pstruct(dct, "describe_container_instances") return dct
def uncached_fetch(instance_ids: list) -> dict:
    logger.bind(instance_ids=instance_ids).debug(
        "Fetch EC2 instances from AWS with describe_instances."
    ) if s.DEBUG else None

    instances_list = []
    ids_chunks = toolbox.chunk_list(instance_ids, 100)
    for ids_chunk in ids_chunks:
        start_time = default_timer()
        response = self.ec2.describe_instances(InstanceIds=ids_chunk)
        for reservation in response["Reservations"]:
            instances_list += reservation["Instances"]
        DURATION.labels("describe_instances").observe(
            max(default_timer() - start_time, 0)
        )
        if self.should_throttle:
            time.sleep(self.throttle_interval_seconds)

    dct = toolbox.list_to_dict(instances_list, "InstanceId")

    if s.PRINT_STRUCTS:
        toolbox.pstruct(dct, "ec2.describe_instances")

    return dct
def uncached_fetch(task_arns: list) -> dict:
    logger.bind(cluster_arn=cluster_arn, task_arns=task_arns).debug(
        "Fetch tasks from AWS with describe_tasks."
    ) if s.DEBUG else None

    tasks = []
    chunked_task_arns = toolbox.chunk_list(task_arns, 100)
    for task_arns_chunk in chunked_task_arns:
        start_time = default_timer()
        _t = self.ecs.describe_tasks(
            cluster=cluster_arn, tasks=task_arns_chunk
        )["tasks"]
        tasks += list(
            filter(lambda x: x.get("lastStatus", None) == "RUNNING", _t)
        )
        DURATION.labels("describe_tasks").observe(
            max(default_timer() - start_time, 0)
        )
        if self.should_throttle:
            time.sleep(self.throttle_interval_seconds)

    if s.PRINT_STRUCTS:
        toolbox.pstruct(tasks, "describe_tasks")

    return toolbox.list_to_dict(tasks, "taskArn")
def test_get_task_arns_with_run_task(fetcher, ecs, ec2):
    cluster_name = "test_ecs_cluster"
    response = ecs.client.create_cluster(clusterName=cluster_name)
    cluster_arn = response["cluster"]["clusterArn"]

    test_instances = ec2.resource.create_instances(
        ImageId="ami-bb9a6bc2", MinCount=3, MaxCount=3
    )
    for test_instance in test_instances:
        instance_id_document = json.dumps(
            ec2_utils.generate_instance_identity_document(test_instance)
        )
        _ = ecs.client.register_container_instance(
            cluster=cluster_name, instanceIdentityDocument=instance_id_document
        )

    _ = ecs.client.register_task_definition(
        family="test_ecs_task",
        containerDefinitions=[
            {
                "name": "hello_world",
                "image": "docker/hello-world:latest",
                "cpu": 1024,
                "memory": 400,
                "essential": True,
                "logConfiguration": {"logDriver": "json-file"},
            }
        ],
    )

    response_ec2 = ecs.client.run_task(
        cluster=cluster_name,
        overrides={},
        taskDefinition="test_ecs_task",
        count=2,
        launchType="EC2",
        startedBy="moto",
    )
    toolbox.pstruct(response_ec2, "run_task ec2")

    response_fargate = ecs.client.run_task(
        cluster=cluster_name,
        overrides={},
        taskDefinition="test_ecs_task",
        count=1,
        launchType="EC2",
        startedBy="moto",
    )
    toolbox.pstruct(response_fargate, "run_task fargate")

    assert len(response_ec2["tasks"]) == 2

    # Currently moto does not support launch type.
    assert "EC2" != response_ec2["tasks"][0].get("launchType", None)
    assert "EC2" != response_ec2["tasks"][1].get("launchType", None)

    task_arns = fetcher.get_task_arns(cluster_arn=cluster_arn)
    assert len(task_arns) == 3
def test_get_ec2_instances(fetcher, ecs, ec2):
    test_instances = ec2.resource.create_instances(
        ImageId="ami-bb9a6bc2", MinCount=3, MaxCount=3
    )
    toolbox.pstruct(test_instances, "ec2.resource.create_instances")

    ids = [instance.id for instance in test_instances]
    ec2_instances = fetcher.get_ec2_instances(ids)
    assert len(ec2_instances) == 3
def get_multiple(
    self,
    keys: List[str],
    fetch: Callable[[List[str]], dict],
) -> dict:
    """Get entries from cache and update if missing.

    Important: Don't forget the `flush()` method. Without using it the cache
    will never remove old data and eat up more and more memory.

    Args:
        keys (List[str]): Keys to retrieve from cache.
        fetch (Callable[[List[str]], dict]): Function that fetches missing
            key values.

    Returns:
        dict: Dictionary where the keys match given keys.
    """
    self.last_hits = 0
    self.last_misses = 0

    missing = []
    result = {}

    for key in keys:
        if key in self.current:
            result[key] = self.current[key]
            self.total_hits += 1
            self.last_hits += 1
        else:
            missing.append(key)
            self.total_misses += 1
            self.last_misses += 1

    if s.DEBUG:
        logger.bind(cache=self.name, missing_keys=missing).debug(
            "{} hits. {} misses.",
            self.last_hits,
            self.last_misses,
        )

    fetched = fetch(missing) if missing else {}
    result.update(fetched)
    self.current.update(fetched)
    self.next.update(result)

    toolbox.pstruct(result) if s.PRINT_STRUCTS else None

    return result
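# Hedged usage sketch (not part of the original source): `get_multiple`
# expects a fetch callable that receives only the missing keys and returns a
# dict keyed the same way. A minimal illustration with a hypothetical `cache`
# instance and a stand-in fetch function:
#
#     def fetch_missing(keys: List[str]) -> dict:
#         # In the real fetcher this would be a chunked describe_* call.
#         return {key: {"arn": key} for key in keys}
#
#     entries = cache.get_multiple(["arn-1", "arn-2"], fetch_missing)
#     cache.flush()  # rotate the cache so stale entries are dropped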
def test_marshalling_targets_labels():
    targets = [
        discovery.Target(
            ip="ip",
            port="port",
            p_instance="ip:port",
            task_name="task_name",
            metrics_path=None,
            cluster_name="cluster_name",
            task_version="task_version",
            task_id="task_id",
            container_id="container_id",
            instance_id="instance_id",
            custom_labels={
                "custom_label1": "value",
                "custom_label2": "value",
            },
        )
    ]

    result = marshalling.marshall_targets(targets)

    expected = {
        s.FILENAME_GENERIC: [
            {
                "targets": ["ip:port"],
                "labels": {
                    "instance": "ip:port",
                    "job": "task_name",
                    "metrics_path": "/metrics",
                    "cluster": "cluster_name",
                    "task_version": "task_version",
                    "task_id": "task_id",
                    "container_id": "container_id",
                    "instance_id": "instance_id",
                    "custom_label1": "value",
                    "custom_label2": "value",
                },
            },
        ],
        s.FILENAME_15S: [],
        s.FILENAME_30S: [],
        s.FILENAME_1M: [],
        s.FILENAME_5M: [],
    }

    toolbox.pstruct(expected, "expected")
    assert result == expected
def uncached_fetch(arn: str) -> dict:
    logger.bind(arn=arn).debug(
        "Fetch task definition from AWS with describe_task_definition."
    ) if s.DEBUG else None

    start_time = default_timer()
    task_definition = self.ecs.describe_task_definition(taskDefinition=arn)[
        "taskDefinition"
    ]
    DURATION.labels("describe_task_definition").observe(
        max(default_timer() - start_time, 0)
    )

    if s.PRINT_STRUCTS:
        toolbox.pstruct(task_definition, "fetched task definition")

    if self.should_throttle:
        time.sleep(self.throttle_interval_seconds)

    return task_definition
def uncached_fetch(arns: list) -> dict: logger.bind(arns=arns).debug( "Fetch task definitions from AWS with describe_task_definition." ) if s.DEBUG else None descriptions = {} for arn in arns: start_time = default_timer() response = self.ecs.describe_task_definition(taskDefinition=arn) DURATION.labels("describe_task_definition").observe( max(default_timer() - start_time, 0) ) response_arn = response["taskDefinition"]["taskDefinitionArn"] descriptions[response_arn] = response["taskDefinition"] if self.should_throttle: time.sleep(self.throttle_interval_seconds) if s.PRINT_STRUCTS: toolbox.pstruct(descriptions, "fetched task definitions") return descriptions
def get_single(
    self,
    key: str,
    fetch: Callable[[str], dict],
) -> dict:
    """Get entry from cache and update if missing.

    Important: Don't forget the `flush()` method. Without using it the cache
    will never remove old data and eat up more and more memory.

    Args:
        key (str): Key to retrieve from cache.
        fetch (Callable[[str], dict]): Function that fetches and returns the
            missing value.

    Returns:
        dict: Dictionary representing the key's value.
    """
    self.last_hits = 0
    self.last_misses = 0

    result = {}

    if key in self.current:
        result = self.current[key]
        self.total_hits += 1
        self.last_hits = 1
        logger.bind(cache=self.name, found_key=key).debug("Hit.")
    else:
        self.total_misses += 1
        self.last_misses = 1
        logger.bind(cache=self.name, missing_key=key).debug("Miss.")
        result = fetch(key)
        if result:
            self.current[key] = result

    self.next[key] = result

    toolbox.pstruct(result) if s.PRINT_STRUCTS else None

    return result
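# Hedged usage sketch (not part of the original source): `get_single` takes a
# fetch callable for a single key and caches non-empty results. Hypothetical
# use with an illustrative task definition ARN and a boto3 ECS client:
#
#     task_definition = cache.get_single(
#         "arn:aws:ecs:eu-central-1:123456789012:task-definition/app:1",
#         lambda arn: ecs_client.describe_task_definition(
#             taskDefinition=arn
#         )["taskDefinition"],
#     )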
def test_get_task_definition_arns(fetcher, ecs, ec2):
    cluster_name = "test_ecs_cluster"
    _ = ecs.client.create_cluster(clusterName=cluster_name)

    test_instances = ec2.resource.create_instances(
        ImageId="ami-bb9a6bc2", MinCount=3, MaxCount=3
    )
    for test_instance in test_instances:
        instance_id_document = json.dumps(
            ec2_utils.generate_instance_identity_document(test_instance)
        )
        _ = ecs.client.register_container_instance(
            cluster=cluster_name, instanceIdentityDocument=instance_id_document
        )

    response = ecs.client.register_task_definition(
        family="test_ecs_task",
        containerDefinitions=[
            {
                "name": "hello_world",
                "image": "docker/hello-world:latest",
                "cpu": 1024,
                "memory": 400,
                "essential": True,
                "logConfiguration": {"logDriver": "json-file"},
            }
        ],
    )
    toolbox.pstruct(response["taskDefinition"], "register_task_definition")

    task_definition_arns = fetcher.get_task_definition_arns()
    assert len(task_definition_arns) == 1
    assert task_definition_arns[0] == response["taskDefinition"]["taskDefinitionArn"]
def test_discovery_full():
    os.environ["AWS_DEFAULT_REGION"] = "eu-central-1"
    os.environ["AWS_ACCESS_KEY_ID"] = "testing"
    os.environ["AWS_SECRET_ACCESS_KEY"] = "testing"
    os.environ["AWS_SECURITY_TOKEN"] = "testing"
    os.environ["AWS_SESSION_TOKEN"] = "testing"

    ecs_client = boto3.client("ecs")
    ec2_client = boto3.client("ec2")
    ecs_stubber = Stubber(ecs_client)
    ec2_stubber = Stubber(ec2_client)

    # --------------------------------------------------------------------------
    # Preload stubbers with everything necessary.

    ecs_stubber.add_response("list_clusters", data.list_clusters_response, {})
    ecs_stubber.add_response(
        "list_tasks", data.list_tasks_response, data.list_tasks_parameters
    )
    ecs_stubber.add_response(
        "list_container_instances",
        data.list_container_instances_response,
        data.list_container_instances_parameters,
    )
    ecs_stubber.activate()
    ec2_stubber.activate()

    # --------------------------------------------------------------------------

    fetcher = fetching.CachedFetcher(ecs_client, ec2_client)
    discoverer = discovery.PrometheusEcsDiscoverer(fetcher)

    # Inject data into caches.
    fetcher.task_cache.current = toolbox.list_to_dict(
        data.describe_tasks_response["tasks"], "taskArn"
    )
    fetcher.task_definition_cache.current = {}
    for task_definition in data.describe_task_definition_responses:
        fetcher.task_definition_cache.current[
            task_definition["taskDefinition"]["taskDefinitionArn"]
        ] = task_definition["taskDefinition"]
    fetcher.container_instance_cache.current = toolbox.list_to_dict(
        data.describe_container_instances_response["containerInstances"],
        "containerInstanceArn",
    )
    fetcher.ec2_instance_cache.current = {}
    for reservation in data.describe_instances_response["Reservations"]:
        for instance in reservation["Instances"]:
            fetcher.ec2_instance_cache.current[instance["InstanceId"]] = instance

    targets = discoverer.discover()

    assert len(targets) == 6

    target = targets[0]
    assert target.ip == "10.0.2.85"
    assert target.port == "32794"
    assert target.p_instance == "10.0.2.85:32794"
    assert target.task_name == "webapp-test-3"
    assert target.metrics_path == "/app-3/metrics"
    assert target.cluster_name == "cluster-name"
    assert target.task_version == "2"
    assert target.task_id == "550b823c-288a-4f31-b3e1-69f9ea15060d"
    assert target.container_id == "213d21d5-4718-4b7f-9f7d-a1b3a8e57ad8"
    assert target.instance_id == "i-0b967c2479dd4f5af"
    assert target.custom_labels == {}

    toolbox.pstruct(targets, "targets")
def test_extract_path_interval_pairs_1():
    inp = "30s:/mymetrics1,/mymetrics2"
    outp = marshalling.extract_path_interval_pairs(inp)
    toolbox.pstruct(outp, inp)
    assert outp == {"/mymetrics1": "30s", "/mymetrics2": None}
def test_marshalling_targets_nolabels():
    targets = [
        discovery.Target(
            ip="ip",
            port="port",
            p_instance="task_name",
            task_name="task_name",
            metrics_path=None,
        ),
        discovery.Target(
            ip="ip",
            port="port",
            p_instance="task_name",
            task_name="task_name",
            metrics_path="/mymetrics2",
        ),
        discovery.Target(
            ip="ip",
            port="port",
            p_instance="task_name",
            task_name="task_name",
            metrics_path="15s:/mymetrics,/metrics",
        ),
        discovery.Target(
            ip="ip",
            port="port",
            p_instance="task_name",
            task_name="task_name",
            metrics_path="30s:/mymetrics",
        ),
        discovery.Target(
            ip="ip",
            port="port",
            p_instance="task_name",
            task_name="task_name",
            metrics_path="1m:/mymetrics",
        ),
        discovery.Target(
            ip="ip",
            port="port",
            p_instance="task_name",
            task_name="task_name",
            metrics_path="5m:/mymetrics",
        ),
    ]

    result = marshalling.marshall_targets(targets)

    expected = {
        s.FILENAME_GENERIC: [
            {
                "targets": ["ip:port"],
                "labels": {
                    "instance": "task_name",
                    "job": "task_name",
                    "metrics_path": "/metrics",
                },
            },
            {
                "targets": ["ip:port"],
                "labels": {
                    "instance": "task_name",
                    "job": "task_name",
                    "metrics_path": "/mymetrics2",
                },
            },
            {
                "targets": ["ip:port"],
                "labels": {
                    "instance": "task_name",
                    "job": "task_name",
                    "metrics_path": "/metrics",
                },
            },
        ],
        s.FILENAME_15S: [
            {
                "targets": ["ip:port"],
                "labels": {
                    "instance": "task_name",
                    "job": "task_name",
                    "metrics_path": "/mymetrics",
                },
            },
        ],
        s.FILENAME_30S: [
            {
                "targets": ["ip:port"],
                "labels": {
                    "instance": "task_name",
                    "job": "task_name",
                    "metrics_path": "/mymetrics",
                },
            },
        ],
        s.FILENAME_1M: [
            {
                "targets": ["ip:port"],
                "labels": {
                    "instance": "task_name",
                    "job": "task_name",
                    "metrics_path": "/mymetrics",
                },
            },
        ],
        s.FILENAME_5M: [
            {
                "targets": ["ip:port"],
                "labels": {
                    "instance": "task_name",
                    "job": "task_name",
                    "metrics_path": "/mymetrics",
                },
            },
        ],
    }

    toolbox.pstruct(expected, "expected")
    assert result == expected
def test_extract_path_interval_pairs_3():
    inp = None
    outp = marshalling.extract_path_interval_pairs(inp)
    toolbox.pstruct(outp, inp)
    assert outp == {"/metrics": None}
def test_print_structure():
    lst = [{"key1": "hallo", "key2": "my"}, {"key1": "old", "key2": "friend"}]
    toolbox.pstruct(lst)
    assert True