def test_docker_down(self, *args):
    """The docker.service_up service check goes CRITICAL when the daemon is unreachable."""
    # Reset the util's settings and client so the check re-attempts the connection.
    DockerUtil().set_docker_settings({}, {})
    DockerUtil().last_init_retry = None
    DockerUtil().left_init_retries = 10
    DockerUtil()._client = None

    self.run_check(MOCK_CONFIG, force_reload=True)
    self.assertServiceCheck("docker.service_up", status=AgentCheck.CRITICAL, tags=None, count=1)
def test_healthcheck(self):
    """docker.container_health is emitted only for whitelisted images."""
    # Two whitelisted images -> two health service checks.
    config = {
        "init_config": {},
        "instances": [{
            "url": "unix://var/run/docker.sock",
            "health_service_check_whitelist": ["docker_image:nginx", "docker_image:redis"],
        }],
    }
    DockerUtil().set_docker_settings(config['init_config'], config['instances'][0])
    DockerUtil().filtering_enabled = False
    self.run_check(config, force_reload=True)
    self.assertServiceCheck('docker.container_health', count=2)

    # Empty whitelist -> no health service checks at all.
    config = {
        "init_config": {},
        "instances": [{
            "url": "unix://var/run/docker.sock",
            "health_service_check_whitelist": [],
        }],
    }
    DockerUtil._drop()
    DockerUtil(init_config=config['init_config'], instance=config['instances'][0])
    self.run_check(config, force_reload=True)
    self.assertServiceCheck('docker.container_health', count=0)
def test_docker_host_metadata_invalid_response(self):
    """A None version() response yields only the swarm state in host metadata."""
    mock_version = mock.MagicMock(name='version', return_value=None)

    du = DockerUtil()
    du._client = mock.MagicMock()
    du._client.version = mock_version
    du.swarm_node_state = 'inactive'

    self.assertEqual({'docker_swarm': 'inactive'}, DockerUtil().get_host_metadata())
    mock_version.assert_called_once()
def test_docker_host_metadata_swarm_ok(self):
    """Host metadata reports the docker version and an active swarm state."""
    du = DockerUtil()
    mock_version = mock.MagicMock(name='version', return_value={'Version': '1.13.1'})
    mock_isswarm = mock.MagicMock(name='is_swarm', return_value=True)

    du._client = mock.MagicMock()
    du._client.version = mock_version
    du.is_swarm = mock_isswarm

    self.assertEqual({'docker_version': '1.13.1', 'docker_swarm': 'active'},
                     DockerUtil().get_host_metadata())
    mock_version.assert_called_once()
def test_image_name_from_image_repotags(self, mock_init, mock_image):
    """Image name is resolved from RepoTags via inspect, then served from cache."""
    # NOTE(review): this method shares its name with the mock-free variant below,
    # so in the original file only the later definition is collected by the runner.
    mock_image.return_value = {'RepoTags': ["redis:3.2"], 'RepoDigests': []}
    mock_init.return_value = None

    sha = 'sha256:e48e77eee11b6d9ac9fc35a23992b4158355a8ec3fd3725526eba3f467e4b6c9'
    co = {'Image': sha}
    self.assertEqual('redis:3.2', DockerUtil().image_name_extractor(co))
    mock_image.assert_called_once_with(sha)

    # A second extraction must hit the cache instead of calling inspect_image again.
    DockerUtil().image_name_extractor(co)
    mock_image.assert_called_once()
def test_image_name_from_image_repotags(self):
    """Image name is resolved from RepoTags via inspect_image, then served from cache."""
    du = DockerUtil()
    du._client = mock.MagicMock()
    mock_img = mock.MagicMock(name='inspect_image',
                              return_value={'RepoTags': ["redis:3.2"], 'RepoDigests': []})
    du._client.inspect_image = mock_img

    sha = 'sha256:e48e77eee11b6d9ac9fc35a23992b4158355a8ec3fd3725526eba3f467e4b6c9'
    co = {'Image': sha}
    self.assertEqual('redis:3.2', DockerUtil().image_name_extractor(co))
    mock_img.assert_called_once_with(sha)

    # A second extraction must hit the cache instead of calling inspect_image again.
    DockerUtil().image_name_extractor(co)
    mock_img.assert_called_once()
def test_event_attributes_tag(self):
    """Whitelisted event attributes (exitCode, name) are turned into event tags."""
    config = {
        "init_config": {},
        "instances": [{
            "url": "unix://var/run/docker.sock",
            "event_attributes_as_tags": ["exitCode", "name"],
        }],
    }
    DockerUtil().set_docker_settings(config['init_config'], config['instances'][0])
    DockerUtil().last_init_retry = None
    DockerUtil().left_init_retries = 10
    DockerUtil()._client = None

    # Run a container that exits non-zero so a die event with exitCode:1 is emitted.
    container_fail = DockerUtil().client.create_container(
        "nginx:latest", detach=True, name='event-tags-test', entrypoint='/bin/false')
    log.debug('start nginx:latest with entrypoint /bin/false')
    DockerUtil().client.start(container_fail)
    log.debug('container exited with %s' % DockerUtil().client.wait(container_fail, 1))

    # Wait 1 second after exit so the event will be picked up
    from time import sleep
    sleep(1)

    self.run_check(config, force_reload=True)
    DockerUtil().client.remove_container(container_fail)

    # Previous tests might have left unprocessed events, to be ignored
    filtered_events = [
        event for event in self.events
        if 'container_name:event-tags-test' in event.get('tags', [])
    ]

    self.assertEqual(len(filtered_events), 1)
    self.assertIn("exitCode:1", filtered_events[0]["tags"])
    self.assertNotIn("name:test-exit-fail", filtered_events[0]["tags"])
def test_basic_config_single(self):
    """Basic container/image metrics are emitted with the expected tag sets."""
    expected_metrics = [
        ('docker.containers.running', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
        ('docker.containers.running', ['docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
        ('docker.containers.stopped', ['docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
        ('docker.containers.stopped', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
        ('docker.image.size', ['image_name:nginx', 'image_tag:latest']),
        ('docker.image.size', ['image_name:redis', 'image_tag:latest']),
        ('docker.image.virtual_size', ['image_name:nginx', 'image_tag:latest']),
        ('docker.image.virtual_size', ['image_name:redis', 'image_tag:latest']),
        ('docker.images.available', None),
        ('docker.images.intermediate', None),
        ('docker.mem.cache', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
        ('docker.mem.cache', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
        ('docker.mem.rss', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest']),
        ('docker.mem.rss', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
    ]

    config = {
        "init_config": {},
        "instances": [{
            "url": "unix://var/run/docker.sock",
            "collect_image_size": True,
            "collect_images_stats": True,
        }],
    }
    DockerUtil().set_docker_settings(config['init_config'], config['instances'][0])

    self.run_check(config, force_reload=True)
    for mname, tags in expected_metrics:
        self.assertMetric(mname, tags=tags, count=1, at_least=1)
def test_collect_labels_as_tags(self):
    """docker_labels_as_tags adds the label1 tag on nginx (which carries it) only."""
    expected_metrics = [
        ('docker.containers.stopped.total', None),
        ('docker.containers.running.total', None),
        ('docker.containers.running', ['docker_image:redis:latest', 'image_name:redis', 'image_tag:latest', 'short_image:redis:latest']),
        ('docker.containers.running', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'short_image:nginx:latest', 'label1:nginx']),
        ('docker.mem.rss', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'label1:nginx']),
        ('docker.containers.stopped', ['docker_image:redis:latest', 'image_name:redis', 'image_tag:latest', 'short_image:redis:latest']),
        ('docker.containers.stopped', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'short_image:nginx:latest', 'label1:nginx']),
        ('docker.mem.rss', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
        ('docker.mem.limit', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'label1:nginx']),
        ('docker.mem.cache', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'label1:nginx']),
        ('docker.mem.cache', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
        ('docker.mem.in_use', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'label1:nginx']),
    ]

    config = {
        "init_config": {},
        "instances": [{
            "url": "unix://var/run/docker.sock",
        }],
    }

    DockerUtil._drop()
    DockerUtil(init_config=config['init_config'], instance=config['instances'][0])

    self.agentConfig = {
        'docker_labels_as_tags': 'label1'
    }
    self.check = load_check('docker_daemon', config, self.agentConfig)
    self.run_check(config)

    for mname, tags in expected_metrics:
        self.assertMetric(mname, tags=tags, count=1, at_least=1)
def test_network_tagging(self):
    """Per-network net metrics carry docker_network tags plus the custom instance tags."""
    expected_metrics = [
        ('docker.net.bytes_rcvd', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'docker_network:bridge']),
        ('docker.net.bytes_rcvd', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'docker_network:second']),
        ('docker.net.bytes_sent', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'docker_network:bridge']),
        ('docker.net.bytes_sent', ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'docker_network:second']),
    ]

    custom_tags = ["extra_tag", "env:testing"]
    config = {
        "init_config": {},
        "instances": [{
            "url": "unix://var/run/docker.sock",
            "tags": custom_tags,
            "collect_image_size": True,
            "collect_images_stats": True,
        }],
    }
    DockerUtil().set_docker_settings(config['init_config'], config['instances'][0])

    self.run_check_twice(config, force_reload=True)
    for mname, tags in expected_metrics:
        # Every metric gets the custom tags; per-metric tags are appended when present.
        expected_tags = custom_tags + (tags or [])
        self.assertMetric(mname, tags=expected_tags, count=1, at_least=1)
def __init__(self, instance=None):
    """Build kubelet/cadvisor/apiserver URLs from the instance config.

    When no instance is given, try loading kubernetes.yaml and fall back to
    defaults if it is missing or invalid.
    """
    self.docker_util = DockerUtil()
    if instance is None:
        try:
            config_file_path = get_conf_path(KUBERNETES_CHECK_NAME)
            check_config = check_yaml(config_file_path)
            instance = check_config['instances'][0]
        # kubernetes.yaml was not found
        except IOError as ex:
            log.error(ex.message)
            instance = {}
        except Exception:
            log.error('Kubernetes configuration file is invalid. '
                      'Trying connecting to kubelet with default settings anyway...')
            instance = {}

    self.method = instance.get('method', KubeUtil.DEFAULT_METHOD)
    self.host = instance.get("host") or self.docker_util.get_hostname()
    self._node_ip = self._node_name = None  # lazy evaluation
    self.host_name = os.environ.get('HOSTNAME')

    self.cadvisor_port = instance.get('port', KubeUtil.DEFAULT_CADVISOR_PORT)
    self.kubelet_port = instance.get('kubelet_port', KubeUtil.DEFAULT_KUBELET_PORT)

    self.kubelet_api_url = '%s://%s:%d' % (self.method, self.host, self.kubelet_port)
    self.cadvisor_url = '%s://%s:%d' % (self.method, self.host, self.cadvisor_port)
    self.kubernetes_api_url = 'https://%s/api/v1' % (os.environ.get('KUBERNETES_SERVICE_HOST') or self.DEFAULT_MASTER_NAME)

    self.metrics_url = urljoin(self.cadvisor_url, KubeUtil.METRICS_PATH)
    self.pods_list_url = urljoin(self.kubelet_api_url, KubeUtil.PODS_LIST_PATH)
    self.kube_health_url = urljoin(self.kubelet_api_url, 'healthz')

    # keep track of the latest k8s event we collected and posted
    # default value is 0 but TTL for k8s events is one hour anyways
    self.last_event_collection_ts = defaultdict(int)
def __init__(self, agentConfig):
    """Set up the config store, container platform utils and template variables."""
    try:
        self.config_store = get_config_store(agentConfig=agentConfig)
    except Exception as e:
        # Fall back to auto-config only when the configured backend is unusable.
        log.error('Failed to instantiate the config store client. '
                  'Auto-config only will be used. %s' % str(e))
        agentConfig['sd_config_backend'] = None
        self.config_store = get_config_store(agentConfig=agentConfig)

    self.dockerutil = DockerUtil(config_store=self.config_store)

    self.kubeutil = None
    if Platform.is_k8s():
        try:
            self.kubeutil = KubeUtil()
        except Exception as ex:
            log.error("Couldn't instantiate the kubernetes client, "
                      "subsequent kubernetes calls will fail as well. Error: %s" % str(ex))

    self.metadata_collector = MetadataCollector()

    # Template variable resolvers, keyed by variable name.
    self.VAR_MAPPING = {
        'host': self._get_host_address,
        'pid': self._get_container_pid,
        'port': self._get_port,
        'container-name': self._get_container_name,
        'tags': self._get_additional_tags,
    }

    AbstractSDBackend.__init__(self, agentConfig)
def test_parse_subsystem(self):
    """_parse_subsystem extracts the container cgroup path on every known layout."""
    lines = [
        # (line, expected_result)
        (  # Kubernetes < 1.6
            ['10', 'memory', '/2ea504688cad325b9105f183b0d7831266a05f95b513c7327a6e9989ce8a450a'],
            '2ea504688cad325b9105f183b0d7831266a05f95b513c7327a6e9989ce8a450a'
        ),
        (  # New CoreOS / most systems
            ['10', 'memory', '/docker/2ea504688cad325b9105f183b0d7831266a05f95b513c7327a6e9989ce8a450a'],
            'docker/2ea504688cad325b9105f183b0d7831266a05f95b513c7327a6e9989ce8a450a'
        ),
        (  # Unidentified legacy system?
            ['10', 'memory', '2ea504688cad325b9105f183b0d7831266a05f95b513c7327a6e9989ce8a450a'],
            '2ea504688cad325b9105f183b0d7831266a05f95b513c7327a6e9989ce8a450a'
        ),
        (  # Rancher
            ['10', 'memory', '/docker/864daa0a0b19aa4703231b6c76f85c6f369b2452a5a7f777f0c9101c0fd5772a/docker/3bac629503293d1bb61e74f3e25b6c525f0c262f22974634c5d6988bb4b07927'],
            'docker/3bac629503293d1bb61e74f3e25b6c525f0c262f22974634c5d6988bb4b07927'
        ),
        (  # Legacy CoreOS 7xx
            ['7', 'memory', '/system.slice/docker-71116698eb215f2a5819f11ece7ea721f0e8d45169c7484d1cd7812596fad454.scope'],
            'system.slice/docker-71116698eb215f2a5819f11ece7ea721f0e8d45169c7484d1cd7812596fad454.scope'
        ),
        (  # Kubernetes >= 1.6 QoS cgroups
            ['7', 'memory', '/kubepods/burstable/poda0f63163-3fa8-11e7-a098-42010a840216/7e071d0086ebe623dcbf3a7e0005f23eb08d7ea4df4bb42075df43c9359ce078'],
            'kubepods/burstable/poda0f63163-3fa8-11e7-a098-42010a840216/7e071d0086ebe623dcbf3a7e0005f23eb08d7ea4df4bb42075df43c9359ce078'
        ),
    ]

    du = DockerUtil()
    for line, exp_res in lines:
        self.assertEquals(du._parse_subsystem(line), exp_res)
def __init__(self, agentConfig):
    """Set up the config store, docker client and per-platform utils.

    Platform-specific utils (kube/nomad/ecs) are only instantiated on their
    respective platforms.
    """
    try:
        self.config_store = get_config_store(agentConfig=agentConfig)
    except Exception as e:
        # Fall back to auto-config only when the configured backend is unusable.
        log.error('Failed to instantiate the config store client. '
                  'Auto-config only will be used. %s' % str(e))
        agentConfig['sd_config_backend'] = None
        self.config_store = get_config_store(agentConfig=agentConfig)

    self.dockerutil = DockerUtil(config_store=self.config_store)
    self.docker_client = self.dockerutil.client

    # BUG FIX: kubeutil was only ever assigned inside the is_k8s() branch, so on
    # non-k8s hosts the attribute did not exist at all (AttributeError on access).
    # Default it to None, consistent with the sibling backend constructor.
    self.kubeutil = None
    if Platform.is_k8s():
        try:
            self.kubeutil = KubeUtil()
        except Exception as ex:
            self.kubeutil = None
            log.error("Couldn't instantiate the kubernetes client, "
                      "subsequent kubernetes calls will fail as well. Error: %s" % str(ex))

    # NOTE(review): nomadutil/ecsutil remain conditionally defined, as in the
    # original — callers presumably gate on the platform before touching them.
    if Platform.is_nomad():
        self.nomadutil = NomadUtil()
    elif Platform.is_ecs_instance():
        self.ecsutil = ECSUtil()

    # Template variable resolvers, keyed by variable name.
    self.VAR_MAPPING = {
        'host': self._get_host_address,
        'port': self._get_port,
        'tags': self._get_additional_tags,
    }

    AbstractSDBackend.__init__(self, agentConfig)
def setUp(self): self.docker_client = DockerUtil().client self.second_network = self.docker_client.create_network("second", driver="bridge")['Id'] for c in CONTAINERS_TO_RUN: images = [i["RepoTags"][0] for i in self.docker_client.images(c.split(":")[0]) if i["RepoTags"] and i["RepoTags"][0].startswith(c)] if len(images) == 0: for line in self.docker_client.pull(c, stream=True): print line self.containers = [] for c in CONTAINERS_TO_RUN: name = "test-new-{0}".format(c.replace(":", "-")) host_config = None labels = None if c == "nginx:latest": host_config = {"Memory": 137438953472} labels = {"label1": "nginx", "foo": "bar"} cont = self.docker_client.create_container( c, detach=True, name=name, host_config=host_config, labels=labels) self.containers.append(cont) if c == "nginx:latest": self.docker_client.connect_container_to_network(cont['Id'], self.second_network) for c in self.containers: log.info("Starting container: {0}".format(c)) self.docker_client.start(c)
def agent_container_inspect(): # Self inspection based on cgroups # On all platforms, the container ID is the last part of the path. REGEX_PATTERN = '(.*/)+([a-z0-9]{64})$' dockerutil = DockerUtil() cgroup_path = '/proc/self/cgroup' container_id = None with open(cgroup_path, 'r') as f: for ind in f: id_match = re.search(REGEX_PATTERN, ind) if id_match: container_id = id_match.group(2) break if container_id is None: print( "The container_id could not be found. Refer to the docker log of the container running the agent" ) return 1 try: inspect = dockerutil.inspect_container(container_id) key_indices = [ i for i, k in enumerate(inspect['Config']['Env']) if 'API_KEY' in k ] for ind in key_indices: inspect['Config']['Env'][ind] = '%s=%s' % ( inspect['Config']['Env'][ind].split('=', 1)[0], 'redacted') print json.dumps(inspect, indent=4) return 0 except Exception as e: print "Could not inspect container: %s" % e
def __init__(self):
    """Build kubelet/cadvisor URLs from kubernetes.yaml, with safe defaults."""
    self.docker_util = DockerUtil()
    try:
        config_file_path = get_conf_path(KUBERNETES_CHECK_NAME)
        check_config = check_yaml(config_file_path)
        instance = check_config['instances'][0]
    # kubernetes.yaml was not found
    except IOError as ex:
        log.error(ex.message)
        instance = {}
    except Exception:
        log.error('Kubernetes configuration file is invalid. '
                  'Trying connecting to kubelet with default settings anyway...')
        instance = {}

    self.method = instance.get('method', KubeUtil.DEFAULT_METHOD)
    self.host = instance.get("host") or self.docker_util.get_hostname()
    self.cadvisor_port = instance.get('port', KubeUtil.DEFAULT_CADVISOR_PORT)
    self.kubelet_port = instance.get('kubelet_port', KubeUtil.DEFAULT_KUBELET_PORT)

    self.metrics_url = urljoin(
        '%s://%s:%d' % (self.method, self.host, self.cadvisor_port),
        KubeUtil.METRICS_PATH)
    self.pods_list_url = urljoin(
        '%s://%s:%d' % (self.method, self.host, self.kubelet_port),
        KubeUtil.PODS_LIST_PATH)
    self.kube_health_url = '%s://%s:%d/healthz' % (self.method, self.host, self.kubelet_port)
def init(self):
    """One-time check initialization: docker/kube clients, cgroup mountpoints,
    tagging and filtering options.

    On success sets self.init_success = True; on any failure logs the error and
    leaves the check to retry initialization at the next iteration.
    """
    try:
        instance = self.instances[0]

        self.docker_util = DockerUtil()
        self.docker_client = self.docker_util.client
        self.docker_gateway = DockerUtil.get_gateway()

        if Platform.is_k8s():
            self.kubeutil = KubeUtil()

        # We configure the check with the right cgroup settings for this host
        # Just needs to be done once
        self._mountpoints = self.docker_util.get_mountpoints(CGROUP_METRICS)
        self.cgroup_listing_retries = 0
        self._latest_size_query = 0
        self._filtered_containers = set()
        self._disable_net_metrics = False

        # Set tagging options
        self.custom_tags = instance.get("tags", [])
        self.collect_labels_as_tags = instance.get("collect_labels_as_tags", [])
        self.kube_labels = {}

        self.use_histogram = _is_affirmative(instance.get('use_histogram', False))
        performance_tags = instance.get("performance_tags", DEFAULT_PERFORMANCE_TAGS)

        self.tag_names = {
            CONTAINER: instance.get("container_tags", DEFAULT_CONTAINER_TAGS),
            PERFORMANCE: performance_tags,
            IMAGE: instance.get('image_tags', DEFAULT_IMAGE_TAGS)
        }

        # Set filtering settings
        # Filtering requires an exclude list; an include list alone is ignored
        # (with a warning) because include entries are exceptions to exclude.
        if not instance.get("exclude"):
            self._filtering_enabled = False
            if instance.get("include"):
                self.log.warning("You must specify an exclude section to enable filtering")
        else:
            self._filtering_enabled = True
            include = instance.get("include", [])
            exclude = instance.get("exclude", [])
            self._exclude_patterns, self._include_patterns, _filtered_tag_names = get_filters(include, exclude)
            self.tag_names[FILTERED] = _filtered_tag_names

        # Other options
        self.collect_image_stats = _is_affirmative(instance.get('collect_images_stats', False))
        self.collect_container_size = _is_affirmative(instance.get('collect_container_size', False))
        self.collect_events = _is_affirmative(instance.get('collect_events', True))
        self.collect_image_size = _is_affirmative(instance.get('collect_image_size', False))
        self.collect_disk_stats = _is_affirmative(instance.get('collect_disk_stats', False))
        # ECS tag collection only makes sense on an ECS instance.
        self.collect_ecs_tags = _is_affirmative(instance.get('ecs_tags', True)) and Platform.is_ecs_instance()

        self.ecs_tags = {}

    except Exception as e:
        self.log.critical(e)
        self.warning("Initialization failed. Will retry at next iteration")
    else:
        self.init_success = True
def test_docker_host_metadata_ok(self):
    """Host metadata reports the docker version and the stored swarm state."""
    mock_version = mock.MagicMock(name='version', return_value={'Version': '1.13.1'})

    du = DockerUtil()
    du._client = mock.MagicMock()
    du._client.version = mock_version
    du.swarm_node_state = 'inactive'

    self.assertEqual({'docker_version': '1.13.1', 'docker_swarm': 'inactive'},
                     du.get_host_metadata())
    mock_version.assert_called_once()
def test_image_name_from_image_repodigests(self):
    """With empty RepoTags, the image name is derived from RepoDigests."""
    du = DockerUtil()
    du._client = mock.MagicMock()
    du._client.inspect_image = mock.MagicMock(
        name='inspect_image',
        return_value={
            'RepoTags': [],
            'RepoDigests': ['alpine@sha256:4f2d8bbad359e3e6f23c0498e009aaa3e2f31996cbea7269b78f92ee43647811'],
        })

    co = {'Image': 'sha256:e48e77eee11b6d9ac9fc35a23992b4158355a8ec3fd3725526eba3f467e4b6d9'}
    self.assertEqual('alpine', du.image_name_extractor(co))
def test_docker_host_tags_swarm_ok(self, mock_init, mock_version, mock_isswarm):
    """Host tags include the docker version and docker_swarm:active on a swarm node."""
    mock_isswarm.return_value = True
    mock_version.return_value = {'Version': '1.13.1'}
    mock_init.return_value = None

    self.assertEqual(['docker_version:1.13.1', 'docker_swarm:active'],
                     DockerUtil().get_host_tags())
    mock_version.assert_called_once()
def is_detected():
    """Return True if a docker daemon answers a version query, False otherwise.

    Any failure to reach the daemon (connection error, misconfiguration, ...)
    is treated as "not detected" rather than raised.
    """
    try:
        # Idiom fix: return the boolean expression directly instead of
        # `if cond: return True else: return False`.
        return "Version" in DockerUtil().client.version()
    except Exception:
        return False
def __init__(self, instance=None):
    """Configure access to the apiserver, kubelet and cadvisor endpoints.

    When no instance is given, try loading kubernetes.yaml and fall back to
    defaults if it is missing or invalid. Raises if no way to reach the
    kubelet can be found, since the check cannot run without it.
    """
    self.docker_util = DockerUtil()
    if instance is None:
        try:
            config_file_path = get_conf_path(KUBERNETES_CHECK_NAME)
            check_config = check_yaml(config_file_path)
            instance = check_config['instances'][0]
        # kubernetes.yaml was not found
        except IOError as ex:
            log.error(ex.message)
            instance = {}
        except Exception:
            log.error('Kubernetes configuration file is invalid. '
                      'Trying connecting to kubelet with default settings anyway...')
            instance = {}

    self.method = instance.get('method', KubeUtil.DEFAULT_METHOD)
    self._node_ip = self._node_name = None  # lazy evaluation
    self.host_name = os.environ.get('HOSTNAME')
    self.tls_settings = self._init_tls_settings(instance)

    # apiserver
    self.kubernetes_api_url = 'https://%s/api/v1' % (
        os.environ.get('KUBERNETES_SERVICE_HOST') or self.DEFAULT_MASTER_NAME)

    # kubelet
    # The kubelet is mandatory: abort initialization if it cannot be located.
    try:
        self.kubelet_api_url = self._locate_kubelet(instance)
        if not self.kubelet_api_url:
            raise Exception("Couldn't find a method to connect to kubelet.")
    except Exception as ex:
        log.error("Kubernetes check exiting, cannot run without access to kubelet.")
        raise ex

    # Extract the host part of "scheme://host:port" for reuse in the cadvisor URL.
    self.kubelet_host = self.kubelet_api_url.split(':')[1].lstrip('/')
    self.pods_list_url = urljoin(self.kubelet_api_url, KubeUtil.PODS_LIST_PATH)
    self.kube_health_url = urljoin(self.kubelet_api_url, KubeUtil.KUBELET_HEALTH_PATH)

    # cadvisor
    self.cadvisor_port = instance.get('port', KubeUtil.DEFAULT_CADVISOR_PORT)
    self.cadvisor_url = '%s://%s:%d' % (self.method, self.kubelet_host, self.cadvisor_port)
    self.metrics_url = urljoin(self.cadvisor_url, KubeUtil.METRICS_PATH)
    self.machine_info_url = urljoin(self.cadvisor_url, KubeUtil.MACHINE_INFO_PATH)

    # keep track of the latest k8s event we collected and posted
    # default value is 0 but TTL for k8s events is one hour anyways
    self.last_event_collection_ts = 0
def test_get_config_id(self, mock_get_auto_confd_path):
    """Test get_config_id"""
    with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None):
        # Each fixture inspect must resolve to its expected config identifier.
        for c_ins, _, _, _, expected_ident, _ in self.container_inspects:
            sd_backend = get_sd_backend(agentConfig=self.auto_conf_agentConfig)
            image_name = DockerUtil().image_name_extractor(c_ins)
            labels = c_ins.get('Config', {}).get('Labels', {})
            self.assertEqual(sd_backend.get_config_id(image_name, labels), expected_ident)
            clear_singletons(self.auto_conf_agentConfig)
def test_include_filter(self):
    """With exclude .* and include image_name:redis, only redis metrics are
    collected and nginx performance metrics are filtered out."""
    expected_metrics = [
        ('docker.containers.running', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'short_image:nginx:latest']),
        ('docker.containers.running', ['docker_image:redis:latest', 'image_name:redis', 'image_tag:latest', 'short_image:redis:latest']),
        ('docker.containers.stopped', ['docker_image:nginx:latest', 'image_name:nginx', 'image_tag:latest', 'short_image:nginx:latest']),
        ('docker.containers.stopped', ['docker_image:redis:latest', 'image_name:redis', 'image_tag:latest', 'short_image:redis:latest']),
        ('docker.containers.running.total', None),
        ('docker.containers.stopped.total', None),
        ('docker.cpu.system', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
        ('docker.cpu.user', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
        ('docker.image.size', ['image_name:redis', 'image_tag:latest']),
        ('docker.image.size', ['image_name:nginx', 'image_tag:latest']),
        ('docker.image.virtual_size', ['image_name:nginx', 'image_tag:latest']),
        ('docker.image.virtual_size', ['image_name:redis', 'image_tag:latest']),
        ('docker.images.available', None),
        ('docker.images.intermediate', None),
        ('docker.io.read_bytes', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
        ('docker.io.write_bytes', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
        ('docker.mem.cache', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
        ('docker.mem.rss', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest']),
        ('docker.net.bytes_rcvd', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest', 'docker_network:bridge']),
        ('docker.net.bytes_sent', ['container_name:test-new-redis-latest', 'docker_image:redis:latest', 'image_name:redis', 'image_tag:latest', 'docker_network:bridge']),
    ]

    config = {
        "init_config": {},
        "instances": [{
            "url": "unix://var/run/docker.sock",
            "include": ["image_name:redis"],
            "exclude": [".*"],
            "collect_images_stats": True,
            "collect_image_size": True,
        }],
    }

    DockerUtil._drop()
    DockerUtil(init_config=config['init_config'], instance=config['instances'][0])

    self.run_check_twice(config, force_reload=True)
    for mname, tags in expected_metrics:
        self.assertMetric(mname, tags=tags, count=1, at_least=1)

    perf_metrics = [
        "docker.cpu.system",
        "docker.cpu.user",
        "docker.io.read_bytes",
        "docker.io.write_bytes",
        "docker.mem.cache",
        "docker.mem.rss",
        "docker.net.bytes_rcvd",
        "docker.net.bytes_sent",
    ]

    nginx_tags = ['container_name:test-new-nginx-latest', 'docker_image:nginx:latest',
                  'image_name:nginx', 'image_tag:latest']
    for m in perf_metrics:
        # BUG FIX: this previously asserted `mname` (the leftover variable from
        # the loop above) instead of `m`, so only the last expected metric was
        # ever checked for nginx exclusion, not each perf metric.
        self.assertMetric(m, tags=nginx_tags, count=0)
def print_containers():
    """Print a short summary (id, image, name) of every running container."""
    containers = DockerUtil().client.containers()

    print("\nContainers info:\n")
    print("Number of containers found: %s" % len(containers))
    for co in containers:
        c_id = 'ID: %s' % co.get('Id')[:12]
        c_image = 'image: %s' % co.get('Image')
        c_name = 'name: %s' % DockerUtil.container_name_extractor(co)[0]
        print("\t- %s %s %s" % (c_id, c_image, c_name))
    print('\n')
def __init__(self):
    """Base tagger setup: inspect-data requirements, logger, docker util and tag cache."""
    # Whether your get___tags methods need the Config section inspect data
    self.needs_inspect_config = False
    # Whether your get___tags methods need the Labels section inspect data
    self.needs_inspect_labels = False

    self.log = logging.getLogger(__name__)
    self.docker_util = DockerUtil()

    # Tags cache as a dict {co_id: [tags]}
    self._container_tags_cache = {}
def test_collect_exit_code(self):
    """docker.exit is OK for a zero exit code and CRITICAL for a non-zero one."""
    config = {
        "init_config": {},
        "instances": [{
            "url": "unix://var/run/docker.sock",
            "collect_exit_codes": True,
        }],
    }
    DockerUtil().set_docker_settings(config['init_config'], config['instances'][0])

    expected_service_checks = [
        (AgentCheck.OK, ['docker_image:nginx:latest', 'image_name:nginx',
                         'image_tag:latest', 'container_name:test-exit-ok']),
        (AgentCheck.CRITICAL, ['docker_image:nginx:latest', 'image_name:nginx',
                               'image_tag:latest', 'container_name:test-exit-fail']),
    ]

    # One container exits 0, the other exits 1.
    container_ok = self.docker_client.create_container(
        "nginx:latest", detach=True, name='test-exit-ok', entrypoint='/bin/true')
    log.debug('start nginx:latest with entrypoint /bin/true')
    container_fail = self.docker_client.create_container(
        "nginx:latest", detach=True, name='test-exit-fail', entrypoint='/bin/false')
    log.debug('start nginx:latest with entrypoint /bin/false')

    self.docker_client.start(container_ok)
    self.docker_client.start(container_fail)
    log.debug('container exited with %s' % self.docker_client.wait(container_ok, 1))
    log.debug('container exited with %s' % self.docker_client.wait(container_fail, 1))

    # After the container exits, we need to wait a second so the event isn't too recent
    # when the check runs, otherwise the event is not picked up
    from time import sleep
    sleep(1)

    self.run_check(config)
    self.docker_client.remove_container(container_ok)
    self.docker_client.remove_container(container_fail)

    for status, tags in expected_service_checks:
        self.assertServiceCheck('docker.exit', status=status, tags=tags, count=1)
def test_image_tags_extraction(self):
    """image_tag_extractor yields the expected names (index 0) and tags (index 1)."""
    entities = [
        # ({'Image': image_name}, [expected_image_name, expected_image_tag])
        ({'Image': 'nginx:latest'}, [['nginx'], ['latest']]),
        ({'Image': 'localhost/nginx:latest'}, [['localhost/nginx'], ['latest']]),
        ({'Image': 'localhost:5000/nginx:latest'}, [['localhost:5000/nginx'], ['latest']]),
        ({'RepoTags': ['redis:latest']}, [['redis'], ['latest']]),
        ({'RepoTags': ['localhost/redis:latest']}, [['localhost/redis'], ['latest']]),
        ({'RepoTags': ['localhost:5000/redis:latest']}, [['localhost:5000/redis'], ['latest']]),
        ({'RepoTags': ['localhost:5000/redis:latest', 'localhost:5000/redis:v1.1']},
         [['localhost:5000/redis'], ['latest', 'v1.1']]),
        ({'RepoTags': [],
          'RepoDigests': [u'datadog/docker-dd-agent@sha256:47a59c2ea4f6d9555884aacc608b303f18bde113b1a3a6743844bfc364d73b44']},
         [['datadog/docker-dd-agent'], None]),
    ]

    for co, (expected_names, expected_tags) in entities:
        self.assertEqual(sorted(DockerUtil().image_tag_extractor(co, 0)),
                         sorted(expected_names))
        tags = DockerUtil().image_tag_extractor(co, 1)
        # Digest-only images have no tags: expected value is None, not a list.
        if isinstance(expected_tags, list):
            self.assertEqual(sorted(tags), sorted(expected_tags))
        else:
            self.assertEqual(tags, expected_tags)
def setUp(self):
    """Pull, create and start the proxy container, then wait until it accepts connections."""
    super(TestProxy, self).setUp()

    self.docker_client = DockerUtil().client
    self.docker_client.pull(CONTAINER_TO_RUN)

    self.container = self.docker_client.create_container(
        CONTAINER_TO_RUN, detach=True, name=CONTAINER_NAME, ports=[PROXY_PORT],
        host_config=self.docker_client.create_host_config(port_bindings={3128: PROXY_PORT}))

    log.info("Starting container: {0}".format(CONTAINER_TO_RUN))
    self.docker_client.start(CONTAINER_NAME)
    for line in self.docker_client.logs(CONTAINER_NAME, stdout=True, stream=True):
        if "Accepting HTTP Socket connections" in line:
            break  # Wait for the container to properly start, otherwise we get 'Proxy CONNECT aborted'