Example #1
0
 def _get_events(self):
     """Fetch docker events and trigger a service-discovery refresh when needed.

     Returns the raw event list from DockerUtil.
     """
     events, changed_container_ids = self.docker_util.get_events()
     # Only reload check configs when SD is enabled and containers changed.
     if self._service_discovery and changed_container_ids:
         get_sd_backend(self.agentConfig).update_checks(changed_container_ids)
     return events
Example #2
0
 def _get_events(self):
     """Fetch docker events, refreshing the network cache and SD checks."""
     events, changed_container_ids = self.docker_util.get_events()
     # The network mapping cache only matters when net metrics are enabled.
     if not self._disable_net_metrics:
         self._invalidate_network_mapping_cache(events)
     sd_refresh_needed = self._service_discovery and changed_container_ids
     if sd_refresh_needed:
         get_sd_backend(self.agentConfig).update_checks(changed_container_ids)
     return events
 def _get_events(self):
     """Fetch docker events and invalidate every cache they affect."""
     events, changed_container_ids = self.docker_util.get_events()
     if not self._disable_net_metrics:
         self._invalidate_network_mapping_cache(events)
     if changed_container_ids:
         # Reload SD check configs first, then drop stale metadata caches.
         if self._service_discovery:
             get_sd_backend(self.agentConfig).update_checks(changed_container_ids)
         self.metadata_collector.invalidate_cache(events)
         if Platform.is_nomad():
             self.nomadutil.invalidate_cache(events)
         elif Platform.is_ecs_instance():
             self.ecsutil.invalidate_cache(events)
     return events
 def _get_events(self):
     """Return docker events after refreshing the dependent caches."""
     events, changed_container_ids = self.docker_util.get_events()
     # Skip the network cache refresh when net metrics are disabled.
     if not self._disable_net_metrics:
         self._invalidate_network_mapping_cache(events)
     containers_changed = bool(changed_container_ids)
     if containers_changed and self._service_discovery:
         get_sd_backend(self.agentConfig).update_checks(changed_container_ids)
     if containers_changed:
         self.metadata_collector.invalidate_cache(events)
         if Platform.is_nomad():
             self.nomadutil.invalidate_cache(events)
         elif Platform.is_ecs_instance():
             self.ecsutil.invalidate_cache(events)
     return events
    def test_render_template(self):
        """Template variables should be substituted, or rendering should fail."""
        valid_configs = [
            (({}, {'host': '%%host%%'}, {'host': 'foo'}),
             ({}, {'host': 'foo'})),
            (({}, {'host': '%%host%%', 'port': '%%port%%'},
              {'host': 'foo', 'port': '1337'}),
             ({}, {'host': 'foo', 'port': '1337'})),
            (({'foo': '%%bar%%'}, {}, {'bar': 'w00t'}),
             ({'foo': 'w00t'}, {})),
            (({'foo': '%%bar%%'}, {'host': '%%host%%'},
              {'bar': 'w00t', 'host': 'localhost'}),
             ({'foo': 'w00t'}, {'host': 'localhost'})),
        ]

        invalid_configs = [
            # no value to use
            ({}, {'host': '%%host%%'}, {}),
            # the variable name doesn't match
            ({}, {'host': '%%host%%'}, {'port': 42}),
            # not enough value/no matching var name
            ({'foo': '%%bar%%'}, {'host': '%%host%%'}, {'host': 'foo'}),
        ]

        with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None), \
                mock.patch.object(EtcdStore, 'get_client', return_value=None), \
                mock.patch.object(ConsulStore, 'get_client', return_value=None):
            for agentConfig in self.agentConfigs:
                sd_backend = get_sd_backend(agentConfig=agentConfig)
                for (init, instance, variables), expected in valid_configs:
                    rendered = sd_backend._render_template(init, instance, variables)
                    self.assertEquals(rendered, expected)
                for init, instance, variables in invalid_configs:
                    rendered = sd_backend._render_template(init, instance, variables)
                    self.assertEquals(rendered, None)
                    clear_singletons(agentConfig)
    def test_render_template(self):
        """Exercise _render_template on valid and invalid variable sets."""
        valid_configs = [
            (({}, {'host': '%%host%%'}, {'host': 'foo'}),
             ({}, {'host': 'foo'})),
            (({}, {'host': '%%host%%', 'port': '%%port%%'},
              {'host': 'foo', 'port': '1337'}),
             ({}, {'host': 'foo', 'port': '1337'})),
            (({'foo': '%%bar%%'}, {}, {'bar': 'w00t'}),
             ({'foo': 'w00t'}, {})),
            (({'foo': '%%bar%%'}, {'host': '%%host%%'},
              {'bar': 'w00t', 'host': 'localhost'}),
             ({'foo': 'w00t'}, {'host': 'localhost'})),
        ]

        invalid_configs = [
            ({}, {'host': '%%host%%'}, {}),  # no value to use
            ({}, {'host': '%%host%%'}, {'port': 42}),  # the variable name doesn't match
            ({'foo': '%%bar%%'}, {'host': '%%host%%'}, {'host': 'foo'})  # not enough value/no matching var name
        ]

        with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None):
            with mock.patch.object(EtcdStore, 'get_client', return_value=None):
                with mock.patch.object(ConsulStore, 'get_client', return_value=None):
                    for cfg in self.agentConfigs:
                        backend = get_sd_backend(agentConfig=cfg)
                        for tpl, expected in valid_configs:
                            self.assertEquals(backend._render_template(*tpl), expected)
                        for bad_tpl in invalid_configs:
                            self.assertEquals(backend._render_template(*bad_tpl), None)
                            clear_singletons(cfg)
Example #7
0
    def test_get_host(self, mock_check_yaml, mock_get):
        """_get_host should resolve each container inspect to its expected IP."""
        container_id = 'docker://389dc8a4361f3d6c866e9e9a7b6972b26a31c589c4e2f097375d55656a070bc9'
        kubernetes_config = {'instances': [{'kubelet_port': 1337}]}
        pod_list = {
            'items': [{
                'status': {
                    'podIP': '127.0.0.1',
                    'containerStatuses': [{'containerID': container_id}],
                }
            }]
        }

        mock_check_yaml.return_value = kubernetes_config
        mock_get.return_value = Response(pod_list)

        for c_inspect, expected_ip, _ in self.container_inspects:
            with mock.patch.object(AbstractConfigStore, '__init__', return_value=None), \
                    mock.patch('utils.dockerutil.DockerUtil.client', return_value=None), \
                    mock.patch('utils.kubeutil.get_conf_path', return_value=None):
                backend = get_sd_backend(agentConfig=self.auto_conf_agentConfig)
                self.assertEqual(backend._get_host(c_inspect), expected_ip)
                clear_singletons(self.auto_conf_agentConfig)
Example #8
0
 def _get_events(self):
     """Return docker events, flagging check configs that need a reload."""
     events, conf_reload_set = self.docker_util.get_events()
     # Hand the reload set over to the SD backend when SD is active.
     if self._service_discovery and conf_reload_set:
         sd = get_sd_backend(self.agentConfig)
         sd.reload_check_configs = conf_reload_set
     return events
    def test_get_host_address(self, mock_check_yaml, mock_get, *args):
        """_get_host_address should resolve every supported inspect layout."""
        container_id = 'docker://389dc8a4361f3d6c866e9e9a7b6972b26a31c589c4e2f097375d55656a070bc9'
        kubernetes_config = {'instances': [{'kubelet_port': 1337}]}
        pod_list = {
            'items': [{
                'status': {
                    'podIP': '127.0.0.1',
                    'containerStatuses': [{'containerID': container_id}],
                }
            }]
        }

        # Each case: (docker inspect payload, template variable, expected address)
        cases = [
            ({'NetworkSettings': {}}, 'host', None),
            ({'NetworkSettings': {'IPAddress': ''}}, 'host', None),
            ({'NetworkSettings': {'IPAddress': '127.0.0.1'}}, 'host', '127.0.0.1'),
            ({'NetworkSettings': {'IPAddress': '127.0.0.1', 'Networks': {}}},
             'host', '127.0.0.1'),
            ({'NetworkSettings': {'IPAddress': '127.0.0.1',
                                  'Networks': {'bridge': {'IPAddress': '127.0.0.1'}}}},
             'host', '127.0.0.1'),
            ({'NetworkSettings': {'IPAddress': '',
                                  'Networks': {'bridge': {'IPAddress': '127.0.0.1'}}}},
             'host_bridge', '127.0.0.1'),
            ({'NetworkSettings': {'IPAddress': '127.0.0.1',
                                  'Networks': {'bridge': {'IPAddress': '172.17.0.2'},
                                               'foo': {'IPAddress': '192.168.0.2'}}}},
             'host', '172.17.0.2'),
            ({'NetworkSettings': {'Networks': {}}}, 'host', None),
            ({'NetworkSettings': {'Networks': {}}}, 'host_bridge', None),
            ({'NetworkSettings': {'Networks': {'bridge': {}}}}, 'host', None),
            ({'NetworkSettings': {'Networks': {'bridge': {}}}}, 'host_bridge', None),
            ({'NetworkSettings': {'Networks': {'bridge': {'IPAddress': '172.17.0.2'}}}},
             'host_bridge', '172.17.0.2'),
            ({'NetworkSettings': {'Networks': {'bridge': {'IPAddress': '172.17.0.2'},
                                               'foo': {'IPAddress': '192.168.0.2'}}}},
             'host_foo', '192.168.0.2'),
        ]

        mock_check_yaml.return_value = kubernetes_config
        mock_get.return_value = Response(pod_list)

        for c_inspect, tpl_var, expected in cases:
            state = _SDDockerBackendConfigFetchState(lambda _: c_inspect)
            backend = get_sd_backend(agentConfig=self.auto_conf_agentConfig)
            self.assertEquals(
                backend._get_host_address(state, 'container id', tpl_var), expected)
            clear_singletons(self.auto_conf_agentConfig)
    def test_get_host_address(self, mock_check_yaml, mock_get, *args):
        """Check host-address resolution across all inspect shapes."""
        cid = 'docker://389dc8a4361f3d6c866e9e9a7b6972b26a31c589c4e2f097375d55656a070bc9'
        kubernetes_config = {'instances': [{'kubelet_port': 1337}]}
        pod_list = {
            'items': [
                {'status': {'podIP': '127.0.0.1',
                            'containerStatuses': [{'containerID': cid}]}}
            ]
        }

        # (inspect payload, tpl var, expected result)
        inspect_cases = [
            ({'NetworkSettings': {}}, 'host', None),
            ({'NetworkSettings': {'IPAddress': ''}}, 'host', None),
            ({'NetworkSettings': {'IPAddress': '127.0.0.1'}}, 'host', '127.0.0.1'),
            ({'NetworkSettings': {'IPAddress': '127.0.0.1', 'Networks': {}}},
             'host', '127.0.0.1'),
            ({'NetworkSettings': {'IPAddress': '127.0.0.1',
                                  'Networks': {'bridge': {'IPAddress': '127.0.0.1'}}}},
             'host', '127.0.0.1'),
            ({'NetworkSettings': {'IPAddress': '',
                                  'Networks': {'bridge': {'IPAddress': '127.0.0.1'}}}},
             'host_bridge', '127.0.0.1'),
            ({'NetworkSettings': {'IPAddress': '127.0.0.1',
                                  'Networks': {'bridge': {'IPAddress': '172.17.0.2'},
                                               'foo': {'IPAddress': '192.168.0.2'}}}},
             'host', '172.17.0.2'),
            ({'NetworkSettings': {'Networks': {}}}, 'host', None),
            ({'NetworkSettings': {'Networks': {}}}, 'host_bridge', None),
            ({'NetworkSettings': {'Networks': {'bridge': {}}}}, 'host', None),
            ({'NetworkSettings': {'Networks': {'bridge': {}}}}, 'host_bridge', None),
            ({'NetworkSettings': {'Networks': {'bridge': {'IPAddress': '172.17.0.2'}}}},
             'host_bridge', '172.17.0.2'),
            ({'NetworkSettings': {'Networks': {'bridge': {'IPAddress': '172.17.0.2'},
                                               'foo': {'IPAddress': '192.168.0.2'}}}},
             'host_foo', '192.168.0.2'),
        ]

        mock_check_yaml.return_value = kubernetes_config
        mock_get.return_value = Response(pod_list)

        for payload, tpl_var, expected_addr in inspect_cases:
            fetch_state = _SDDockerBackendConfigFetchState(lambda _: payload)
            backend = get_sd_backend(agentConfig=self.auto_conf_agentConfig)
            resolved = backend._get_host_address(fetch_state, 'container id', tpl_var)
            self.assertEquals(resolved, expected_addr)
            clear_singletons(self.auto_conf_agentConfig)
Example #11
0
    def __init__(self, name, init_config, agentConfig, instances=None):
        """Set up the Kubernetes check.

        Args:
            name: check name, forwarded to AgentCheck.
            init_config: check-level init configuration.
            agentConfig: global agent configuration dict.
            instances: optional list with at most one instance config.

        Raises:
            Exception: if more than one instance is configured, or if the
                kubelet API cannot be reached.
        """
        if instances is not None and len(instances) > 1:
            raise Exception('Kubernetes check only supports one configured instance.')

        AgentCheck.__init__(self, name, init_config, agentConfig, instances)

        inst = instances[0] if instances is not None else None
        self.kubeutil = KubeUtil(instance=inst)

        if not self.kubeutil.kubelet_api_url:
            raise Exception('Unable to reach kubelet. Try setting the host parameter.')

        if agentConfig.get('service_discovery') and \
           agentConfig.get('service_discovery_backend') == 'docker':
            self._sd_backend = get_sd_backend(agentConfig)
        else:
            self._sd_backend = None

        # Bug fix: `inst` is None when no instance is configured; the
        # previous code called inst.get() unconditionally and raised
        # AttributeError instead of simply skipping leader election.
        self.leader_candidate = inst.get(LEADER_CANDIDATE) if inst else None
        if self.leader_candidate:
            self.kubeutil.refresh_leader()

        # Bug fix: always define event_retriever so later attribute access
        # cannot raise AttributeError (it was previously only assigned
        # inside the `if inst:` branch below).
        self.event_retriever = None

        self.k8s_namespace_regexp = None
        if inst:
            regexp = inst.get('namespace_name_regexp', None)
            if regexp:
                try:
                    self.k8s_namespace_regexp = re.compile(regexp)
                except re.error as e:
                    self.log.warning('Invalid regexp for "namespace_name_regexp" in configuration (ignoring regexp): %s' % str(e))

            self._configure_event_collection(inst)
 def test_get_container_pid(self, *args):
     """_get_container_pid should return the pid recorded in each inspect."""
     for inspect_data, _, tpl_vars, _, _, expected_pid in self.container_inspects:
         fetch_state = _SDDockerBackendConfigFetchState(lambda _: inspect_data)
         backend = get_sd_backend(agentConfig=self.auto_conf_agentConfig)
         pid = backend._get_container_pid(fetch_state, 'container id', tpl_vars)
         self.assertEquals(pid, expected_pid)
         clear_singletons(self.auto_conf_agentConfig)
 def test_get_config_id(self, mock_get_auto_confd_path):
     """get_config_id should build the identifier from image name and labels."""
     with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None):
         for inspect_data, _, _, _, expected_ident, _ in self.container_inspects:
             backend = get_sd_backend(agentConfig=self.auto_conf_agentConfig)
             image = DockerUtil().image_name_extractor(inspect_data)
             labels = inspect_data.get('Config', {}).get('Labels', {})
             self.assertEqual(backend.get_config_id(image, labels), expected_ident)
             clear_singletons(self.auto_conf_agentConfig)
 def test_get_port(self, *args):
     """_get_port returns the expected port string, or raises the given error."""
     for inspect_data, _, tpl_vars, expected_ports, _, _ in self.container_inspects:
         state = _SDDockerBackendConfigFetchState(lambda _: inspect_data)
         backend = get_sd_backend(agentConfig=self.auto_conf_agentConfig)
         if not isinstance(expected_ports, str):
             # expected_ports holds an exception type in the error cases
             self.assertRaises(expected_ports, backend._get_port, state, 'c_id', tpl_vars)
         else:
             self.assertEquals(backend._get_port(state, 'container id', tpl_vars),
                               expected_ports)
         clear_singletons(self.auto_conf_agentConfig)
 def test_get_port(self):
     """Ports extracted from a container inspect should match expectations."""
     with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None):
         for c_inspect, _, tpl_vars, expected_ports, _ in self.container_inspects:
             backend = get_sd_backend(agentConfig=self.auto_conf_agentConfig)
             if isinstance(expected_ports, str):
                 ports = backend._get_port(c_inspect, tpl_vars)
                 self.assertEquals(ports, expected_ports)
             else:
                 self.assertRaises(expected_ports, backend._get_port, c_inspect, tpl_vars)
             clear_singletons(self.auto_conf_agentConfig)
 def test_get_config_id(self):
     """get_config_id should derive the identifier from image and labels."""
     with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None):
         for c_inspect, _, _, _, expected_ident in self.container_inspects:
             backend = get_sd_backend(agentConfig=self.auto_conf_agentConfig)
             ident = backend.get_config_id(c_inspect.get('Image'),
                                           c_inspect.get('Labels', {}))
             self.assertEqual(ident, expected_ident)
             clear_singletons(self.auto_conf_agentConfig)
 def test_get_config_id(self, mock_get_auto_confd_path):
     """Identifiers should be derived from the extracted image name and labels."""
     with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None):
         for c_inspect, _, _, _, expected_ident, _ in self.container_inspects:
             backend = get_sd_backend(agentConfig=self.auto_conf_agentConfig)
             image_name = DockerUtil().image_name_extractor(c_inspect)
             config_labels = c_inspect.get('Config', {}).get('Labels', {})
             self.assertEqual(backend.get_config_id(image_name, config_labels),
                              expected_ident)
             clear_singletons(self.auto_conf_agentConfig)
 def test_get_port(self):
     """Each inspect should yield the expected port or raise the expected error."""
     with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None):
         for inspect_data, _, tpl_vars, expected_ports, _ in self.container_inspects:
             backend = get_sd_backend(agentConfig=self.auto_conf_agentConfig)
             if not isinstance(expected_ports, str):
                 self.assertRaises(expected_ports, backend._get_port,
                                   inspect_data, tpl_vars)
             else:
                 self.assertEquals(backend._get_port(inspect_data, tpl_vars),
                                   expected_ports)
             clear_singletons(self.auto_conf_agentConfig)
 def test_get_port(self, *args):
     """Check port resolution (or the raised error) for every inspect payload."""
     for c_inspect, _, tpl_vars, expected_ports, _ in self.container_inspects:
         fetch_state = _SDDockerBackendConfigFetchState(lambda _: c_inspect)
         backend = get_sd_backend(agentConfig=self.auto_conf_agentConfig)
         if isinstance(expected_ports, str):
             ports = backend._get_port(fetch_state, 'container id', tpl_vars)
             self.assertEquals(ports, expected_ports)
         else:
             self.assertRaises(expected_ports, backend._get_port,
                               fetch_state, 'c_id', tpl_vars)
         clear_singletons(self.auto_conf_agentConfig)
 def test_get_config_id(self):
     """Image name plus labels should map to the expected config identifier."""
     with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None):
         for inspect_data, _, _, _, expected_ident in self.container_inspects:
             backend = get_sd_backend(agentConfig=self.auto_conf_agentConfig)
             self.assertEqual(
                 backend.get_config_id(inspect_data.get('Image'),
                                       inspect_data.get('Labels', {})),
                 expected_ident)
             clear_singletons(self.auto_conf_agentConfig)
Example #21
0
def _service_disco_configs(agentConfig):
    """ Retrieve all the service disco configs and return their conf dicts
    """
    if agentConfig.get('service_discovery') and agentConfig.get('service_discovery_backend') in SD_BACKENDS:
        sd_backend = get_sd_backend(agentConfig=agentConfig)
        service_disco_configs = sd_backend.get_configs()
    else:
        service_disco_configs = {}

    return service_disco_configs
 def test_get_check_configs(self, *args):
     """Mocked inspect data and templates should yield the expected check config."""
     container_id = self.docker_container_inspect.get('Id')
     for image in self.mock_templates:
         backend = get_sd_backend(agentConfig=self.auto_conf_agentConfig)
         state = _SDDockerBackendConfigFetchState(_get_container_inspect)
         configs = backend._get_check_configs(state, container_id, image)
         self.assertEquals(configs[0], self.mock_templates[image][1])
         clear_singletons(self.auto_conf_agentConfig)
 def test_get_check_configs(self, *args):
     """Each mocked template image should produce its expected check config."""
     c_id = self.docker_container_inspect.get('Id')
     for image, template in self.mock_templates.items():
         backend = get_sd_backend(agentConfig=self.auto_conf_agentConfig)
         fetch_state = _SDDockerBackendConfigFetchState(_get_container_inspect)
         first_config = backend._get_check_configs(fetch_state, c_id, image)[0]
         self.assertEquals(first_config, template[1])
         clear_singletons(self.auto_conf_agentConfig)
Example #24
0
def _service_disco_configs(agentConfig):
    """ Retrieve all the service disco configs and return their conf dicts
    """
    if agentConfig.get('service_discovery') and agentConfig.get(
            'service_discovery_backend') in SD_BACKENDS:
        sd_backend = get_sd_backend(agentConfig=agentConfig)
        service_disco_configs = sd_backend.get_configs()
    else:
        service_disco_configs = {}

    return service_disco_configs
 def test_get_check_configs(self, mock_inspect_container, mock_get_conf_tpls):
     """Mocked container inspect and templates should produce the expected config."""
     with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None), \
             mock.patch.object(SDDockerBackend, '_get_host_address', return_value='127.0.0.1'), \
             mock.patch.object(SDDockerBackend, '_get_port', return_value='1337'):
         c_id = self.docker_container_inspect.get('Id')
         for image in self.mock_templates:
             backend = get_sd_backend(agentConfig=self.auto_conf_agentConfig)
             self.assertEquals(backend._get_check_configs(c_id, image)[0],
                               self.mock_templates[image][1])
             clear_singletons(self.auto_conf_agentConfig)
 def test_get_check_configs(self, mock_inspect_container, mock_get_conf_tpls):
     """With host/port mocked out, each template image yields its config."""
     with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None):
         with mock.patch.object(SDDockerBackend, '_get_host_address',
                                return_value='127.0.0.1'), \
                 mock.patch.object(SDDockerBackend, '_get_port', return_value='1337'):
             container_id = self.docker_container_inspect.get('Id')
             for image, template in self.mock_templates.items():
                 backend = get_sd_backend(agentConfig=self.auto_conf_agentConfig)
                 first = backend._get_check_configs(container_id, image)[0]
                 self.assertEquals(first, template[1])
                 clear_singletons(self.auto_conf_agentConfig)
 def test_get_config_templates(self, *args):
     """Valid images map to their template; malformed ones map to None."""
     for agent_cfg in self.agentConfigs:
         backend = get_sd_backend(agentConfig=agent_cfg)
         # well-formed templates
         for image in self.mock_templates:
             self.assertEquals(backend._get_config_templates(image),
                               self.mock_templates.get(image)[0])
         # malformed templates
         for image in self.bad_mock_templates:
             self.assertEquals(backend._get_config_templates(image), None)
         clear_singletons(agent_cfg)
 def test_get_config_templates(self, *args):
     """_get_config_templates should resolve good images and reject bad ones."""
     for agentConfig in self.agentConfigs:
         sd_backend = get_sd_backend(agentConfig=agentConfig)
         # normal cases
         for image, expected in self.mock_templates.items():
             self.assertEquals(sd_backend._get_config_templates(image), expected[0])
         # error cases
         for bad_image in self.bad_mock_templates:
             self.assertEquals(sd_backend._get_config_templates(bad_image), None)
         clear_singletons(agentConfig)
Example #29
0
def _service_disco_configs(agentConfig):
    """ Retrieve all the service disco configs and return their conf dicts
    """
    if agentConfig.get('service_discovery') and agentConfig.get('service_discovery_backend') in SD_BACKENDS:
        try:
            log.info("Fetching service discovery check configurations.")
            sd_backend = get_sd_backend(agentConfig=agentConfig)
            service_disco_configs = sd_backend.get_configs()
        except Exception:
            log.exception("Loading service discovery configurations failed.")
    else:
        service_disco_configs = {}

    return service_disco_configs
Example #30
0
def _service_disco_configs(agentConfig):
    """ Retrieve all the service disco configs and return their conf dicts
    """
    if agentConfig.get('service_discovery') and agentConfig.get('service_discovery_backend') in SD_BACKENDS:
        try:
            log.info("Fetching service discovery check configurations.")
            sd_backend = get_sd_backend(agentConfig=agentConfig)
            service_disco_configs = sd_backend.get_configs()
        except Exception:
            log.exception("Loading service discovery configurations failed.")
    else:
        service_disco_configs = {}

    return service_disco_configs
 def test_get_config_templates(self, mock_get_check_tpls):
     """Test _get_config_templates with mocked get_check_tpls."""
     # Stub out the docker client and both config-store clients so no
     # external service is contacted during the test.
     with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None), \
          mock.patch.object(EtcdStore, 'get_client', return_value=None), \
          mock.patch.object(ConsulStore, 'get_client', return_value=None):
         for config in self.agentConfigs:
             backend = get_sd_backend(agentConfig=config)
             # normal cases
             for image in self.mock_templates:
                 expected = self.mock_templates.get(image)[0]
                 self.assertEquals(backend._get_config_templates(image), expected)
             # error cases
             for image in self.bad_mock_templates:
                 self.assertEquals(backend._get_config_templates(image), None)
             clear_singletons(config)
 def test_get_config_templates(self, mock_get_check_tpls):
     """Test _get_config_templates with mocked get_check_tpls."""
     # Stub out the docker client and both config-store clients so no
     # external service is contacted during the test.
     with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None), \
          mock.patch.object(EtcdStore, 'get_client', return_value=None), \
          mock.patch.object(ConsulStore, 'get_client', return_value=None):
         for config in self.agentConfigs:
             backend = get_sd_backend(agentConfig=config)
             # normal cases
             for image in self.mock_templates:
                 expected = self.mock_templates.get(image)[0]
                 self.assertEquals(backend._get_config_templates(image), expected)
             # error cases
             for image in self.bad_mock_templates:
                 self.assertEquals(backend._get_config_templates(image), None)
             clear_singletons(config)
Exemple #33
0
    def __init__(self, name, init_config, agentConfig, instances=None):
        """Set up the Kubernetes check: kubelet access, optional service
        discovery backend, namespace filter and event retriever.

        Raises if more than one instance is configured or if the kubelet
        API is unreachable.
        """
        # This check supports a single instance only.
        if instances is not None and len(instances) > 1:
            raise Exception(
                'Kubernetes check only supports one configured instance.')

        AgentCheck.__init__(self, name, init_config, agentConfig, instances)

        inst = instances[0] if instances is not None else None
        self.kubeutil = KubeUtil(instance=inst)
        # Fail fast when the kubelet endpoint could not be resolved.
        if not self.kubeutil.kubelet_api_url:
            raise Exception(
                'Unable to reach kubelet. Try setting the host parameter.')

        # Service discovery is only wired in for the docker backend.
        if agentConfig.get('service_discovery') and \
           agentConfig.get('service_discovery_backend') == 'docker':
            self._sd_backend = get_sd_backend(agentConfig)
        else:
            self._sd_backend = None

        # Optional regexp used to filter namespaces; invalid patterns are
        # logged and ignored rather than failing the check.
        self.k8s_namespace_regexp = None
        if inst:
            regexp = inst.get('namespace_name_regexp', None)
            if regexp:
                try:
                    self.k8s_namespace_regexp = re.compile(regexp)
                except re.error as e:
                    self.log.warning(
                        'Invalid regexp for "namespace_name_regexp" in configuration (ignoring regexp): %s'
                        % str(e))

            self._collect_events = _is_affirmative(
                inst.get('collect_events', DEFAULT_COLLECT_EVENTS))
            if self._collect_events:
                # Full event collection requested: retrieve all event kinds.
                self.event_retriever = self.kubeutil.get_event_retriever()
            elif self.kubeutil.collect_service_tag:
                # Only fetch service and pod events for service mapping
                event_delay = inst.get('service_tag_update_freq',
                                       DEFAULT_SERVICE_EVENT_FREQ)
                self.event_retriever = self.kubeutil.get_event_retriever(
                    kinds=['Service', 'Pod'], delay=event_delay)
            else:
                self.event_retriever = None
        else:
            # No instance config at all: disable event collection entirely.
            self._collect_events = None
            self.event_retriever = None
 def test_fill_tpl(self):
     """Test _fill_tpl with mock _get_ports"""
     valid_configs = [
         # ((inspect, instance_tpl, variables, tags), (expected_instance_tpl, expected_var_values))
         (
             ({}, {'host': 'localhost'}, [], None),
             ({'host': 'localhost'}, {})
         ),
         (
             ({'NetworkSettings': {'IPAddress': '127.0.0.1'}},
              {'host': '%%host%%', 'port': 1337}, ['host'], ['foo', 'bar:baz']),
             ({'host': '%%host%%', 'port': 1337, 'tags': ['foo', 'bar:baz']}, {'host': '127.0.0.1'})
         ),
         (
             ({'NetworkSettings': {'IPAddress': '127.0.0.1', 'Ports': {'42/tcp': None, '22/tcp': None}}},
              {'host': '%%host%%', 'port': '%%port_1%%', 'tags': ['env:test']},
              ['host', 'port_1'], ['foo', 'bar:baz']),
             ({'host': '%%host%%', 'port': '%%port_1%%', 'tags': ['env:test', 'foo', 'bar:baz']},
              {'host': '127.0.0.1', 'port_1': '42'})
         )
     ]
     with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None):
         for config in self.agentConfigs:
             backend = get_sd_backend(agentConfig=config)
             try:
                 for (inspect, tpl, tpl_vars, tags), (expected_tpl, expected_values) in valid_configs:
                     filled_tpl, values = backend._fill_tpl(inspect, tpl, tpl_vars, tags)
                     for key, val in filled_tpl.items():
                         if isinstance(val, list):
                             # list values are compared by length + membership
                             self.assertEquals(len(val), len(expected_tpl.get(key)))
                             for item in val:
                                 self.assertTrue(item in expected_tpl.get(key))
                         else:
                             self.assertEquals(val, expected_tpl.get(key))
                     self.assertEquals(values, expected_values)
                 clear_singletons(config)
             except Exception:
                 # clean the singletons even on failure, then re-raise
                 clear_singletons(config)
                 raise
    def __init__(self, name, init_config, agentConfig, instances=None):
        """Set up the Kubernetes check: kubelet access, optional service
        discovery backend, namespace filter and event retriever.

        Raises if more than one instance is configured or if the kubelet
        API is unreachable.
        """
        # This check supports a single instance only.
        if instances is not None and len(instances) > 1:
            raise Exception('Kubernetes check only supports one configured instance.')

        AgentCheck.__init__(self, name, init_config, agentConfig, instances)

        inst = instances[0] if instances is not None else None
        self.kubeutil = KubeUtil(instance=inst)
        # Fail fast when the kubelet endpoint could not be resolved.
        if not self.kubeutil.kubelet_api_url:
            raise Exception('Unable to reach kubelet. Try setting the host parameter.')

        # Service discovery is only wired in for the docker backend.
        if agentConfig.get('service_discovery') and \
           agentConfig.get('service_discovery_backend') == 'docker':
            self._sd_backend = get_sd_backend(agentConfig)
        else:
            self._sd_backend = None

        # Optional regexp used to filter namespaces; invalid patterns are
        # logged and ignored rather than failing the check.
        self.k8s_namespace_regexp = None
        if inst:
            regexp = inst.get('namespace_name_regexp', None)
            if regexp:
                try:
                    self.k8s_namespace_regexp = re.compile(regexp)
                except re.error as e:
                    self.log.warning('Invalid regexp for "namespace_name_regexp" in configuration (ignoring regexp): %s' % str(e))

            self._collect_events = _is_affirmative(inst.get('collect_events', DEFAULT_COLLECT_EVENTS))
            if self._collect_events:
                # Full event collection requested: retrieve all event kinds.
                self.event_retriever = self.kubeutil.get_event_retriever()
            elif self.kubeutil.collect_service_tag:
                # Only fetch service and pod events for service mapping
                event_delay = inst.get('service_tag_update_freq', DEFAULT_SERVICE_EVENT_FREQ)
                self.event_retriever = self.kubeutil.get_event_retriever(kinds=['Service', 'Pod'],
                                                                         delay=event_delay)
            else:
                self.event_retriever = None
        else:
            # No instance config at all: disable event collection entirely.
            self._collect_events = None
            self.event_retriever = None
    def test_get_host(self, mock_check_yaml, mock_get):
        """Check that _get_host resolves the expected IP for each fixture."""
        kubernetes_config = {'instances': [{'kubelet_port': 1337}]}
        pod_list = {
            'items': [{
                'status': {
                    'podIP': '127.0.0.1',
                    'containerStatuses': [
                        {'containerID': 'docker://389dc8a4361f3d6c866e9e9a7b6972b26a31c589c4e2f097375d55656a070bc9'}
                    ]
                }
            }]
        }

        mock_check_yaml.return_value = kubernetes_config
        mock_get.return_value = Response(pod_list)

        config = self.auto_conf_agentConfig
        for inspect, expected_ip, _ in self.container_inspects:
            # Neutralize the config store, docker client and kube conf lookup
            # so the backend can be built without any external dependency.
            with mock.patch.object(AbstractConfigStore, '__init__', return_value=None), \
                 mock.patch('utils.dockerutil.DockerUtil.client', return_value=None), \
                 mock.patch('utils.kubeutil.get_conf_path', return_value=None):
                backend = get_sd_backend(agentConfig=config)
                self.assertEqual(backend._get_host(inspect), expected_ip)
                clear_singletons(config)
Exemple #37
0
    # Resolve the per-OS checks.d locations.
    osname = get_os()
    checks_paths = get_checks_paths(agentConfig, osname)
    # this can happen if check.d is not found
    if checks_paths is None:
        log.error('Check directory not found, exiting. The agent is likely misconfigured.')
        sys.exit(3)

    # NOTE: Python 2-only `except X, e` syntax below — this file targets Python 2.
    try:
        confd_path = get_confd_path(osname)
    except PathNotFound, e:
        log.error("No conf.d folder found at '%s' or in the directory where "
                  "the Agent is currently deployed.\n" % e.args[0])
        sys.exit(3)

    # Fetch service discovery configs only when SD is enabled and the
    # configured backend is one we know about.
    if agentConfig.get('service_discovery') and agentConfig.get('service_discovery_backend') in SD_BACKENDS:
        sd_backend = get_sd_backend(agentConfig=agentConfig)
        service_disco_configs = sd_backend.get_configs()
    else:
        service_disco_configs = {}

    # We don't support old style configs anymore
    # So we iterate over the files in the checks.d directory
    # If there is a matching configuration file in the conf.d directory
    # then we import the check
    for check in itertools.chain(*checks_paths):
        sd_init_config, sd_instances, skip_config_lookup = None, None, False
        # check name = file name without extension
        check_name = os.path.basename(check).split('.')[0]
        check_config = None
        # Skip checks already loaded (or failed) from another path.
        if check_name in initialized_checks or check_name in init_failed_checks:
            log.debug('Skipping check %s because it has already been loaded from another location', check)
            continue
Exemple #38
0
 def test_fill_tpl(self):
     """Test _fill_tpl with mock _get_ports"""
     valid_configs = [
         # ((inspect, instance_tpl, variables, tags), (expected_instance_tpl, expected_var_values))
         (
             ({}, {'host': 'localhost'}, [], None),
             ({'host': 'localhost'}, {})
         ),
         (
             ({'NetworkSettings': {'IPAddress': '127.0.0.1'}},
              {'host': '%%host%%', 'port': 1337}, ['host'], ['foo', 'bar:baz']),
             ({'host': '%%host%%', 'port': 1337, 'tags': ['foo', 'bar:baz']},
              {'host': '127.0.0.1'})
         ),
         (
             ({'NetworkSettings': {'IPAddress': '127.0.0.1', 'Ports': {'42/tcp': None, '22/tcp': None}}},
              {'host': '%%host%%', 'port': '%%port_1%%', 'tags': ['env:test']},
              ['host', 'port_1'], ['foo', 'bar:baz']),
             ({'host': '%%host%%', 'port': '%%port_1%%', 'tags': ['env:test', 'foo', 'bar:baz']},
              {'host': '127.0.0.1', 'port_1': '42'})
         )
     ]
     with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None):
         for ac in self.agentConfigs:
             sd_backend = get_sd_backend(agentConfig=ac)
             try:
                 for case_input, (expected_tpl, expected_vars) in valid_configs:
                     instance_tpl, var_values = sd_backend._fill_tpl(*case_input)
                     for key in instance_tpl:
                         if isinstance(instance_tpl[key], list):
                             # list values are compared by length + membership
                             self.assertEquals(len(instance_tpl[key]), len(expected_tpl.get(key)))
                             for elem in instance_tpl[key]:
                                 self.assertTrue(elem in expected_tpl.get(key))
                         else:
                             self.assertEquals(instance_tpl[key], expected_tpl.get(key))
                     self.assertEquals(var_values, expected_vars)
                 clear_singletons(ac)
             except Exception:
                 # clean the singletons even on failure, then re-raise
                 clear_singletons(ac)
                 raise
    def test_get_host_address(self, mock_check_yaml, mock_get):
        """Exercise _get_host_address over various NetworkSettings layouts."""
        kubernetes_config = {'instances': [{'kubelet_port': 1337}]}
        pod_list = {
            'items': [{
                'status': {
                    'podIP': '127.0.0.1',
                    'containerStatuses': [
                        {'containerID': 'docker://389dc8a4361f3d6c866e9e9a7b6972b26a31c589c4e2f097375d55656a070bc9'}
                    ]
                }
            }]
        }

        # (inspect, tpl_var, expected_result)
        ip_address_inspects = [
            # no usable address at all
            ({'NetworkSettings': {}}, 'host', None),
            ({'NetworkSettings': {'IPAddress': ''}}, 'host', None),

            # plain IPAddress, with and without Networks present
            ({'NetworkSettings': {'IPAddress': '127.0.0.1'}}, 'host', '127.0.0.1'),
            ({'NetworkSettings': {'IPAddress': '127.0.0.1', 'Networks': {}}}, 'host', '127.0.0.1'),
            ({'NetworkSettings': {
                'IPAddress': '127.0.0.1',
                'Networks': {'bridge': {'IPAddress': '127.0.0.1'}}}},
             'host', '127.0.0.1'),
            ({'NetworkSettings': {
                'IPAddress': '',
                'Networks': {'bridge': {'IPAddress': '127.0.0.1'}}}},
             'host_bridge', '127.0.0.1'),
            ({'NetworkSettings': {
                'IPAddress': '127.0.0.1',
                'Networks': {
                    'bridge': {'IPAddress': '172.17.0.2'},
                    'foo': {'IPAddress': '192.168.0.2'}}}},
             'host', '127.0.0.1'),

            # Networks-only layouts, including named-network selectors
            ({'NetworkSettings': {'Networks': {}}}, 'host', None),
            ({'NetworkSettings': {'Networks': {}}}, 'host_bridge', None),
            ({'NetworkSettings': {'Networks': {'bridge': {}}}}, 'host', None),
            ({'NetworkSettings': {'Networks': {'bridge': {}}}}, 'host_bridge', None),
            ({'NetworkSettings': {
                'Networks': {
                    'bridge': {'IPAddress': '172.17.0.2'}
                }}},
             'host_bridge', '172.17.0.2'),
            ({'NetworkSettings': {
                'Networks': {
                    'bridge': {'IPAddress': '172.17.0.2'},
                    'foo': {'IPAddress': '192.168.0.2'}
                }}},
             'host_foo', '192.168.0.2')
        ]

        mock_check_yaml.return_value = kubernetes_config
        mock_get.return_value = Response(pod_list)

        config = self.auto_conf_agentConfig
        for inspect, tpl_var, expected_ip in ip_address_inspects:
            # Neutralize the config store, docker client and kube conf lookup
            # so the backend can be built without any external dependency.
            with mock.patch.object(AbstractConfigStore, '__init__', return_value=None), \
                 mock.patch('utils.dockerutil.DockerUtil.client', return_value=None), \
                 mock.patch('utils.kubeutil.get_conf_path', return_value=None):
                backend = get_sd_backend(agentConfig=config)
                self.assertEquals(backend._get_host_address(inspect, tpl_var), expected_ip)
                clear_singletons(config)
Exemple #40
0
    def run(self, config=None):
        """Main loop of the collector.

        Installs signal handlers, loads configuration and checks, then runs
        collection cycles until self.run_forever is cleared, handling
        developer-mode profiling, service-discovery config reloads and
        autorestart along the way. Exits the process on termination.
        """

        # Gracefully exit on sigterm.
        signal.signal(signal.SIGTERM, self._handle_sigterm)

        # A SIGUSR1 signals an exit with an autorestart
        signal.signal(signal.SIGUSR1, self._handle_sigusr1)

        # Handle Keyboard Interrupt
        signal.signal(signal.SIGINT, self._handle_sigterm)

        # A SIGHUP signals a configuration reload
        signal.signal(signal.SIGHUP, self._handle_sighup)

        # Save the agent start-up stats.
        CollectorStatus().persist()

        # Intialize the collector.
        if not config:
            config = get_config(parse_args=True)

        self._agentConfig = self._set_agent_config_hostname(config)
        hostname = get_hostname(self._agentConfig)
        systemStats = get_system_stats(proc_path=self._agentConfig.get(
            'procfs_path', '/proc').rstrip('/'))
        emitters = self._get_emitters()

        # Initialize service discovery
        # NOTE(review): self.sd_backend is only assigned here; the loop below
        # guards on it before use — presumably it defaults to None in
        # __init__, confirm.
        if self._agentConfig.get('service_discovery'):
            self.sd_backend = get_sd_backend(self._agentConfig)

        # Load the checks.d checks
        self._checksd = load_check_directory(self._agentConfig, hostname)

        # Initialize the Collector
        self.collector = Collector(self._agentConfig, emitters, systemStats,
                                   hostname)

        # In developer mode, the number of runs to be included in a single collector profile
        self.collector_profile_interval = self._agentConfig.get(
            'collector_profile_interval', DEFAULT_COLLECTOR_PROFILE_INTERVAL)

        # Configure the watchdog.
        self.check_frequency = int(self._agentConfig['check_freq'])
        watchdog = self._get_watchdog(self.check_frequency)

        # Initialize the auto-restarter
        self.restart_interval = int(
            self._agentConfig.get('restart_interval', RESTART_INTERVAL))
        self.agent_start = time.time()

        # Profiler state: `profiled` tracks whether profiling is active,
        # `collector_profiled_runs` counts runs captured in the current profile.
        profiled = False
        collector_profiled_runs = 0

        # Run the main loop.
        while self.run_forever:
            log.debug("Found {num_checks} checks".format(
                num_checks=len(self._checksd['initialized_checks'])))

            # Setup profiling if necessary
            if self.in_developer_mode and not profiled:
                try:
                    profiler = AgentProfiler()
                    profiler.enable_profiling()
                    profiled = True
                except Exception as e:
                    log.warn("Cannot enable profiler: %s" % str(e))

            # Do the work.
            self.collector.run(checksd=self._checksd,
                               start_event=self.start_event,
                               configs_reloaded=self.configs_reloaded)

            # This flag is used to know if the check configs have been reloaded at the current
            # run of the agent yet or not. It's used by the collector to know if it needs to
            # look for the AgentMetrics check and pop it out.
            # See: https://github.com/DataDog/dd-agent/blob/5.6.x/checks/collector.py#L265-L272
            self.configs_reloaded = False

            # Look for change in the config template store.
            # The self.sd_backend.reload_check_configs flag is set
            # to True if a config reload is needed.
            if self._agentConfig.get('service_discovery') and self.sd_backend and \
               not self.sd_backend.reload_check_configs:
                try:
                    self.sd_backend.reload_check_configs = get_config_store(
                        self._agentConfig).crawl_config_template()
                except Exception as e:
                    # best-effort: a failed crawl only delays the reload check
                    log.warn(
                        'Something went wrong while looking for config template changes: %s'
                        % str(e))

            # Check if we should run service discovery
            # The `reload_check_configs` flag can be set through the docker_daemon check or
            # using ConfigStore.crawl_config_template
            if self._agentConfig.get('service_discovery') and self.sd_backend and \
               self.sd_backend.reload_check_configs:
                self.reload_configs()
                self.configs_reloaded = True
                self.sd_backend.reload_check_configs = False

            # Close out the current profile once enough runs were captured.
            if profiled:
                if collector_profiled_runs >= self.collector_profile_interval:
                    try:
                        profiler.disable_profiling()
                        profiled = False
                        collector_profiled_runs = 0
                    except Exception as e:
                        log.warn("Cannot disable profiler: %s" % str(e))

            # Check if we should restart.
            if self.autorestart and self._should_restart():
                self._do_restart()

            # Only plan for next loop if we will continue, otherwise exit quickly.
            if self.run_forever:
                if watchdog:
                    watchdog.reset()
                if profiled:
                    collector_profiled_runs += 1
                log.debug("Sleeping for {0} seconds".format(
                    self.check_frequency))
                time.sleep(self.check_frequency)

        # Now clean-up.
        try:
            CollectorStatus.remove_latest_status()
        except Exception:
            # best-effort cleanup; ignore failures on shutdown
            pass

        # Explicitly kill the process, because it might be running as a daemon.
        log.info("Exiting. Bye bye.")
        sys.exit(0)
 def test_get_container_pid(self, *args):
     """Check _get_container_pid against every container-inspect fixture."""
     config = self.auto_conf_agentConfig
     for inspect, _, var_tpl, _, _, expected_pid in self.container_inspects:
         # fetch state that always returns this fixture's inspect payload
         fetch_state = _SDDockerBackendConfigFetchState(lambda _cid: inspect)
         backend = get_sd_backend(agentConfig=config)
         pid = backend._get_container_pid(fetch_state, 'container id', var_tpl)
         self.assertEquals(pid, expected_pid)
         clear_singletons(config)
Exemple #42
0
 def _get_events(self):
     """Fetch docker events and propagate any config-reload request."""
     events, conf_reload_set = self.docker_util.get_events()
     # When the event stream flags a reload and service discovery is on,
     # hand the flag set over to the SD backend.
     if conf_reload_set and self._service_discovery:
         backend = get_sd_backend(self.agentConfig)
         backend.reload_check_configs = conf_reload_set
     return events
    def test_fill_tpl(self, *args):
        """Test _fill_tpl with mocked docker client"""

        valid_configs = [
            # ((inspect, instance_tpl, variables, tags), (expected_instance_tpl, expected_var_values))
            (({}, {
                'host': 'localhost'
            }, [], None), ({
                'host': 'localhost'
            }, {})),
            (({
                'NetworkSettings': {
                    'IPAddress': ''
                }
            }, {
                'host': 'localhost'
            }, [], None), ({
                'host': 'localhost'
            }, {})),
            (({
                'NetworkSettings': {
                    'Networks': {}
                }
            }, {
                'host': 'localhost'
            }, [], None), ({
                'host': 'localhost'
            }, {})),
            (({
                'NetworkSettings': {
                    'Networks': {
                        'bridge': {}
                    }
                }
            }, {
                'host': 'localhost'
            }, [], None), ({
                'host': 'localhost'
            }, {})),
            (
                ({
                    'NetworkSettings': {
                        'IPAddress': '127.0.0.1'
                    }
                }, {
                    'host': '%%host%%',
                    'port': 1337
                }, ['host'], ['foo', 'bar:baz']),
                ({
                    'host': '%%host%%',
                    'port': 1337,
                    'tags': ['foo', 'bar:baz']
                }, {
                    'host': '127.0.0.1'
                }),
            ),
            (
                ({
                    'NetworkSettings': {
                        'IPAddress': '127.0.0.1',
                        'Networks': {}
                    }
                }, {
                    'host': '%%host%%',
                    'port': 1337
                }, ['host'], ['foo', 'bar:baz']),
                ({
                    'host': '%%host%%',
                    'port': 1337,
                    'tags': ['foo', 'bar:baz']
                }, {
                    'host': '127.0.0.1'
                }),
            ),
            (
                ({
                    'NetworkSettings': {
                        'IPAddress': '127.0.0.1',
                        'Networks': {
                            'bridge': {
                                'IPAddress': '172.17.0.2'
                            }
                        }
                    }
                }, {
                    'host': '%%host%%',
                    'port': 1337
                }, ['host'], ['foo', 'bar:baz']),
                ({
                    'host': '%%host%%',
                    'port': 1337,
                    'tags': ['foo', 'bar:baz']
                }, {
                    'host': '172.17.0.2'
                }),
            ),
            (
                ({
                    'NetworkSettings': {
                        'IPAddress': '',
                        'Networks': {
                            'bridge': {
                                'IPAddress': '172.17.0.2'
                            },
                            'foo': {
                                'IPAddress': '192.168.0.2'
                            }
                        }
                    }
                }, {
                    'host': '%%host_bridge%%',
                    'port': 1337
                }, ['host_bridge'], ['foo', 'bar:baz']),
                ({
                    'host': '%%host_bridge%%',
                    'port': 1337,
                    'tags': ['foo', 'bar:baz']
                }, {
                    'host_bridge': '172.17.0.2'
                }),
            ),
            (
                ({
                    'NetworkSettings': {
                        'IPAddress': '',
                        'Networks': {
                            'bridge': {
                                'IPAddress': '172.17.0.2'
                            },
                            'foo': {
                                'IPAddress': '192.168.0.2'
                            }
                        }
                    }
                }, {
                    'host': '%%host_foo%%',
                    'port': 1337
                }, ['host_foo'], ['foo', 'bar:baz']),
                ({
                    'host': '%%host_foo%%',
                    'port': 1337,
                    'tags': ['foo', 'bar:baz']
                }, {
                    'host_foo': '192.168.0.2'
                }),
            ),
            (({
                'NetworkSettings': {
                    'IPAddress': '127.0.0.1',
                    'Ports': {
                        '42/tcp': None,
                        '22/tcp': None
                    }
                }
            }, {
                'host': '%%host%%',
                'port': '%%port_1%%',
                'tags': ['env:test']
            }, ['host', 'port_1'], ['foo', 'bar:baz']), ({
                'host':
                '%%host%%',
                'port':
                '%%port_1%%',
                'tags': ['env:test', 'foo', 'bar:baz']
            }, {
                'host': '127.0.0.1',
                'port_1': '42'
            }))
        ]

        # should not fail but return something specific
        edge_cases = [
            # ((inspect, instance_tpl, variables, tags), (expected_instance_tpl, expected_var_values))

            # specify bridge but there is also a default IPAddress (networks should be preferred)
            (({
                'NetworkSettings': {
                    'IPAddress': '127.0.0.1',
                    'Networks': {
                        'bridge': {
                            'IPAddress': '172.17.0.2'
                        }
                    }
                }
            }, {
                'host': '%%host_bridge%%',
                'port': 1337
            }, ['host_bridge'], ['foo', 'bar:baz']), ({
                'host':
                '%%host_bridge%%',
                'port':
                1337,
                'tags': ['foo', 'bar:baz']
            }, {
                'host_bridge':
                '172.17.0.2'
            })),
            # specify index but there is a default IPAddress (there's a specifier, even if it's wrong, walking networks should be preferred)
            (
                ({
                    'NetworkSettings': {
                        'IPAddress': '127.0.0.1',
                        'Networks': {
                            'bridge': {
                                'IPAddress': '172.17.0.2'
                            }
                        }
                    }
                }, {
                    'host': '%%host_0%%',
                    'port': 1337
                }, ['host_0'], ['foo', 'bar:baz']),
                ({
                    'host': '%%host_0%%',
                    'port': 1337,
                    'tags': ['foo', 'bar:baz']
                }, {
                    'host_0': '172.17.0.2'
                }),
            ),
            # missing key for host, bridge network should be preferred
            (
                ({
                    'NetworkSettings': {
                        'Networks': {
                            'bridge': {
                                'IPAddress': '127.0.0.1'
                            },
                            'foo': {
                                'IPAddress': '172.17.0.2'
                            }
                        }
                    }
                }, {
                    'host': '%%host_bar%%',
                    'port': 1337
                }, ['host_bar'], []),
                ({
                    'host': '%%host_bar%%',
                    'port': 1337
                }, {
                    'host_bar': '127.0.0.1'
                }),
            ),
            # missing index for port
            (({
                'NetworkSettings': {
                    'IPAddress': '127.0.0.1',
                    'Ports': {
                        '42/tcp': None,
                        '22/tcp': None
                    }
                }
            }, {
                'host': '%%host%%',
                'port': '%%port_2%%',
                'tags': ['env:test']
            }, ['host', 'port_2'], ['foo', 'bar:baz']), ({
                'host':
                '%%host%%',
                'port':
                '%%port_2%%',
                'tags': ['env:test', 'foo', 'bar:baz']
            }, {
                'host': '127.0.0.1',
                'port_2': '42'
            }))
        ]

        # should raise
        invalid_config = [
            # ((inspect, instance_tpl, variables, tags), expected_exception)

            # template variable but no IPAddress available
            (
                ({
                    'NetworkSettings': {
                        'Networks': {}
                    }
                }, {
                    'host': '%%host%%',
                    'port': 1337
                }, ['host'], ['foo', 'bar:baz']),
                Exception,
            ),
            # index but no IPAddress available
            (
                ({
                    'NetworkSettings': {
                        'Networks': {}
                    }
                }, {
                    'host': '%%host_0%%',
                    'port': 1337
                }, ['host_0'], ['foo', 'bar:baz']),
                Exception,
            ),
            # key but no IPAddress available
            (
                ({
                    'NetworkSettings': {
                        'Networks': {}
                    }
                }, {
                    'host': '%%host_foo%%',
                    'port': 1337
                }, ['host_foo'], ['foo', 'bar:baz']),
                Exception,
            ),

            # template variable but no port available
            (
                ({
                    'NetworkSettings': {
                        'Networks': {}
                    }
                }, {
                    'host': 'localhost',
                    'port': '%%port%%'
                }, ['port'], []),
                Exception,
            ),
            # index but no port available
            (
                ({
                    'NetworkSettings': {
                        'Networks': {}
                    }
                }, {
                    'host': 'localhost',
                    'port_0': '%%port%%'
                }, ['port_0'], []),
                Exception,
            ),
            # key but no port available
            (
                ({
                    'NetworkSettings': {
                        'Networks': {}
                    }
                }, {
                    'host': 'localhost',
                    'port': '%%port_foo%%'
                }, ['port_foo'], []),
                Exception,
            )
        ]

        for ac in self.agentConfigs:
            sd_backend = get_sd_backend(agentConfig=ac)
            try:
                for co in valid_configs + edge_cases:
                    inspect, tpl, variables, tags = co[0]
                    state = _SDDockerBackendConfigFetchState(lambda _: inspect)
                    instance_tpl, var_values = sd_backend._fill_tpl(
                        state, 'c_id', tpl, variables, tags)
                    for key in instance_tpl.keys():
                        if isinstance(instance_tpl[key], list):
                            self.assertEquals(len(instance_tpl[key]),
                                              len(co[1][0].get(key)))
                            for elem in instance_tpl[key]:
                                self.assertTrue(elem in co[1][0].get(key))
                        else:
                            self.assertEquals(instance_tpl[key],
                                              co[1][0].get(key))
                    self.assertEquals(var_values, co[1][1])

                for co in invalid_config:
                    inspect, tpl, variables, tags = co[0]
                    state = _SDDockerBackendConfigFetchState(lambda _: inspect)
                    self.assertRaises(
                        co[1],
                        sd_backend._fill_tpl(state, 'c_id', tpl, variables,
                                             tags))
            finally:
                clear_singletons(ac)
Exemple #44
0
    def run(self, config=None):
        """Main loop of the collector.

        Installs signal handlers, loads configuration, checks and (optionally)
        service discovery and the JMX SD pipe, then runs a collection every
        ``check_freq`` seconds until ``self.run_forever`` is cleared, and
        finally persists status and exits the process.

        :param config: optional pre-parsed agent configuration; when falsy the
            configuration is loaded via ``get_config(parse_args=True)``.
        """

        # Gracefully exit on sigterm.
        signal.signal(signal.SIGTERM, self._handle_sigterm)

        # A SIGUSR1 signals an exit with an autorestart
        signal.signal(signal.SIGUSR1, self._handle_sigusr1)

        # Handle Keyboard Interrupt
        signal.signal(signal.SIGINT, self._handle_sigterm)

        # A SIGHUP signals a configuration reload
        signal.signal(signal.SIGHUP, self._handle_sighup)

        # Save the agent start-up stats.
        CollectorStatus().persist()

        # Initialize the collector.
        if not config:
            config = get_config(parse_args=True)

        self._agentConfig = self._set_agent_config_hostname(config)
        hostname = get_hostname(self._agentConfig)
        systemStats = get_system_stats(
            proc_path=self._agentConfig.get('procfs_path', '/proc').rstrip('/')
        )
        emitters = self._get_emitters()

        # Initialize service discovery
        if self._agentConfig.get('service_discovery'):
            self.sd_backend = get_sd_backend(self._agentConfig)

        # Set up the named pipe used to hand JMX service-discovery configs
        # over to a separate consumer process.
        if _is_affirmative(self._agentConfig.get('sd_jmx_enable')):
            pipe_path = get_jmx_pipe_path()
            if Platform.is_windows():
                pipe_name = pipe_path.format(pipename=SD_PIPE_NAME)
            else:
                pipe_name = os.path.join(pipe_path, SD_PIPE_NAME)

            if os.access(pipe_path, os.W_OK):
                if not os.path.exists(pipe_name):
                    os.mkfifo(pipe_name)
                self.sd_pipe = os.open(pipe_name, os.O_RDWR) # RW to avoid blocking (will only W)

                # Initialize Supervisor proxy
                self.supervisor_proxy = self._get_supervisor_socket(self._agentConfig)
            else:
                log.debug('Unable to create pipe in temporary directory. JMX service discovery disabled.')

        # Load the checks.d checks
        self._checksd = load_check_directory(self._agentConfig, hostname)

        # Load JMX configs if available
        if self._jmx_service_discovery_enabled:
            jmx_sd_configs = generate_jmx_configs(self._agentConfig, hostname)
            if jmx_sd_configs:
                self._submit_jmx_service_discovery(jmx_sd_configs)

        # Initialize the Collector
        self.collector = Collector(self._agentConfig, emitters, systemStats, hostname)

        # In developer mode, the number of runs to be included in a single collector profile
        try:
            self.collector_profile_interval = int(
                self._agentConfig.get('collector_profile_interval', DEFAULT_COLLECTOR_PROFILE_INTERVAL))
        except ValueError:
            log.warn('collector_profile_interval is invalid. '
                     'Using default value instead (%s).' % DEFAULT_COLLECTOR_PROFILE_INTERVAL)
            self.collector_profile_interval = DEFAULT_COLLECTOR_PROFILE_INTERVAL

        # Configure the watchdog.
        self.check_frequency = int(self._agentConfig['check_freq'])
        watchdog = self._get_watchdog(self.check_frequency)

        # Initialize the auto-restarter
        self.restart_interval = int(self._agentConfig.get('restart_interval', RESTART_INTERVAL))
        self.agent_start = time.time()

        self.allow_profiling = self._agentConfig.get('allow_profiling', True)

        profiled = False
        collector_profiled_runs = 0

        # Run the main loop.
        while self.run_forever:
            # Setup profiling if necessary
            if self.allow_profiling and self.in_developer_mode and not profiled:
                try:
                    profiler = AgentProfiler()
                    profiler.enable_profiling()
                    profiled = True
                except Exception as e:
                    log.warn("Cannot enable profiler: %s" % str(e))

            # A `set` in the flag means only those named checks are reloaded;
            # any other truthy value triggers a full reload.
            if self.reload_configs_flag:
                if isinstance(self.reload_configs_flag, set):
                    self.reload_configs(checks_to_reload=self.reload_configs_flag)
                else:
                    self.reload_configs()

            # Do the work. Pass `configs_reloaded` to let the collector know if it needs to
            # look for the AgentMetrics check and pop it out.
            self.collector.run(checksd=self._checksd,
                               start_event=self.start_event,
                               configs_reloaded=True if self.reload_configs_flag else False)

            self.reload_configs_flag = False

            # Look for change in the config template store.
            # The self.sd_backend.reload_check_configs flag is set
            # to True if a config reload is needed.
            if self._agentConfig.get('service_discovery') and self.sd_backend and \
               not self.sd_backend.reload_check_configs:
                try:
                    self.sd_backend.reload_check_configs = get_config_store(
                        self._agentConfig).crawl_config_template()
                except Exception as e:
                    log.warn('Something went wrong while looking for config template changes: %s' % str(e))

            # Check if we should run service discovery
            # The `reload_check_configs` flag can be set through the docker_daemon check or
            # using ConfigStore.crawl_config_template
            if self._agentConfig.get('service_discovery') and self.sd_backend and \
               self.sd_backend.reload_check_configs:
                self.reload_configs_flag = self.sd_backend.reload_check_configs
                self.sd_backend.reload_check_configs = False

            if profiled:
                if collector_profiled_runs >= self.collector_profile_interval:
                    try:
                        profiler.disable_profiling()
                        profiled = False
                        collector_profiled_runs = 0
                    except Exception as e:
                        log.warn("Cannot disable profiler: %s" % str(e))

            # Check if we should restart.
            if self.autorestart and self._should_restart():
                self._do_restart()

            # Only plan for next loop if we will continue, otherwise exit quickly.
            if self.run_forever:
                if watchdog:
                    watchdog.reset()
                if profiled:
                    collector_profiled_runs += 1
                log.debug("Sleeping for {0} seconds".format(self.check_frequency))
                time.sleep(self.check_frequency)

        # Now clean-up.
        try:
            CollectorStatus.remove_latest_status()
        except Exception:
            pass

        # Explicitly kill the process, because it might be running as a daemon.
        log.info("Exiting. Bye bye.")
        sys.exit(0)
def clear_singletons(agentConfig):
    """Drop the config-store and SD-backend singletons tied to *agentConfig*."""
    # Tear down the config store first, then the service-discovery backend.
    for singleton_getter in (get_config_store, get_sd_backend):
        singleton_getter(agentConfig)._drop()
Exemple #46
0
    if checks_paths is None:
        log.error(
            'Check directory not found, exiting. The agent is likely misconfigured.'
        )
        sys.exit(3)

    try:
        confd_path = get_confd_path(osname)
    except PathNotFound, e:
        log.error("No conf.d folder found at '%s' or in the directory where "
                  "the Agent is currently deployed.\n" % e.args[0])
        sys.exit(3)

    if agentConfig.get('service_discovery') and agentConfig.get(
            'service_discovery_backend') in SD_BACKENDS:
        sd_backend = get_sd_backend(agentConfig=agentConfig)
        service_disco_configs = sd_backend.get_configs()
    else:
        service_disco_configs = {}

    # We don't support old style configs anymore
    # So we iterate over the files in the checks.d directory
    # If there is a matching configuration file in the conf.d directory
    # then we import the check
    for check in itertools.chain(*checks_paths):
        sd_init_config, sd_instances, skip_config_lookup = None, None, False
        check_name = os.path.basename(check).split('.')[0]
        check_config = None
        if check_name in initialized_checks or check_name in init_failed_checks:
            log.debug(
                'Skipping check %s because it has already been loaded from another location',
Exemple #47
0
 def _get_events(self):
     """Fetch pending docker events; flag SD for a config reload if needed."""
     events, reload_needed = self.docker_util.get_events()
     if reload_needed and self._service_discovery:
         # Ask the service-discovery backend to re-crawl check configs.
         backend = get_sd_backend(self.agentConfig)
         backend.reload_check_configs = True
     return events
Exemple #48
0
 def _get_events(self):
     """Poll docker for events and propagate container changes to SD."""
     events, changed_ids = self.docker_util.get_events()
     if changed_ids and self._service_discovery:
         # Refresh the checks attached to the containers that changed.
         backend = get_sd_backend(self.agentConfig)
         backend.update_checks(changed_ids)
     return events
 def _get_events(self):
     """Return docker events, marking SD check configs for reload when required."""
     events, needs_reload = self.docker_util.get_events()
     if needs_reload and self._service_discovery:
         get_sd_backend(self.agentConfig).reload_check_configs = True
     return events
    def test_fill_tpl(self):
        """Test _fill_tpl with mocked docker client.

        Exercises three fixture families:
        - ``valid_configs``: inputs that must resolve cleanly,
        - ``edge_cases``: ambiguous inputs with a defined preferred outcome,
        - ``invalid_config``: inputs for which ``_fill_tpl`` must raise.
        """

        valid_configs = [
            # ((inspect, instance_tpl, variables, tags), (expected_instance_tpl, expected_var_values))
            (({}, {'host': 'localhost'}, [], None), ({'host': 'localhost'}, {})),
            (
                ({'NetworkSettings': {'IPAddress': ''}}, {'host': 'localhost'}, [], None),
                ({'host': 'localhost'}, {})
            ),
            (
                ({'NetworkSettings': {'Networks': {}}}, {'host': 'localhost'}, [], None),
                ({'host': 'localhost'}, {})
            ),
            (
                ({'NetworkSettings': {'Networks': {'bridge': {}}}}, {'host': 'localhost'}, [], None),
                ({'host': 'localhost'}, {})
            ),
            (
                ({'NetworkSettings': {'IPAddress': '127.0.0.1'}},
                 {'host': '%%host%%', 'port': 1337}, ['host'], ['foo', 'bar:baz']),
                ({'host': '%%host%%', 'port': 1337, 'tags': ['foo', 'bar:baz']}, {'host': '127.0.0.1'}),
            ),
            (
                ({'NetworkSettings': {'IPAddress': '127.0.0.1', 'Networks': {}}},
                 {'host': '%%host%%', 'port': 1337}, ['host'], ['foo', 'bar:baz']),
                ({'host': '%%host%%', 'port': 1337, 'tags': ['foo', 'bar:baz']}, {'host': '127.0.0.1'}),
            ),
            (
                ({'NetworkSettings': {
                    'IPAddress': '127.0.0.1',
                    'Networks': {'bridge': {'IPAddress': '172.17.0.2'}}}
                  },
                 {'host': '%%host%%', 'port': 1337}, ['host'], ['foo', 'bar:baz']),
                ({'host': '%%host%%', 'port': 1337, 'tags': ['foo', 'bar:baz']}, {'host': '127.0.0.1'}),
            ),
            (
                ({'NetworkSettings': {
                    'IPAddress': '',
                    'Networks': {
                        'bridge': {'IPAddress': '172.17.0.2'},
                        'foo': {'IPAddress': '192.168.0.2'}
                    }}
                  },
                 {'host': '%%host_bridge%%', 'port': 1337}, ['host_bridge'], ['foo', 'bar:baz']),
                ({'host': '%%host_bridge%%', 'port': 1337, 'tags': ['foo', 'bar:baz']},
                 {'host_bridge': '172.17.0.2'}),
            ),
            (
                ({'NetworkSettings': {
                    'IPAddress': '',
                    'Networks': {
                        'bridge': {'IPAddress': '172.17.0.2'},
                        'foo': {'IPAddress': '192.168.0.2'}
                    }}
                  },
                 {'host': '%%host_foo%%', 'port': 1337}, ['host_foo'], ['foo', 'bar:baz']),
                ({'host': '%%host_foo%%', 'port': 1337, 'tags': ['foo', 'bar:baz']},
                 {'host_foo': '192.168.0.2'}),
            ),
            (
                ({'NetworkSettings': {'IPAddress': '127.0.0.1', 'Ports': {'42/tcp': None, '22/tcp': None}}},
                 {'host': '%%host%%', 'port': '%%port_1%%', 'tags': ['env:test']},
                 ['host', 'port_1'], ['foo', 'bar:baz']),
                ({'host': '%%host%%', 'port': '%%port_1%%', 'tags': ['env:test', 'foo', 'bar:baz']},
                 {'host': '127.0.0.1', 'port_1': '42'})
            )
        ]

        # should not fail but return something specific
        edge_cases = [
            # ((inspect, instance_tpl, variables, tags), (expected_instance_tpl, expected_var_values))

            # specify bridge but there is also a default IPAddress (networks should be preferred)
            (
                ({'NetworkSettings': {
                    'IPAddress': '127.0.0.1',
                    'Networks': {'bridge': {'IPAddress': '172.17.0.2'}}}},
                 {'host': '%%host_bridge%%', 'port': 1337}, ['host_bridge'], ['foo', 'bar:baz']),
                ({'host': '%%host_bridge%%', 'port': 1337, 'tags': ['foo', 'bar:baz']},
                 {'host_bridge': '172.17.0.2'})
            ),
            # specify index but there is a default IPAddress (there's a specifier, even if it's wrong, walking networks should be preferred)
            (
                ({'NetworkSettings': {
                    'IPAddress': '127.0.0.1',
                    'Networks': {'bridge': {'IPAddress': '172.17.0.2'}}}},
                 {'host': '%%host_0%%', 'port': 1337}, ['host_0'], ['foo', 'bar:baz']),
                ({'host': '%%host_0%%', 'port': 1337, 'tags': ['foo', 'bar:baz']}, {'host_0': '172.17.0.2'}),
            ),
            # missing key for host, bridge network should be preferred
            (
                ({'NetworkSettings': {'Networks': {
                    'bridge': {'IPAddress': '127.0.0.1'},
                    'foo': {'IPAddress': '172.17.0.2'}}}},
                 {'host': '%%host_bar%%', 'port': 1337}, ['host_bar'], []),
                ({'host': '%%host_bar%%', 'port': 1337}, {'host_bar': '127.0.0.1'}),
            ),
            # missing index for port
            (
                ({'NetworkSettings': {'IPAddress': '127.0.0.1', 'Ports': {'42/tcp': None, '22/tcp': None}}},
                 {'host': '%%host%%', 'port': '%%port_2%%', 'tags': ['env:test']},
                 ['host', 'port_2'], ['foo', 'bar:baz']),
                ({'host': '%%host%%', 'port': '%%port_2%%', 'tags': ['env:test', 'foo', 'bar:baz']},
                 {'host': '127.0.0.1', 'port_2': '42'})
            )
        ]

        # should raise
        invalid_config = [
            # ((inspect, instance_tpl, variables, tags), expected_exception)

            # template variable but no IPAddress available
            (
                ({'NetworkSettings': {'Networks': {}}},
                 {'host': '%%host%%', 'port': 1337}, ['host'], ['foo', 'bar:baz']),
                Exception,
            ),
            # index but no IPAddress available
            (
                ({'NetworkSettings': {'Networks': {}}},
                 {'host': '%%host_0%%', 'port': 1337}, ['host_0'], ['foo', 'bar:baz']),
                Exception,
            ),
            # key but no IPAddress available
            (
                ({'NetworkSettings': {'Networks': {}}},
                 {'host': '%%host_foo%%', 'port': 1337}, ['host_foo'], ['foo', 'bar:baz']),
                Exception,
            ),

            # template variable but no port available
            (
                ({'NetworkSettings': {'Networks': {}}},
                 {'host': 'localhost', 'port': '%%port%%'}, ['port'], []),
                Exception,
            ),
            # index but no port available
            (
                ({'NetworkSettings': {'Networks': {}}},
                 {'host': 'localhost', 'port_0': '%%port%%'}, ['port_0'], []),
                Exception,
            ),
            # key but no port available
            (
                ({'NetworkSettings': {'Networks': {}}},
                 {'host': 'localhost', 'port': '%%port_foo%%'}, ['port_foo'], []),
                Exception,
            )
        ]

        with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None):
            with mock.patch.object(EtcdStore, 'get_client', return_value=None):
                with mock.patch.object(ConsulStore, 'get_client', return_value=None):
                    for ac in self.agentConfigs:
                        sd_backend = get_sd_backend(agentConfig=ac)
                        try:
                            for co in valid_configs + edge_cases:
                                inspect, tpl, variables, tags = co[0]
                                instance_tpl, var_values = sd_backend._fill_tpl(inspect, tpl, variables, tags)
                                for key in instance_tpl.keys():
                                    if isinstance(instance_tpl[key], list):
                                        # List values (e.g. tags) may come back in any
                                        # order: compare as unordered collections.
                                        self.assertEquals(len(instance_tpl[key]), len(co[1][0].get(key)))
                                        for elem in instance_tpl[key]:
                                            self.assertTrue(elem in co[1][0].get(key))
                                    else:
                                        self.assertEquals(instance_tpl[key], co[1][0].get(key))
                                self.assertEquals(var_values, co[1][1])

                            for co in invalid_config:
                                inspect, tpl, variables, tags = co[0]
                                # BUGFIX: pass the callable and its arguments separately.
                                # The previous form called _fill_tpl eagerly, so the
                                # expected exception escaped before assertRaises could
                                # catch it and the test errored out instead of passing.
                                self.assertRaises(co[1], sd_backend._fill_tpl,
                                                  inspect, tpl, variables, tags)
                        finally:
                            # Always drop the per-config singletons so one agentConfig
                            # doesn't leak state into the next iteration.
                            clear_singletons(ac)
Exemple #51
0
    def run(self, config=None):
        """Main loop of the collector.

        Installs signal handlers, loads configuration, checks and (optionally)
        service discovery, then runs a collection every ``check_freq`` seconds
        until ``self.run_forever`` is cleared, and finally exits the process.

        :param config: optional pre-parsed agent configuration; when falsy the
            configuration is loaded via ``get_config(parse_args=True)``.
        """

        # Gracefully exit on sigterm.
        signal.signal(signal.SIGTERM, self._handle_sigterm)

        # A SIGUSR1 signals an exit with an autorestart
        signal.signal(signal.SIGUSR1, self._handle_sigusr1)

        # Handle Keyboard Interrupt
        signal.signal(signal.SIGINT, self._handle_sigterm)

        # A SIGHUP signals a configuration reload
        signal.signal(signal.SIGHUP, self._handle_sighup)

        # Save the agent start-up stats.
        CollectorStatus().persist()

        # Initialize the collector.
        if not config:
            config = get_config(parse_args=True)

        self._agentConfig = self._set_agent_config_hostname(config)
        hostname = get_hostname(self._agentConfig)
        systemStats = get_system_stats()
        emitters = self._get_emitters()

        # Initialize service discovery
        if self._agentConfig.get("service_discovery"):
            self.sd_backend = get_sd_backend(self._agentConfig)

        # Load the checks.d checks
        self._checksd = load_check_directory(self._agentConfig, hostname)

        # Initialize the Collector
        self.collector = Collector(self._agentConfig, emitters, systemStats, hostname)

        # In developer mode, the number of runs to be included in a single collector profile
        # NOTE(review): unlike `check_freq` below, this value is not cast to int here;
        # presumably the config value is already numeric — confirm before relying on
        # the `>=` comparison further down.
        self.collector_profile_interval = self._agentConfig.get(
            "collector_profile_interval", DEFAULT_COLLECTOR_PROFILE_INTERVAL
        )

        # Configure the watchdog.
        self.check_frequency = int(self._agentConfig["check_freq"])
        watchdog = self._get_watchdog(self.check_frequency)

        # Initialize the auto-restarter
        self.restart_interval = int(self._agentConfig.get("restart_interval", RESTART_INTERVAL))
        self.agent_start = time.time()

        profiled = False
        collector_profiled_runs = 0

        # Run the main loop.
        while self.run_forever:
            log.debug("Found {num_checks} checks".format(num_checks=len(self._checksd["initialized_checks"])))

            # Setup profiling if necessary
            if self.in_developer_mode and not profiled:
                try:
                    profiler = AgentProfiler()
                    profiler.enable_profiling()
                    profiled = True
                except Exception as e:
                    log.warn("Cannot enable profiler: %s" % str(e))

            # Do the work.
            # NOTE(review): `self.configs_reloaded` is read here before being assigned
            # in this method — assumes it is initialized elsewhere (e.g. __init__);
            # confirm against the class definition.
            self.collector.run(
                checksd=self._checksd, start_event=self.start_event, configs_reloaded=self.configs_reloaded
            )

            # This flag is used to know if the check configs have been reloaded at the current
            # run of the agent yet or not. It's used by the collector to know if it needs to
            # look for the AgentMetrics check and pop it out.
            # See: https://github.com/DataDog/dd-agent/blob/5.6.x/checks/collector.py#L265-L272
            self.configs_reloaded = False

            # Look for change in the config template store.
            # The self.sd_backend.reload_check_configs flag is set
            # to True if a config reload is needed.
            if (
                self._agentConfig.get("service_discovery")
                and self.sd_backend
                and not self.sd_backend.reload_check_configs
            ):
                try:
                    self.sd_backend.reload_check_configs = get_config_store(self._agentConfig).crawl_config_template()
                except Exception as e:
                    log.warn("Something went wrong while looking for config template changes: %s" % str(e))

            # Check if we should run service discovery
            # The `reload_check_configs` flag can be set through the docker_daemon check or
            # using ConfigStore.crawl_config_template
            if self._agentConfig.get("service_discovery") and self.sd_backend and self.sd_backend.reload_check_configs:
                self.reload_configs()
                self.configs_reloaded = True
                self.sd_backend.reload_check_configs = False

            if profiled:
                if collector_profiled_runs >= self.collector_profile_interval:
                    try:
                        profiler.disable_profiling()
                        profiled = False
                        collector_profiled_runs = 0
                    except Exception as e:
                        log.warn("Cannot disable profiler: %s" % str(e))

            # Check if we should restart.
            if self.autorestart and self._should_restart():
                self._do_restart()

            # Only plan for next loop if we will continue, otherwise exit quickly.
            if self.run_forever:
                if watchdog:
                    watchdog.reset()
                if profiled:
                    collector_profiled_runs += 1
                log.debug("Sleeping for {0} seconds".format(self.check_frequency))
                time.sleep(self.check_frequency)

        # Now clean-up.
        try:
            CollectorStatus.remove_latest_status()
        except Exception:
            pass

        # Explicitly kill the process, because it might be running as a daemon.
        log.info("Exiting. Bye bye.")
        sys.exit(0)
 def test_get_image_ident(self, *args):
     """Each known image format must map to its expected identifier."""
     backend = get_sd_backend(agentConfig=self.auto_conf_agentConfig)
     # normal cases: iterate the (image, expected ident) fixtures on the class
     for raw_image, expected_ident in self.image_formats.iteritems():
         actual = backend.config_store._get_image_ident(raw_image)
         self.assertEquals(expected_ident, actual)
def clear_singletons(agentConfig):
    """Drop the cached config-store and service-discovery singletons.

    Dropping them forces the next `get_config_store` / `get_sd_backend`
    call to build fresh instances for `agentConfig`.
    """
    # Order matters only in that it mirrors the original: config store first.
    for singleton_getter in (get_config_store, get_sd_backend):
        singleton_getter(agentConfig)._drop()
# Exemple #54 (extraction artifact: sample separator; stray "0" vote count removed)
    def run(self, config=None):
        """Main loop of the collector.

        Installs signal handlers, initializes service discovery, the JMX
        pipe, the checks and the Collector, then runs a collection pass
        every ``check_freq`` seconds until ``self.run_forever`` becomes
        False. Exits the process explicitly at the end.

        :param config: optional pre-parsed agent configuration dict; when
            falsy, the configuration is loaded via
            ``get_config(parse_args=True)``.
        """

        # Gracefully exit on sigterm.
        signal.signal(signal.SIGTERM, self._handle_sigterm)

        if not Platform.is_windows():
            # A SIGUSR1 signals an exit with an autorestart
            signal.signal(signal.SIGUSR1, self._handle_sigusr1)

            # Handle Keyboard Interrupt
            signal.signal(signal.SIGINT, self._handle_sigterm)

            # A SIGHUP signals a configuration reload
            signal.signal(signal.SIGHUP, self._handle_sighup)

        # Save the agent start-up stats.
        CollectorStatus().persist()

        # Initialize the collector.
        if not config:
            config = get_config(parse_args=True)

        self._agentConfig = self._set_agent_config_hostname(config)
        hostname = get_hostname(self._agentConfig)
        systemStats = get_system_stats(proc_path=self._agentConfig.get(
            'procfs_path', '/proc').rstrip('/'))
        emitters = self._get_emitters()

        # Initialize service discovery
        if self._agentConfig.get('service_discovery'):
            self.sd_backend = get_sd_backend(self._agentConfig)

        # Set up the named pipe used to hand JMX service-discovery configs
        # to the JMX fetcher process (skipped when the pipe dir is not writable).
        if _is_affirmative(self._agentConfig.get('sd_jmx_enable', False)):
            pipe_path = get_jmx_pipe_path()
            if Platform.is_windows():
                pipe_name = pipe_path.format(pipename=SD_PIPE_NAME)
            else:
                pipe_name = os.path.join(pipe_path, SD_PIPE_NAME)

            if os.access(pipe_path, os.W_OK):
                if not os.path.exists(pipe_name):
                    os.mkfifo(pipe_name)
                self.sd_pipe = os.open(
                    pipe_name, os.O_RDWR)  # RW to avoid blocking (will only W)

                # Initialize Supervisor proxy
                self.supervisor_proxy = self._get_supervisor_socket(
                    self._agentConfig)
            else:
                log.debug(
                    'Unable to create pipe in temporary directory. JMX service discovery disabled.'
                )

        # Load the checks.d checks
        self._checksd = load_check_directory(self._agentConfig, hostname)

        # Load JMX configs if available
        if self._jmx_service_discovery_enabled:
            jmx_sd_configs = generate_jmx_configs(self._agentConfig, hostname)
            if jmx_sd_configs:
                self._submit_jmx_service_discovery(jmx_sd_configs)

        # Initialize the Collector
        self.collector = Collector(self._agentConfig, emitters, systemStats,
                                   hostname)

        # In developer mode, the number of runs to be included in a single collector profile
        try:
            self.collector_profile_interval = int(
                self._agentConfig.get('collector_profile_interval',
                                      DEFAULT_COLLECTOR_PROFILE_INTERVAL))
        except ValueError:
            # Fall back to the default rather than crash on a bad config value.
            log.warn('collector_profile_interval is invalid. '
                     'Using default value instead (%s).' %
                     DEFAULT_COLLECTOR_PROFILE_INTERVAL)
            self.collector_profile_interval = DEFAULT_COLLECTOR_PROFILE_INTERVAL

        # Configure the watchdog.
        self.check_frequency = int(self._agentConfig['check_freq'])
        watchdog = self._get_watchdog(self.check_frequency)

        # Initialize the auto-restarter
        self.restart_interval = int(
            self._agentConfig.get('restart_interval', RESTART_INTERVAL))
        self.agent_start = time.time()

        self.allow_profiling = self._agentConfig.get('allow_profiling', True)

        profiled = False
        collector_profiled_runs = 0

        # Run the main loop.
        while self.run_forever:
            # Setup profiling if necessary
            if self.allow_profiling and self.in_developer_mode and not profiled:
                try:
                    profiler = AgentProfiler()
                    profiler.enable_profiling()
                    profiled = True
                except Exception as e:
                    log.warn("Cannot enable profiler: %s" % str(e))

            # reload_configs_flag is either True (full reload) or a set of
            # check names to reload (set from service discovery below;
            # presumably also by the SIGHUP handler — TODO confirm).
            if self.reload_configs_flag:
                if isinstance(self.reload_configs_flag, set):
                    self.reload_configs(
                        checks_to_reload=self.reload_configs_flag)
                else:
                    self.reload_configs()

            # Do the work. Pass `configs_reloaded` to let the collector know if it needs to
            # look for the AgentMetrics check and pop it out.
            self.collector.run(
                checksd=self._checksd,
                start_event=self.start_event,
                configs_reloaded=True if self.reload_configs_flag else False)

            # Reset only after the collector has seen the reloaded state.
            self.reload_configs_flag = False

            # Look for change in the config template store.
            # The self.sd_backend.reload_check_configs flag is set
            # to True if a config reload is needed.
            if self._agentConfig.get('service_discovery') and self.sd_backend and \
               not self.sd_backend.reload_check_configs:
                try:
                    self.sd_backend.reload_check_configs = get_config_store(
                        self._agentConfig).crawl_config_template()
                except Exception as e:
                    # Best-effort: a failed crawl must not kill the main loop.
                    log.warn(
                        'Something went wrong while looking for config template changes: %s'
                        % str(e))

            # Check if we should run service discovery
            # The `reload_check_configs` flag can be set through the docker_daemon check or
            # using ConfigStore.crawl_config_template
            if self._agentConfig.get('service_discovery') and self.sd_backend and \
               self.sd_backend.reload_check_configs:
                # Defer the actual reload to the top of the next iteration.
                self.reload_configs_flag = self.sd_backend.reload_check_configs
                self.sd_backend.reload_check_configs = False

            if profiled:
                if collector_profiled_runs >= self.collector_profile_interval:
                    try:
                        profiler.disable_profiling()
                        profiled = False
                        collector_profiled_runs = 0
                    except Exception as e:
                        log.warn("Cannot disable profiler: %s" % str(e))

            # Check if we should restart.
            if self.autorestart and self._should_restart():
                self._do_restart()

            # Only plan for next loop if we will continue, otherwise exit quickly.
            if self.run_forever:
                if watchdog:
                    watchdog.reset()
                if profiled:
                    collector_profiled_runs += 1
                log.debug("Sleeping for {0} seconds".format(
                    self.check_frequency))
                time.sleep(self.check_frequency)

        # Now clean-up.
        try:
            CollectorStatus.remove_latest_status()
        except Exception:
            pass

        # Explicitly kill the process, because it might be running as a daemon.
        log.info("Exiting. Bye bye.")
        sys.exit(0)
    def test_get_host_address(self, mock_check_yaml, mock_get):
        """_get_host_address must resolve the right IP for each `host*` template variable."""
        kubernetes_config = {'instances': [{'kubelet_port': 1337}]}
        pod_list = {
            'items': [{
                'status': {
                    'podIP': '127.0.0.1',
                    'containerStatuses': [
                        {'containerID': 'docker://389dc8a4361f3d6c866e9e9a7b6972b26a31c589c4e2f097375d55656a070bc9'}
                    ]
                }
            }]
        }

        # Each case is (container inspect payload, template variable, expected IP).
        ip_address_inspects = [
            # no usable address at all
            ({'NetworkSettings': {}}, 'host', None),
            ({'NetworkSettings': {'IPAddress': ''}}, 'host', None),
            # top-level IPAddress, with or without per-network entries
            ({'NetworkSettings': {'IPAddress': '127.0.0.1'}}, 'host', '127.0.0.1'),
            ({'NetworkSettings': {'IPAddress': '127.0.0.1', 'Networks': {}}}, 'host', '127.0.0.1'),
            ({'NetworkSettings': {
                'IPAddress': '127.0.0.1',
                'Networks': {'bridge': {'IPAddress': '127.0.0.1'}}}},
             'host', '127.0.0.1'),
            ({'NetworkSettings': {
                'IPAddress': '',
                'Networks': {'bridge': {'IPAddress': '127.0.0.1'}}}},
             'host_bridge', '127.0.0.1'),
            ({'NetworkSettings': {
                'IPAddress': '127.0.0.1',
                'Networks': {
                    'bridge': {'IPAddress': '172.17.0.2'},
                    'foo': {'IPAddress': '192.168.0.2'}}}},
             'host', '172.17.0.2'),
            # network-qualified template variables
            ({'NetworkSettings': {'Networks': {}}}, 'host', None),
            ({'NetworkSettings': {'Networks': {}}}, 'host_bridge', None),
            ({'NetworkSettings': {'Networks': {'bridge': {}}}}, 'host', None),
            ({'NetworkSettings': {'Networks': {'bridge': {}}}}, 'host_bridge', None),
            ({'NetworkSettings': {
                'Networks': {
                    'bridge': {'IPAddress': '172.17.0.2'}
                }}},
             'host_bridge', '172.17.0.2'),
            ({'NetworkSettings': {
                'Networks': {
                    'bridge': {'IPAddress': '172.17.0.2'},
                    'foo': {'IPAddress': '192.168.0.2'}
                }}},
             'host_foo', '192.168.0.2')
        ]

        mock_check_yaml.return_value = kubernetes_config
        mock_get.return_value = Response(pod_list)

        for inspect_payload, tpl_var, expected_ip in ip_address_inspects:
            # Neutralize external dependencies so get_sd_backend builds a bare backend.
            with mock.patch.object(AbstractConfigStore, '__init__', return_value=None), \
                    mock.patch('utils.dockerutil.DockerUtil.client', return_value=None), \
                    mock.patch('utils.kubeutil.get_conf_path', return_value=None):
                sd_backend = get_sd_backend(agentConfig=self.auto_conf_agentConfig)
                self.assertEquals(sd_backend._get_host_address(inspect_payload, tpl_var), expected_ip)
                # Drop the singletons so the next iteration gets a fresh backend.
                clear_singletons(self.auto_conf_agentConfig)