Example #1
    def __init__(self, name, init_config, instances):
        super(AmazonMskCheck, self).__init__(
            name,
            init_config,
            instances,
            default_instances={'aws.msk': {'ssl_verify': False}},
            default_namespace='aws.msk',
        )
        self._region_name = None
        self._cluster_arn = None
        self._assume_role = None
        self._exporter_data = (
            (int(self.instance.get('jmx_exporter_port', 11001)), JMX_METRICS_MAP, JMX_METRICS_OVERRIDES),
            (int(self.instance.get('node_exporter_port', 11002)), NODE_METRICS_MAP, NODE_METRICS_OVERRIDES),
        )
        self._prometheus_metrics_path = self.instance.get('prometheus_metrics_path', '/metrics')
        proxies = self.instance.get('proxy', init_config.get('proxy', datadog_agent.get_config('proxy')))
        try:
            self._boto_config = construct_boto_config(self.instance.get('boto_config', {}), proxies=proxies)
        except TypeError as e:
            self.log.debug("Got error when constructing Config object: %s", str(e))
            self.log.debug("Boto Config parameters: %s", self.instance.get('boto_config'))
            self._boto_config = None

        instance = self.instance.copy()
        instance['prometheus_url'] = 'necessary for scraper creation'

        self._scraper_config = self.create_scraper_configuration(instance)
        self._endpoint_prefix = 'https' if self._scraper_config['ssl_verify'] else 'http'

        self.check_initializations.append(self.parse_config)
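For reference, a minimal instance configuration exercising the keys read above might look like this; the ports and path mirror the constructor's defaults, while the proxy and boto_config values are purely illustrative:

instance = {
    'jmx_exporter_port': 11001,
    'node_exporter_port': 11002,
    'prometheus_metrics_path': '/metrics',
    'proxy': {'https': 'http://proxy.example.com:3128'},  # illustrative proxy
    'boto_config': {'connect_timeout': 5},  # forwarded to construct_boto_config
}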
Example #2
    def __init__(self, name, init_config, instances):
        super(SnmpCheck, self).__init__(name, init_config, instances)

        # Set OID batch size
        self.oid_batch_size = int(init_config.get('oid_batch_size', DEFAULT_OID_BATCH_SIZE))

        # Load Custom MIB directory
        self.mibs_path = init_config.get('mibs_folder')
        self.ignore_nonincreasing_oid = is_affirmative(init_config.get('ignore_nonincreasing_oid', False))
        self.profiles = init_config.get('profiles', {})
        self.profiles_by_oid = {}
        confd = get_config('confd_path')
        for profile, profile_data in self.profiles.items():
            filename = profile_data.get('definition_file')
            if filename:
                if not os.path.isabs(filename):
                    filename = os.path.join(confd, 'snmp.d', 'profiles', filename)
                try:
                    with open(filename) as f:
                        data = yaml.safe_load(f)
                except Exception:
                    raise ConfigurationError("Couldn't read profile '{}' in '{}'".format(profile, filename))
            else:
                data = profile_data['definition']
            self.profiles[profile] = {'definition': data}
            sys_object_oid = profile_data.get('sysobjectid')
            if sys_object_oid:
                self.profiles_by_oid[sys_object_oid] = profile

        self.instance['name'] = self._get_instance_key(self.instance)
        self._config = self._build_config(self.instance)
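A sketch of an init_config this profile-loading loop accepts: each profile carries either a definition_file (resolved under <confd>/snmp.d/profiles/ when relative) or an inline definition, plus an optional sysobjectid that feeds profiles_by_oid. The profile names and OID below are placeholders:

init_config = {
    'oid_batch_size': 10,
    'profiles': {
        'generic-router': {
            'definition_file': 'generic-router.yaml',  # resolved under <confd>/snmp.d/profiles/
            'sysobjectid': '1.3.6.1.4.1.9.1.1',  # placeholder OID
        },
        'inline-profile': {
            'definition': {'metrics': []},  # inline definition, used as-is
        },
    },
}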
Example #3
def traced_class(cls):
    if os.getenv('DDEV_TRACE_ENABLED', 'false') == 'true' or (
            datadog_agent is not None and is_affirmative(
                datadog_agent.get_config('integration_tracing'))):
        try:
            from ddtrace import patch_all, tracer

            patch_all()

            def decorate(cls):
                for attr in cls.__dict__:
                    attribute = getattr(cls, attr)
                    # Ignoring staticmethod and classmethod because they don't need cls in args
                    # also ignore nested classes
                    if (callable(attribute) and not inspect.isclass(attribute)
                            and
                            not isinstance(cls.__dict__[attr], staticmethod)
                            and not isinstance(cls.__dict__[attr], classmethod)
                            # Get rid of SnmpCheck._thread_factory and related
                            and getattr(attribute, '__module__',
                                        'threading') not in EXCLUDED_MODULES):
                        setattr(cls, attr, tracing_method(attribute, tracer))
                return cls

            return decorate(cls)
        except Exception:
            pass

    return cls
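Since the environment check runs at decoration time, traced_class can be applied directly as a class decorator; a minimal sketch, assuming the surrounding module (tracing_method, EXCLUDED_MODULES) is importable, with a hypothetical check class:

import os

os.environ['DDEV_TRACE_ENABLED'] = 'true'  # must be set before the class is defined

@traced_class
class MyCheck(object):  # hypothetical check class
    def check(self, instance):
        return 'ok'  # wrapped by tracing_method when ddtrace is available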
Example #4
    def _get_record(kind, name, namespace):
        global client, config
        if client is config is None:
            from kubernetes import client, config  # noqa F401

        kubeconfig_path = datadog_agent.get_config(
            'kubernetes_kubeconfig_path')
        if kubeconfig_path:
            config.load_kube_config(config_file=kubeconfig_path)
        else:
            config.load_incluster_config()

        if kind.lower() == "auto":
            # Try lease object
            try:
                return KubeLeaderElectionMixin._get_record_from_lease(
                    client, name, namespace)
            except client.exceptions.ApiException:
                pass

            # Default to endpoints object
            return KubeLeaderElectionMixin._get_record_from_annotation(
                client, "endpoints", name, namespace)

        elif kind.lower() in ["leases", "lease"]:
            return KubeLeaderElectionMixin._get_record_from_lease(
                client, name, namespace)
        else:
            return KubeLeaderElectionMixin._get_record_from_annotation(
                client, kind, name, namespace)
Example #5
def init_logging():
    # type: () -> None
    """
    Initialize logging (set up forwarding to Go backend and sane defaults)
    """
    # Forward to Go backend
    logging.addLevelName(TRACE_LEVEL, 'TRACE')
    logging.setLoggerClass(AgentLogger)
    # Capture warnings as logs so it's easier for log parsers to handle them.
    logging.captureWarnings(True)

    rootLogger = logging.getLogger()
    rootLogger.addHandler(AgentLogHandler())
    rootLogger.setLevel(_get_py_loglevel(datadog_agent.get_config('log_level')))

    # We log instead of emitting warnings for unintentionally insecure HTTPS requests
    warnings.simplefilter('ignore', InsecureRequestWarning)

    # `requests` (used in a lot of checks) imports `urllib3`, which logs a bunch of stuff at the info level
    # Therefore, pre-emptively increase the default level of that logger to `WARN`
    urllib_logger = logging.getLogger("requests.packages.urllib3")
    urllib_logger.setLevel(logging.WARN)
    urllib_logger.propagate = True
Example #6
    def _get_record(kind, name, namespace):
        kubeconfig_path = datadog_agent.get_config(
            'kubernetes_kubeconfig_path')
        if kubeconfig_path:
            config.load_kube_config(config_file=kubeconfig_path)
        else:
            config.load_incluster_config()
        v1 = client.CoreV1Api()

        obj = None
        if kind.lower() in ["endpoints", "endpoint", "ep"]:
            obj = v1.read_namespaced_endpoints(name, namespace)
        elif kind.lower() in ["configmap", "cm"]:
            obj = v1.read_namespaced_config_map(name, namespace)
        else:
            raise ValueError("Unknown kind {}".format(kind))

        if not obj:
            raise ValueError("Empty input object")

        try:
            annotations = obj.metadata.annotations
        except AttributeError:
            raise ValueError("Invalid input object type")

        for name in ELECTION_ANNOTATION_NAMES:
            if name in annotations:
                return ElectionRecord(annotations[name])

        # Could not find annotation
        raise ValueError("Object has no leader election annotation")
Example #7
    def _is_dogstatsd_configured(self):
        """Check if the agent has a consul dogstatsd profile configured."""
        dogstatsd_mapper = datadog_agent.get_config('dogstatsd_mapper_profiles')
        if dogstatsd_mapper:
            for profile in dogstatsd_mapper:
                if profile.get('name') == 'consul':
                    return True
        return False
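The helper only inspects each profile's name field. A sketch of a dogstatsd_mapper_profiles value that would make it return True; the prefix and mappings entries are illustrative:

# Illustrative value as returned by datadog_agent.get_config('dogstatsd_mapper_profiles')
dogstatsd_mapper = [
    {
        'name': 'consul',
        'prefix': 'consul.',
        'mappings': [{'match': 'consul.http.*.*', 'name': 'consul.http.request'}],
    },
]
assert any(profile.get('name') == 'consul' for profile in dogstatsd_mapper)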
Example #8
def compute_pod_expiration_datetime():
    try:
        seconds = int(get_config('kubernetes_pod_expiration_duration'))
        # expiration disabled
        if seconds == 0:
            return None
        return datetime.utcnow().replace(tzinfo=UTC) - timedelta(
            seconds=seconds)
    except (ValueError, TypeError):
        return None
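The except (ValueError, TypeError) clause is what makes an unset option safe here: get_config returns None for a missing key, and int(None) raises TypeError. A small self-contained sketch of the three outcomes, with the config lookup stubbed out:

from datetime import datetime, timedelta

def expiration_cutoff(raw):  # stand-in for int(get_config(...)) above
    try:
        seconds = int(raw)
        if seconds == 0:  # expiration disabled
            return None
        return datetime.utcnow() - timedelta(seconds=seconds)
    except (ValueError, TypeError):
        return None  # option unset or malformed

print(expiration_cutoff(None))   # None: int(None) raises TypeError
print(expiration_cutoff('0'))    # None: explicitly disabled
print(expiration_cutoff('900'))  # datetime 15 minutes in the past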
Example #9
    def traced_wrapper(self, *args, **kwargs):
        if datadog_agent is None:
            return fn(self, *args, **kwargs)

        trace_check = is_affirmative(self.init_config.get('trace_check'))
        integration_tracing = is_affirmative(datadog_agent.get_config('integration_tracing'))

        if integration_tracing and trace_check:
            with tracer.trace('integration.check', service='integrations-tracing', resource=self.name):
                return fn(self, *args, **kwargs)
        return fn(self, *args, **kwargs)
Example #10
    def set_paths(self):
        proc_location = (datadog_agent.get_config('procfs_path') or '/proc').rstrip('/')

        self.proc_path_map = {
            "inode_info": "sys/fs/inode-nr",
            "stat_info": "stat",
            "entropy_info": "sys/kernel/random/entropy_avail",
            "interrupts_info": "interrupts",
        }

        for key, path in iteritems(self.proc_path_map):
            self.proc_path_map[key] = "{procfs}/{path}".format(procfs=proc_location, path=path)
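With the agent option procfs_path set to, say, /host/proc (an illustrative container-mount value), the loop above yields absolute paths; an equivalent standalone expansion:

proc_location = '/host/proc'.rstrip('/')  # illustrative agent procfs_path
proc_path_map = {
    "inode_info": "sys/fs/inode-nr",
    "stat_info": "stat",
    "entropy_info": "sys/kernel/random/entropy_avail",
    "interrupts_info": "interrupts",
}
proc_path_map = {key: "{}/{}".format(proc_location, path) for key, path in proc_path_map.items()}
assert proc_path_map['stat_info'] == '/host/proc/stat'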
Example #11
    def __init__(self, name, init_config, instances):
        super(ProcessCheck, self).__init__(name, init_config, instances)

        self.name = self.instance.get('name', None)
        self.tags = self.instance.get('tags', [])
        self.exact_match = is_affirmative(
            self.instance.get('exact_match', True))
        self.search_string = self.instance.get('search_string', None)
        self.ignore_ad = is_affirmative(
            self.instance.get('ignore_denied_access', True))
        self.pid = self.instance.get('pid')
        self.pid_file = self.instance.get('pid_file')
        self.collect_children = is_affirmative(
            self.instance.get('collect_children', False))
        self.user = self.instance.get('user', False)
        self.try_sudo = self.instance.get('try_sudo', False)

        # ad stands for access denied
        # We cache the PIDs getting this error and don't iterate on them more often than `access_denied_cache_duration`
        # This cache is for all PIDs so it's global, but it should be refreshed by instance
        self.last_ad_cache_ts = {}
        self.ad_cache = set()
        self.access_denied_cache_duration = int(
            init_config.get('access_denied_cache_duration',
                            DEFAULT_AD_CACHE_DURATION))

        # By default cache the PID list for a while
        # Sometimes it's not wanted b/c it can mess with no-data monitoring
        # This cache is indexed per instance
        self.last_pid_cache_ts = {}
        self.pid_cache = {}
        self.pid_cache_duration = int(
            init_config.get('pid_cache_duration', DEFAULT_PID_CACHE_DURATION))

        self._conflicting_procfs = False
        self._deprecated_init_procfs = False
        if Platform.is_linux():
            procfs_path = init_config.get('procfs_path')
            if procfs_path:
                agent_procfs_path = datadog_agent.get_config('procfs_path')
                if agent_procfs_path and procfs_path != agent_procfs_path.rstrip(
                        '/'):
                    self._conflicting_procfs = True
                else:
                    self._deprecated_init_procfs = True
                    psutil.PROCFS_PATH = procfs_path

        # Process cache, indexed by instance
        self.process_cache = defaultdict(dict)

        self.process_list_cache.cache_duration = int(
            init_config.get('shared_process_list_cache_duration',
                            DEFAULT_SHARED_PROCESS_LIST_CACHE_DURATION))
Example #12
def traced(wrapped, instance, args, kwargs):
    if datadog_agent is None:
        return wrapped(*args, **kwargs)

    trace_check = is_affirmative(instance.init_config.get('trace_check'))
    integration_tracing = is_affirmative(datadog_agent.get_config('integration_tracing'))

    if integration_tracing and trace_check:
        with tracer.trace('integration.check', service='integrations-tracing', resource=instance.name):
            return wrapped(*args, **kwargs)

    return wrapped(*args, **kwargs)
Example #13
    def _compute_pod_expiration_datetime():
        """
        Looks up the agent's kubernetes_pod_expiration_duration option and returns either:
          - None if expiration is disabled (set to 0)
          - A (timezone aware) datetime object to compare against
        """
        try:
            seconds = int(get_config("kubernetes_pod_expiration_duration"))
            if seconds == 0:  # Expiration disabled
                return None
            return datetime.utcnow().replace(tzinfo=UTC) - timedelta(seconds=seconds)
        except (ValueError, TypeError):
            return None
Example #14
def init_logging():
    """
    Initialize logging (set up forwarding to Go backend and sane defaults)
    """
    # Forward to Go backend
    rootLogger = logging.getLogger()
    rootLogger.addHandler(AgentLogHandler())
    rootLogger.setLevel(_get_py_loglevel(datadog_agent.get_config('log_level')))

    # `requests` (used in a lot of checks) imports `urllib3`, which logs a bunch of stuff at the info level
    # Therefore, pre-emptively increase the default level of that logger to `WARN`
    urllib_logger = logging.getLogger("requests.packages.urllib3")
    urllib_logger.setLevel(logging.WARN)
    urllib_logger.propagate = True
Example #15
    def _get_requests_proxy(self):
        # TODO: Remove with Agent 5
        no_proxy_settings = {"http": None, "https": None, "no": []}

        # First we read the proxy configuration from datadog.conf
        proxies = self.agentConfig.get('proxy', datadog_agent.get_config('proxy'))
        if proxies:
            proxies = proxies.copy()

        # requests compliant dict
        if proxies and 'no_proxy' in proxies:
            proxies['no'] = proxies.pop('no_proxy')

        return proxies if proxies else no_proxy_settings
Example #16
def _load_event_endpoints_from_config(config_prefix, default_url):
    """
    Returns a list of requests sessions and their endpoint urls [(http, url), ...]
    Requests sessions are initialized the first time this is called and reused thereafter
    :return: list of (http, url)

    :param config_prefix:
    :param default_url:
    :return:
    """
    url = _event_intake_url(
        datadog_agent.get_config('{}.dd_url'.format(config_prefix))
        or default_url)
    endpoints = [(_new_api_session(datadog_agent.get_config('api_key')), url)]
    logger.debug("initializing event endpoints from %s. url=%s", config_prefix,
                 url)

    for additional_endpoint in datadog_agent.get_config(
            '{}.additional_endpoints'.format(config_prefix)) or []:
        api_key, host = additional_endpoint.get(
            'api_key'), additional_endpoint.get('host')
        missing_keys = [
            k for k, v in [('api_key', api_key), ('host', host)] if not v
        ]
        if missing_keys:
            logger.warning(
                "invalid event endpoint found in %s.additional_endpoints. missing required keys %s",
                config_prefix,
                ', '.join(missing_keys),
            )
            continue
        url = _event_intake_url(host)
        endpoints.append((_new_api_session(api_key), url))
        logger.debug("initializing additional event endpoint from %s. url=%s",
                     config_prefix, url)

    return endpoints
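For reference, the code expects <config_prefix>.additional_endpoints to be a list of mappings carrying api_key and host; entries missing either key are skipped with a warning. A sketch of a matching value, with placeholder hosts and keys:

# Illustrative value for datadog_agent.get_config('<config_prefix>.additional_endpoints')
additional_endpoints = [
    {'api_key': '<API_KEY_1>', 'host': 'app.datadoghq.com'},
    {'api_key': '<API_KEY_2>', 'host': 'app.datadoghq.eu'},
    {'host': 'backup.example.com'},  # skipped: api_key missing
]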
Example #17
    def __init__(self, name, init_config, instances):
        super().__init__(name, init_config, instances)

        self._region_name = None
        self._exporter_data = None
        self._endpoint_prefix = None
        self._static_tags = None
        self._service_check_tags = None
        proxies = self.instance.get('proxy', init_config.get('proxy', datadog_agent.get_config('proxy')))
        try:
            self._boto_config = construct_boto_config(self.instance.get('boto_config', {}), proxies=proxies)
        except TypeError as e:
            self.log.debug("Got error when constructing Config object: %s", str(e))
            self.log.debug("Boto Config parameters: %s", self.instance.get('boto_config'))
            self._boto_config = None
        self.check_initializations.append(self.parse_config)
Example #18
    def _get_requests_proxy(self):
        no_proxy_settings = {
            'http': None,
            'https': None,
            'no': [],
        }

        # First we read the proxy configuration from datadog.conf
        proxies = self.agentConfig.get('proxy', datadog_agent.get_config('proxy'))
        if proxies:
            proxies = proxies.copy()

        # requests compliant dict
        if proxies and 'no_proxy' in proxies:
            proxies['no'] = proxies.pop('no_proxy')

        return proxies if proxies else no_proxy_settings
Example #19
def get_requests_proxy(agentConfig):
    no_proxy_settings = {
        "http": None,
        "https": None,
        "no": [],
    }

    config = {} if agentConfig is None else agentConfig

    # First we read the proxy configuration from datadog.conf
    proxies = config.get('proxy', datadog_agent.get_config('proxy'))
    if proxies:
        proxies = proxies.copy()

    # requests compliant dict
    if proxies and 'no_proxy' in proxies:
        proxies['no'] = proxies.pop('no_proxy')

    return proxies if proxies else no_proxy_settings
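The returned dict is shaped for requests; the nonstandard 'no' key is ignored by requests itself and is meant for manual no_proxy handling by callers. A usage sketch (note the agent fallback datadog_agent.get_config('proxy') is evaluated eagerly as the .get() default, so that module or its stub must be importable; the URL is arbitrary):

import requests

agent_config = {'proxy': {'https': 'http://proxy.example.com:3128', 'no_proxy': 'localhost'}}
proxies = get_requests_proxy(agent_config)
# proxies == {'https': 'http://proxy.example.com:3128', 'no': 'localhost'}
response = requests.get('https://example.com', proxies=proxies, timeout=10)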
Example #20
    def traced_wrapper(self, *args, **kwargs):
        if datadog_agent is None:
            return fn(self, *args, **kwargs)

        trace_check = is_affirmative(self.init_config.get('trace_check'))
        integration_tracing = is_affirmative(
            datadog_agent.get_config('integration_tracing'))

        if integration_tracing and trace_check:
            try:
                from ddtrace import patch_all, tracer

                patch_all()
                with tracer.trace(fn.__name__,
                                  service='{}-integration'.format(self.name),
                                  resource=fn.__name__):
                    return fn(self, *args, **kwargs)
            except Exception:
                pass
        return fn(self, *args, **kwargs)
Example #21
    def __init__(self, name, init_config, instances=None):
        super(ProcessCheck, self).__init__(name, init_config, instances)

        # ad stands for access denied
        # We cache the PIDs getting this error and don't iterate on them more often than `access_denied_cache_duration`
        # This cache is for all PIDs so it's global, but it should be refreshed by instance
        self.last_ad_cache_ts = {}
        self.ad_cache = set()
        self.access_denied_cache_duration = int(
            init_config.get('access_denied_cache_duration',
                            DEFAULT_AD_CACHE_DURATION))

        # By default cache the PID list for a while
        # Sometimes it's not wanted b/c it can mess with no-data monitoring
        # This cache is indexed per instance
        self.last_pid_cache_ts = {}
        self.pid_cache = {}
        self.pid_cache_duration = int(
            init_config.get('pid_cache_duration', DEFAULT_PID_CACHE_DURATION))

        self._conflicting_procfs = False
        self._deprecated_init_procfs = False
        if Platform.is_linux():
            procfs_path = init_config.get('procfs_path')
            if procfs_path:
                agent_procfs_path = datadog_agent.get_config('procfs_path')
                if agent_procfs_path and procfs_path != agent_procfs_path.rstrip(
                        '/'):
                    self._conflicting_procfs = True
                else:
                    self._deprecated_init_procfs = True
                    psutil.PROCFS_PATH = procfs_path

        # Process cache, indexed by instance
        self.process_cache = defaultdict(dict)

        self.process_list_cache.cache_duration = int(
            init_config.get('shared_process_list_cache_duration',
                            DEFAULT_SHARED_PROCESS_LIST_CACHE_DURATION))
Example #22
    def __init__(self, instance, init_config, remapper=None, logger=None):
        self.logger = logger or LOGGER
        default_fields = dict(STANDARD_FIELDS)

        # Update the default behavior for global settings
        default_fields['log_requests'] = init_config.get(
            'log_requests', default_fields['log_requests'])
        default_fields['skip_proxy'] = init_config.get(
            'skip_proxy', default_fields['skip_proxy'])
        default_fields['timeout'] = init_config.get('timeout',
                                                    default_fields['timeout'])
        default_fields['tls_ignore_warning'] = init_config.get(
            'tls_ignore_warning', default_fields['tls_ignore_warning'])

        # Populate with the default values
        config = {
            field: instance.get(field, value)
            for field, value in iteritems(default_fields)
        }

        # Support non-standard (usually legacy) configurations, for example:
        # {
        #     'disable_ssl_validation': {
        #         'name': 'tls_verify',
        #         'default': False,
        #         'invert': True,
        #     },
        #     ...
        # }
        if remapper is None:
            remapper = {}

        remapper.update(DEFAULT_REMAPPED_FIELDS)

        for remapped_field, data in iteritems(remapper):
            field = data.get('name')

            # Ignore fields we don't recognize
            if field not in STANDARD_FIELDS:
                continue

            # Ignore remapped fields if the standard one is already used
            if field in instance:
                continue

            # Invert default booleans if need be
            default = default_fields[field]
            if data.get('invert'):
                default = not default

            # Get value, with a possible default
            value = instance.get(remapped_field, data.get('default', default))

            # Invert booleans if need be
            if data.get('invert'):
                value = not is_affirmative(value)

            config[field] = value

        # http://docs.python-requests.org/en/master/user/advanced/#timeouts
        connect_timeout = read_timeout = float(config['timeout'])
        if config['connect_timeout'] is not None:
            connect_timeout = float(config['connect_timeout'])

        if config['read_timeout'] is not None:
            read_timeout = float(config['read_timeout'])

        # http://docs.python-requests.org/en/master/user/quickstart/#custom-headers
        # http://docs.python-requests.org/en/master/user/advanced/#header-ordering
        headers = get_default_headers()
        if config['headers']:
            headers.clear()
            update_headers(headers, config['headers'])

        if config['extra_headers']:
            update_headers(headers, config['extra_headers'])

        # https://toolbelt.readthedocs.io/en/latest/adapters.html#hostheaderssladapter
        self.tls_use_host_header = is_affirmative(
            config['tls_use_host_header']) and 'Host' in headers

        # http://docs.python-requests.org/en/master/user/authentication/
        auth_type = config['auth_type'].lower()
        if auth_type not in AUTH_TYPES:
            self.logger.warning(
                'auth_type %s is not supported, defaulting to basic',
                auth_type)
            auth_type = 'basic'

        if auth_type == 'basic':
            if config['kerberos_auth']:
                self.logger.warning(
                    'The ability to use Kerberos auth without explicitly setting auth_type to '
                    '`kerberos` is deprecated and will be removed in Agent 8')
                auth_type = 'kerberos'
            elif config['ntlm_domain']:
                self.logger.warning(
                    'The ability to use NTLM auth without explicitly setting auth_type to '
                    '`ntlm` is deprecated and will be removed in Agent 8')
                auth_type = 'ntlm'

        auth = AUTH_TYPES[auth_type](config)

        # http://docs.python-requests.org/en/master/user/advanced/#ssl-cert-verification
        verify = True
        if isinstance(config['tls_ca_cert'], string_types):
            verify = config['tls_ca_cert']
        elif not is_affirmative(config['tls_verify']):
            verify = False

        # http://docs.python-requests.org/en/master/user/advanced/#client-side-certificates
        cert = None
        if isinstance(config['tls_cert'], string_types):
            if isinstance(config['tls_private_key'], string_types):
                cert = (config['tls_cert'], config['tls_private_key'])
            else:
                cert = config['tls_cert']

        # http://docs.python-requests.org/en/master/user/advanced/#proxies
        no_proxy_uris = None
        if is_affirmative(config['skip_proxy']):
            proxies = PROXY_SETTINGS_DISABLED.copy()
        else:
            # Order of precedence is:
            # 1. instance
            # 2. init_config
            # 3. agent config
            proxies = config['proxy'] or init_config.get('proxy')

            # TODO: Deprecate this flag now that we support skip_proxy in init_config
            if not proxies and is_affirmative(
                    init_config.get('use_agent_proxy', True)):
                proxies = datadog_agent.get_config('proxy')

            if proxies:
                proxies = proxies.copy()

                # TODO: Pass `no_proxy` directly to `requests` once this issue is fixed:
                # https://github.com/kennethreitz/requests/issues/5000
                if 'no_proxy' in proxies:
                    no_proxy_uris = proxies.pop('no_proxy')

                    if isinstance(no_proxy_uris, string_types):
                        no_proxy_uris = no_proxy_uris.replace(';',
                                                              ',').split(',')
            else:
                proxies = None

        # Default options
        self.options = {
            'auth': auth,
            'cert': cert,
            'headers': headers,
            'proxies': proxies,
            'timeout': (connect_timeout, read_timeout),
            'verify': verify,
        }

        # For manual parsing until `requests` properly handles `no_proxy`
        self.no_proxy_uris = no_proxy_uris

        # Ignore warnings for lack of SSL validation
        self.ignore_tls_warning = is_affirmative(config['tls_ignore_warning'])

        # For connection and cookie persistence, if desired. See:
        # https://en.wikipedia.org/wiki/HTTP_persistent_connection#Advantages
        # http://docs.python-requests.org/en/master/user/advanced/#session-objects
        # http://docs.python-requests.org/en/master/user/advanced/#keep-alive
        self.persist_connections = self.tls_use_host_header or is_affirmative(
            config['persist_connections'])
        self._session = None

        # Whether or not to log request information like method and url
        self.log_requests = is_affirmative(config['log_requests'])

        # Set up any auth token handlers
        if config['auth_token'] is not None:
            self.auth_token_handler = create_auth_token_handler(
                config['auth_token'])
        else:
            self.auth_token_handler = None

        # Context managers that should wrap all requests
        self.request_hooks = []

        if config['kerberos_keytab']:
            self.request_hooks.append(
                lambda: handle_kerberos_keytab(config['kerberos_keytab']))
        if config['kerberos_cache']:
            self.request_hooks.append(
                lambda: handle_kerberos_cache(config['kerberos_cache']))
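To make the remapping concrete: with the remapper shape from the comment above, and assuming tls_verify defaults to True in STANDARD_FIELDS, a legacy instance setting disable_ssl_validation: true ends up as tls_verify=False. A minimal re-enactment of that path:

instance = {'disable_ssl_validation': True}  # legacy option
data = {'name': 'tls_verify', 'default': False, 'invert': True}

default = True  # assumed STANDARD_FIELDS['tls_verify']
if data.get('invert'):  # invert the default boolean
    default = not default

value = instance.get('disable_ssl_validation', data.get('default', default))
if data.get('invert'):  # invert the provided value
    value = not bool(value)  # bool() stands in for is_affirmative

print(value)  # False: disabling SSL validation maps to tls_verify=False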
Example #23
    def __init__(self, instance, init_config, remapper=None, logger=None):
        self.logger = logger or LOGGER
        default_fields = dict(STANDARD_FIELDS)

        # Update the default behavior for global settings
        default_fields['log_requests'] = init_config.get(
            'log_requests', default_fields['log_requests'])
        default_fields['skip_proxy'] = init_config.get(
            'skip_proxy', default_fields['skip_proxy'])
        default_fields['timeout'] = init_config.get('timeout',
                                                    default_fields['timeout'])

        # Populate with the default values
        config = {
            field: instance.get(field, value)
            for field, value in iteritems(default_fields)
        }

        # Support non-standard (usually legacy) configurations, for example:
        # {
        #     'disable_ssl_validation': {
        #         'name': 'tls_verify',
        #         'default': False,
        #         'invert': True,
        #     },
        #     ...
        # }
        if remapper is None:
            remapper = {}

        remapper.update(DEFAULT_REMAPPED_FIELDS)

        for remapped_field, data in iteritems(remapper):
            field = data.get('name')

            # Ignore fields we don't recognize
            if field not in STANDARD_FIELDS:
                continue

            # Ignore remapped fields if the standard one is already used
            if field in instance:
                continue

            # Get value, with a possible default
            value = instance.get(remapped_field,
                                 data.get('default', default_fields[field]))

            # Invert booleans if need be
            if data.get('invert'):
                value = not is_affirmative(value)

            config[field] = value

        # http://docs.python-requests.org/en/master/user/advanced/#timeouts
        connect_timeout = read_timeout = float(config['timeout'])
        if config['connect_timeout'] is not None:
            connect_timeout = float(config['connect_timeout'])

        if config['read_timeout'] is not None:
            read_timeout = float(config['read_timeout'])

        # http://docs.python-requests.org/en/master/user/quickstart/#custom-headers
        # http://docs.python-requests.org/en/master/user/advanced/#header-ordering
        headers = get_default_headers()
        if config['headers']:
            headers.clear()
            update_headers(headers, config['headers'])

        if config['extra_headers']:
            update_headers(headers, config['extra_headers'])

        # http://docs.python-requests.org/en/master/user/authentication/
        auth = None
        if config['password']:
            if config['username']:
                auth = (config['username'], config['password'])
            elif config['ntlm_domain']:
                ensure_ntlm()

                auth = requests_ntlm.HttpNtlmAuth(config['ntlm_domain'],
                                                  config['password'])

        if auth is None and config['kerberos_auth']:
            ensure_kerberos()

            # For convenience
            if is_affirmative(config['kerberos_auth']):
                config['kerberos_auth'] = 'required'

            if config['kerberos_auth'] not in KERBEROS_STRATEGIES:
                raise ConfigurationError(
                    'Invalid Kerberos strategy `{}`, must be one of: {}'.
                    format(config['kerberos_auth'],
                           ' | '.join(KERBEROS_STRATEGIES)))

            auth = requests_kerberos.HTTPKerberosAuth(
                mutual_authentication=KERBEROS_STRATEGIES[
                    config['kerberos_auth']],
                delegate=is_affirmative(config['kerberos_delegate']),
                force_preemptive=is_affirmative(
                    config['kerberos_force_initiate']),
                hostname_override=config['kerberos_hostname'],
                principal=config['kerberos_principal'],
            )

        # http://docs.python-requests.org/en/master/user/advanced/#ssl-cert-verification
        verify = True
        if isinstance(config['tls_ca_cert'], string_types):
            verify = config['tls_ca_cert']
        elif not is_affirmative(config['tls_verify']):
            verify = False

        # http://docs.python-requests.org/en/master/user/advanced/#client-side-certificates
        cert = None
        if isinstance(config['tls_cert'], string_types):
            if isinstance(config['tls_private_key'], string_types):
                cert = (config['tls_cert'], config['tls_private_key'])
            else:
                cert = config['tls_cert']

        # http://docs.python-requests.org/en/master/user/advanced/#proxies
        no_proxy_uris = None
        if is_affirmative(config['skip_proxy']):
            proxies = PROXY_SETTINGS_DISABLED.copy()
        else:
            # Order of precedence is:
            # 1. instance
            # 2. init_config
            # 3. agent config
            proxies = config['proxy'] or init_config.get('proxy')

            # TODO: Deprecate this flag now that we support skip_proxy in init_config
            if not proxies and is_affirmative(
                    init_config.get('use_agent_proxy', True)):
                proxies = datadog_agent.get_config('proxy')

            if proxies:
                proxies = proxies.copy()

                # TODO: Pass `no_proxy` directly to `requests` once this issue is fixed:
                # https://github.com/kennethreitz/requests/issues/5000
                if 'no_proxy' in proxies:
                    no_proxy_uris = proxies.pop('no_proxy')

                    if isinstance(no_proxy_uris, string_types):
                        no_proxy_uris = no_proxy_uris.replace(';',
                                                              ',').split(',')
            else:
                proxies = None

        # Default options
        self.options = {
            'auth': auth,
            'cert': cert,
            'headers': headers,
            'proxies': proxies,
            'timeout': (connect_timeout, read_timeout),
            'verify': verify,
        }

        # For manual parsing until `requests` properly handles `no_proxy`
        self.no_proxy_uris = no_proxy_uris

        # Ignore warnings for lack of SSL validation
        self.ignore_tls_warning = is_affirmative(config['tls_ignore_warning'])

        # For connection and cookie persistence, if desired. See:
        # https://en.wikipedia.org/wiki/HTTP_persistent_connection#Advantages
        # http://docs.python-requests.org/en/master/user/advanced/#session-objects
        # http://docs.python-requests.org/en/master/user/advanced/#keep-alive
        self.persist_connections = is_affirmative(
            config['persist_connections'])
        self._session = None

        # Whether or not to log request information like method and url
        self.log_requests = is_affirmative(config['log_requests'])

        # Context managers that should wrap all requests
        self.request_hooks = [self.handle_tls_warning]

        if config['kerberos_keytab']:
            self.request_hooks.append(
                lambda: handle_kerberos_keytab(config['kerberos_keytab']))
Example #24
    def test_get_config(self):
        self.assertEqual(datadog_agent.get_config('dd_url'), "https://test.datadoghq.com")
Example #25
    def is_metadata_collection_enabled(self):
        return is_affirmative(datadog_agent.get_config('enable_metadata_collection'))
Example #26
    def _check_linux(self, instance):
        """
        _check_linux can be run inside a container and still collects the network metrics from the host
        For that procfs_path can be set to something like "/host/proc"
        When a custom procfs_path is set, the collect_connection_state option is ignored
        """
        proc_location = datadog_agent.get_config('procfs_path')
        if not proc_location:
            proc_location = '/proc'
        proc_location = proc_location.rstrip('/')
        custom_tags = instance.get('tags', [])

        net_proc_base_location = self._get_net_proc_base_location(
            proc_location)

        if self._is_collect_cx_state_runnable(net_proc_base_location):
            try:
                self.log.debug("Using `ss` to collect connection state")
                # Try using `ss` for increased performance over `netstat`
                ss_env = {"PROC_ROOT": net_proc_base_location}

                # By providing the environment variables in ss_env, the PATH will be overridden. In CentOS,
                # datadog-agent PATH is "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin", while sh PATH
                # will be '/usr/local/bin:/usr/bin'. In CentOS, ss is located in /sbin and /usr/sbin, not
                # in the sh PATH, which will result in network metric collection failure.
                #
                # The line below will set sh PATH explicitly as the datadog-agent PATH to fix that issue.
                if "PATH" in os.environ:
                    ss_env["PATH"] = os.environ["PATH"]

                metrics = self._get_metrics()
                for ip_version in ['4', '6']:
                    # Call `ss` for each IP version because there's no built-in way of distinguishing
                    # between the IP versions in the output
                    # Also calls `ss` for each protocol, because on some systems (e.g. Ubuntu 14.04), there is a
                    # bug that prints `tcp` even if it's `udp`
                    # The `-H` flag isn't available on old versions of `ss`.
                    cmd = "ss --numeric --tcp --all --ipv{} | cut -d ' ' -f 1 | sort | uniq -c".format(
                        ip_version)
                    output, _, _ = get_subprocess_output(["sh", "-c", cmd],
                                                         self.log,
                                                         env=ss_env)

                    # 7624 CLOSE-WAIT
                    #   72 ESTAB
                    #    9 LISTEN
                    #    1 State
                    #   37 TIME-WAIT
                    lines = output.splitlines()

                    self._parse_short_state_lines(lines,
                                                  metrics,
                                                  self.tcp_states['ss'],
                                                  ip_version=ip_version)

                    cmd = "ss --numeric --udp --all --ipv{} | wc -l".format(
                        ip_version)
                    output, _, _ = get_subprocess_output(["sh", "-c", cmd],
                                                         self.log,
                                                         env=ss_env)
                    metric = self.cx_state_gauge[('udp{}'.format(ip_version),
                                                  'connections')]
                    metrics[metric] = int(output) - 1  # Remove header

                    if self._collect_cx_queues:
                        cmd = "ss --numeric --tcp --all --ipv{}".format(
                            ip_version)
                        output, _, _ = get_subprocess_output(["sh", "-c", cmd],
                                                             self.log,
                                                             env=ss_env)
                        for (state, recvq,
                             sendq) in self._parse_queues("ss", output):
                            self.histogram('system.net.tcp.recv_q', recvq,
                                           custom_tags + ["state:" + state])
                            self.histogram('system.net.tcp.send_q', sendq,
                                           custom_tags + ["state:" + state])

                for metric, value in iteritems(metrics):
                    self.gauge(metric, value, tags=custom_tags)

            except OSError as e:
                self.log.info(
                    "`ss` invocation failed: %s. Using `netstat` as a fallback",
                    str(e))
                output, _, _ = get_subprocess_output(
                    ["netstat", "-n", "-u", "-t", "-a"], self.log)
                lines = output.splitlines()
                # Active Internet connections (w/o servers)
                # Proto Recv-Q Send-Q Local Address           Foreign Address         State
                # tcp        0      0 46.105.75.4:80          79.220.227.193:2032     SYN_RECV
                # tcp        0      0 46.105.75.4:143         90.56.111.177:56867     ESTABLISHED
                # tcp        0      0 46.105.75.4:50468       107.20.207.175:443      TIME_WAIT
                # tcp6       0      0 46.105.75.4:80          93.15.237.188:58038     FIN_WAIT2
                # tcp6       0      0 46.105.75.4:80          79.220.227.193:2029     ESTABLISHED
                # udp        0      0 0.0.0.0:123             0.0.0.0:*
                # udp6       0      0 :::41458                :::*

                metrics = self._parse_linux_cx_state(
                    lines[2:], self.tcp_states['netstat'], 5)
                for metric, value in iteritems(metrics):
                    self.gauge(metric, value, tags=custom_tags)

                if self._collect_cx_queues:
                    for (state, recvq,
                         sendq) in self._parse_queues("netstat", output):
                        self.histogram('system.net.tcp.recv_q', recvq,
                                       custom_tags + ["state:" + state])
                        self.histogram('system.net.tcp.send_q', sendq,
                                       custom_tags + ["state:" + state])

            except SubprocessOutputEmptyError:
                self.log.exception("Error collecting connection states.")

        proc_dev_path = "{}/net/dev".format(net_proc_base_location)
        try:
            with open(proc_dev_path, 'r') as proc:
                lines = proc.readlines()
        except IOError:
            # On OpenShift, this file may only be readable by root
            self.log.debug("Unable to read %s.", proc_dev_path)
            lines = []

        # Inter-|   Receive                                                 |  Transmit
        #  face |bytes     packets errs drop fifo frame compressed multicast|bytes       packets errs drop fifo colls carrier compressed # noqa: E501
        #     lo:45890956   112797   0    0    0     0          0         0    45890956   112797    0    0    0     0       0          0 # noqa: E501
        #   eth0:631947052 1042233   0   19    0   184          0      1206  1208625538  1320529    0    0    0     0       0          0 # noqa: E501
        #   eth1:       0        0   0    0    0     0          0         0           0        0    0    0    0     0       0          0 # noqa: E501
        for line in lines[2:]:
            cols = line.split(':', 1)
            x = cols[1].split()
            # Filter inactive interfaces
            if self._parse_value(x[0]) or self._parse_value(x[8]):
                iface = cols[0].strip()
                metrics = {
                    'bytes_rcvd': self._parse_value(x[0]),
                    'bytes_sent': self._parse_value(x[8]),
                    'packets_in.count': self._parse_value(x[1]),
                    'packets_in.error': self._parse_value(x[2]) + self._parse_value(x[3]),
                    'packets_out.count': self._parse_value(x[9]),
                    'packets_out.error': self._parse_value(x[10]) + self._parse_value(x[11]),
                }
                self._submit_devicemetrics(iface, metrics, custom_tags)

                # read ENA metrics, if configured and available
                if self._collect_ena_metrics:
                    ena_metrics = self._collect_ena(iface)
                    if ena_metrics:
                        self._submit_ena_metrics(iface, ena_metrics,
                                                 custom_tags)

        netstat_data = {}
        for f in ['netstat', 'snmp']:
            proc_data_path = "{}/net/{}".format(net_proc_base_location, f)
            try:
                with open(proc_data_path, 'r') as netstat:
                    while True:
                        n_header = netstat.readline()
                        if not n_header:
                            break  # No more? Abort!
                        n_data = netstat.readline()

                        h_parts = n_header.strip().split(' ')
                        h_values = n_data.strip().split(' ')
                        ns_category = h_parts[0][:-1]
                        netstat_data[ns_category] = {}
                        # Turn the data into a dictionary
                        for idx, hpart in enumerate(h_parts[1:]):
                            netstat_data[ns_category][hpart] = h_values[idx + 1]
            except IOError:
                # On Openshift, /proc/net/snmp is only readable by root
                self.log.debug("Unable to read %s.", proc_data_path)

        nstat_metrics_names = {
            'Tcp': {
                'RetransSegs': 'system.net.tcp.retrans_segs',
                'InSegs': 'system.net.tcp.in_segs',
                'OutSegs': 'system.net.tcp.out_segs',
            },
            'TcpExt': {
                'ListenOverflows': 'system.net.tcp.listen_overflows',
                'ListenDrops': 'system.net.tcp.listen_drops',
                'TCPBacklogDrop': 'system.net.tcp.backlog_drops',
                'TCPRetransFail': 'system.net.tcp.failed_retransmits',
            },
            'Udp': {
                'InDatagrams': 'system.net.udp.in_datagrams',
                'NoPorts': 'system.net.udp.no_ports',
                'InErrors': 'system.net.udp.in_errors',
                'OutDatagrams': 'system.net.udp.out_datagrams',
                'RcvbufErrors': 'system.net.udp.rcv_buf_errors',
                'SndbufErrors': 'system.net.udp.snd_buf_errors',
                'InCsumErrors': 'system.net.udp.in_csum_errors',
            },
        }

        # Submit the nstat metrics that are present in the parsed proc data
        for k in nstat_metrics_names:
            for met in nstat_metrics_names[k]:
                if met in netstat_data.get(k, {}):
                    self._submit_netmetric(nstat_metrics_names[k][met],
                                           self._parse_value(
                                               netstat_data[k][met]),
                                           tags=custom_tags)

        # Get the conntrack -S information
        conntrack_path = instance.get('conntrack_path')
        use_sudo_conntrack = is_affirmative(
            instance.get('use_sudo_conntrack', True))
        if conntrack_path is not None:
            self._add_conntrack_stats_metrics(conntrack_path,
                                              use_sudo_conntrack, custom_tags)

        # Get the rest of the metric by reading the files. Metrics available since kernel 3.6
        conntrack_files_location = os.path.join(proc_location, 'sys', 'net',
                                                'netfilter')
        # By default, only max and count are reported. However, if the blacklist is set,
        # the whitelist loses its default value
        blacklisted_files = instance.get('blacklist_conntrack_metrics')
        whitelisted_files = instance.get('whitelist_conntrack_metrics')
        if blacklisted_files is None and whitelisted_files is None:
            whitelisted_files = ['max', 'count']

        available_files = []

        # Get the metrics to read
        try:
            for metric_file in os.listdir(conntrack_files_location):
                if (os.path.isfile(
                        os.path.join(conntrack_files_location, metric_file))
                        and 'nf_conntrack_' in metric_file):
                    available_files.append(metric_file[len('nf_conntrack_'):])
        except Exception as e:
            self.log.debug("Unable to list the files in %s. %s",
                           conntrack_files_location, e)

        filtered_available_files = pattern_filter(available_files,
                                                  whitelist=whitelisted_files,
                                                  blacklist=blacklisted_files)

        for metric_name in filtered_available_files:
            metric_file_location = os.path.join(
                conntrack_files_location,
                'nf_conntrack_{}'.format(metric_name))
            try:
                with open(metric_file_location, 'r') as conntrack_file:
                    # Checking it's an integer
                    try:
                        value = int(conntrack_file.read().rstrip())
                        self.gauge(
                            'system.net.conntrack.{}'.format(metric_name),
                            value,
                            tags=custom_tags)
                    except ValueError:
                        self.log.debug("%s is not an integer", metric_name)
            except IOError as e:
                self.log.debug("Unable to read %s, skipping %s.",
                               metric_file_location, e)
Example #27
    def is_metadata_collection_enabled():
        # type: () -> bool
        return is_affirmative(datadog_agent.get_config('enable_metadata_collection'))
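get_config returns None for options the agent does not expose, and is_affirmative treats None as false, so this helper degrades to "disabled" rather than raising. A sketch of the truthiness rule, using a simplified stand-in for is_affirmative:

def affirmative(value):  # simplified stand-in for datadog_checks' is_affirmative
    if value is None:
        return False
    return str(value).lower() in ('yes', 'true', '1', 'on')

for raw in (None, 'false', 'true', '1'):
    print(repr(raw), '->', affirmative(raw))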
Example #28
try:
    # Import the real agent bindings when embedded in the Agent; fall back to stubs otherwise
    import datadog_agent
    from ..log import CheckLoggingAdapter, init_logging

    init_logging()
except ImportError:
    from ..stubs import datadog_agent
    from ..stubs.log import CheckLoggingAdapter, init_logging

    init_logging()

try:
    import aggregator

    using_stub_aggregator = False
except ImportError:
    from ..stubs import aggregator

    using_stub_aggregator = True

if datadog_agent.get_config('disable_unsafe_yaml'):
    from ..ddyaml import monkey_patch_pyyaml

    monkey_patch_pyyaml()

# Metric types for which it's only useful to submit once per set of tags
ONE_PER_CONTEXT_METRIC_TYPES = [
    aggregator.GAUGE, aggregator.RATE, aggregator.MONOTONIC_COUNT
]


class AgentCheck(object):
    """The base class for any Agent based integrations.

    :cvar DEFAULT_METRIC_LIMIT: allows to set a limit on the number of metric name and tags combination
        this check can send per run. This is useful for checks that have an unbounded
Example #29
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)

try:
    import datadog_agent

    if datadog_agent.get_config('integration_tracing'):
        from ddtrace import patch

        patch(requests=True)
except ImportError:
    # Tracing Integrations is only available with Agent 6
    pass
Example #30
    def __init__(self, instance, init_config, remapper=None):
        default_fields = dict(STANDARD_FIELDS)

        # Update the default behavior for skipping proxies
        default_fields['skip_proxy'] = init_config.get(
            'skip_proxy', default_fields['skip_proxy'])

        # Populate with the default values
        config = {
            field: instance.get(field, value)
            for field, value in iteritems(default_fields)
        }

        # Support non-standard (usually legacy) configurations, for example:
        # {
        #     'disable_ssl_validation': {
        #         'name': 'tls_verify',
        #         'default': False,
        #         'invert': True,
        #     },
        #     ...
        # }
        if remapper is None:
            remapper = {}

        remapper.update(DEFAULT_REMAPPED_FIELDS)

        for remapped_field, data in iteritems(remapper):
            field = data.get('name')

            # Ignore fields we don't recognize
            if field not in STANDARD_FIELDS:
                continue

            # Ignore remapped fields if the standard one is already used
            if field in instance:
                continue

            # Get value, with a possible default
            value = instance.get(remapped_field,
                                 data.get('default', default_fields[field]))

            # Invert booleans if need be
            if data.get('invert'):
                value = not is_affirmative(value)

            config[field] = value

        # http://docs.python-requests.org/en/master/user/advanced/#timeouts
        timeout = float(config['timeout'])

        # http://docs.python-requests.org/en/master/user/quickstart/#custom-headers
        # http://docs.python-requests.org/en/master/user/advanced/#header-ordering
        headers = None
        if config['headers']:
            headers = OrderedDict(
                (key, str(value))
                for key, value in iteritems(config['headers']))

        # http://docs.python-requests.org/en/master/user/authentication/
        auth = None
        if config['username'] and config['password']:
            auth = (config['username'], config['password'])

        # http://docs.python-requests.org/en/master/user/advanced/#ssl-cert-verification
        verify = True
        if isinstance(config['tls_ca_cert'], string_types):
            verify = config['tls_ca_cert']
        elif not is_affirmative(config['tls_verify']):
            verify = False

        # http://docs.python-requests.org/en/master/user/advanced/#client-side-certificates
        cert = None
        if isinstance(config['tls_cert'], string_types):
            if isinstance(config['tls_private_key'], string_types):
                cert = (config['tls_cert'], config['tls_private_key'])
            else:
                cert = config['tls_cert']

        # http://docs.python-requests.org/en/master/user/advanced/#proxies
        no_proxy_uris = None
        if is_affirmative(config['skip_proxy']):
            proxies = PROXY_SETTINGS_DISABLED.copy()
        else:
            # Order of precedence is:
            # 1. instance
            # 2. init_config
            # 3. agent config
            proxies = config['proxy'] or init_config.get('proxy')

            # TODO: Deprecate this flag now that we support skip_proxy in init_config
            if not proxies and is_affirmative(
                    init_config.get('use_agent_proxy', True)):
                proxies = datadog_agent.get_config('proxy')

            if proxies:
                proxies = proxies.copy()

                # TODO: Pass `no_proxy` directly to `requests` once this issue is fixed:
                # https://github.com/kennethreitz/requests/issues/5000
                if 'no_proxy' in proxies:
                    no_proxy_uris = proxies.pop('no_proxy')

                    if isinstance(no_proxy_uris, string_types):
                        no_proxy_uris = no_proxy_uris.replace(';',
                                                              ',').split(',')
            else:
                proxies = None

        # Default options
        self.options = {
            'auth': auth,
            'cert': cert,
            'headers': headers,
            'proxies': proxies,
            'timeout': timeout,
            'verify': verify,
        }

        # For manual parsing until `requests` properly handles `no_proxy`
        self.no_proxy_uris = no_proxy_uris

        # Ignore warnings for lack of SSL validation
        self.ignore_tls_warning = is_affirmative(config['tls_ignore_warning'])

        # For connection and cookie persistence, if desired. See:
        # https://en.wikipedia.org/wiki/HTTP_persistent_connection#Advantages
        # http://docs.python-requests.org/en/master/user/advanced/#session-objects
        # http://docs.python-requests.org/en/master/user/advanced/#keep-alive
        self.persist_connections = is_affirmative(
            config['persist_connections'])
        self._session = None