def test_default_cert_path(monkeypatch, system_certs_available):
    # Simulate the presence or absence of a system CA bundle: the patched
    # ssl.DefaultVerifyPaths below reports this value as its cafile.
    if system_certs_available:
        cert_file = "foo"
    else:
        cert_file = None

    class DefaultVerifyPaths(object):
        cafile = cert_file

        def __init__(self, *args, **kwargs):
            pass

    monkeypatch.setattr(ssl, "DefaultVerifyPaths", DefaultVerifyPaths)
    internal_metrics = CustomMetrics()
    with InternalTraceContext(internal_metrics):
        client = HttpClient("localhost", ca_bundle_path=None)

    internal_metrics = dict(internal_metrics.metrics())
    cert_metric = "Supportability/Python/Certificate/BundleRequired"
    if system_certs_available:
        # A system CA file was found: the bundled certs are not needed and
        # no supportability metric should be recorded.
        assert "ca_certs" not in client._connection_kwargs
        assert cert_metric not in internal_metrics
    else:
        # No system CA file: the client falls back to the bundled certs and
        # records the BundleRequired supportability metric.
        assert client._connection_kwargs["ca_certs"] == certs.where()
        assert internal_metrics[cert_metric][-3:-1] == [1, 1]
def api_request_kwargs():
    """Build the keyword arguments (proxies, headers, timeout and verify)
    for an API request, based on the agent configuration settings."""
    settings = global_settings()
    api_key = settings.api_key or "NO API KEY WAS SET IN AGENT CONFIGURATION"

    proxy_scheme = settings.proxy_scheme
    proxy_host = settings.proxy_host
    proxy_port = settings.proxy_port
    proxy_user = settings.proxy_user
    proxy_pass = settings.proxy_pass

    if proxy_scheme is None:
        proxy_scheme = "https"

    timeout = settings.agent_limits.data_collector_timeout

    proxies = proxy_details(proxy_scheme, proxy_host, proxy_port, proxy_user,
                            proxy_pass)

    cert_loc = settings.ca_bundle_path
    if cert_loc is None:
        cert_loc = certs.where()

    if settings.debug.disable_certificate_validation:
        cert_loc = False

    headers = {"X-Api-Key": api_key}

    return {
        'proxies': proxies,
        'headers': headers,
        'timeout': timeout,
        'verify': cert_loc,
    }
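
# A minimal usage sketch, not taken from the original source: the mapping
# returned by api_request_kwargs() lines up with keyword arguments accepted
# by requests.post(), so a hypothetical REST API call can splat it directly.
# The endpoint URL below is illustrative only.
def _example_api_request():
    import requests  # assumed to be importable, as in the snippets below

    kwargs = api_request_kwargs()
    response = requests.post(
        "https://api.newrelic.com/v2/applications.json",  # hypothetical endpoint
        **kwargs)
    response.raise_for_status()
    return response.json()
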
def test_ca_bundle_path(monkeypatch, ca_bundle_path):
    # Pretend CA certificates are not available
    class DefaultVerifyPaths(object):
        cafile = None

        def __init__(self, *args, **kwargs):
            pass

    monkeypatch.setattr(ssl, "DefaultVerifyPaths", DefaultVerifyPaths)

    settings = finalize_application_settings({"ca_bundle_path": ca_bundle_path})
    protocol = AgentProtocol(settings)
    expected = ca_bundle_path or certs.where()
    assert protocol.client._connection_kwargs["ca_certs"] == expected
def local_config(args):
    """Record a deployment by POSTing form data to the deployments.xml
    endpoint, using values from the agent configuration file and the
    command line arguments."""
    import sys

    if len(args) < 2:
        usage('record-deploy')
        sys.exit(1)

    def _args(config_file, description, revision=None, changelog=None,
            user=None, *args):
        return config_file, description, revision, changelog, user

    config_file, description, revision, changelog, user = _args(*args)

    settings = global_settings()

    settings.monitor_mode = False

    initialize(config_file)

    app_name = settings.app_name

    api_key = settings.api_key or 'NO API KEY WAS SET IN AGENT CONFIGURATION'

    host = settings.host

    if host == 'collector.newrelic.com':
        host = 'api.newrelic.com'
    elif host == 'staging-collector.newrelic.com':
        host = 'staging-api.newrelic.com'

    port = settings.port
    use_ssl = settings.ssl

    url = '%s://%s/deployments.xml'

    scheme = 'https' if use_ssl else 'http'
    server = '%s:%d' % (host, port) if port else host

    url = url % (scheme, server)

    proxy_host = settings.proxy_host
    proxy_port = settings.proxy_port
    proxy_user = settings.proxy_user
    proxy_pass = settings.proxy_pass

    timeout = settings.agent_limits.data_collector_timeout

    proxies = proxy_details(None, proxy_host, proxy_port, proxy_user,
            proxy_pass)

    if user is None:
        user = pwd.getpwuid(os.getuid()).pw_gecos

    data = {}

    data['deployment[app_name]'] = app_name

    if description is not None:
        data['deployment[description]'] = description
    if revision is not None:
        data['deployment[revision]'] = revision
    if changelog is not None:
        data['deployment[changelog]'] = changelog
    if user is not None:
        data['deployment[user]'] = user

    headers = {}

    headers['X-API-Key'] = api_key

    cert_loc = certs.where()

    r = requests.post(url, proxies=proxies, headers=headers,
            timeout=timeout, data=data, verify=cert_loc)

    if r.status_code != 201:
        raise RuntimeError('An unexpected HTTP response of %r was received '
                'for request made to %r. The API key for the request was '
                '%r. The payload for the request was %r. If this issue '
                'persists then please report this problem to New Relic '
                'support for further investigation.' % (r.status_code,
                url, api_key, data))
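
# A hedged invocation sketch, not part of the original source: assuming the
# function above is wired up as the record-deploy admin subcommand, args
# carries the config file path followed by the deployment description, with
# revision, changelog and user optional. The file name and description here
# are illustrative.
def _example_record_deploy_v1():
    # Results in a POST of form fields such as deployment[app_name],
    # deployment[description] and deployment[user] to .../deployments.xml.
    local_config(['newrelic.ini', 'Deployed release 1.2.3'])
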
def send_request(session, url, method, license_key, agent_run_id=None,
            payload=()):
    """Constructs and sends a request to the data collector."""

    params = {}
    headers = {}

    settings = global_settings()

    start = time.time()

    # Validate that the license key was actually set and, if not, replace
    # it with a string which makes it more obvious that it was not set.

    if not license_key:
        license_key = 'NO LICENSE KEY WAS SET IN AGENT CONFIGURATION'

    # The agent formats requests and is able to handle responses for
    # protocol version 14.

    params['method'] = method
    params['license_key'] = license_key
    params['protocol_version'] = '14'
    params['marshal_format'] = 'json'

    if agent_run_id:
        params['run_id'] = str(agent_run_id)

    headers['User-Agent'] = USER_AGENT
    headers['Content-Encoding'] = 'identity'

    # Set up definitions for proxy server in case that has been set.

    proxies = proxy_server()

    # At this time we use JSON content encoding for the data being sent.
    # If an error occurs when encoding the JSON, then it isn't likely to
    # work later in a subsequent request with the same data, even if
    # aggregated with other data, so we need to log the details and then
    # flag that the data should be thrown away. Don't mind being noisy in
    # the log in this situation, as it would indicate a problem with the
    # implementation of the agent.

    try:
        data = json_encode(payload)

    except Exception:
        _logger.exception('Error encoding data for JSON payload for '
                'method %r with payload of %r. Please report this problem '
                'to New Relic support.', method, payload)

        raise DiscardDataForRequest(str(sys.exc_info()[1]))

    # Log details of the call and/or payload for debugging. Use the JSON
    # encoded value so we know that what is encoded is correct.

    if settings.debug.log_data_collector_payloads:
        _logger.debug('Calling data collector with url=%r, method=%r and '
                'payload=%r.', url, method, data)
    elif settings.debug.log_data_collector_calls:
        _logger.debug('Calling data collector with url=%r and method=%r.',
                url, method)

    # Compress the serialized JSON being sent as content if it is over
    # 64KiB in size and the message type is not one for which compression
    # is excluded.

    threshold = settings.agent_limits.data_compression_threshold

    if method not in _deflate_exclude_list and len(data) > threshold:
        headers['Content-Encoding'] = 'deflate'

        level = settings.agent_limits.data_compression_level
        level = level or zlib.Z_DEFAULT_COMPRESSION
        data = zlib.compress(six.b(data), level)

    # If there is no requests session object provided for making
    # requests, create one now. We want to close this as soon as we
    # are done with it.

    auto_close_session = False

    if not session:
        session = requests.session()
        auto_close_session = True

    # The 'requests' library can raise a number of exceptions derived
    # from 'RequestException' before we even manage to get a connection
    # to the data collector.
    #
    # The data collector can then generate a number of different types
    # of HTTP errors for requests. These are:
    #
    # 400 Bad Request - For an incorrect method type or incorrectly
    # constructed parameters. We should not get this and if we do it
    # would likely indicate a problem with the implementation of the
    # agent.
    #
    # 413 Request Entity Too Large - Where the request content was too
    # large. The limits on the number of nodes in slow transaction traces
    # should in general prevent this, but not everything has size limits
    # and so rogue data could still blow things out. The same data is not
    # going to work later in a subsequent request, even if aggregated
    # with other data, so we need to log the details and then flag that
    # the data should be thrown away.
    #
    # 415 Unsupported Media Type - This occurs when the JSON which was
    # sent can't be decoded by the data collector. If this is a true
    # problem with the JSON formatting, then sending again, even if
    # aggregated with other data, may not work, so we need to log the
    # details and then flag that data should be thrown away.
    #
    # 503 Service Unavailable - This occurs when the data collector, or
    # the core application, is being restarted and is not in a state to
    # accept requests. It should be a transient issue, so we should be
    # able to retain the data and try again.

    # If audit logging is enabled, log the request details.

    log_id = _log_request(url, params, headers, data)

    connection = connection_type(proxies)

    try:
        # The timeout value in the requests module applies only to the
        # initial connection and not to how long it takes to get back
        # a response.

        cert_loc = certs.where()

        if settings.debug.disable_certificate_validation:
            cert_loc = False

        timeout = settings.agent_limits.data_collector_timeout

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")

            r = session.post(url, params=params, headers=headers,
                    proxies=proxies, timeout=timeout, data=data,
                    verify=cert_loc)

        # Read the content now so that, if this is a transient session,
        # we can force close the socket connection as quickly as
        # possible.

        content = r.content

    except requests.RequestException:
        exc_type, message = sys.exc_info()[:2]

        internal_metric('Supportability/Python/Collector/Failures', 1)
        internal_metric('Supportability/Python/Collector/Failures/'
                '%s' % connection, 1)

        internal_metric('Supportability/Python/Collector/Exception/'
                '%s' % callable_name(exc_type), 1)

        if not settings.proxy_host or not settings.proxy_port:
            _logger.warning('Data collector is not contactable. This can be '
                    'because of a network issue or because of the data '
                    'collector being restarted. In the event that contact '
                    'cannot be made after a period of time then please '
                    'report this problem to New Relic support for further '
                    'investigation. The error raised was %r.', message)

        else:
            _logger.warning('Data collector is not contactable via the proxy '
                    'host %r on port %r with proxy user of %r. This can be '
                    'because of a network issue or because of the data '
                    'collector being restarted. In the event that contact '
                    'cannot be made after a period of time then please '
                    'report this problem to New Relic support for further '
                    'investigation. The error raised was %r.',
                    settings.proxy_host, settings.proxy_port,
                    settings.proxy_user, message)

        raise RetryDataForRequest(str(message))

    except Exception:
        # Any unexpected exception will be caught by a higher layer, but
        # still attempt to log a metric here just in case the agent run
        # doesn't get shut down as a result of the exception.

        exc_type = sys.exc_info()[0]

        internal_metric('Supportability/Python/Collector/Failures', 1)
        internal_metric('Supportability/Python/Collector/Failures/'
                '%s' % connection, 1)

        internal_metric('Supportability/Python/Collector/Exception/'
                '%s' % callable_name(exc_type), 1)

        raise

    finally:
        if auto_close_session:
            session.close()
            session = None

    if r.status_code != 200:
        _logger.warning('Received a non 200 HTTP response from the data '
                'collector where url=%r, method=%r, license_key=%r, '
                'agent_run_id=%r, params=%r, headers=%r, status_code=%r '
                'and content=%r.', url, method, license_key, agent_run_id,
                params, headers, r.status_code, content)

        internal_metric('Supportability/Python/Collector/Failures', 1)
        internal_metric('Supportability/Python/Collector/Failures/'
                '%s' % connection, 1)

        internal_metric('Supportability/Python/Collector/HTTPError/%d'
                % r.status_code, 1)

    if r.status_code == 400:
        _logger.error('Data collector is indicating that a bad '
                'request has been submitted for url %r, headers of %r, '
                'params of %r and payload of %r. Please report this '
                'problem to New Relic support.', url, headers, params,
                payload)

        raise DiscardDataForRequest()

    elif r.status_code == 413:
        _logger.warning('Data collector is indicating that a request for '
                'method %r was received where the request content size '
                'was over the maximum allowed size limit. The length of '
                'the request content was %d. If this keeps occurring on a '
                'regular basis, please report this problem to New Relic '
                'support for further investigation.', method, len(data))

        raise DiscardDataForRequest()

    elif r.status_code == 415:
        _logger.warning('Data collector is indicating that it was sent '
                'malformed JSON data for method %r. If this keeps occurring '
                'on a regular basis, please report this problem to New '
                'Relic support for further investigation.', method)

        if settings.debug.log_malformed_json_data:
            if headers['Content-Encoding'] == 'deflate':
                data = zlib.decompress(data)

            _logger.info('JSON data which was rejected by the data '
                    'collector was %r.', data)

        raise DiscardDataForRequest(content)

    elif r.status_code == 503:
        _logger.warning('Data collector is unavailable. This can be a '
                'transient issue because of the data collector or our '
                'core application being restarted. If the issue persists '
                'it can also be indicative of a problem with our servers. '
                'In the event that availability of our servers is not '
                'restored after a period of time then please report this '
                'problem to New Relic support for further investigation.')

        raise ServerIsUnavailable()

    elif r.status_code != 200:
        if not settings.proxy_host or not settings.proxy_port:
            _logger.warning('An unexpected HTTP response was received from '
                    'the data collector of %r for method %r. The payload for '
                    'the request was %r. If this issue persists then please '
                    'report this problem to New Relic support for further '
                    'investigation.', r.status_code, method, payload)

        else:
            _logger.warning('An unexpected HTTP response was received from '
                    'the data collector of %r for method %r while connecting '
                    'via proxy host %r on port %r with proxy user of %r. '
                    'The payload for the request was %r. If this issue '
                    'persists then please report this problem to New Relic '
                    'support for further investigation.', r.status_code,
                    method, settings.proxy_host, settings.proxy_port,
                    settings.proxy_user, payload)

        raise DiscardDataForRequest()

    # Log details of the response payload for debugging. Use the JSON
    # encoded value so we know what the original encoded value was.

    duration = time.time() - start

    if settings.debug.log_data_collector_payloads:
        _logger.debug('Valid response from data collector after %.2f '
                'seconds with content=%r.', duration, content)
    elif settings.debug.log_data_collector_calls:
        _logger.debug('Valid response from data collector after %.2f '
                'seconds.', duration)

    # If we got this far we should have a legitimate response from the
    # data collector. The response is JSON so we need to decode it.

    try:
        if six.PY3:
            content = content.decode('UTF-8')

        result = json_decode(content)

    except Exception:
        _logger.exception('Error decoding data for JSON payload for '
                'method %r with payload of %r. Please report this problem '
                'to New Relic support.', method, content)

        if settings.debug.log_malformed_json_data:
            _logger.info('JSON data received from data collector which '
                    'could not be decoded was %r.', content)

        raise DiscardDataForRequest(str(sys.exc_info()[1]))

    # The decoded JSON can be either for a successful response or an
    # error. A successful response has a 'return_value' element and on
    # error an 'exception' element.

    if log_id is not None:
        _log_response(log_id, result)

    if 'return_value' in result:
        return result['return_value']

    error_type = result['exception']['error_type']
    message = result['exception']['message']

    # Now we need to check for server side exceptions. The following
    # exceptions can occur for abnormal events.

    _logger.warning('Received an exception from the data collector where '
            'url=%r, method=%r, license_key=%r, agent_run_id=%r, params=%r, '
            'headers=%r, error_type=%r and message=%r', url, method,
            license_key, agent_run_id, params, headers, error_type,
            message)

    # Technically most server side errors will result in the active
    # agent run being abandoned, and so there is no point trying to
    # create a metric for when they occur. Leave this here though to at
    # least log a metric for the case where a completely unexpected
    # server error response is received, the agent run does manage to
    # continue, and further requests don't just keep failing. Since we
    # do not even expect the metric to be retained, use the original
    # error type as sent.

    internal_metric('Supportability/Python/Collector/ServerError/'
            '%s' % error_type, 1)

    if error_type == 'NewRelic::Agent::LicenseException':
        _logger.error('Data collector is indicating that an incorrect '
                'license key has been supplied by the agent. The value '
                'which was used by the agent is %r. Please correct any '
                'problem with the license key or report this problem to '
                'New Relic support.', license_key)

        raise DiscardDataForRequest(message)

    elif error_type == 'NewRelic::Agent::PostTooBigException':
        # As far as we know we should never see this type of server side
        # error, as for the JSON API we should always get back an HTTP
        # 413 error response instead.

        internal_metric('Supportability/Python/Collector/Failures', 1)
        internal_metric('Supportability/Python/Collector/Failures/'
                '%s' % connection, 1)

        _logger.warning('Core application is indicating that a request for '
                'method %r was received where the request content size '
                'was over the maximum allowed size limit. The length of '
                'the request content was %d. If this keeps occurring on a '
                'regular basis, please report this problem to New Relic '
                'support for further investigation.', method, len(data))

        raise DiscardDataForRequest(message)

    # Server side exceptions are also used to inform the agent to
    # perform certain actions, such as restarting when server side
    # configuration has changed for this application, or when the agent
    # is being disabled remotely for some reason.

    if error_type == 'NewRelic::Agent::ForceRestartException':
        _logger.info('An automatic internal agent restart has been '
                'requested by the data collector for the application '
                'where the agent run was %r. The reason given for the '
                'forced restart is %r.', agent_run_id, message)

        raise ForceAgentRestart(message)

    elif error_type == 'NewRelic::Agent::ForceDisconnectException':
        _logger.critical('Disconnection of the agent has been requested by '
                'the data collector for the application where the '
                'agent run was %r. The reason given for the forced '
                'disconnection is %r. Please contact New Relic support '
                'for further information.', agent_run_id, message)

        raise ForceAgentDisconnect(message)

    # We received an unexpected server side error we don't know what to
    # do with. Ignoring PostTooBigException, which we expect we should
    # never receive, unexpected server side errors are the only ones we
    # record a failure metric for, as other server side errors are
    # really commands telling the agent to do something.

    internal_metric('Supportability/Python/Collector/Failures', 1)
    internal_metric('Supportability/Python/Collector/Failures/'
            '%s' % connection, 1)

    _logger.warning('An unexpected server error was received from the '
            'data collector for method %r with payload of %r. The error '
            'was of type %r with message %r. If this issue persists '
            'then please report this problem to New Relic support for '
            'further investigation.', method, payload, error_type, message)

    raise DiscardDataForRequest(message)
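
# A minimal, hypothetical usage sketch (the URL, method name, license key and
# run id are illustrative, not taken from the original source): callers pass
# an optional requests session, the collector URL, the RPC method name, the
# license key and an already-built payload, and handle the retry/discard
# exceptions raised above.
def _example_send_request():
    url = 'https://collector.newrelic.com/agent_listener/invoke_raw_method'
    try:
        return send_request(None, url, 'metric_data', '0123456789abcdef',
                agent_run_id=12345, payload=[])
    except RetryDataForRequest:
        # Transient failure; the caller may retain the data and try again.
        return None
    except DiscardDataForRequest:
        # The data that provoked this should be thrown away.
        return None
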
    def __init__(
        self,
        host,
        port=443,
        proxy_scheme=None,
        proxy_host=None,
        proxy_port=None,
        proxy_user=None,
        proxy_pass=None,
        timeout=None,
        ca_bundle_path=None,
        disable_certificate_validation=False,
        compression_threshold=64 * 1024,
        compression_level=None,
        compression_method="gzip",
        max_payload_size_in_bytes=1000000,
        audit_log_fp=None,
    ):
        self._host = host
        port = self._port = port
        self._compression_threshold = compression_threshold
        self._compression_level = compression_level
        self._compression_method = compression_method
        self._max_payload_size_in_bytes = max_payload_size_in_bytes
        self._audit_log_fp = audit_log_fp

        self._prefix = ""

        self._headers = dict(self.BASE_HEADERS)
        self._connection_kwargs = connection_kwargs = {
            "timeout": timeout,
        }
        self._urlopen_kwargs = urlopen_kwargs = {}

        if self.CONNECTION_CLS.scheme == "https":
            if not ca_bundle_path:
                verify_path = get_default_verify_paths()

                # If there is no resolved cafile, assume the bundled certs are
                # required and report this condition as a supportability metric.
                if not verify_path.cafile:
                    ca_bundle_path = certs.where()
                    internal_metric(
                        "Supportability/Python/Certificate/BundleRequired", 1
                    )

            if ca_bundle_path:
                if os.path.isdir(ca_bundle_path):
                    connection_kwargs["ca_cert_dir"] = ca_bundle_path
                else:
                    connection_kwargs["ca_certs"] = ca_bundle_path

            if disable_certificate_validation:
                connection_kwargs["cert_reqs"] = "NONE"

        proxy = self._parse_proxy(
            proxy_scheme, proxy_host, proxy_port, proxy_user, proxy_pass,
        )
        proxy_headers = (
            proxy and proxy.auth and urllib3.make_headers(proxy_basic_auth=proxy.auth)
        )

        if proxy:
            if self.CONNECTION_CLS.scheme == "https" and proxy.scheme != "https":
                connection_kwargs["_proxy"] = proxy
                connection_kwargs["_proxy_headers"] = proxy_headers
            else:
                self._host = proxy.host
                self._port = proxy.port or 443
                self._prefix = self.PREFIX_SCHEME + host + ":" + str(port)
                urlopen_kwargs["assert_same_host"] = False
                if proxy_headers:
                    self._headers.update(proxy_headers)

        # Logging
        self._proxy = proxy

        self._connection_attr = None
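
# A hedged construction sketch with illustrative values, assuming the
# constructor above is HttpClient.__init__ as test_default_cert_path at the
# top of this listing suggests: with no ca_bundle_path and certificate
# validation left enabled, the logic above either trusts the system's default
# CA file or falls back to the bundled certs and records the
# Supportability/Python/Certificate/BundleRequired metric.
def _example_http_client():
    return HttpClient(
        "collector.newrelic.com",  # illustrative host
        port=443,
        timeout=30.0,
        ca_bundle_path=None,  # exercise the default-verify-paths logic
        disable_certificate_validation=False,
    )
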
def local_config(args):
    """Record a deployment by POSTing a JSON payload to the v2 REST API
    deployments endpoint, using values from the agent configuration file
    and the command line arguments."""
    import sys

    # app_id, config_file and revision are all required by _args() below.
    if len(args) < 3:
        usage("record-deploy")
        sys.exit(1)

    def _args(app_id,
              config_file,
              revision,
              description=None,
              changelog=None,
              user=None,
              timestamp=None,
              *args):
        return (
            app_id,
            config_file,
            revision,
            description,
            changelog,
            user,
            timestamp,
        )

    app_id, config_file, revision, description, changelog, user, timestamp = _args(
        *args)

    settings = global_settings()

    settings.monitor_mode = False

    initialize(config_file)

    api_key = settings.api_key or "NO API KEY WAS SET IN AGENT CONFIGURATION"

    host = settings.host

    if host == "collector.newrelic.com":
        host = "api.newrelic.com"
    elif host.startswith("collector.eu"):
        host = "api.eu.newrelic.com"
    elif host == "staging-collector.newrelic.com":
        host = "staging-api.newrelic.com"

    port = settings.port

    url = "%s://%s/v2/applications/%s/deployments.json"

    scheme = "https"
    server = port and "%s:%d" % (host, port) or host

    url = url % (scheme, server, app_id)

    proxy_scheme = settings.proxy_scheme
    proxy_host = settings.proxy_host
    proxy_port = settings.proxy_port
    proxy_user = settings.proxy_user
    proxy_pass = settings.proxy_pass

    if proxy_scheme is None:
        proxy_scheme = "https"

    timeout = settings.agent_limits.data_collector_timeout

    proxies = proxy_details(proxy_scheme, proxy_host, proxy_port, proxy_user,
                            proxy_pass)

    if user is None:
        user = pwd.getpwuid(os.getuid()).pw_gecos

    data = {}
    data["deployment"] = {}
    data["deployment"]["revision"] = revision

    if description is not None:
        data["deployment"]["description"] = description
    if changelog is not None:
        data["deployment"]["changelog"] = changelog
    if user is not None:
        data["deployment"]["user"] = user
    if timestamp is not None:
        data["deployment"]["timestamp"] = timestamp

    headers = {}
    headers["X-Api-Key"] = api_key
    headers["Content-Type"] = "application/json"

    cert_loc = settings.ca_bundle_path
    if cert_loc is None:
        cert_loc = certs.where()

    if settings.debug.disable_certificate_validation:
        cert_loc = False

    data = json.dumps(data)

    r = requests.post(
        url,
        proxies=proxies,
        headers=headers,
        timeout=timeout,
        data=data,
        verify=cert_loc,
    )

    if r.status_code != 201:
        raise RuntimeError(
            "An unexpected HTTP response of %r was received "
            "for request made to %r. The API key for the request was "
            "%r. The payload for the request was %r. The response "
            "payload for the request was %r. If this issue "
            "persists then please report this problem to New Relic "
            "support for further investigation." %
            (r.status_code, url, api_key, data, r.json()))
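
# A hedged sketch, not part of the original code, of the JSON body built by
# local_config() above for an illustrative deployment; all values are
# placeholders and the timestamp format is an assumption.
def _example_deployment_payload():
    import json  # mirrors the json.dumps() call in local_config() above

    payload = {
        "deployment": {
            "revision": "abc123",  # always included by the code above
            "description": "Deployed release 1.2.3",  # optional
            "changelog": "Fixed the widget frobnicator",  # optional
            "user": "deploy-bot",  # optional
            "timestamp": "2020-01-01T00:00:00Z",  # optional
        }
    }
    return json.dumps(payload)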