Example #1
    def process_response_headers(self, response_headers):
        """
        Decode the response headers and create appropriate metrics based on the
        header values. The response_headers are passed in as a list of tuples.
        [(HEADER_NAME0, HEADER_VALUE0), (HEADER_NAME1, HEADER_VALUE1)]

        """

        settings = self.settings
        if not settings:
            return

        if not settings.cross_application_tracer.enabled:
            return

        appdata = None
        try:
            for k, v in response_headers:
                if k.upper() == self.cat_appdata_key.upper():
                    appdata = json_decode(deobfuscate(v,
                                                      settings.encoding_key))
                    break

            if appdata:
                self.params['cross_process_id'] = appdata[0]
                self.params['external_txn_name'] = appdata[1]
                self.params['transaction_guid'] = appdata[5]

        except Exception:
            pass
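The snippet leans on two helpers from the agent's encoding utilities. As a point of reference, here is a minimal, self-contained sketch of how an obfuscate/deobfuscate pair could work, assuming the XOR-plus-Base64 scheme used for CAT headers; the helper bodies and the sample appdata values are illustrative, not the agent's own code:

import base64
import json
from itertools import cycle


def obfuscate(text, key):
    # XOR each byte of the text with the cycled encoding key, then
    # Base64-encode the result.
    data = bytes(b ^ ord(k) for b, k in zip(text.encode('utf-8'), cycle(key)))
    return base64.b64encode(data).decode('ascii')


def deobfuscate(value, key):
    # Reverse of obfuscate: Base64-decode, then XOR with the cycled key.
    data = base64.b64decode(value)
    return ''.join(chr(b ^ ord(k)) for b, k in zip(data, cycle(key)))


# Synthetic appdata: index 0 is the cross process id, index 1 the external
# transaction name, index 5 the transaction guid, matching the lookups above.
appdata = ['1#23', 'WebTransaction/Uri/index', 0.0, 0.1, -1, 'abc123', False]
header_value = obfuscate(json.dumps(appdata), 'encoding-key')
assert json.loads(deobfuscate(header_value, 'encoding-key')) == appdata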
Example #2
    def get_values(cls, response):
        if response is None:
            return

        try:
            return json_decode(response.decode('utf-8'))
        except ValueError:
            _logger.debug('Invalid %s data (%s%s): %r', cls.VENDOR_NAME,
                          cls.METADATA_HOST, cls.METADATA_PATH, response)
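For context, this is a classmethod on a cloud-vendor utilization class. A runnable stand-in, with illustrative constants in place of the real ones, shows both paths: valid JSON is decoded, invalid data is logged and yields None:

import json
import logging

_logger = logging.getLogger(__name__)
json_decode = json.loads  # stand-in for the agent's helper


class StubVendor(object):
    # Illustrative constants; the real vendor classes define their own.
    VENDOR_NAME = 'aws'
    METADATA_HOST = '169.254.169.254'
    METADATA_PATH = '/instance-identity/document'

    @classmethod
    def get_values(cls, response):
        if response is None:
            return
        try:
            return json_decode(response.decode('utf-8'))
        except ValueError:
            _logger.debug('Invalid %s data (%s%s): %r', cls.VENDOR_NAME,
                          cls.METADATA_HOST, cls.METADATA_PATH, response)


assert StubVendor.get_values(b'{"instanceId": "i-0abc"}') == {'instanceId': 'i-0abc'}
assert StubVendor.get_values(b'not json') is None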
Example #3
    def decode_newrelic_header(self, environ, header_name):
        encoded_header = environ.get(header_name)
        # Initialize up front so the return below cannot raise an
        # UnboundLocalError when the header is missing from environ.
        decoded_header = None
        if encoded_header:
            try:
                decoded_header = json_decode(deobfuscate(
                        encoded_header, self._settings.encoding_key))
            except Exception:
                decoded_header = None

        return decoded_header
Example #4
def test_connect_metadata(monkeypatch):
    monkeypatch.setenv("NEW_RELIC_METADATA_FOOBAR", "foobar")
    monkeypatch.setenv("_NEW_RELIC_METADATA_WRONG", "wrong")
    protocol = AgentProtocol.connect(
        APP_NAME,
        LINKED_APPS,
        ENVIRONMENT,
        finalize_application_settings(),
        client_cls=HttpClientRecorder,
    )
    connect = HttpClientRecorder.SENT[1]
    assert connect.params["method"] == "connect"
    connect_payload = json_decode(connect.payload.decode("utf-8"))[0]
    assert connect_payload["metadata"] == {"NEW_RELIC_METADATA_FOOBAR": "foobar"}
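The test exercises the metadata-forwarding rule: only environment variables whose names start with the NEW_RELIC_METADATA_ prefix are collected, which is why the leading-underscore variable is dropped. A sketch of that filter (the helper name is hypothetical):

import os


def collect_forwarding_metadata(environ=os.environ,
                                prefix='NEW_RELIC_METADATA_'):
    # Keep only variables whose names start with the prefix; a name such as
    # "_NEW_RELIC_METADATA_WRONG" does not start with it and is excluded.
    return {k: v for k, v in environ.items() if k.startswith(prefix)}


sample = {'NEW_RELIC_METADATA_FOOBAR': 'foobar',
          '_NEW_RELIC_METADATA_WRONG': 'wrong'}
assert collect_forwarding_metadata(sample) == {
    'NEW_RELIC_METADATA_FOOBAR': 'foobar'}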
Example #5
    def log_response(cls,
                     fp,
                     log_id,
                     status,
                     headers,
                     data,
                     connection="direct"):
        if not status:
            # Exclude traceback in order to prevent a reference cycle
            exc_info = sys.exc_info()[:2]
        else:
            exc_info = None

        cls._supportability_response(status, exc_info and exc_info[0],
                                     connection)

        if not fp:
            return

        try:
            result = json_decode(data)
        except Exception:
            result = data

        print("TIME: %r" %
              time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
              file=fp)
        print(file=fp)
        print("ID: %r" % log_id, file=fp)
        print(file=fp)
        print("PID: %r" % os.getpid(), file=fp)
        print(file=fp)

        if exc_info:
            print("Exception: %r" % exc_info[1], file=fp)
            print(file=fp)
        else:
            print("STATUS: %r" % status, file=fp)
            print(file=fp)
            print("HEADERS:", end=" ", file=fp)
            pprint(dict(headers), stream=fp)
            print(file=fp)
            print("RESULT:", end=" ", file=fp)
            pprint(result, stream=fp)
            print(file=fp)

        print(78 * "=", file=fp)
        print(file=fp)

        fp.flush()
Example #6
    def log_request(cls,
                    fp,
                    method,
                    url,
                    params,
                    payload,
                    headers,
                    body=None,
                    compression_time=None):
        cls._supportability_request(params, payload, body, compression_time)

        if not fp:
            return

        # Maintain a global AUDIT_LOG_ID attached to all class instances
        # NOTE: this is not thread safe so this class cannot be used
        # across threads when audit logging is on
        cls.AUDIT_LOG_ID += 1

        print(
            "TIME: %r" % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
            file=fp,
        )
        print(file=fp)
        print("ID: %r" % cls.AUDIT_LOG_ID, file=fp)
        print(file=fp)
        print("PID: %r" % os.getpid(), file=fp)
        print(file=fp)
        print("URL: %r" % url, file=fp)
        print(file=fp)
        print("PARAMS: %r" % params, file=fp)
        print(file=fp)
        print("HEADERS: %r" % headers, file=fp)
        print(file=fp)
        print("DATA:", end=" ", file=fp)

        try:
            data = json_decode(payload.decode("utf-8"))
        except Exception:
            data = payload

        pprint(data, stream=fp)

        print(file=fp)
        print(78 * "=", file=fp)
        print(file=fp)

        fp.flush()

        return cls.AUDIT_LOG_ID
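The NOTE above flags that incrementing AUDIT_LOG_ID is not thread safe. If the logger ever had to run across threads, one option is a lock-guarded itertools.count; this is a sketch with hypothetical names, not the agent's implementation:

import itertools
import threading


class ThreadSafeAuditIds(object):
    _counter = itertools.count(1)
    _lock = threading.Lock()

    @classmethod
    def next_id(cls):
        # The lock makes the read-increment step explicit and safe even on
        # interpreters without a GIL.
        with cls._lock:
            return next(cls._counter)


assert ThreadSafeAuditIds.next_id() == 1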
Example #7
        def _validate():
            assert payloads

            for payload in payloads:
                obj = json_decode(payload)
                decoded = serverless_payload_decode(obj[2])

                assert 'metadata' in decoded
                metadata = decoded['metadata']

                if exact_metadata:
                    for key, value in exact_metadata.items():
                        assert key in metadata
                        assert metadata[key] == value, metadata
Example #8
    def send(self, method, payload=()):
        params, headers, payload = self._to_http(method, payload)

        try:
            response = self.client.send_request(params=params,
                                                headers=headers,
                                                payload=payload)
        except NetworkInterfaceException:
            # All HTTP errors are currently retried
            raise RetryDataForRequest

        status, data = response

        if not 200 <= status < 300:
            if status == 413:
                internal_count_metric(
                    "Supportability/Python/Collector/MaxPayloadSizeLimit/%s" %
                    method,
                    1,
                )
            level, message = self.LOG_MESSAGES.get(
                status, self.LOG_MESSAGES["default"])
            _logger.log(
                level,
                message,
                {
                    "proxy_host": self._proxy_host,
                    "proxy_port": self._proxy_port,
                    "proxy_user": self._proxy_user,
                    "method": method,
                    "status_code": status,
                    "headers": headers,
                    "params": {
                        k: v
                        for k, v in params.items()
                        if k in self.PARAMS_ALLOWLIST
                    },
                    "content": payload,
                    "agent_run_id": self._run_token,
                },
            )
            exception = self.STATUS_CODE_RESPONSE.get(status,
                                                      DiscardDataForRequest)
            raise exception
        if status == 200:
            return json_decode(data.decode("utf-8"))["return_value"]
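The STATUS_CODE_RESPONSE lookup maps HTTP status codes to agent exceptions, with DiscardDataForRequest as the fallback. An illustrative table with stub exception types (the exception class names appear elsewhere in these examples, but this particular mapping is a sketch, not copied from the agent):

# Stub exception types standing in for the agent's own classes.
class RetryDataForRequest(Exception):
    pass


class DiscardDataForRequest(Exception):
    pass


class ForceAgentRestart(Exception):
    pass


class ForceAgentDisconnect(Exception):
    pass


# Illustrative mapping: transient conditions retry, a few codes force the
# agent to reconnect or shut down, and everything else discards the payload.
STATUS_CODE_RESPONSE = {
    401: ForceAgentRestart,
    410: ForceAgentDisconnect,
    429: RetryDataForRequest,
    503: RetryDataForRequest,
}

# Codes not in the table fall back to discarding the payload, as above.
assert STATUS_CODE_RESPONSE.get(413, DiscardDataForRequest) is DiscardDataForRequest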
Example #9
        def _validate():
            assert len(payloads) == count

            for payload in payloads:
                assert isinstance(payload, str)

                obj = json_decode(payload)

                assert len(obj) == 3, obj

                assert obj[0] == 1  # Version = 1
                assert obj[1] == 'NR_LAMBDA_MONITORING'  # Marker

                decoded = serverless_payload_decode(obj[2])

                # Keys should only contain metadata / data
                assert set(decoded.keys()) == set(('metadata', 'data'))
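serverless_payload_decode reverses the encoding applied to the third element of the payload triple. A plausible, self-contained sketch, assuming the body is gzip-compressed JSON wrapped in Base64 (the real helper may differ in detail):

import base64
import gzip
import json


def serverless_payload_encode(payload):
    # Base64-wrap the gzip-compressed JSON body.
    return base64.b64encode(gzip.compress(
        json.dumps(payload).encode('utf-8'))).decode('ascii')


def serverless_payload_decode(encoded):
    # Reverse: Base64-decode, gunzip, then JSON-decode.
    return json.loads(gzip.decompress(
        base64.b64decode(encoded)).decode('utf-8'))


body = {'metadata': {'agent_version': 'x'}, 'data': {}}
assert serverless_payload_decode(serverless_payload_encode(body)) == body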
Example #10
    def send_request(
        self,
        method="POST",
        path="/agent_listener/invoke_raw_method",
        params=None,
        headers=None,
        payload=None,
    ):
        result = super(ServerlessModeClient, self).send_request(
            method=method, path=path, params=params, headers=headers, payload=payload
        )

        if result[0] == 200:
            agent_method = params["method"]
            self.payload[agent_method] = json_decode(payload.decode("utf-8"))

        return result
Example #11
def fetch_app_id(app_name, client, headers):
    status, data = client.send_request(
        "GET",
        "/v2/applications.json",
        params={"filter[name]": app_name},
        headers=headers,
    )

    if not 200 <= status < 300:
        raise RuntimeError("Status not OK", status)

    response_json = encoding_utils.json_decode(encoding_utils.ensure_str(data))
    if "applications" not in response_json:
        return

    for application in response_json["applications"]:
        if application["name"] == app_name:
            return application["id"]
Example #12
def test_serverless_protocol_finalize(capsys):
    protocol = ServerlessModeProtocol(
        finalize_application_settings(
            {"aws_lambda_metadata": {"foo": "bar", "agent_version": "x"}}
        )
    )
    response = protocol.send("metric_data", (1, 2, 3))
    assert response is None

    payload = protocol.finalize()
    captured = capsys.readouterr()
    assert captured.out.rstrip("\n") == payload

    payload = json_decode(payload)
    assert payload[:2] == [1, "NR_LAMBDA_MONITORING"]

    data = serverless_payload_decode(payload[2])
    assert data["data"] == {"metric_data": [1, 2, 3]}

    assert data["metadata"]["foo"] == "bar"
    assert data["metadata"]["agent_version"] != "x"
Example #13
def validate_outbound_headers(header_id='X-NewRelic-ID',
                              header_transaction='X-NewRelic-Transaction'):
    transaction = current_transaction()
    headers = transaction._test_request_headers
    settings = transaction.settings
    encoding_key = settings.encoding_key

    assert header_id in headers

    values = headers[header_id]
    if isinstance(values, list):
        assert len(values) == 1, headers
        assert isinstance(values[0], type(''))
        value = values[0]
    else:
        value = values

    cross_process_id = deobfuscate(value, encoding_key)
    assert cross_process_id == settings.cross_process_id

    assert header_transaction in headers

    values = headers[header_transaction]
    if isinstance(values, list):
        assert len(values) == 1, headers
        assert isinstance(values[0], type(''))
        value = values[0]
    else:
        value = values

    (guid, record_tt, trip_id, path_hash) = \
            json_decode(deobfuscate(value, encoding_key))

    assert guid == transaction.guid
    assert record_tt == transaction.record_tt
    assert trip_id == transaction.trip_id
    assert path_hash == transaction.path_hash
Example #14
def decode_header(header, encoding_key=ENCODING_KEY):
    result = deobfuscate(header, encoding_key)
    return json_decode(result)
Example #15
    def process_response_metadata(self, cat_linking_value):
        payload = base64_decode(cat_linking_value)
        nr_headers = json_decode(payload)
        self.process_response_headers(nr_headers.items())
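Here the linking value is plain Base64-wrapped JSON (no obfuscation), and its items become the (header, value) pairs fed to process_response_headers. A synthetic round trip, with an illustrative header name and value:

import base64
import json

nr_headers = {'X-NewRelic-App-Data': 'obfuscated-appdata'}
cat_linking_value = base64.b64encode(
    json.dumps(nr_headers).encode('utf-8')).decode('ascii')
assert json.loads(base64.b64decode(cat_linking_value)) == nr_headers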
Example #16
def test_connect(with_aws, with_pcf, with_gcp, with_azure, with_docker,
                 with_kubernetes, with_ip):
    global AWS, AZURE, GCP, PCF, BOOT_ID, DOCKER, KUBERNETES, IP_ADDRESS
    if not with_aws:
        AWS = Exception
    if not with_pcf:
        PCF = Exception
    if not with_gcp:
        GCP = Exception
    if not with_azure:
        AZURE = Exception
    if not with_docker:
        DOCKER = Exception
    if not with_kubernetes:
        KUBERNETES = Exception
    if not with_ip:
        IP_ADDRESS = None
    settings = finalize_application_settings({
        "browser_monitoring.loader": BROWSER_MONITORING_LOADER,
        "browser_monitoring.debug": BROWSER_MONITORING_DEBUG,
        "capture_params": CAPTURE_PARAMS,
        "process_host.display_name": DISPLAY_NAME,
        "transaction_tracer.record_sql": RECORD_SQL,
        "high_security": HIGH_SECURITY,
        "labels": LABELS,
        "utilization.detect_aws": with_aws,
        "utilization.detect_pcf": with_pcf,
        "utilization.detect_gcp": with_gcp,
        "utilization.detect_azure": with_azure,
        "utilization.detect_docker": with_docker,
        "utilization.detect_kubernetes": with_kubernetes,
        "event_harvest_config": {
            "harvest_limits": {
                "analytic_event_data": ANALYTIC_EVENT_DATA,
                "span_event_data": SPAN_EVENT_DATA,
                "custom_event_data": CUSTOM_EVENT_DATA,
                "error_event_data": ERROR_EVENT_DATA,
            }
        },
    })
    protocol = AgentProtocol.connect(
        APP_NAME,
        LINKED_APPS,
        ENVIRONMENT,
        settings,
        client_cls=HttpClientRecorder,
    )

    # verify there are exactly 3 calls to HttpClientRecorder
    assert len(HttpClientRecorder.SENT) == 3

    # Verify preconnect call
    preconnect = HttpClientRecorder.SENT[0]
    assert preconnect.params["method"] == "preconnect"
    assert preconnect.payload == b"[]"

    # Verify connect call
    connect = HttpClientRecorder.SENT[1]
    assert connect.params["method"] == "connect"
    connect_payload = json_decode(connect.payload.decode("utf-8"))
    connect_payload_asserts(
        connect_payload,
        with_aws=with_aws,
        with_pcf=with_pcf,
        with_gcp=with_gcp,
        with_azure=with_azure,
        with_docker=with_docker,
        with_kubernetes=with_kubernetes,
    )

    # Verify agent_settings call is done with the finalized settings
    agent_settings = HttpClientRecorder.SENT[2]
    assert agent_settings.params["method"] == "agent_settings"
    agent_settings_payload = json_decode(
        agent_settings.payload.decode("utf-8"))
    assert len(agent_settings_payload) == 1
    agent_settings_payload = agent_settings_payload[0]

    # Finalized settings will have a non-None agent_run_id
    assert agent_settings_payload["agent_run_id"] is not None
    assert protocol.configuration.agent_run_id is not None

    # Verify that agent settings sent have converted null, containers, and
    # unserializable types to string
    assert agent_settings_payload["proxy_host"] == "None"
    assert agent_settings_payload["attributes.include"] == "[]"
    assert agent_settings_payload["feature_flag"] == str(set())
    assert isinstance(agent_settings_payload["attribute_filter"],
                      six.string_types)

    # Verify that the connection is closed
    assert HttpClientRecorder.STATE == 0
Example #17
def send_request(session, url, method, license_key, agent_run_id=None,
            payload=()):
    """Constructs and sends a request to the data collector."""

    params = {}
    headers = {}

    settings = global_settings()

    start = time.time()

    # Validate that the license key was actually set and if not replace
    # it with a string which makes it more obvious it was not set.

    if not license_key:
        license_key = 'NO LICENSE KEY WAS SET IN AGENT CONFIGURATION'

    # The agent formats requests and is able to handle responses for
    # protocol version 14.

    params['method'] = method
    params['license_key'] = license_key
    params['protocol_version'] = '14'
    params['marshal_format'] = 'json'

    if agent_run_id:
        params['run_id'] = str(agent_run_id)

    headers['User-Agent'] = USER_AGENT
    headers['Content-Encoding'] = 'identity'

    # Set up definitions for proxy server in case that has been set.

    proxies = proxy_server()

    # At this time we use JSON content encoding for the data being sent.
    # If an error does occur when encoding the JSON, then it isn't
    # likely going to work later on in a subsequent request with same
    # data, even if aggregated with other data, so we need to log the
    # details and then flag that data should be thrown away. Don't mind
    # being noisy in the log in this situation as it would indicate
    # a problem with the implementation of the agent.

    try:
        data = json_encode(payload)

    except Exception:
        _logger.exception('Error encoding data for JSON payload for '
                'method %r with payload of %r. Please report this problem '
                'to New Relic support.', method, payload)

        raise DiscardDataForRequest(str(sys.exc_info()[1]))

    # Log details of the call and/or payload for debugging. Use the JSON
    # encoded value so we know that what is encoded is correct.

    if settings.debug.log_data_collector_payloads:
        _logger.debug('Calling data collector with url=%r, method=%r and '
                'payload=%r.', url, method, data)
    elif settings.debug.log_data_collector_calls:
        _logger.debug('Calling data collector with url=%r and method=%r.',
                url, method)

    # Compress the serialized JSON being sent as content if it is over
    # 64KiB in size and the method is not one of the message types
    # excluded from compression.

    threshold = settings.agent_limits.data_compression_threshold

    if method not in _deflate_exclude_list and len(data) > threshold:
        headers['Content-Encoding'] = 'deflate'

        level = settings.agent_limits.data_compression_level
        level = level or zlib.Z_DEFAULT_COMPRESSION
        data = zlib.compress(six.b(data), level)

    # If there is no requests session object provided for making
    # requests, create one now. We want to close this as soon as we
    # are done with it.

    auto_close_session = False

    if not session:
        session = requests.session()
        auto_close_session = True

    # The 'requests' library can raise a number of exception derived
    # from 'RequestException' before we even manage to get a connection
    # to the data collector.
    #
    # The data collector can then generate a number of different types of
    # HTTP errors for requests. These are:
    #
    # 400 Bad Request - For incorrect method type or incorrectly
    # constructed parameters. We should not get this and if we do it would
    # likely indicate a problem with the implementation of the agent.
    #
    # 413 Request Entity Too Large - Where the request content was too
    # large. The limits on number of nodes in slow transaction traces
    # should in general prevent this, but not everything has size limits
    # and so rogue data could still blow things out. Same data is not
    # going to work later on in a subsequent request, even if aggregated
    # with other data, so we need to log the details and then flag that
    # data should be thrown away.
    #
    # 415 Unsupported Media Type - This occurs when the JSON which was
    # sent can't be decoded by the data collector. If this is a true
    # problem with the JSON formatting, then sending again, even if
    # aggregated with other data, may not work, so we need to log the
    # details and then flag that data should be thrown away.
    #
    # 503 Service Unavailable - This occurs when the data collector, or
    # the core application, is being restarted and is not in a state to
    # accept requests. It should be a transient issue, so we should be
    # able to retain the data and try again.

    # If audit logging is enabled, log the requests details.

    log_id = _log_request(url, params, headers, data)

    connection = connection_type(proxies)

    try:
        # The timeout value in the requests module is only on
        # the initial connection and doesn't apply to how long
        # it takes to get back a response.

        cert_loc = certs.where()

        if settings.debug.disable_certificate_validation:
            cert_loc = False

        timeout = settings.agent_limits.data_collector_timeout

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")

            r = session.post(url, params=params, headers=headers,
                    proxies=proxies, timeout=timeout, data=data,
                    verify=cert_loc)

        # Read the content now so we can force close the socket
        # connection if this is a transient session as quickly
        # as possible.

        content = r.content

    except requests.RequestException:
        exc_type, message = sys.exc_info()[:2]

        internal_metric('Supportability/Python/Collector/Failures', 1)
        internal_metric('Supportability/Python/Collector/Failures/'
                '%s' % connection, 1)

        internal_metric('Supportability/Python/Collector/Exception/'
                '%s' % callable_name(exc_type), 1)

        if not settings.proxy_host or not settings.proxy_port:
            _logger.warning('Data collector is not contactable. This can be '
                    'because of a network issue or because of the data '
                    'collector being restarted. In the event that contact '
                    'cannot be made after a period of time then please '
                    'report this problem to New Relic support for further '
                    'investigation. The error raised was %r.', message)

        else:
            _logger.warning('Data collector is not contactable via the proxy '
                    'host %r on port %r with proxy user of %r. This can be '
                    'because of a network issue or because of the data '
                    'collector being restarted. In the event that contact '
                    'cannot be made after a period of time then please '
                    'report this problem to New Relic support for further '
                    'investigation. The error raised was %r.',
                    settings.proxy_host, settings.proxy_port,
                    settings.proxy_user, message)

        raise RetryDataForRequest(str(message))

    except Exception:
        # Any unexpected exception will be caught by higher layer, but
        # still attempt to log a metric here just in case agent run
        # doesn't get shutdown as a result of the exception.

        exc_type = sys.exc_info()[0]

        internal_metric('Supportability/Python/Collector/Failures', 1)
        internal_metric('Supportability/Python/Collector/Failures/'
                '%s' % connection, 1)

        internal_metric('Supportability/Python/Collector/Exception/'
                '%s' % callable_name(exc_type), 1)

        raise

    finally:
        if auto_close_session:
            session.close()
            session = None

    if r.status_code != 200:
        _logger.warning('Received a non 200 HTTP response from the data '
                'collector where url=%r, method=%r, license_key=%r, '
                'agent_run_id=%r, params=%r, headers=%r, status_code=%r '
                'and content=%r.', url, method, license_key, agent_run_id,
                params, headers, r.status_code, content)

        internal_metric('Supportability/Python/Collector/Failures', 1)
        internal_metric('Supportability/Python/Collector/Failures/'
                '%s' % connection, 1)

        internal_metric('Supportability/Python/Collector/HTTPError/%d'
                % r.status_code, 1)

    if r.status_code == 400:
        _logger.error('Data collector is indicating that a bad '
                'request has been submitted for url %r, headers of %r, '
                'params of %r and payload of %r. Please report this '
                'problem to New Relic support.', url, headers, params,
                payload)

        raise DiscardDataForRequest()

    elif r.status_code == 413:
        _logger.warning('Data collector is indicating that a request for '
                'method %r was received where the request content size '
                'was over the maximum allowed size limit. The length of '
                'the request content was %d. If this keeps occurring on a '
                'regular basis, please report this problem to New Relic '
                'support for further investigation.', method, len(data))

        raise DiscardDataForRequest()

    elif r.status_code == 415:
        _logger.warning('Data collector is indicating that it was sent '
                'malformed JSON data for method %r. If this keeps occurring '
                'on a regular basis, please report this problem to New '
                'Relic support for further investigation.', method)

        if settings.debug.log_malformed_json_data:
            if headers['Content-Encoding'] == 'deflate':
                data = zlib.decompress(data)

            _logger.info('JSON data which was rejected by the data '
                    'collector was %r.', data)

        raise DiscardDataForRequest(content)

    elif r.status_code == 503:
        _logger.warning('Data collector is unavailable. This can be a '
                'transient issue because of the data collector or our '
                'core application being restarted. If the issue persists '
                'it can also be indicative of a problem with our servers. '
                'In the event that availability of our servers is not '
                'restored after a period of time then please report this '
                'problem to New Relic support for further investigation.')

        raise ServerIsUnavailable()

    elif r.status_code != 200:
        if not settings.proxy_host or not settings.proxy_port:
            _logger.warning('An unexpected HTTP response was received from '
                    'the data collector of %r for method %r. The payload for '
                    'the request was %r. If this issue persists then please '
                    'report this problem to New Relic support for further '
                    'investigation.', r.status_code, method, payload)

        else:
            _logger.warning('An unexpected HTTP response was received from '
                    'the data collector of %r for method %r while connecting '
                    'via proxy host %r on port %r with proxy user of %r. '
                    'The payload for the request was %r. If this issue '
                    'persists then please report this problem to New Relic '
                    'support for further investigation.', r.status_code,
                    method, settings.proxy_host, settings.proxy_port,
                    settings.proxy_user, payload)

        raise DiscardDataForRequest()

    # Log details of the response payload for debugging. Use the JSON
    # encoded value so we know what the original encoded value was.

    duration = time.time() - start

    if settings.debug.log_data_collector_payloads:
        _logger.debug('Valid response from data collector after %.2f '
                'seconds with content=%r.', duration, content)
    elif settings.debug.log_data_collector_calls:
        _logger.debug('Valid response from data collector after %.2f '
                'seconds.', duration)

    # If we got this far we should have a legitimate response from the
    # data collector. The response is JSON, so we need to decode it.

    try:
        if six.PY3:
            content = content.decode('UTF-8')

        result = json_decode(content)

    except Exception:
        _logger.exception('Error decoding data for JSON payload for '
                'method %r with payload of %r. Please report this problem '
                'to New Relic support.', method, content)

        if settings.debug.log_malformed_json_data:
            _logger.info('JSON data received from data collector which '
                    'could not be decoded was %r.', content)

        raise DiscardDataForRequest(str(sys.exc_info()[1]))

    # The decoded JSON can be either for a successful response or an
    # error. A successful response has a 'return_value' element and on
    # error an 'exception' element.

    if log_id is not None:
        _log_response(log_id, result)

    if 'return_value' in result:
        return result['return_value']

    error_type = result['exception']['error_type']
    message = result['exception']['message']

    # Now we need to check for server side exceptions. The following
    # exceptions can occur for abnormal events.

    _logger.warning('Received an exception from the data collector where '
            'url=%r, method=%r, license_key=%r, agent_run_id=%r, params=%r, '
            'headers=%r, error_type=%r and message=%r', url, method,
            license_key, agent_run_id, params, headers, error_type,
            message)

    # Technically most server side errors will result in the active
    # agent run being abandoned and so there is no point trying to
    # create a metric for when they occur. Leave this here though to at
    # least log a metric for the case where a completely unexpected
    # server error response is received and the agent run does manage to
    # continue and further requests don't just keep failing. Since we do
    # not even expect the metric to be retained, use the original error
    # type as sent.

    internal_metric('Supportability/Python/Collector/ServerError/'
            '%s' % error_type, 1)

    if error_type == 'NewRelic::Agent::LicenseException':
        _logger.error('Data collector is indicating that an incorrect '
                'license key has been supplied by the agent. The value '
                'which was used by the agent is %r. Please correct any '
                'problem with the license key or report this problem to '
                'New Relic support.', license_key)

        raise DiscardDataForRequest(message)

    elif error_type == 'NewRelic::Agent::PostTooBigException':
        # As far as we know we should never see this type of server side
        # error as for the JSON API we should always get back an HTTP 413
        # error response instead.

        internal_metric('Supportability/Python/Collector/Failures', 1)
        internal_metric('Supportability/Python/Collector/Failures/'
                '%s' % connection, 1)

        _logger.warning('Core application is indicating that a request for '
                'method %r was received where the request content size '
                'was over the maximum allowed size limit. The length of '
                'the request content was %d. If this keeps occurring on a '
                'regular basis, please report this problem to New Relic '
                'support for further investigation.', method, len(data))

        raise DiscardDataForRequest(message)

    # Server side exceptions are also used to inform the agent to
    # perform certain actions such as restart when server side
    # configuration has changed for this application or when agent is
    # being disabled remotely for some reason.

    if error_type == 'NewRelic::Agent::ForceRestartException':
        _logger.info('An automatic internal agent restart has been '
                'requested by the data collector for the application '
                'where the agent run was %r. The reason given for the '
                'forced restart is %r.', agent_run_id, message)

        raise ForceAgentRestart(message)

    elif error_type == 'NewRelic::Agent::ForceDisconnectException':
        _logger.critical('Disconnection of the agent has been requested by '
                'the data collector for the application where the '
                'agent run was %r. The reason given for the forced '
                'disconnection is %r. Please contact New Relic support '
                'for further information.', agent_run_id, message)

        raise ForceAgentDisconnect(message)

    # We received an unexpected server side error we don't know what to
    # do with. Ignoring PostTooBigException which we expect that we
    # should never receive, unexpected server side errors are the only
    # ones we record a failure metric for as other server side errors
    # are really commands to have the agent do something.

    internal_metric('Supportability/Python/Collector/Failures', 1)
    internal_metric('Supportability/Python/Collector/Failures/'
            '%s' % connection, 1)

    _logger.warning('An unexpected server error was received from the '
            'data collector for method %r with payload of %r. The error '
            'was of type %r with message %r. If this issue persists '
            'then please report this problem to New Relic support for '
            'further investigation.', method, payload, error_type, message)

    raise DiscardDataForRequest(message)
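The compression branch near the top of this example only deflates payloads that cross the configured threshold and whose method is not on the exclusion list. A condensed, standalone sketch of that decision (the threshold value and the exclusion list contents here are illustrative):

import json
import zlib

DATA_COMPRESSION_THRESHOLD = 64 * 1024
_deflate_exclude_list = set(['profile_data'])  # illustrative contents


def encode_body(method, payload):
    # Serialize first; compress only large payloads for non-excluded methods.
    data = json.dumps(payload)
    headers = {'Content-Encoding': 'identity'}
    if method not in _deflate_exclude_list and len(data) > DATA_COMPRESSION_THRESHOLD:
        headers['Content-Encoding'] = 'deflate'
        data = zlib.compress(data.encode('utf-8'), zlib.Z_DEFAULT_COMPRESSION)
    return headers, data


headers, body = encode_body('metric_data', [['dummy'] * 20000])
assert headers['Content-Encoding'] == 'deflate'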
Example #18
def _log_request(url, params, headers, data):
    settings = global_settings()

    if not settings.audit_log_file:
        return

    global _audit_log_fp

    if not _audit_log_fp:
        log_file = settings.audit_log_file
        try:
            _audit_log_fp = open(log_file, 'a')
        except Exception:
            _logger.exception('Unable to open audit log file %r.', log_file)
            settings.audit_log_file = None
            return

    global _audit_log_id

    _audit_log_id += 1

    print('TIME: %r' % time.strftime('%Y-%m-%d %H:%M:%S',
            time.localtime()), file=_audit_log_fp)
    print(file=_audit_log_fp)
    print('ID: %r' % _audit_log_id, file=_audit_log_fp)
    print(file=_audit_log_fp)
    print('PID: %r' % os.getpid(), file=_audit_log_fp)
    print(file=_audit_log_fp)
    print('URL: %r' % url, file=_audit_log_fp)
    print(file=_audit_log_fp)
    print('PARAMS: %r' % params, file=_audit_log_fp)
    print(file=_audit_log_fp)
    print('HEADERS: %r' % headers, file=_audit_log_fp)
    print(file=_audit_log_fp)
    print('DATA:', end=' ', file=_audit_log_fp)

    if headers.get('Content-Encoding') == 'deflate':
        data = zlib.decompress(data)

        if isinstance(data, bytes):
            data = data.decode('Latin-1')

    object_from_json = json_decode(data)

    pprint(object_from_json, stream=_audit_log_fp)

    if params.get('method') == 'transaction_sample_data':
        for i, sample in enumerate(object_from_json[1]):
            field_as_json = unpack_field(sample[4])
            print(file=_audit_log_fp)
            print('DATA[1][%d][4]:' % i, end=' ', file=_audit_log_fp)
            pprint(field_as_json, stream=_audit_log_fp)

    elif params.get('method') == 'profile_data':
        for i, sample in enumerate(object_from_json[1]):
            field_as_json = unpack_field(sample[4])
            print(file=_audit_log_fp)
            print('DATA[1][%d][4]:' % i, end=' ', file=_audit_log_fp)
            pprint(field_as_json, stream=_audit_log_fp)

    elif params.get('method') == 'sql_trace_data':
        for i, sample in enumerate(object_from_json[0]):
            field_as_json = unpack_field(sample[9])
            print(file=_audit_log_fp)
            print('DATA[0][%d][9]:' % i, end=' ', file=_audit_log_fp)
            pprint(field_as_json, stream=_audit_log_fp)

    print(file=_audit_log_fp)
    print(78 * '=', file=_audit_log_fp)
    print(file=_audit_log_fp)

    _audit_log_fp.flush()

    return _audit_log_id