Example #1
    def generate_request_headers(transaction):
        """
        Return a list of NewRelic specific headers as tuples
        [(HEADER_NAME0, HEADER_VALUE0), (HEADER_NAME1, HEADER_VALUE1)]

        """

        if transaction is None:
            return []

        settings = transaction.settings

        nr_headers = []

        if settings.cross_application_tracer.enabled:

            transaction.is_part_of_cat = True
            encoded_cross_process_id = obfuscate(settings.cross_process_id,
                                                 settings.encoding_key)
            nr_headers.append(('X-NewRelic-ID', encoded_cross_process_id))

            transaction_data = [
                transaction.guid, transaction.record_tt, transaction.trip_id,
                transaction.path_hash
            ]
            encoded_transaction = obfuscate(json_encode(transaction_data),
                                            settings.encoding_key)
            nr_headers.append(('X-NewRelic-Transaction', encoded_transaction))

        if transaction.synthetics_header:
            nr_headers.append(
                ('X-NewRelic-Synthetics', transaction.synthetics_header))

        return nr_headers
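
For reference, the obfuscated X-NewRelic-ID and X-NewRelic-Transaction values built above can be reversed with a matching deobfuscate step. Below is a minimal, self-contained sketch of the XOR-then-Base64 scheme this obfuscation is commonly described as using; the helper names and the key are illustrative, not the agent's actual implementation.

import base64
import json
from itertools import cycle

def obfuscate(text, key):
    # XOR each character of the JSON text against the cycled encoding
    # key, then Base64 the result so it is safe to place in a header.
    xored = bytes(ord(c) ^ ord(k) for c, k in zip(text, cycle(key)))
    return base64.b64encode(xored).decode('ascii')

def deobfuscate(value, key):
    # Reverse of obfuscate(): Base64 decode, then XOR with the same key.
    raw = base64.b64decode(value)
    return ''.join(chr(b ^ ord(k)) for b, k in zip(raw, cycle(key)))

encoding_key = 'd67afc830dab717fd163bfcb0b8b88423e9a1a3b'  # illustrative
transaction_data = ['0123456789012345', False, 'trip-id', 'path-hash']
header_value = obfuscate(json.dumps(transaction_data), encoding_key)
assert json.loads(deobfuscate(header_value, encoding_key)) == transaction_data
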
Example #2

    def send_request(
        self,
        method="POST",
        path="/agent_listener/invoke_raw_method",
        params=None,
        headers=None,
        payload=None,
    ):
        request_id = self.log_request(
            self._audit_log_fp,
            "POST",
            "https://fake-collector.newrelic.com" + path,
            params,
            payload,
            headers,
        )
        if not params or "method" not in params:
            return 400, b"Missing method parameter"

        method = params["method"]
        if method not in self.RESPONSES:
            return 400, b"Invalid method received"

        result = self.RESPONSES[method]
        payload = {"return_value": result}
        response_data = json_encode(payload).encode("utf-8")
        self.log_response(
            self._audit_log_fp, request_id, 200, {}, response_data,
        )
        return 200, response_data
Example #3

def create_incoming_headers(transaction):
    settings = transaction.settings
    encoding_key = settings.encoding_key

    headers = []

    cross_process_id = '1#2'
    path = 'test'
    queue_time = 1.0
    duration = 2.0
    read_length = 1024
    guid = '0123456789012345'
    record_tt = False

    payload = (cross_process_id, path, queue_time, duration, read_length,
            guid, record_tt)
    app_data = json_encode(payload)

    value = obfuscate(app_data, encoding_key)

    assert isinstance(value, type(''))

    headers.append(('X-NewRelic-App-Data', value))

    return headers
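
On the calling side, the X-NewRelic-App-Data value is decoded and unpacked into the same seven fields used to build it. A self-contained sketch, under the same illustrative XOR-then-Base64 assumption as the sketch after Example #1:

import base64
import json
from itertools import cycle

def obfuscate(text, key):
    return base64.b64encode(bytes(
        ord(c) ^ ord(k) for c, k in zip(text, cycle(key)))).decode('ascii')

def deobfuscate(value, key):
    raw = base64.b64decode(value)
    return ''.join(chr(b ^ ord(k)) for b, k in zip(raw, cycle(key)))

encoding_key = 'd67afc830dab717fd163bfcb0b8b88423e9a1a3b'  # illustrative
value = obfuscate(json.dumps(
    ['1#2', 'test', 1.0, 2.0, 1024, '0123456789012345', False]), encoding_key)

# Unpack in the same field order used to build the payload above.
(cross_process_id, path, queue_time, duration,
        read_length, guid, record_tt) = json.loads(deobfuscate(value, encoding_key))
assert cross_process_id == '1#2' and record_tt is False
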
Example #4
    def profile_data(self):

        # Generic profiling sessions have to wait for completion before
        # reporting data.

        if self.state == SessionState.RUNNING:
            return None

        # We prune the number of nodes sent if we are over the specified
        # limit. This is just to avoid having the response be too large
        # and get rejected by the data collector.

        settings = global_settings()
        self._prune_call_trees(settings.agent_limits.thread_profiler_nodes)

        flat_tree = {}
        thread_count = 0

        for category, bucket in six.iteritems(self.call_buckets):

            # Only flatten buckets that have data in them. No need to send
            # empty buckets.

            if bucket:
                flat_tree[category] = [x.flatten() for x in bucket.values()]
                thread_count += len(bucket)

        # Construct the actual final data for sending. The actual call
        # data is turned into JSON, compressed and then base64 encoded at
        # this point to cut its size.

        if settings.debug.log_thread_profile_payload:
            _logger.debug('Encoding thread profile data where '
                          'payload=%r.', flat_tree)

        json_call_tree = json_encode(flat_tree)

        level = settings.agent_limits.data_compression_level
        level = level or zlib.Z_DEFAULT_COMPRESSION

        encoded_tree = base64.standard_b64encode(
            zlib.compress(six.b(json_call_tree), level))

        if six.PY3:
            encoded_tree = encoded_tree.decode('Latin-1')

        profile = [[
            self.profile_id, self.start_time_s * 1000,
            (self.actual_stop_time_s or time.time()) * 1000, self.sample_count,
            encoded_tree, thread_count, 0, None
        ]]

        # Reset the data structures to default.

        self.reset_profile_data()
        return profile
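
Decoding the encoded_tree produced above is the same pipeline in reverse: Base64 decode, zlib decompress, then a JSON load. A self-contained round trip with stand-in profile data:

import base64
import json
import zlib

flat_tree = {'REQUEST': [['stand-in', 'call', 'tree']]}  # illustrative data

encoded_tree = base64.standard_b64encode(
    zlib.compress(json.dumps(flat_tree).encode('utf-8')))

decoded_tree = json.loads(
    zlib.decompress(base64.standard_b64decode(encoded_tree)))
assert decoded_tree == flat_tree
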
Example #5
def target_wsgi_application(environ, start_response):
    status = '200 OK'

    txn_name = environ.get('txn')
    if six.PY2:
        txn_name = txn_name.decode('UTF-8')
    txn_name = txn_name.split('/', 3)

    guid = environ.get('guid')
    old_cat = environ.get('old_cat') == 'True'
    txn = current_transaction()

    txn.guid = guid
    for req in OUTBOUND_REQUESTS:
        # Change the transaction name before making an outbound call.
        outgoing_name = req['outboundTxnName'].split('/', 3)
        if outgoing_name[0] != 'WebTransaction':
            set_background_task(True)

        set_transaction_name(outgoing_name[2], group=outgoing_name[1])

        expected_outbound_header = obfuscate(
            json_encode(req['expectedOutboundPayload']), ENCODING_KEY)
        generated_outbound_header = dict(
            ExternalTrace.generate_request_headers(txn))

        # A 500 error is returned because 'assert' statements in the wsgi app
        # are ignored.

        if old_cat:
            if (expected_outbound_header !=
                    generated_outbound_header['X-NewRelic-Transaction']):
                status = '500 Outbound Headers Check Failed.'
        else:
            if 'X-NewRelic-Transaction' in generated_outbound_header:
                status = '500 Outbound Headers Check Failed.'
        r = urlopen(environ['server_url'])
        r.read(10)

    # Set the final transaction name.

    if txn_name[0] != 'WebTransaction':
        set_background_task(True)
    set_transaction_name(txn_name[2], group=txn_name[1])

    text = '<html><head>%s</head><body><p>RESPONSE</p>%s</body></html>'

    output = (text % (get_browser_timing_header(),
                      get_browser_timing_footer())).encode('UTF-8')

    response_headers = [('Content-type', 'text/html; charset=utf-8'),
                        ('Content-Length', str(len(output)))]
    start_response(status, response_headers)

    return [output]
Example #6
    def send_request(cls, session, url, method, license_key,
            agent_run_id=None, payload=()):

        assert method in _developer_mode_responses

        # Create fake details for the request being made so that we
        # can use the same audit logging functionality.

        params = {}
        headers = {}

        if not license_key:
            license_key = 'NO LICENSE KEY WAS SET IN AGENT CONFIGURATION'

        params['method'] = method
        params['license_key'] = license_key
        params['protocol_version'] = '14'
        params['marshal_format'] = 'json'

        if agent_run_id:
            params['run_id'] = str(agent_run_id)

        headers['User-Agent'] = USER_AGENT
        headers['Content-Encoding'] = 'identity'

        data = json_encode(payload)

        log_id = _log_request(url, params, headers, data)

        # Now create the fake responses so the agent still runs okay.

        result = _developer_mode_responses[method]

        if method == 'connect':
            settings = global_settings()
            if settings.high_security:
                result = dict(result)
                result['high_security'] = True

        # Even though they are always fake responses, still log them.

        if log_id is not None:
            _log_response(log_id, dict(return_value=result))

        return result
Example #7
    def finalize(self):
        for key in self.configuration.aws_lambda_metadata:
            if key not in self._metadata:
                self._metadata[key] = self.configuration.aws_lambda_metadata[key]

        data = self.client.finalize()

        payload = {
            "metadata": self._metadata,
            "data": data,
        }

        encoded = serverless_payload_encode(payload)
        payload = json_encode((1, "NR_LAMBDA_MONITORING", encoded))

        print(payload)

        return payload
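
Consumers of the printed payload reverse the wrapping to get the metadata and data back. The sketch below assumes serverless_payload_encode() produces Base64-wrapped, gzip-compressed JSON; that encoding is an assumption for illustration, not confirmed by this snippet.

import base64
import gzip
import json

def encode_payload(payload):
    # Assumed scheme: JSON -> gzip -> Base64 (illustrative stand-in
    # for serverless_payload_encode).
    body = gzip.compress(json.dumps(payload).encode('utf-8'))
    return base64.b64encode(body).decode('utf-8')

def decode_payload(encoded):
    return json.loads(gzip.decompress(base64.b64decode(encoded)))

wrapper = [1, 'NR_LAMBDA_MONITORING',
           encode_payload({'metadata': {}, 'data': {}})]
version, protocol, encoded = wrapper
assert decode_payload(encoded) == {'metadata': {}, 'data': {}}
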
Example #8
    def generate_request_headers(cls, transaction):
        """
        Return a list of NewRelic specific headers as tuples
        [(HEADER_NAME0, HEADER_VALUE0), (HEADER_NAME1, HEADER_VALUE1)]

        """

        if transaction is None:
            return []

        settings = transaction.settings

        nr_headers = []

        if settings.distributed_tracing.enabled:
            payload = transaction.create_distributed_trace_payload()
            if not payload:
                return []

            encoded_header = payload.http_safe()
            nr_headers.append((cls.cat_distributed_trace_key, encoded_header))

        elif settings.cross_application_tracer.enabled:
            transaction.is_part_of_cat = True
            encoded_cross_process_id = obfuscate(settings.cross_process_id,
                                                 settings.encoding_key)
            nr_headers.append((cls.cat_id_key, encoded_cross_process_id))

            transaction_data = [
                transaction.guid, transaction.record_tt, transaction.trip_id,
                transaction.path_hash
            ]
            encoded_transaction = obfuscate(json_encode(transaction_data),
                                            settings.encoding_key)
            nr_headers.append((cls.cat_transaction_key, encoded_transaction))

        if transaction.synthetics_header:
            nr_headers.append(
                (cls.cat_synthetics_key, transaction.synthetics_header))

        return nr_headers
Example #9
    def _validate_log_event_collector_json(wrapped, instance, args, kwargs):
        try:
            result = wrapped(*args, **kwargs)
        except:
            raise
        else:

            samples = list(instance.log_events)
            s_info = instance.log_events.sampling_info
            agent_run_id = 666

            # emulate the payload used in data_collector.py

            payload = (agent_run_id, s_info, samples)
            collector_json = json_encode(payload)

            decoded_json = json.loads(collector_json)

            assert decoded_json[0] == agent_run_id

            sampling_info = decoded_json[1]

            reservoir_size = instance.settings.application_logging.max_samples_stored

            assert sampling_info["reservoir_size"] == reservoir_size
            assert sampling_info["events_seen"] == num_logs

            log_events = decoded_json[2]

            assert len(log_events) == num_logs
            for event in log_events:

                # event is an array containing intrinsics, user-attributes,
                # and agent-attributes

                assert len(event) == 3
                for d in event:
                    assert isinstance(d, dict)

        return result
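
The payload checked above is a three-element array of agent run id, sampling info, and events, where each event is a triple of intrinsics, user attributes, and agent attributes. A small stand-alone reconstruction with made-up values:

import json

agent_run_id = 666
sampling_info = {'reservoir_size': 10000, 'events_seen': 2}
log_events = [
    [{'timestamp': 1, 'message': 'first'}, {'user.attr': 1}, {'agent.attr': 1}],
    [{'timestamp': 2, 'message': 'second'}, {}, {}],
]

collector_json = json.dumps((agent_run_id, sampling_info, log_events))
decoded = json.loads(collector_json)
assert decoded[0] == agent_run_id
assert all(len(event) == 3 for event in decoded[2])
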
Example #10

    def generate_request_headers(cls, transaction):
        """
        Return a list of NewRelic specific headers as tuples
        [(HEADER_NAME0, HEADER_VALUE0), (HEADER_NAME1, HEADER_VALUE1)]

        """

        if transaction is None or transaction.settings is None:
            return []

        settings = transaction.settings

        nr_headers = []

        if settings.distributed_tracing.enabled:
            transaction.insert_distributed_trace_headers(nr_headers)

        elif settings.cross_application_tracer.enabled:
            transaction.is_part_of_cat = True
            path_hash = transaction.path_hash
            if path_hash is None:
                # Disable cat if path_hash fails to generate.
                transaction.is_part_of_cat = False
            else:
                encoded_cross_process_id = obfuscate(settings.cross_process_id,
                        settings.encoding_key)
                nr_headers.append((cls.cat_id_key, encoded_cross_process_id))

                transaction_data = [transaction.guid, transaction.record_tt,
                        transaction.trip_id, path_hash]
                encoded_transaction = obfuscate(json_encode(transaction_data),
                        settings.encoding_key)
                nr_headers.append(
                        (cls.cat_transaction_key, encoded_transaction))

        if transaction.synthetics_header:
            nr_headers.append(
                    (cls.cat_synthetics_key, transaction.synthetics_header))

        return nr_headers
Example #11

    def send_request(
        self,
        method="POST",
        path="/agent_listener/invoke_raw_method",
        params=None,
        headers=None,
        payload=None,
    ):
        agent_method = params["method"]
        if agent_method == "connect" and self.connect_response_fields is not DEFAULT:
            connect_response = dict(self.RESPONSES[agent_method])
            connect_response.update(self.connect_response_fields)
            payload = {"return_value": connect_response}
            response_data = json_encode(payload).encode("utf-8")
            return (200, response_data)
        else:
            return super(CustomTestClient, self).send_request(
                method,
                path,
                params,
                headers,
                payload,
            )
Example #12

    def profile_data(self):

        # Generic profiling sessions have to wait for completion before
        # reporting data.
        #
        # X-ray profile session can send partial profile data on every harvest.

        if ((self.profiler_type == SessionType.GENERIC)
                and (self.state == SessionState.RUNNING)):
            return None

        # We prune the number of nodes sent if we are over the specified
        # limit. This is just to avoid having the response be too large
        # and get rejected by the data collector.

        settings = global_settings()
        self._prune_call_trees(settings.agent_limits.thread_profiler_nodes)

        flat_tree = {}
        thread_count = 0

        for category, bucket in six.iteritems(self.call_buckets):

            # Only flatten buckets that have data in them. No need to send
            # empty buckets.

            if bucket:
                flat_tree[category] = [x.flatten() for x in bucket.values()]
                thread_count += len(bucket)

        # If no profile data was captured for an x-ray session return None
        # instead of sending an encoded empty data-structure. For a generic
        # profiler continue to send an empty tree. This can happen on a system
        # that uses green threads (coroutines), so sending an empty tree marks
        # the end of a profile session. If we don't send anything then the UI
        # times out after a very long time (~15mins) which is frustrating for
        # the customer.

        if (thread_count == 0) and (self.profiler_type == SessionType.XRAY):
            return None

        # Construct the actual final data for sending. The actual call
        # data is turned into JSON, compressed and then base64 encoded at
        # this point to cut its size.

        if settings.debug.log_thread_profile_payload:
            _logger.debug('Encoding thread profile data where '
                          'payload=%r.', flat_tree)

        json_call_tree = json_encode(flat_tree)

        level = settings.agent_limits.data_compression_level
        level = level or zlib.Z_DEFAULT_COMPRESSION

        encoded_tree = base64.standard_b64encode(
            zlib.compress(six.b(json_call_tree), level))

        if six.PY3:
            encoded_tree = encoded_tree.decode('Latin-1')

        profile = [[
            self.profile_id, self.start_time_s * 1000,
            (self.actual_stop_time_s or time.time()) * 1000, self.sample_count,
            encoded_tree, thread_count, 0, self.xray_id
        ]]

        # Reset the data structures to default. For x-ray profile sessions we
        # report the partial call tree at every harvest cycle. It is required
        # to reset the data structures to avoid aggregating the call trees
        # across harvest cycles.

        self.reset_profile_data()
        return profile
Example #13
    def process_response(self, status, response_headers, *args):
        """Processes response status and headers, extracting any
        details required and returning a set of additional headers
        to merge into that being returned for the web transaction.

        """

        additional_headers = []

        # Extract the HTTP status response code.

        try:
            self.response_code = int(status.split(' ')[0])
        except Exception:
            pass

        # Extract response content length and type for inclusion in agent
        # attributes

        try:

            for header, value in response_headers:
                lower_header = header.lower()
                if 'content-length' == lower_header:
                    self._response_properties['CONTENT_LENGTH'] = int(value)
                elif 'content-type' == lower_header:
                    self._response_properties['CONTENT_TYPE'] = value

        except Exception:
            pass

        # Generate metrics and response headers for inbound cross
        # process web external calls.

        if self.client_cross_process_id is not None:

            # Need to work out queueing time and duration up to this
            # point for inclusion in metrics and response header. If the
            # recording of the transaction had been prematurely stopped
            # via an API call, only return time up until that call was
            # made so it will match what is reported as duration for the
            # transaction.

            if self.queue_start:
                queue_time = self.start_time - self.queue_start
            else:
                queue_time = 0

            if self.end_time:
                duration = self.end_time - self.start_time
            else:
                duration = time.time() - self.start_time

            # Generate the metric identifying the caller.

            metric_name = 'ClientApplication/%s/all' % (
                    self.client_cross_process_id)
            self.record_custom_metric(metric_name, duration)

            # Generate the additional response headers which provide
            # information back to the caller. We need to freeze the
            # transaction name before adding to the header.

            self._freeze_path()

            payload = (self._settings.cross_process_id, self.path, queue_time,
                    duration, self._read_length, self.guid, self.record_tt)
            app_data = json_encode(payload)

            additional_headers.append(('X-NewRelic-App-Data', obfuscate(
                    app_data, self._settings.encoding_key)))

        # The additional headers returned need to be merged into the
        # original response headers passed back by the application.

        return additional_headers
Example #14
    def _to_http(self, method, payload=()):
        params = dict(self._params)
        params["method"] = method
        if self._run_token:
            params["run_id"] = self._run_token
        return params, self._headers, json_encode(payload).encode("utf-8")
Example #15
def record_deploy(
    host,
    api_key,
    app_name,
    description,
    revision="Unknown",
    changelog=None,
    user=None,
    port=443,
    proxy_scheme=None,
    proxy_host=None,
    proxy_user=None,
    proxy_pass=None,
    timeout=None,
    ca_bundle_path=None,
    disable_certificate_validation=False,
):
    headers = {"X-Api-Key": api_key or "", "Content-Type": "application/json"}

    client = agent_http.HttpClient(
        host=host,
        port=port,
        proxy_scheme=proxy_scheme,
        proxy_host=proxy_host,
        proxy_user=proxy_user,
        proxy_pass=proxy_pass,
        timeout=timeout,
        ca_bundle_path=ca_bundle_path,
        disable_certificate_validation=disable_certificate_validation,
    )

    with client:
        app_id = fetch_app_id(app_name, client, headers)
        if app_id is None:
            raise RuntimeError(
                "The application named %r was not found in your account. Please "
                "try running the newrelic-admin server-config command to force "
                "the application to register with New Relic." % app_name)

        path = "/v2/applications/{}/deployments.json".format(app_id)

        if user is None:
            user = pwd.getpwuid(os.getuid()).pw_gecos

        deployment = {}
        deployment["revision"] = revision

        if description:
            deployment["description"] = description
        if changelog:
            deployment["changelog"] = changelog
        if user:
            deployment["user"] = user

        data = {"deployment": deployment}
        payload = encoding_utils.json_encode(data).encode("utf-8")

        status_code, response = client.send_request("POST",
                                                    path,
                                                    headers=headers,
                                                    payload=payload)

        if status_code != 201:
            raise RuntimeError(
                "An unexpected HTTP response of %r was received "
                "for request made to https://%s:%d%s. The payload for the "
                "request was %r. The response payload for the request was %r. "
                "If this issue persists then please report this problem to New "
                "Relic support for further investigation." %
                (status_code, host, port, path, data, response))
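
A hypothetical invocation of record_deploy(); the host, key, and application name below are placeholders, and the call raises RuntimeError if the application is not found or the deployment is rejected.

record_deploy(
    host='api.newrelic.com',       # placeholder API host
    api_key='YOUR_REST_API_KEY',   # placeholder credential
    app_name='My Application',
    description='Deploy 1.2.3',
    revision='abc123',
    user='deploy-bot',
)
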
Example #16

    def browser_timing_footer(self):
        """Returns the JavaScript footer to be included in any HTML
        response to perform real user monitoring. This function returns
        the footer as a native Python string. In Python 2 native strings
        are stored as bytes. In Python 3 native strings are stored as
        unicode.

        """

        if not self.enabled:
            return ''

        if self._state != self.STATE_RUNNING:
            return ''

        if self.ignore_transaction:
            return ''

        # Only generate a footer if the header had already been
        # generated and we haven't already generated the footer.

        if not self.rum_header_generated:
            return ''

        if self.rum_footer_generated:
            return ''

        # Make sure we freeze the path.

        self._freeze_path()

        # When obfuscating values for the footer, we only use the
        # first 13 characters of the account license key.

        obfuscation_key = self._settings.license_key[:13]

        attributes = {}

        user_attributes = {}
        for attr in self.user_attributes:
            if attr.destinations & DST_BROWSER_MONITORING:
                user_attributes[attr.name] = attr.value

        if user_attributes:
            attributes['u'] = user_attributes

        request_parameters = self.request_parameters
        request_parameter_attributes = self.filter_request_parameters(
            request_parameters)
        agent_attributes = {}
        for attr in request_parameter_attributes:
            if attr.destinations & DST_BROWSER_MONITORING:
                agent_attributes[attr.name] = attr.value

        if agent_attributes:
            attributes['a'] = agent_attributes

        # Create the data structure that pulls all our data in.

        footer_data = self.browser_monitoring_intrinsics(obfuscation_key)

        if attributes:
            attributes = obfuscate(json_encode(attributes), obfuscation_key)
            footer_data['atts'] = attributes

        footer = _js_agent_footer_fragment % json_encode(footer_data)

        # To avoid any issues with browser encodings, we will make sure that
        # the javascript we inject for the browser agent is ASCII encodable.
        # Since we obfuscate all agent and user attributes, and the
        # transaction name, with base64 encoding, this will preserve those
        # strings if they have values outside of the ASCII character set.
        # In the case of Python 2, we actually then use the encoded value
        # as we need a native string, which for Python 2 is a byte string.
        # If encoding as ASCII fails we will return an empty string.

        try:
            if six.PY2:
                footer = footer.encode('ascii')
            else:
                footer.encode('ascii')

        except UnicodeError:
            if not WebTransaction.unicode_error_reported:
                _logger.error('ASCII encoding of js-agent-footer failed: %r',
                              footer)
                WebTransaction.unicode_error_reported = True

            footer = ''

        # We remember whether we have returned a non-empty string value
        # and if called a second time we will not return it again.

        if footer:
            self.rum_footer_generated = True

        return footer
Example #17
def _convert_to_cat_metadata_value(nr_headers):
    payload = json_encode(nr_headers)
    cat_linking_value = base64_encode(payload)
    return cat_linking_value
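
The linking value is just Base64-wrapped JSON, so consumers reverse it with a Base64 decode followed by a JSON load. A self-contained round trip (note that JSON turns the tuples into lists):

import base64
import json

nr_headers = [('X-NewRelic-ID', 'abc'), ('X-NewRelic-Transaction', 'def')]

cat_linking_value = base64.b64encode(
    json.dumps(nr_headers).encode('utf-8')).decode('ascii')

recovered = json.loads(base64.b64decode(cat_linking_value))
assert recovered == [['X-NewRelic-ID', 'abc'],
                     ['X-NewRelic-Transaction', 'def']]
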
Example #18
def send_request(session, url, method, license_key, agent_run_id=None,
            payload=()):
    """Constructs and sends a request to the data collector."""

    params = {}
    headers = {}

    settings = global_settings()

    start = time.time()

    # Validate that the license key was actually set and if not replace
    # it with a string which makes it more obvious it was not set.

    if not license_key:
        license_key = 'NO LICENSE KEY WAS SET IN AGENT CONFIGURATION'

    # The agent formats requests and is able to handle responses for
    # protocol version 14.

    params['method'] = method
    params['license_key'] = license_key
    params['protocol_version'] = '14'
    params['marshal_format'] = 'json'

    if agent_run_id:
        params['run_id'] = str(agent_run_id)

    headers['User-Agent'] = USER_AGENT
    headers['Content-Encoding'] = 'identity'

    # Set up definitions for proxy server in case that has been set.

    proxies = proxy_server()

    # At this time we use JSON content encoding for the data being sent.
    # If an error does occur when encoding the JSON, then it isn't
    # likely going to work later on in a subsequent request with the same
    # data, even if aggregated with other data, so we need to log the
    # details and then flag that data should be thrown away. Don't mind
    # being noisy in the log in this situation as it would indicate
    # a problem with the implementation of the agent.

    try:
        data = json_encode(payload)

    except Exception:
        _logger.exception('Error encoding data for JSON payload for '
                'method %r with payload of %r. Please report this problem '
                'to New Relic support.', method, payload)

        raise DiscardDataForRequest(str(sys.exc_info()[1]))

    # Log details of call and/or payload for debugging. Use the JSON
    # encoded value so we know that what is encoded is correct.

    if settings.debug.log_data_collector_payloads:
        _logger.debug('Calling data collector with url=%r, method=%r and '
                'payload=%r.', url, method, data)
    elif settings.debug.log_data_collector_calls:
        _logger.debug('Calling data collector with url=%r and method=%r.',
                url, method)

    # Compress the serialized JSON being sent as content if over 64KiB
    # in size and not in message types that further compression is
    # excluded.

    threshold = settings.agent_limits.data_compression_threshold

    if method not in _deflate_exclude_list and len(data) > threshold:
        headers['Content-Encoding'] = 'deflate'

        level = settings.agent_limits.data_compression_level
        level = level or zlib.Z_DEFAULT_COMPRESSION
        data = zlib.compress(six.b(data), level)

    # If there is no requests session object provided for making
    # requests create one now. We want to close this as soon as we
    # are done with it.

    auto_close_session = False

    if not session:
        session = requests.session()
        auto_close_session = True

    # The 'requests' library can raise a number of exception derived
    # from 'RequestException' before we even manage to get a connection
    # to the data collector.
    #
    # The data collector can then generate a number of different types of
    # HTTP errors for requests. These are:
    #
    # 400 Bad Request - For an incorrect method type or incorrectly
    # constructed parameters. We should not get this and if we do it would
    # likely indicate a problem with the implementation of the agent.
    #
    # 413 Request Entity Too Large - Where the request content was too
    # large. The limits on number of nodes in slow transaction traces
    # should in general prevent this, but not everything has size limits
    # and so rogue data could still blow things out. The same data is not
    # going to work later on in a subsequent request, even if aggregated
    # with other data, so we need to log the details and then flag that
    # data should be thrown away.
    #
    # 415 Unsupported Media Type - This occurs when the JSON which was
    # sent can't be decoded by the data collector. If this is a true
    # problem with the JSON formatting, then sending again, even if
    # aggregated with other data, may not work, so we need to log the
    # details and then flag that data should be thrown away.
    #
    # 503 Service Unavailable - This occurs when data collector, or core
    # application is being restarted and not in state to be able to
    # accept requests. It should be a transient issue, so we should be
    # able to retain the data and try again.

    # If audit logging is enabled, log the request details.

    log_id = _log_request(url, params, headers, data)

    connection = connection_type(proxies)

    try:
        # The timeout value in the requests module is only on
        # the initial connection and doesn't apply to how long
        # it takes to get back a response.

        cert_loc = certs.where()

        if settings.debug.disable_certificate_validation:
            cert_loc = False

        timeout = settings.agent_limits.data_collector_timeout

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")

            r = session.post(url, params=params, headers=headers,
                    proxies=proxies, timeout=timeout, data=data,
                    verify=cert_loc)

        # Read the content now so we can force close the socket
        # connection if this is a transient session as quickly
        # as possible.

        content = r.content

    except requests.RequestException:
        exc_type, message = sys.exc_info()[:2]

        internal_metric('Supportability/Python/Collector/Failures', 1)
        internal_metric('Supportability/Python/Collector/Failures/'
                '%s' % connection, 1)

        internal_metric('Supportability/Python/Collector/Exception/'
                '%s' % callable_name(exc_type), 1)

        if not settings.proxy_host or not settings.proxy_port:
            _logger.warning('Data collector is not contactable. This can be '
                    'because of a network issue or because of the data '
                    'collector being restarted. In the event that contact '
                    'cannot be made after a period of time then please '
                    'report this problem to New Relic support for further '
                    'investigation. The error raised was %r.', message)

        else:
            _logger.warning('Data collector is not contactable via the proxy '
                    'host %r on port %r with proxy user of %r. This can be '
                    'because of a network issue or because of the data '
                    'collector being restarted. In the event that contact '
                    'cannot be made after a period of time then please '
                    'report this problem to New Relic support for further '
                    'investigation. The error raised was %r.',
                    settings.proxy_host, settings.proxy_port,
                    settings.proxy_user, message)

        raise RetryDataForRequest(str(message))

    except Exception:
        # Any unexpected exception will be caught by a higher layer, but
        # still attempt to log a metric here just in case agent run
        # doesn't get shutdown as a result of the exception.

        exc_type = sys.exc_info()[0]

        internal_metric('Supportability/Python/Collector/Failures', 1)
        internal_metric('Supportability/Python/Collector/Failures/'
                '%s' % connection, 1)

        internal_metric('Supportability/Python/Collector/Exception/'
                '%s' % callable_name(exc_type), 1)

        raise

    finally:
        if auto_close_session:
            session.close()
            session = None

    if r.status_code != 200:
        _logger.warning('Received a non 200 HTTP response from the data '
                'collector where url=%r, method=%r, license_key=%r, '
                'agent_run_id=%r, params=%r, headers=%r, status_code=%r '
                'and content=%r.', url, method, license_key, agent_run_id,
                params, headers, r.status_code, content)

        internal_metric('Supportability/Python/Collector/Failures', 1)
        internal_metric('Supportability/Python/Collector/Failures/'
                '%s' % connection, 1)

        internal_metric('Supportability/Python/Collector/HTTPError/%d'
                % r.status_code, 1)

    if r.status_code == 400:
        _logger.error('Data collector is indicating that a bad '
                'request has been submitted for url %r, headers of %r, '
                'params of %r and payload of %r. Please report this '
                'problem to New Relic support.', url, headers, params,
                payload)

        raise DiscardDataForRequest()

    elif r.status_code == 413:
        _logger.warning('Data collector is indicating that a request for '
                'method %r was received where the request content size '
                'was over the maximum allowed size limit. The length of '
                'the request content was %d. If this keeps occurring on a '
                'regular basis, please report this problem to New Relic '
                'support for further investigation.', method, len(data))

        raise DiscardDataForRequest()

    elif r.status_code == 415:
        _logger.warning('Data collector is indicating that it was sent '
                'malformed JSON data for method %r. If this keeps occurring '
                'on a regular basis, please report this problem to New '
                'Relic support for further investigation.', method)

        if settings.debug.log_malformed_json_data:
            if headers['Content-Encoding'] == 'deflate':
                data = zlib.decompress(data)

            _logger.info('JSON data which was rejected by the data '
                    'collector was %r.', data)

        raise DiscardDataForRequest(content)

    elif r.status_code == 503:
        _logger.warning('Data collector is unavailable. This can be a '
                'transient issue because of the data collector or our '
                'core application being restarted. If the issue persists '
                'it can also be indicative of a problem with our servers. '
                'In the event that availability of our servers is not '
                'restored after a period of time then please report this '
                'problem to New Relic support for further investigation.')

        raise ServerIsUnavailable()

    elif r.status_code != 200:
        if not settings.proxy_host or not settings.proxy_port:
            _logger.warning('An unexpected HTTP response was received from '
                    'the data collector of %r for method %r. The payload for '
                    'the request was %r. If this issue persists then please '
                    'report this problem to New Relic support for further '
                    'investigation.', r.status_code, method, payload)

        else:
            _logger.warning('An unexpected HTTP response was received from '
                    'the data collector of %r for method %r while connecting '
                    'via proxy host %r on port %r with proxy user of %r. '
                    'The payload for the request was %r. If this issue '
                    'persists then please report this problem to New Relic '
                    'support for further investigation.', r.status_code,
                    method, settings.proxy_host, settings.proxy_port,
                    settings.proxy_user, payload)

        raise DiscardDataForRequest()

    # Log details of response payload for debugging. Use the raw JSON
    # content so we know what the original encoded value was.

    duration = time.time() - start

    if settings.debug.log_data_collector_payloads:
        _logger.debug('Valid response from data collector after %.2f '
                'seconds with content=%r.', duration, content)
    elif settings.debug.log_data_collector_calls:
        _logger.debug('Valid response from data collector after %.2f '
                'seconds.', duration)

    # If we got this far we should have a legitimate response from the
    # data collector. The response is JSON so we need to decode it.

    try:
        if six.PY3:
            content = content.decode('UTF-8')

        result = json_decode(content)

    except Exception:
        _logger.exception('Error decoding data for JSON payload for '
                'method %r with payload of %r. Please report this problem '
                'to New Relic support.', method, content)

        if settings.debug.log_malformed_json_data:
            _logger.info('JSON data received from data collector which '
                    'could not be decoded was %r.', content)

        raise DiscardDataForRequest(str(sys.exc_info()[1]))

    # The decoded JSON can be either for a successful response or an
    # error. A successful response has a 'return_value' element and on
    # error an 'exception' element.

    if log_id is not None:
        _log_response(log_id, result)

    if 'return_value' in result:
        return result['return_value']

    error_type = result['exception']['error_type']
    message = result['exception']['message']

    # Now we need to check for server side exceptions. The following
    # exceptions can occur for abnormal events.

    _logger.warning('Received an exception from the data collector where '
            'url=%r, method=%r, license_key=%r, agent_run_id=%r, params=%r, '
            'headers=%r, error_type=%r and message=%r', url, method,
            license_key, agent_run_id, params, headers, error_type,
            message)

    # Technically most server side errors will result in the active
    # agent run being abandoned and so there is no point trying to
    # create a metric for when they occur. Leave this here though to at
    # least log a metric for the case where a completely unexpected
    # server error response is received and the agent run does manage to
    # continue and further requests don't just keep failing. Since we do
    # not even expect the metric to be retained, use the original error
    # type as sent.

    internal_metric('Supportability/Python/Collector/ServerError/'
            '%s' % error_type, 1)

    if error_type == 'NewRelic::Agent::LicenseException':
        _logger.error('Data collector is indicating that an incorrect '
                'license key has been supplied by the agent. The value '
                'which was used by the agent is %r. Please correct any '
                'problem with the license key or report this problem to '
                'New Relic support.', license_key)

        raise DiscardDataForRequest(message)

    elif error_type == 'NewRelic::Agent::PostTooBigException':
        # As far as we know we should never see this type of server side
        # error, as for the JSON API we should always get back an HTTP
        # 413 error response instead.

        internal_metric('Supportability/Python/Collector/Failures', 1)
        internal_metric('Supportability/Python/Collector/Failures/'
                '%s' % connection, 1)

        _logger.warning('Core application is indicating that a request for '
                'method %r was received where the request content size '
                'was over the maximum allowed size limit. The length of '
                'the request content was %d. If this keeps occurring on a '
                'regular basis, please report this problem to New Relic '
                'support for further investigation.', method, len(data))

        raise DiscardDataForRequest(message)

    # Server side exceptions are also used to inform the agent to
    # perform certain actions such as restart when server side
    # configuration has changed for this application or when agent is
    # being disabled remotely for some reason.

    if error_type == 'NewRelic::Agent::ForceRestartException':
        _logger.info('An automatic internal agent restart has been '
                'requested by the data collector for the application '
                'where the agent run was %r. The reason given for the '
                'forced restart is %r.', agent_run_id, message)

        raise ForceAgentRestart(message)

    elif error_type == 'NewRelic::Agent::ForceDisconnectException':
        _logger.critical('Disconnection of the agent has been requested by '
                'the data collector for the application where the '
                'agent run was %r. The reason given for the forced '
                'disconnection is %r. Please contact New Relic support '
                'for further information.', agent_run_id, message)

        raise ForceAgentDisconnect(message)

    # We received an unexpected server side error we don't know what to
    # do with. Ignoring PostTooBigException which we expect that we
    # should never receive, unexpected server side errors are the only
    # ones we record a failure metric for as other server side errors
    # are really commands to have the agent do something.

    internal_metric('Supportability/Python/Collector/Failures', 1)
    internal_metric('Supportability/Python/Collector/Failures/'
            '%s' % connection, 1)

    _logger.warning('An unexpected server error was received from the '
            'data collector for method %r with payload of %r. The error '
            'was of type %r with message %r. If this issue persists '
            'then please report this problem to New Relic support for '
            'further investigation.', method, payload, error_type, message)

    raise DiscardDataForRequest(message)
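
The deflate branch above only engages past the configured compression threshold. A self-contained sketch of that size check and the zlib round trip; the 64KiB figure mirrors the comment above and is illustrative:

import json
import zlib

threshold = 64 * 1024  # illustrative, mirroring the 64KiB comment above
data = json.dumps([{'metric': i} for i in range(10000)])

headers = {'Content-Encoding': 'identity'}
body = data
if len(data) > threshold:
    headers['Content-Encoding'] = 'deflate'
    body = zlib.compress(data.encode('utf-8'), zlib.Z_DEFAULT_COMPRESSION)

# The receiving side inflates only when the deflate header was set.
if headers['Content-Encoding'] == 'deflate':
    assert zlib.decompress(body).decode('utf-8') == data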