Example #1
    def generate_request_headers(transaction):
        """
        Return a list of NewRelic-specific headers as tuples
        [(HEADER_NAME0, HEADER_VALUE0), (HEADER_NAME1, HEADER_VALUE1)]

        """

        if transaction is None:
            return []

        settings = transaction.settings

        if not settings.cross_application_tracer.enabled:
            return []

        encoded_cross_process_id = obfuscate(settings.cross_process_id,
                settings.encoding_key)

        transaction_data = [transaction.guid, transaction.record_tt]
        encoded_transaction = obfuscate(simplejson.dumps(transaction_data,
                    ensure_ascii=True, encoding='Latin-1'),
                settings.encoding_key)

        nr_headers = [('X-NewRelic-ID', encoded_cross_process_id),
                      ('X-NewRelic-Transaction', encoded_transaction)]

        return nr_headers
Example #2
    def generate_request_headers(transaction):
        """
        Return a list of NewRelic-specific headers as tuples
        [(HEADER_NAME0, HEADER_VALUE0), (HEADER_NAME1, HEADER_VALUE1)]

        """

        if transaction is None:
            return []

        settings = transaction.settings

        if not settings.cross_application_tracer.enabled:
            return []

        encoded_cross_process_id = obfuscate(settings.cross_process_id,
                                             settings.encoding_key)

        transaction_data = [transaction.guid, transaction.record_tt]
        encoded_transaction = obfuscate(
            simplejson.dumps(transaction_data,
                             ensure_ascii=True,
                             encoding='Latin-1'), settings.encoding_key)

        nr_headers = [('X-NewRelic-ID', encoded_cross_process_id),
                      ('X-NewRelic-Transaction', encoded_transaction)]

        return nr_headers
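Both examples above delegate to an `obfuscate` helper to encode the cross-process ID and transaction data. A minimal Python 3 sketch of what such a helper can look like is below; the XOR-plus-Base64 scheme and both function names are assumptions for illustration, not code taken from these examples:

    import base64
    from itertools import cycle

    def obfuscate(text, key):
        # Hedged sketch: XOR each payload byte against the cycled key
        # bytes, then Base64-encode so the result is ASCII-safe and can
        # be carried in an HTTP header.
        if not (text and key):
            return ''
        xored = bytes(a ^ b for a, b in zip(text.encode('latin-1'),
                                            cycle(key.encode('latin-1'))))
        return base64.standard_b64encode(xored).decode('ascii')

    def deobfuscate(text, key):
        # Inverse: Base64-decode, then XOR against the same cycled key.
        xored = base64.standard_b64decode(text.encode('ascii'))
        return bytes(a ^ b for a, b in zip(xored,
                cycle(key.encode('latin-1')))).decode('latin-1')

    assert deobfuscate(obfuscate('txn-guid', 'secret'), 'secret') == 'txn-guid'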
Example #3

    def slow_transaction_data(self):
        """Returns a list containing any slow transaction data collected
        during the reporting period.

        NOTE: Currently only the slowest transaction for the reporting
        period is retained.

        """

        if not self.__settings:
            return []

        if not self.__slow_transaction:
            return []

        maximum = self.__settings.agent_limits.transaction_traces_nodes

        transaction_trace = self.__slow_transaction.transaction_trace(
                self, maximum)

        internal_metric('Supportability/StatsEngine/Counts/'
                'transaction_sample_data',
                self.__slow_transaction.trace_node_count)

        data = [transaction_trace,
                list(self.__slow_transaction.string_table.values())]

        if self.__settings.debug.log_transaction_trace_payload:
            _logger.debug('Encoding slow transaction data where '
                    'payload=%r.', data)

        with InternalTrace('Supportability/StatsEngine/JSON/Encode/'
                'transaction_sample_data'):

            json_data = simplejson.dumps(data, ensure_ascii=True,
                    encoding='Latin-1', namedtuple_as_object=False,
                    default=lambda o: list(iter(o)))

        internal_metric('Supportability/StatsEngine/ZLIB/Bytes/'
                'transaction_sample_data', len(json_data))

        with InternalTrace('Supportability/StatsEngine/ZLIB/Compress/'
                'transaction_sample_data'):
            zlib_data = zlib.compress(six.b(json_data))

        with InternalTrace('Supportability/StatsEngine/BASE64/Encode/'
                'transaction_sample_data'):
            pack_data = base64.standard_b64encode(zlib_data)

        root = transaction_trace.root

        trace_data = [[root.start_time,
                root.end_time - root.start_time,
                self.__slow_transaction.path,
                self.__slow_transaction.request_uri,
                pack_data]]

        return trace_data
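The encode path in `slow_transaction_data` is JSON, then zlib, then Base64. A minimal Python 3 sketch of that packing and its inverse is handy for checking that a payload round-trips; the function names here are illustrative, not agent API:

    import base64
    import json
    import zlib

    def pack_payload(data):
        # Same pipeline as above: JSON-encode, deflate-compress, then
        # Base64-encode into an ASCII-safe blob.
        json_data = json.dumps(data, ensure_ascii=True)
        return base64.standard_b64encode(
                zlib.compress(json_data.encode('latin-1')))

    def unpack_payload(packed):
        # Inverse pipeline, useful when inspecting what was sent.
        return json.loads(zlib.decompress(base64.standard_b64decode(packed)))

    assert unpack_payload(pack_payload(['trace', {'node': 1}])) == ['trace', {'node': 1}]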
Example #4

    def slow_sql_data(self):

        if not self.__settings:
            return []

        if not self.__sql_stats_table:
            return []

        maximum = self.__settings.agent_limits.slow_sql_data

        slow_sql_nodes = sorted(six.itervalues(self.__sql_stats_table),
                key=lambda x: x.max_call_time)[-maximum:]

        result = []

        for node in slow_sql_nodes:

            params = {}

            if node.slow_sql_node.stack_trace:
                params['backtrace'] = node.slow_sql_node.stack_trace

            explain_plan = node.slow_sql_node.explain_plan

            if explain_plan:
                params['explain_plan'] = explain_plan

            json_data = simplejson.dumps(params, ensure_ascii=True,
                    encoding='Latin-1', namedtuple_as_object=False,
                    default=lambda o: list(iter(o)))

            params_data = base64.standard_b64encode(
                    zlib.compress(six.b(json_data)))

            data = [node.slow_sql_node.path,
                    node.slow_sql_node.request_uri,
                    node.slow_sql_node.identifier,
                    node.slow_sql_node.formatted,
                    node.slow_sql_node.metric,
                    node.call_count,
                    node.total_call_time * 1000,
                    node.min_call_time * 1000,
                    node.max_call_time * 1000,
                    params_data]

            result.append(data)

        return result
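The `sorted(...)[-maximum:]` expression above is a compact top-N selection: sort ascending by `max_call_time`, then slice off the tail so only the `maximum` slowest nodes remain. The same idiom in isolation, with stand-in values:

    from collections import namedtuple

    Node = namedtuple('Node', ['name', 'max_call_time'])

    nodes = [Node('a', 0.2), Node('b', 1.5), Node('c', 0.7), Node('d', 3.1)]
    maximum = 2

    # Ascending sort, then keep the last `maximum` entries, i.e. the
    # slowest ones (slowest last).
    slowest = sorted(nodes, key=lambda x: x.max_call_time)[-maximum:]
    assert [n.name for n in slowest] == ['b', 'd']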
Example #5

    def transaction_trace_data(self):
        """Returns a list of slow transaction data collected
        during the reporting period.

        """
        if not self.__settings:
            return []

        # Create a set 'traces' that is a union of slow transaction,
        # browser_transactions and xray_transactions. This ensures we don't
        # send duplicates of a transaction.

        traces = set()
        if self.__slow_transaction:
            traces.add(self.__slow_transaction)
        traces.update(self.__browser_transactions)
        traces.update(self.__xray_transactions)

        # Return an empty list if no transactions were captured.

        if not traces:
            return []

        trace_data = []
        maximum = self.__settings.agent_limits.transaction_traces_nodes

        for trace in traces:
            transaction_trace = trace.transaction_trace(self, maximum)

            internal_metric('Supportability/StatsEngine/Counts/'
                            'transaction_sample_data',
                            trace.trace_node_count)

            data = [transaction_trace,
                    list(trace.string_table.values())]

            if self.__settings.debug.log_transaction_trace_payload:
                _logger.debug('Encoding slow transaction data where '
                              'payload=%r.', data)

            with InternalTrace('Supportability/StatsEngine/JSON/Encode/'
                               'transaction_sample_data'):

                json_data = simplejson.dumps(data, ensure_ascii=True,
                        encoding='Latin-1', namedtuple_as_object=False,
                        default=lambda o: list(iter(o)))

            internal_metric('Supportability/StatsEngine/ZLIB/Bytes/'
                            'transaction_sample_data', len(json_data))

            with InternalTrace('Supportability/StatsEngine/ZLIB/Compress/'
                               'transaction_sample_data'):
                zlib_data = zlib.compress(six.b(json_data))

            with InternalTrace('Supportability/StatsEngine/BASE64/Encode/'
                               'transaction_sample_data'):
                pack_data = base64.standard_b64encode(zlib_data)

            root = transaction_trace.root
            xray_id = getattr(trace, 'xray_id', None)

            force_persist = bool(
                    xray_id or trace.rum_trace or trace.record_tt)

            trace_data.append([root.start_time,
                    root.end_time - root.start_time,
                    trace.path,
                    trace.request_uri,
                    pack_data,
                    trace.guid,
                    None,
                    force_persist,
                    xray_id,])

        return trace_data
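The set union at the top of `transaction_trace_data` is what stops a transaction that is simultaneously the slow transaction, a browser transaction, and an x-ray transaction from being encoded and sent three times. The same pattern in isolation, with stand-in identifiers:

    slow_transaction = 'txn-1'
    browser_transactions = ['txn-1', 'txn-2']
    xray_transactions = ['txn-2', 'txn-3']

    traces = set()
    traces.add(slow_transaction)
    traces.update(browser_transactions)
    traces.update(xray_transactions)

    # Each transaction appears exactly once, regardless of how many
    # collections it belonged to.
    assert traces == {'txn-1', 'txn-2', 'txn-3'}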
Example #6

def send_request(session, url, method, license_key, agent_run_id=None,
            payload=()):
    """Constructs and sends a request to the data collector."""

    params = {}
    headers = {}
    config = {}

    settings = global_settings()

    start = time.time()

    # Validate that the license key was actually set and if not replace
    # it with a string which makes it more obvious it was not set.

    if not license_key:
        license_key = 'NO LICENSE KEY WAS SET IN AGENT CONFIGURATION'

    # The agent formats requests and is able to handle responses for
    # protocol version 12.

    params['method'] = method
    params['license_key'] = license_key
    params['protocol_version'] = '12'
    params['marshal_format'] = 'json'

    if agent_run_id:
        params['run_id'] = str(agent_run_id)

    headers['User-Agent'] = USER_AGENT
    headers['Content-Encoding'] = 'identity'

    # Set up definitions for proxy server in case that has been set.

    proxies = proxy_server()

    # At this time we use JSON content encoding for the data being
    # sent. Ensure that normal byte strings are interpreted as Latin-1
    # and that the final result is ASCII so that we don't need to worry
    # about converting back to bytes again. We set the default fallback
    # encoder to treat any iterable as a list. Unfortunately the JSON
    # library can't consume it lazily as an iterable, which means any
    # generator will be consumed up front and everything collected in
    # memory as a list before being converted to JSON.
    #
    # If an error does occur when encoding the JSON, then it isn't
    # likely going to work later in a subsequent request with the same
    # data, even if aggregated with other data, so we need to log the
    # details and then flag that the data should be thrown away. We
    # don't mind being noisy in the log in this situation as it would
    # indicate a problem with the implementation of the agent.

    try:
        with InternalTrace('Supportability/Collector/JSON/Encode/%s' % method):
            data = simplejson.dumps(payload, ensure_ascii=True,
                    encoding='Latin-1', namedtuple_as_object=False,
                    default=lambda o: list(iter(o)))

    except Exception:
        _logger.exception('Error encoding data for JSON payload for '
                'method %r with payload of %r. Please report this problem '
                'to New Relic support.', method, payload)

        raise DiscardDataForRequest(str(sys.exc_info()[1]))

    # Log details of call and/or payload for debugging. Use the JSON
    # encoded value so we know that what is encoded is correct.

    if settings.debug.log_data_collector_payloads:
        _logger.debug('Calling data collector with url=%r, method=%r and '
                'payload=%r.', url, method, data)
    elif settings.debug.log_data_collector_calls:
        _logger.debug('Calling data collector with url=%r and method=%r.',
                url, method)

    # Compress the serialized JSON being sent as content if over 64KiB
    # in size. If less than 2MB in size compress for speed. If over
    # 2MB then compress for smallest size. This parallels what the Ruby
    # agent does.

    if len(data) > 64*1024:
        headers['Content-Encoding'] = 'deflate'
        level = 1 if len(data) < 2000000 else 9

        internal_metric('Supportability/Collector/ZLIB/Bytes/%s' % method,
                len(data))

        with InternalTrace('Supportability/Collector/ZLIB/Compress/'
                '%s' % method):
            data = zlib.compress(six.b(data), level)

    # If there is no requests session object provided for making
    # requests, create one now. We want to close this as soon as we
    # are done with it.

    auto_close_session = False

    if not session:
        session = requests.session()
        auto_close_session = True

    # The 'requests' library can raise a number of exception derived
    # from 'RequestException' before we even manage to get a connection
    # to the data collector.
    #
    # The data collector can then generate a number of different types of
    # HTTP errors for requests. These are:
    #
    # 400 Bad Request - For incorrect method type or incorrectly
    # constructed parameters. We should not get this, and if we do it would
    # likely indicate a problem with the implementation of the agent.
    #
    # 413 Request Entity Too Large - Where the request content was too
    # large. The limits on number of nodes in slow transaction traces
    # should in general prevent this, but not everything has size limits
    # and so rogue data could still blow things out. The same data is not
    # going to work later on in a subsequent request, even if aggregated
    # with other data, so we need to log the details and then flag that
    # data should be thrown away.
    #
    # 415 Unsupported Media Type - This occurs when the JSON which was
    # sent can't be decoded by the data collector. If this is a true
    # problem with the JSON formatting, then sending again, even if
    # aggregated with other data, may not work, so we need to log the
    # details and then flag that data should be thrown away.
    #
    # 503 Service Unavailable - This occurs when the data collector, or
    # the core application, is being restarted and is not in a state to
    # accept requests. It should be a transient issue, so we should be
    # able to retain the data and try again.

    internal_metric('Supportability/Collector/Output/Bytes/%s' % method,
            len(data))

    try:
        # The timeout value in the requests module only applies to
        # the initial connection and not to how long it takes to get
        # back a response.

        timeout = settings.agent_limits.data_collector_timeout

        r = session.post(url, params=params, headers=headers,
                proxies=proxies, timeout=timeout, data=data)

        # Read the content now so we can force close the socket
        # connection as quickly as possible if this is a transient
        # session.

        content = r.content

    except requests.RequestException:
        if not settings.proxy_host or not settings.proxy_port:
            _logger.warning('Data collector is not contactable. This can be '
                    'because of a network issue or because of the data '
                    'collector being restarted. In the event that contact '
                    'cannot be made after a period of time then please '
                    'report this problem to New Relic support for further '
                    'investigation. The error raised was %r.',
                    sys.exc_info()[1])
        else:
            _logger.warning('Data collector is not contactable via the proxy '
                    'host %r on port %r with proxy user of %r. This can be '
                    'because of a network issue or because of the data '
                    'collector being restarted. In the event that contact '
                    'cannot be made after a period of time then please '
                    'report this problem to New Relic support for further '
                    'investigation. The error raised was %r.',
                    settings.proxy_host, settings.proxy_port,
                    settings.proxy_user, sys.exc_info()[1])

        raise RetryDataForRequest(str(sys.exc_info()[1]))

    finally:
        if auto_close_session:
            session.close()
            session = None

    if r.status_code != 200:
        _logger.debug('Received a non-200 HTTP response from the data '
                'collector where url=%r, method=%r, license_key=%r, '
                'agent_run_id=%r, params=%r, headers=%r, status_code=%r '
                'and content=%r.', url, method, license_key, agent_run_id,
                params, headers, r.status_code, content)

    if r.status_code == 400:
        _logger.error('Data collector is indicating that a bad '
                'request has been submitted for url %r, headers of %r, '
                'params of %r and payload of %r. Please report this '
                'problem to New Relic support.', url, headers, params,
                payload)

        raise DiscardDataForRequest()

    elif r.status_code == 413:
        _logger.warning('Data collector is indicating that a request for '
                'method %r was received where the request content size '
                'was over the maximum allowed size limit. The length of '
                'the request content was %d. If this keeps occurring on a '
                'regular basis, please report this problem to New Relic '
                'support for further investigation.', method, len(data))

        raise DiscardDataForRequest()

    elif r.status_code == 415:
        _logger.warning('Data collector is indicating that it was sent '
                'malformed JSON data for method %r. If this keeps occurring '
                'on a regular basis, please report this problem to New '
                'Relic support for further investigation.', method)

        if settings.debug.log_malformed_json_data:
            if headers['Content-Encoding'] == 'deflate':
                data = zlib.decompress(data)

            _logger.info('JSON data which was rejected by the data '
                    'collector was %r.', data)

        raise DiscardDataForRequest(content)

    elif r.status_code == 503:
        _logger.warning('Data collector is unavailable. This can be a '
                'transient issue because of the data collector or our '
                'core application being restarted. If the issue persists '
                'it can also be indicative of a problem with our servers. '
                'In the event that availability of our servers is not '
                'restored after a period of time then please report this '
                'problem to New Relic support for further investigation.')

        raise ServerIsUnavailable()

    elif r.status_code != 200:
        if not settings.proxy_host or not settings.proxy_port:
            _logger.warning('An unexpected HTTP response was received from '
                    'the data collector of %r for method %r. The payload for '
                    'the request was %r. If this issue persists then please '
                    'report this problem to New Relic support for further '
                    'investigation.', r.status_code, method, payload)
        else:
            _logger.warning('An unexpected HTTP response was received from '
                    'the data collector of %r for method %r while connecting '
                    'via proxy host %r on port %r with proxy user of %r. '
                    'The payload for the request was %r. If this issue '
                    'persists then please report this problem to New Relic '
                    'support for further investigation.', r.status_code,
                    method, settings.proxy_host, settings.proxy_port,
                    settings.proxy_user, payload)

        raise DiscardDataForRequest()

    # Log details of response payload for debugging. Use the JSON
    # encoded value so we know what the original encoded value was.

    duration = time.time() - start

    if settings.debug.log_data_collector_payloads:
        _logger.debug('Valid response from data collector after %.2f '
                'seconds with content=%r.', duration, content)
    elif settings.debug.log_data_collector_calls:
        _logger.debug('Valid response from data collector after %.2f '
                'seconds.', duration)

    # If we got this far we should have a legitimate response from the
    # data collector. The response is JSON, so we need to decode it.
    # Everything will come back as Unicode. Make sure all strings are
    # decoded as 'UTF-8'.

    internal_metric('Supportability/Collector/Input/Bytes/%s' % method,
            len(content))

    try:
        with InternalTrace('Supportability/Collector/JSON/Decode/%s' % method):
            result = simplejson.loads(content, encoding='UTF-8')

    except Exception:
        _logger.exception('Error decoding data for JSON payload for '
                'method %r with payload of %r. Please report this problem '
                'to New Relic support.', method, content)

        if settings.debug.log_malformed_json_data:
            _logger.info('JSON data received from data collector which '
                    'could not be decoded was %r.', content)

        raise DiscardDataForRequest(str(sys.exc_info()[1]))

    # The decoded JSON can be either for a successful response or an
    # error. A successful response has a 'return_value' element and an
    # error an 'exception' element.

    if 'return_value' in result:
        return result['return_value']

    error_type = result['exception']['error_type']
    message = result['exception']['message']

    # Now we need to check for server side exceptions. The following
    # exceptions can occur for abnormal events.

    _logger.debug('Received an exception from the data collector where '
            'url=%r, method=%r, license_key=%r, agent_run_id=%r, params=%r, '
            'headers=%r, error_type=%r and message=%r', url, method,
            license_key, agent_run_id, params, headers, error_type,
            message)

    if error_type == 'NewRelic::Agent::LicenseException':
        _logger.error('Data collector is indicating that an incorrect '
                'license key has been supplied by the agent. The value '
                'which was used by the agent is %r. Please correct any '
                'problem with the license key or report this problem to '
                'New Relic support.', license_key)

        raise DiscardDataForRequest(message)

    elif error_type == 'NewRelic::Agent::PostTooBigException':
        _logger.warning('Core application is indicating that a request for '
                'method %r was received where the request content size '
                'was over the maximum allowed size limit. The length of '
                'the request content was %d. If this keeps occurring on a '
                'regular basis, please report this problem to New Relic '
                'support for further investigation.', method, len(data))

        raise DiscardDataForRequest(message)

    # Server side exceptions are also used to inform the agent to
    # perform certain actions, such as restarting when server side
    # configuration has changed for this application or when the agent
    # is being disabled remotely for some reason.

    if error_type == 'NewRelic::Agent::ForceRestartException':
        _logger.info('An automatic internal agent restart has been '
                'requested by the data collector for the application '
                'where the agent run was %r. The reason given for the '
                'forced restart is %r.', agent_run_id, message)

        raise ForceAgentRestart(message)

    elif error_type == 'NewRelic::Agent::ForceDisconnectException':
        _logger.critical('Disconnection of the agent has been requested by '
                'the data collector for the application where the '
                'agent run was %r. The reason given for the forced '
                'disconnection is %r. Please contact New Relic support '
                'for further information.', agent_run_id, message)

        raise ForceAgentDisconnect(message)

    # We received an unexpected server side error we don't know what
    # to do with.

    _logger.warning('An unexpected server error was received from the '
            'data collector for method %r with payload of %r. The error '
            'was of type %r with message %r. If this issue persists '
            'then please report this problem to New Relic support for '
            'further investigation.', method, payload, error_type, message)

    raise DiscardDataForRequest(message)
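The compression policy inside `send_request` (identity under 64KiB; deflate level 1 under 2MB, favouring speed; level 9 beyond, favouring size) can be read as a small standalone helper. A sketch, with the thresholds taken directly from the code above and the helper name being illustrative:

    import zlib

    def choose_body(data, threshold=64 * 1024, size_cutoff=2000000):
        # Mirrors the policy above: no compression for small payloads,
        # fast compression (level 1) under 2MB, smallest output
        # (level 9) for anything larger.
        if len(data) <= threshold:
            return data, 'identity'
        level = 1 if len(data) < size_cutoff else 9
        return zlib.compress(data, level), 'deflate'

    body, encoding = choose_body(b'x' * (128 * 1024))
    assert encoding == 'deflate' and len(body) < 128 * 1024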
Example #7

    def profile_data(self):

        # Generic profiling sessions have to wait for completion before
        # reporting data.
        #
        # Xray profile sessions can send partial profile data on every harvest.

        if ((self.profiler_type == SessionType.GENERIC)
                and (self.state == SessionState.RUNNING)):
            return None

        # We prune the number of nodes sent if we are over the specified
        # limit. This is just to avoid having the response be too large
        # and get rejected by the data collector.

        settings = global_settings()
        self._prune_call_trees(settings.agent_limits.thread_profiler_nodes)

        flat_tree = {}
        thread_count = 0

        for category, bucket in six.iteritems(self.call_buckets):

            # Only flatten buckets that have data in them. No need to send
            # empty buckets.

            if bucket:
                flat_tree[category] = [x.flatten() for x in bucket.values()]
                thread_count += len(bucket)

        # If no profile data was captured, return None instead of sending
        # an encoded empty data structure.

        if thread_count == 0:
            return None

        # Construct the actual final data for sending. The actual call
        # data is turned into JSON, compressed and then base64 encoded at
        # this point to cut its size.

        _logger.debug(
            'Returning partial thread profiling data '
            'for %d transactions with name %r and xray ID of '
            '%r over a period of %.2f seconds and %d samples.',
            self.transaction_count, self.key_txn, self.xray_id,
            time.time() - self.start_time_s, self.sample_count)

        if settings.debug.log_thread_profile_payload:
            _logger.debug('Encoding thread profile data where '
                          'payload=%r.', flat_tree)

        json_call_tree = simplejson.dumps(flat_tree,
                                          ensure_ascii=True,
                                          encoding='Latin-1',
                                          namedtuple_as_object=False)
        encoded_tree = base64.standard_b64encode(
            zlib.compress(six.b(json_call_tree)))

        profile = [[
            self.profile_id, self.start_time_s * 1000,
            (self.actual_stop_time_s or time.time()) * 1000, self.sample_count,
            encoded_tree, thread_count, 0, self.xray_id
        ]]

        # Reset the data structures to their defaults. For xray profile
        # sessions we report the partial call tree at every harvest cycle,
        # so the data structures must be reset to avoid aggregating call
        # trees across harvest cycles.

        self.reset_profile_data()
        return profile
Example #8
    def profile_data(self):

        # Generic profiling sessions have to wait for completion before
        # reporting data.
        #
        # Xray profile sessions can send partial profile data on every harvest.

        if ((self.profiler_type == SessionType.GENERIC) and
                (self.state == SessionState.RUNNING)):
            return None

        # We prune the number of nodes sent if we are over the specified
        # limit. This is just to avoid having the response be too large
        # and get rejected by the data collector.

        settings = global_settings()
        self._prune_call_trees(settings.agent_limits.thread_profiler_nodes)

        flat_tree = {}
        thread_count = 0

        for category, bucket in six.iteritems(self.call_buckets):

            # Only flatten buckets that have data in them. No need to send
            # empty buckets.

            if bucket:
                flat_tree[category] = [x.flatten() for x in bucket.values()]
                thread_count += len(bucket)

        # If no profile data was captured, return None instead of sending
        # an encoded empty data structure.

        if thread_count == 0:
            return None

        # Construct the actual final data for sending. The actual call
        # data is turned into JSON, compressed and then base64 encoded at
        # this point to cut its size.

        _logger.debug('Returning partial thread profiling data '
                'for %d transactions with name %r and xray ID of '
                '%r over a period of %.2f seconds and %d samples.',
                self.transaction_count, self.key_txn, self.xray_id,
                time.time()-self.start_time_s, self.sample_count)

        if settings.debug.log_thread_profile_payload:
            _logger.debug('Encoding thread profile data where '
                    'payload=%r.', flat_tree)

        json_call_tree = simplejson.dumps(flat_tree, ensure_ascii=True,
                encoding='Latin-1', namedtuple_as_object=False)
        encoded_tree = base64.standard_b64encode(
                zlib.compress(six.b(json_call_tree)))

        profile = [[self.profile_id, self.start_time_s*1000,
            (self.actual_stop_time_s or time.time()) * 1000, self.sample_count,
            encoded_tree, thread_count, 0, self.xray_id]]

        # Reset the data structures to their defaults. For xray profile
        # sessions we report the partial call tree at every harvest cycle,
        # so the data structures must be reset to avoid aggregating call
        # trees across harvest cycles.

        self.reset_profile_data()
        return profile
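Both variants of `profile_data` build `flat_tree` and `thread_count` by walking the call buckets and skipping the empty ones. The same loop in isolation, with a stand-in node type (the `flatten` method and the bucket shapes are assumptions for illustration):

    class Node(object):
        def __init__(self, payload):
            self.payload = payload

        def flatten(self):
            return self.payload

    call_buckets = {
        'REQUEST': {1: Node(['root', 10]), 2: Node(['root', 4])},
        'BACKGROUND': {},  # empty bucket: skipped, not reported
    }

    flat_tree = {}
    thread_count = 0

    for category, bucket in call_buckets.items():
        if bucket:
            flat_tree[category] = [x.flatten() for x in bucket.values()]
            thread_count += len(bucket)

    assert thread_count == 2 and 'BACKGROUND' not in flat_tree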
Example #9
    def process_response(self, status, response_headers, *args):
        """Processes response status and headers, extracting any
        details required and returning a set of additional headers
        to merge into that being returned for the web transaction.

        """

        additional_headers = []

        # Extract the HTTP status response code.

        try:
            self.response_code = int(status.split(' ')[0])
        except Exception:
            pass

        # Extract response content length for inclusion in custom
        # parameters returned for slow transactions and errors.

        try:
            header = [
                x for x in response_headers if x[0].lower() == 'content-length'
            ][-1:]

            if header:
                self._response_properties['CONTENT_LENGTH'] = header[0][1]
        except Exception:
            pass

        # Generate metrics and response headers for inbound cross
        # process web external calls.

        if self.client_cross_process_id is not None:
            # Need to work out queueing time and duration up to this
            # point for inclusion in metrics and response header. If the
            # recording of the transaction had been prematurely stopped
            # via an API call, only return time up until that call was
            # made so it will match what is reported as duration for the
            # transaction.

            if self.queue_start:
                queue_time = self.start_time - self.queue_start
            else:
                queue_time = 0

            if self.end_time:
                duration = self.end_time - self.start_time
            else:
                duration = time.time() - self.start_time

            # Generate the metric identifying the caller.

            metric_name = 'ClientApplication/%s/all' % (
                self.client_cross_process_id)
            self.record_custom_metric(metric_name, duration)

            # Generate the additional response headers which provide
            # information back to the caller. We need to freeze the
            # transaction name before adding to the header.

            self._freeze_path()

            payload = (self._settings.cross_process_id, self.path, queue_time,
                       duration, self._read_length, self.guid, self.record_tt)
            app_data = simplejson.dumps(payload,
                                        ensure_ascii=True,
                                        encoding='Latin-1')

            additional_headers.append(('X-NewRelic-App-Data',
                                       obfuscate(app_data,
                                                 self._settings.encoding_key)))

        # The additional headers returned need to be merged into the
        # original response headers passed back by the application.

        return additional_headers
Example #10
    def process_response(self, status, response_headers, *args):
        """Processes response status and headers, extracting any
        details required and returning a set of additional headers
        to merge into that being returned for the web transaction.

        """

        additional_headers = []

        # Extract the HTTP status response code.

        try:
            self.response_code = int(status.split(' ')[0])
        except Exception:
            pass

        # Extract response content length for inclusion in custom
        # parameters returned for slow transactions and errors.

        try:
            header = [x for x in response_headers
                    if x[0].lower() == 'content-length'][-1:]

            if header:
                self._response_properties['CONTENT_LENGTH'] = header[0][1]
        except Exception:
            pass

        # Generate metrics and response headers for inbound cross
        # process web external calls.

        if self.client_cross_process_id is not None:
            # Need to work out queueing time and duration up to this
            # point for inclusion in metrics and response header. If the
            # recording of the transaction had been prematurely stopped
            # via an API call, only return time up until that call was
            # made so it will match what is reported as duration for the
            # transaction.

            if self.queue_start:
                queue_time = self.start_time - self.queue_start
            else:
                queue_time = 0

            if self.end_time:
                duration = self.end_time - self.start_time
            else:
                duration = time.time() - self.start_time

            # Generate the metric identifying the caller.

            metric_name = 'ClientApplication/%s/all' % (
                    self.client_cross_process_id)
            self.record_custom_metric(metric_name, duration)

            # Generate the additional response headers which provide
            # information back to the caller. We need to freeze the
            # transaction name before adding to the header.

            self._freeze_path()

            payload = (self._settings.cross_process_id, self.path, queue_time,
                    duration, self._read_length, self.guid, self.record_tt)
            app_data = simplejson.dumps(payload, ensure_ascii=True,
                    encoding='Latin-1')

            additional_headers.append(('X-NewRelic-App-Data', obfuscate(
                    app_data, self._settings.encoding_key)))

        # The additional headers returned need to be merged into the
        # original response headers passed back by the application.

        return additional_headers
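`process_response` returns the extra headers rather than mutating the response itself, leaving the merge to the caller. In a WSGI stack that merge typically happens in a `start_response` wrapper; the middleware below is a hedged sketch of that shape, not the agent's actual wrapper, and the `transaction` parameter is assumed to be an object exposing `process_response` as above:

    def merge_headers_middleware(app, transaction):
        # Wrap start_response so the additional headers computed by
        # process_response() are appended before the response goes out.
        def wrapped_app(environ, start_response):
            def _start_response(status, response_headers, *args):
                extra = transaction.process_response(
                        status, response_headers, *args)
                return start_response(status, response_headers + extra, *args)
            return app(environ, _start_response)
        return wrapped_app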