    def slow_transaction_data(self):
        """Returns a list containing any slow transaction data collected
        during the reporting period.

        NOTE Currently only the slowest transaction for the reporting
        period is retained.

        """

        if not self.__settings:
            return []

        if not self.__slow_transaction:
            return []

        maximum = self.__settings.agent_limits.transaction_traces_nodes

        transaction_trace = self.__slow_transaction.transaction_trace(
                self, maximum)

        internal_metric('Supportability/StatsEngine/Counts/'
                'transaction_sample_data',
                self.__slow_transaction.trace_node_count)

        data = [transaction_trace,
                list(self.__slow_transaction.string_table.values())]

        if self.__settings.debug.log_transaction_trace_payload:
            _logger.debug('Encoding slow transaction data where '
                    'payload=%r.', data)

        with InternalTrace('Supportability/StatsEngine/JSON/Encode/'
                'transaction_sample_data'):

            json_data = simplejson.dumps(data, ensure_ascii=True,
                    encoding='Latin-1', namedtuple_as_object=False,
                    default=lambda o: list(iter(o)))

        internal_metric('Supportability/StatsEngine/ZLIB/Bytes/'
                'transaction_sample_data', len(json_data))

        with InternalTrace('Supportability/StatsEngine/ZLIB/Compress/'
                'transaction_sample_data'):
            zlib_data = zlib.compress(six.b(json_data))

        with InternalTrace('Supportability/StatsEngine/BASE64/Encode/'
                'transaction_sample_data'):
            pack_data = base64.standard_b64encode(zlib_data)

        root = transaction_trace.root

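        # Each record is: start time, duration, transaction name,
        # request URI and the packed trace payload.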
        trace_data = [[root.start_time,
                root.end_time - root.start_time,
                self.__slow_transaction.path,
                self.__slow_transaction.request_uri,
                pack_data]]

        return trace_data
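
# The encode/compress/encode pipeline above can be summarised in a minimal
# standalone sketch. It uses the stdlib json module rather than simplejson,
# and the helper name pack_payload() is an assumed name for illustration
# only, not part of the agent code.

import base64
import json
import zlib

def pack_payload(data):
    # Produce ASCII-safe JSON, falling back to list() for any iterable
    # the encoder does not natively understand.
    json_data = json.dumps(data, ensure_ascii=True,
            default=lambda o: list(iter(o)))

    # Compress the JSON and wrap it in base64 for transport.
    return base64.standard_b64encode(zlib.compress(json_data.encode('ascii')))

packed = pack_payload([['trace'], ['strings']])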
Example #2
def send_request(session,
                 url,
                 method,
                 license_key,
                 agent_run_id=None,
                 payload=()):
    """Constructs and sends a request to the data collector."""

    params = {}
    headers = {}
    config = {}

    settings = global_settings()

    start = time.time()

    # Validate that the license key was actually set and if not replace
    # it with a string which makes it more obvious it was not set.

    if not license_key:
        license_key = 'NO LICENSE KEY WAS SET IN AGENT CONFIGURATION'

    # The agent formats requests and is able to handle responses for
    # protocol version 12.

    params['method'] = method
    params['license_key'] = license_key
    params['protocol_version'] = '12'
    params['marshal_format'] = 'json'

    if agent_run_id:
        params['run_id'] = str(agent_run_id)

    headers['User-Agent'] = USER_AGENT
    headers['Content-Encoding'] = 'identity'

    # Set up definitions for the proxy server in case one has been set.

    proxies = proxy_server()

    # At this time we use JSON content encoding for the data being sent.
    # If an error does occur when encoding the JSON, then it isn't
    # likely going to work later in a subsequent request with the same
    # data, even if aggregated with other data, so we need to log the
    # details and then flag that the data should be thrown away. We
    # don't mind being noisy in the log in this situation as it would
    # indicate a problem with the implementation of the agent.

    try:
        with InternalTrace('Supportability/Collector/JSON/Encode/%s' % method):
            data = json_encode(payload)

    except Exception:
        _logger.exception(
            'Error encoding data for JSON payload for '
            'method %r with payload of %r. Please report this problem '
            'to New Relic support.', method, payload)

        raise DiscardDataForRequest(str(sys.exc_info()[1]))

    # Log details of the call and/or payload for debugging. Use the JSON
    # encoded value so we know that what is encoded is correct.

    if settings.debug.log_data_collector_payloads:
        _logger.debug(
            'Calling data collector with url=%r, method=%r and '
            'payload=%r.', url, method, data)
    elif settings.debug.log_data_collector_calls:
        _logger.debug('Calling data collector with url=%r and method=%r.', url,
                      method)

    # Compress the serialized JSON being sent as content if it is over
    # 64KiB in size. If it is less than 2MB in size, compress for speed
    # (level 1); if it is over 2MB, compress for smallest size (level
    # 9). For example, a 100KiB payload would be compressed at level 1,
    # while a 3MB payload would use level 9. This parallels what the
    # Ruby agent does.

    if len(data) > 64 * 1024:
        headers['Content-Encoding'] = 'deflate'
        level = 1 if len(data) < 2000000 else 9

        internal_metric('Supportability/Collector/ZLIB/Bytes/%s' % method,
                        len(data))

        with InternalTrace('Supportability/Collector/ZLIB/Compress/'
                           '%s' % method):
            data = zlib.compress(six.b(data), level)

    # If no requests session object is provided for making requests,
    # create one now. We want to close this as soon as we are done
    # with it.

    auto_close_session = False

    if not session:
        session = requests.session()
        auto_close_session = True

    # The 'requests' library can raise a number of exceptions derived
    # from 'RequestException' before we even manage to get a connection
    # to the data collector.
    #
    # The data collector can then generate a number of different types
    # of HTTP errors for requests. These are:
    #
    # 400 Bad Request - For an incorrect method type or incorrectly
    # constructed parameters. We should not get this, and if we do it
    # would likely indicate a problem with the implementation of the
    # agent.
    #
    # 413 Request Entity Too Large - Where the request content was too
    # large. The limits on number of nodes in slow transaction traces
    # should in general prevent this, but not everything has size limits
    # and so rogue data could still blow things out. The same data is
    # not going to work later in a subsequent request, even if
    # aggregated with other data, so we need to log the details and
    # then flag that the data should be thrown away.
    #
    # 415 Unsupported Media Type - This occurs when the JSON which was
    # sent can't be decoded by the data collector. If this is a true
    # problem with the JSON formatting, then sending again, even if
    # aggregated with other data, may not work, so we need to log the
    # details and then flag that data should be thrown away.
    #
    # 503 Service Unavailable - This occurs when the data collector, or
    # the core application, is being restarted and is not in a state to
    # accept requests. It should be a transient issue, so we should be
    # able to retain the data and try again.

    internal_metric('Supportability/Collector/Output/Bytes/%s' % method,
                    len(data))

    try:
        # The timeout value in the requests module is only on
        # the initial connection and doesn't apply to how long
        # it takes to get back a response.

        timeout = settings.agent_limits.data_collector_timeout

        r = session.post(url,
                         params=params,
                         headers=headers,
                         proxies=proxies,
                         timeout=timeout,
                         data=data)

        # Read the content now so we can force close the socket
        # connection if this is a transient session as quickly
        # as possible.

        content = r.content

    except requests.RequestException:
        if not settings.proxy_host or not settings.proxy_port:
            _logger.warning(
                'Data collector is not contactable. This can be '
                'because of a network issue or because of the data '
                'collector being restarted. In the event that contact '
                'cannot be made after a period of time then please '
                'report this problem to New Relic support for further '
                'investigation. The error raised was %r.',
                sys.exc_info()[1])
        else:
            _logger.warning(
                'Data collector is not contactable via the proxy '
                'host %r on port %r with proxy user of %r. This can be '
                'because of a network issue or because of the data '
                'collector being restarted. In the event that contact '
                'cannot be made after a period of time then please '
                'report this problem to New Relic support for further '
                'investigation. The error raised was %r.', settings.proxy_host,
                settings.proxy_port, settings.proxy_user,
                sys.exc_info()[1])

        raise RetryDataForRequest(str(sys.exc_info()[1]))

    finally:
        if auto_close_session:
            session.close()
            session = None

    if r.status_code != 200:
        _logger.debug(
            'Received a non 200 HTTP response from the data '
            'collector where url=%r, method=%r, license_key=%r, '
            'agent_run_id=%r, params=%r, headers=%r, status_code=%r '
            'and content=%r.', url, method, license_key, agent_run_id, params,
            headers, r.status_code, content)

    if r.status_code == 400:
        _logger.error(
            'Data collector is indicating that a bad '
            'request has been submitted for url %r, headers of %r, '
            'params of %r and payload of %r. Please report this '
            'problem to New Relic support.', url, headers, params, payload)

        raise DiscardDataForRequest()

    elif r.status_code == 413:
        _logger.warning(
            'Data collector is indicating that a request for '
            'method %r was received where the request content size '
            'was over the maximum allowed size limit. The length of '
            'the request content was %d. If this keeps occurring on a '
            'regular basis, please report this problem to New Relic '
            'support for further investigation.', method, len(data))

        raise DiscardDataForRequest()

    elif r.status_code == 415:
        _logger.warning(
            'Data collector is indicating that it was sent '
            'malformed JSON data for method %r. If this keeps occurring '
            'on a regular basis, please report this problem to New '
            'Relic support for further investigation.', method)

        if settings.debug.log_malformed_json_data:
            if headers['Content-Encoding'] == 'deflate':
                data = zlib.decompress(data)

            _logger.info(
                'JSON data which was rejected by the data '
                'collector was %r.', data)

        raise DiscardDataForRequest(content)

    elif r.status_code == 503:
        _logger.warning(
            'Data collector is unavailable. This can be a '
            'transient issue because of the data collector or our '
            'core application being restarted. If the issue persists '
            'it can also be indicative of a problem with our servers. '
            'In the event that availability of our servers is not '
            'restored after a period of time then please report this '
            'problem to New Relic support for further investigation.')

        raise ServerIsUnavailable()

    elif r.status_code != 200:
        if not settings.proxy_host or not settings.proxy_port:
            _logger.warning(
                'An unexpected HTTP response was received from '
                'the data collector of %r for method %r. The payload for '
                'the request was %r. If this issue persists then please '
                'report this problem to New Relic support for further '
                'investigation.', r.status_code, method, payload)
        else:
            _logger.warning(
                'An unexpected HTTP response was received from '
                'the data collector of %r for method %r while connecting '
                'via proxy host %r on port %r with proxy user of %r. '
                'The payload for the request was %r. If this issue '
                'persists then please report this problem to New Relic '
                'support for further investigation.', r.status_code, method,
                settings.proxy_host, settings.proxy_port, settings.proxy_user,
                payload)

        raise DiscardDataForRequest()

    # Log details of the response payload for debugging. Use the JSON
    # encoded value so we know what the original encoded value was.

    duration = time.time() - start

    if settings.debug.log_data_collector_payloads:
        _logger.debug(
            'Valid response from data collector after %.2f '
            'seconds with content=%r.', duration, content)
    elif settings.debug.log_data_collector_calls:
        _logger.debug(
            'Valid response from data collector after %.2f '
            'seconds.', duration)

    # If we got this far we should have a legitimate response from the
    # data collector. The response is JSON, so we need to decode it.

    internal_metric('Supportability/Collector/Input/Bytes/%s' % method,
                    len(content))

    try:
        with InternalTrace('Supportability/Collector/JSON/Decode/%s' % method):
            if six.PY3:
                content = content.decode('UTF-8')

            result = json_decode(content)

    except Exception:
        _logger.exception(
            'Error decoding data for JSON payload for '
            'method %r with payload of %r. Please report this problem '
            'to New Relic support.', method, content)

        if settings.debug.log_malformed_json_data:
            _logger.info(
                'JSON data received from data collector which '
                'could not be decoded was %r.', content)

        raise DiscardDataForRequest(str(sys.exc_info()[1]))

    # The decoded JSON can be either for a successful response or an
    # error. A successful response has a 'return_value' element and an
    # error an 'exception' element.
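    #
    # For illustration, the two shapes look roughly like the following
    # hypothetical values (not captured from a real data collector):
    #
    #   {"return_value": {"agent_run_id": 1234}}
    #   {"exception": {"error_type": "...", "message": "..."}}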

    if 'return_value' in result:
        return result['return_value']

    error_type = result['exception']['error_type']
    message = result['exception']['message']

    # Now we need to check for server side exceptions. The following
    # exceptions can occur for abnormal events.

    _logger.debug(
        'Received an exception from the data collector where '
        'url=%r, method=%r, license_key=%r, agent_run_id=%r, params=%r, '
        'headers=%r, error_type=%r and message=%r', url, method, license_key,
        agent_run_id, params, headers, error_type, message)

    if error_type == 'NewRelic::Agent::LicenseException':
        _logger.error(
            'Data collector is indicating that an incorrect '
            'license key has been supplied by the agent. The value '
            'which was used by the agent is %r. Please correct any '
            'problem with the license key or report this problem to '
            'New Relic support.', license_key)

        raise DiscardDataForRequest(message)

    elif error_type == 'NewRelic::Agent::PostTooBigException':
        _logger.warning(
            'Core application is indicating that a request for '
            'method %r was received where the request content size '
            'was over the maximum allowed size limit. The length of '
            'the request content was %d. If this keeps occurring on a '
            'regular basis, please report this problem to New Relic '
            'support for further investigation.', method, len(data))

        raise DiscardDataForRequest(message)

    # Server side exceptions are also used to inform the agent to
    # perform certain actions, such as restarting when server side
    # configuration has changed for this application, or when the agent
    # is being disabled remotely for some reason.

    if error_type == 'NewRelic::Agent::ForceRestartException':
        _logger.info(
            'An automatic internal agent restart has been '
            'requested by the data collector for the application '
            'where the agent run was %r. The reason given for the '
            'forced restart is %r.', agent_run_id, message)

        raise ForceAgentRestart(message)

    elif error_type == 'NewRelic::Agent::ForceDisconnectException':
        _logger.critical(
            'Disconnection of the agent has been requested by '
            'the data collector for the application where the '
            'agent run was %r. The reason given for the forced '
            'disconnection is %r. Please contact New Relic support '
            'for further information.', agent_run_id, message)

        raise ForceAgentDisconnect(message)

    # We received an unexpected server side error we don't know what
    # to do with.

    _logger.warning(
        'An unexpected server error was received from the '
        'data collector for method %r with payload of %r. The error '
        'was of type %r with message %r. If this issue persists '
        'then please report this problem to New Relic support for '
        'further investigation.', method, payload, error_type, message)

    raise DiscardDataForRequest(message)
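
# A hypothetical invocation of send_request(), for illustration only. The
# method name 'metric_data' and the variables below are placeholders rather
# than values taken from this code.

session = requests.session()
try:
    result = send_request(session, collector_url(), 'metric_data',
            license_key, agent_run_id, payload)
finally:
    session.close()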
Example #3
    def transaction_trace_data(self):
        """Returns a list of slow transaction data collected
        during the reporting period.

        """
        if not self.__settings:
            return []

        # Create a set 'traces' that is a union of slow transaction,
        # browser_transactions and xray_transactions. This ensures we don't
        # send duplicates of a transaction.

        traces = set()
        if self.__slow_transaction:
            traces.add(self.__slow_transaction)
        traces.update(self.__browser_transactions)
        traces.update(self.__xray_transactions)

        # Return an empty list if no transactions were captured.

        if not traces:
            return []

        trace_data = []
        maximum = self.__settings.agent_limits.transaction_traces_nodes

        for trace in traces:
            transaction_trace = trace.transaction_trace(self, maximum)

            internal_metric(
                'Supportability/StatsEngine/Counts/'
                'transaction_sample_data', trace.trace_node_count)

            data = [transaction_trace,
                    list(trace.string_table.values())]

            if self.__settings.debug.log_transaction_trace_payload:
                _logger.debug(
                    'Encoding slow transaction data where '
                    'payload=%r.', data)

            with InternalTrace('Supportability/StatsEngine/JSON/Encode/'
                               'transaction_sample_data'):

                json_data = simplejson.dumps(data,
                                             ensure_ascii=True,
                                             encoding='Latin-1',
                                             namedtuple_as_object=False,
                                             default=lambda o: list(iter(o)))

            internal_metric(
                'Supportability/StatsEngine/ZLIB/Bytes/'
                'transaction_sample_data', len(json_data))

            with InternalTrace('Supportability/StatsEngine/ZLIB/Compress/'
                               'transaction_sample_data'):
                zlib_data = zlib.compress(six.b(json_data))

            with InternalTrace('Supportability/StatsEngine/BASE64/Encode/'
                               'transaction_sample_data'):
                pack_data = base64.standard_b64encode(zlib_data)

            root = transaction_trace.root
            xray_id = getattr(trace, 'xray_id', None)

            force_persist = bool(
                    xray_id or trace.rum_trace or trace.record_tt)

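            # Each trace record is: start time, duration, transaction
            # name, request URI, the packed trace payload, the GUID, a
            # reserved field, the force-persist flag and the X-Ray
            # session id (if any).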
            trace_data.append([
                root.start_time,
                root.end_time - root.start_time,
                trace.path,
                trace.request_uri,
                pack_data,
                trace.guid,
                None,
                force_persist,
                xray_id,
            ])

        return trace_data
Example #5
    def record_transaction(self, transaction):
        """Record any apdex and time metrics for the transaction as
        well as any errors which occurred for the transaction. If the
        transaction qualifies to become the slow transaction remember
        it for later.

        """

        if not self.__settings:
            return

        settings = self.__settings

        error_collector = settings.error_collector
        transaction_tracer = settings.transaction_tracer
        slow_sql = settings.slow_sql

        # Record the apdex, value and time metrics generated from the
        # transaction. Whether time metrics are reported as distinct
        # metrics or into a rollup is in part controlled via settings
        # for the minimum number of unique metrics to be reported, and
        # then whether a metric is over a time threshold calculated as
        # a percentage of overall request time, up to a maximum number
        # of unique metrics. This is intended to limit how many metrics
        # are reported for each transaction and to cut down on an
        # explosion of unique metric names. The limits and thresholds
        # are applied after the metrics are reverse sorted based on the
        # exclusive times for each metric. This ensures that the
        # metrics with the greatest exclusive time are retained over
        # those with lesser time. Such metrics get reported into the
        # performance breakdown tab for specific web transactions.
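        #
        # For illustration, the pruning described above amounts to
        # something like the following hypothetical sketch (assumed
        # attribute and variable names, not the actual implementation):
        #
        #   metrics.sort(key=lambda m: m.exclusive_time, reverse=True)
        #   reported, rolled_up = metrics[:limit], metrics[limit:]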

        with InternalTrace(
                'Supportability/TransactionNode/Calls/apdex_metrics'):
            self.record_apdex_metrics(transaction.apdex_metrics(self))

        with InternalTrace(
                'Supportability/TransactionNode/Calls/value_metrics'):
            self.merge_custom_metrics(transaction.custom_metrics.metrics())

        with InternalTrace(
                'Supportability/TransactionNode/Calls/time_metrics'):
            self.record_time_metrics(transaction.time_metrics(self))

        # Capture any errors if error collection is enabled.
        # Only retain maximum number allowed per harvest.

        if (error_collector.enabled and settings.collect_errors
                and len(self.__transaction_errors) <
                settings.agent_limits.errors_per_harvest):
            with InternalTrace(
                    'Supportability/TransactionNode/Calls/error_details'):
                self.__transaction_errors.extend(transaction.error_details())

                self.__transaction_errors = self.__transaction_errors[
                        :settings.agent_limits.errors_per_harvest]

        # Capture any sql traces if transaction tracer enabled.

        if slow_sql.enabled and settings.collect_traces:
            with InternalTrace(
                    'Supportability/TransactionNode/Calls/slow_sql_nodes'):
                for node in transaction.slow_sql_nodes(self):
                    self.record_slow_sql_node(node)

        # Remember as slowest transaction if transaction tracer
        # is enabled, it is over the threshold and slower than
        # any existing transaction seen for this period and in
        # the historical snapshot of slow transactions, plus
        # recording of transaction trace for this transaction
        # has not been suppressed.

        if (not transaction.suppress_transaction_trace
                and transaction_tracer.enabled and settings.collect_traces):

            # Transactions saved for xray session do not depend on the
            # transaction threshold.

            self._update_xray_transaction(transaction)

            threshold = transaction_tracer.transaction_threshold

            if threshold is None:
                threshold = transaction.apdex_t * 4

            if transaction.duration >= threshold:
                self._update_slow_transaction(transaction)
                self._update_browser_transaction(transaction)

        # Create the transaction record summarising key data for later
        # analytics. Only do this for web transactions at this point,
        # as it is not clear whether it needs to be done for other
        # transactions, given that the field names in the record are
        # based on web transaction metric names.

        if (settings.collect_analytics_events
                and settings.analytics_events.enabled):

            if (transaction.type == 'WebTransaction'
                    and settings.analytics_events.transactions.enabled):

                record = {}
                params = {}

                # First remember the user's custom parameters. We only
                # retain those which have a string type for the key and
                # a string or numeric type for the value.

                if settings.analytics_events.capture_attributes:
                    for key, value in transaction.custom_params.items():
                        if not isinstance(key, six.string_types):
                            continue
                        if (not isinstance(value, six.string_types)
                                and not isinstance(value, float)
                                and not isinstance(value, six.integer_types)):
                            continue
                        params[key] = value

                # Now we add the agent's own values so they overwrite
                # the user's values if the same key name is used.

                name = self.__sampled_data_set.intern(transaction.path)

                record['type'] = 'Transaction'
                record['name'] = name
                record['timestamp'] = transaction.start_time
                record['duration'] = transaction.duration

                def _update_entry(source, target):
                    try:
                        record[target] = self.__stats_table[(
                            source, '')].total_call_time
                    except KeyError:
                        pass

                _update_entry('HttpDispatcher', 'webDuration')
                _update_entry('WebFrontend/QueueTime', 'queueDuration')

                _update_entry('External/all', 'externalDuration')
                _update_entry('Database/all', 'databaseDuration')
                _update_entry('Memcache/all', 'memcacheDuration')

                self.__sampled_data_set.add([record, params])
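
                # For illustration, a resulting sample might look like
                # this (hypothetical values):
                #
                #   [{'type': 'Transaction',
                #     'name': 'WebTransaction/Uri/index',
                #     'timestamp': 1400000000.0, 'duration': 0.25,
                #     'webDuration': 0.25}, {'custom_key': 'value'}]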
Example #6
    def create_session(cls, license_key, app_name, linked_applications,
                       environment, settings):
        """Registers the agent for the specified application with the data
        collector and retrieves the server side configuration. Returns a
        session object if successful through which subsequent calls to the
        data collector are made. If unsuccessful then None is returned.

        """

        start = time.time()

        # If no license key was provided in the call, fall back to
        # using the one from the agent configuration file or
        # environment variables. Flag an error if the result still
        # seems invalid.

        if not license_key:
            license_key = global_settings().license_key

        if not license_key:
            _logger.error(
                'A valid account license key cannot be found. '
                'Has a license key been specified in the agent configuration '
                'file or via the NEW_RELIC_LICENSE_KEY environment variable?')

        try:
            # First we need to ask the primary data collector which of
            # the many data collector instances we should use for this
            # agent run.

            _logger.debug(
                'Connecting to data collector to register agent '
                'with license_key=%r, app_name=%r, '
                'linked_applications=%r, environment=%r and settings=%r.',
                license_key, app_name, linked_applications, environment,
                settings)

            url = collector_url()

            with InternalTrace('Supportability/Python/Collector/Calls/'
                               'get_redirect_host'):
                redirect_host = cls.send_request(None, url,
                                                 'get_redirect_host',
                                                 license_key)

            # Then we perform a connect to the actual data collector host
            # we need to use. All communications after this point should go
            # to the secondary data collector.
            #
            # We use the global requests session object for now as
            # harvests for different applications are all done in turn.
            # We will need to change this if we use multiple threads,
            # as currently we force the session object to maintain only
            # a single connection to ensure that keep alive is
            # effective.

            app_names = [app_name] + linked_applications

            local_config = {}

            local_config['host'] = socket.gethostname()
            local_config['pid'] = os.getpid()
            local_config['language'] = 'python'
            local_config['app_name'] = app_names
            local_config['identifier'] = ','.join(app_names)
            local_config['agent_version'] = version
            local_config['environment'] = environment

            connect_settings = {}
            security_settings = {}

            connect_settings['browser_monitoring.loader'] = (
                settings['browser_monitoring.loader'])
            connect_settings['browser_monitoring.debug'] = (
                settings['browser_monitoring.debug'])

            security_settings['capture_params'] = settings['capture_params']
            security_settings['transaction_tracer'] = {}
            security_settings['transaction_tracer']['record_sql'] = (
                settings['transaction_tracer.record_sql'])

            local_config['settings'] = connect_settings
            local_config['security_settings'] = security_settings

            local_config['high_security'] = settings['high_security']
            local_config['labels'] = settings['labels']

            display_name = settings['process_host.display_name']

            if display_name is None:
                local_config['display_name'] = local_config['host']
            else:
                local_config['display_name'] = display_name

            payload = (local_config, )

            url = collector_url(redirect_host)

            with InternalTrace('Supportability/Python/Collector/Calls/'
                               'connect'):
                server_config = cls.send_request(None, url, 'connect',
                                                 license_key, None, payload)

            # Apply High Security Mode to server_config, so the local
            # security settings won't get overwritten when we overlay
            # the server settings on top of them.

            server_config = apply_high_security_mode_fixups(
                settings, server_config)

            # The agent configuration for the application is
            # constructed by taking a snapshot of the locally
            # constructed configuration and overlaying it with that
            # from the server.

            application_config = create_settings_snapshot(server_config)

        except NetworkInterfaceException:
            # The reasons for errors of this type have already been
            # logged. No matter what the error, we just pass back None.
            # The upper layer needs to count how many successive times
            # this has failed and escalate things with a more severe
            # error.

            pass

        except Exception:
            # Any other errors are going to be unexpected and likely will
            # indicate an issue with the implementation of the agent.

            _logger.exception(
                'Unexpected exception when attempting to '
                'register the agent with the data collector. Please '
                'report this problem to New Relic support for further '
                'investigation.')

            pass

        else:
            # Everything is fine, so we create the session object
            # through which subsequent communication with the data
            # collector will be done.

            session = cls(url, license_key, application_config)

            duration = time.time() - start

            # Log successful agent registration and any server side messages.

            _logger.info(
                'Successfully registered New Relic Python agent '
                'where app_name=%r, pid=%r, redirect_host=%r and '
                'agent_run_id=%r, in %.2f seconds.', app_name, os.getpid(),
                redirect_host, session.agent_run_id, duration)

            if getattr(application_config, 'high_security', False):
                _logger.info('High Security Mode is being applied to all '
                             'communications between the agent and the data '
                             'collector for this session.')

            logger_func_mapping = {
                'ERROR': _logger.error,
                'WARN': _logger.warning,
                'INFO': _logger.info,
                'VERBOSE': _logger.debug,
            }

            if 'messages' in server_config:
                for item in server_config['messages']:
                    message = item['message']
                    level = item['level']
                    logger_func = logger_func_mapping.get(level, None)
                    if logger_func:
                        logger_func('%s', message)

            return session
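
# A hypothetical call to create_session(), for illustration only. The class
# name ApplicationSession and the helpers environment_settings() and
# global_settings_dump() are assumed names, not values taken from this code.

session = ApplicationSession.create_session(None, 'My Application', [],
        environment_settings(), global_settings_dump())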
Example #7
def send_request(session,
                 url,
                 method,
                 license_key,
                 agent_run_id=None,
                 payload=()):
    """Constructs and sends a request to the data collector."""

    params = {}
    headers = {}
    config = {}

    settings = global_settings()

    start = time.time()

    # Validate that the license key was actually set and if not replace
    # it with a string which makes it more obvious it was not set.

    if not license_key:
        license_key = 'NO LICENSE KEY WAS SET IN AGENT CONFIGURATION'

    # The agent formats requests and is able to handle responses for
    # protocol version 12.

    params['method'] = method
    params['license_key'] = license_key
    params['protocol_version'] = '12'
    params['marshal_format'] = 'json'

    if agent_run_id:
        params['run_id'] = str(agent_run_id)

    headers['User-Agent'] = USER_AGENT
    headers['Content-Encoding'] = 'identity'

    # Set up definitions for the proxy server in case one has been set.

    proxies = proxy_server()

    # At this time we use JSON content encoding for the data being
    # sent. Ensure that normal byte strings are interpreted as Latin-1
    # and that the final result is ASCII so that we don't need to worry
    # about converting back to bytes again. We set the default fallback
    # encoder to treat any iterable as a list. Unfortunately the JSON
    # library can't consume it lazily as an iterable, which means a
    # generator will be consumed up front and everything collected in
    # memory as a list before being converted to JSON.
    #
    # If an error does occur when encoding the JSON, then it isn't
    # likely going to work later in a subsequent request with the same
    # data, even if aggregated with other data, so we need to log the
    # details and then flag that the data should be thrown away. We
    # don't mind being noisy in the log in this situation as it would
    # indicate a problem with the implementation of the agent.

    try:
        with InternalTrace('Supportability/Collector/JSON/Encode/%s' % method):
            data = simplejson.dumps(payload,
                                    ensure_ascii=True,
                                    encoding='Latin-1',
                                    namedtuple_as_object=False,
                                    default=lambda o: list(iter(o)))

    except Exception as exc:
        _logger.error(
            'Error encoding data for JSON payload for method %r '
            'with payload of %r. Exception which occurred was %r. '
            'Please report this problem to New Relic support.', method,
            payload, exc)

        raise DiscardDataForRequest(str(exc))
Example #8
        _logger.debug('Calling data collector with url=%r and method=%r.', url,
                      method)

    # Compress the serialized JSON being sent as content if it is over
    # 64KiB in size. If it is less than 2MB in size, compress for speed
    # (level 1); if it is over 2MB, compress for smallest size (level
    # 9). This parallels what the Ruby agent does.

    if len(data) > 64 * 1024:
        headers['Content-Encoding'] = 'deflate'
        level = 1 if len(data) < 2000000 else 9

        internal_metric('Supportability/Collector/ZLIB/Bytes/%s' % method,
                        len(data))

        with InternalTrace('Supportability/Collector/ZLIB/Compress/'
                           '%s' % method):
            data = zlib.compress(data, level)

    # If no requests session object is provided for making requests,
    # create one now. We use a transient session to get around design
    # flaws in the requests/urllib3 modules. See the notes for the
    # close_requests_session() function above. Note that keep alive
    # must be set to true at this point to ensure that the pool is
    # actually used, allowing us to close the connection.

    auto_close_session = False

    if not session:
        session_config = {}
        session_config['keep_alive'] = True
        session_config['pool_connections'] = 1