    def update_profile_sessions(self):
        """Check the current time and decide if any of the profile sessions
        have expired and, if so, move them to the finished_sessions list.

        """

        if self.full_profile_session:
            self.full_profile_session.sample_count += 1
            if time.time() >= self.full_profile_session.stop_time_s:
                self.stop_profile_session(self.full_profile_app)
                _logger.info('Finished thread profiling session.')

        if self._xray_suspended:
            return

        # Clean out the app_name entries with empty values

        for app_name, xray_profile_sessions in \
                list(six.iteritems(self.application_xrays)):
            if not xray_profile_sessions:
                self.application_xrays.pop(app_name)

        # Update the xray_profile_sessions for each application

        for app_name, xray_profile_sessions in \
                list(six.iteritems(self.application_xrays)):
            for key_txn, xps in list(six.iteritems(xray_profile_sessions)):
                if time.time() >= xps.stop_time_s:
                    self.stop_profile_session(app_name, key_txn)
                    _logger.info('Finished x-ray profiling session for %s',
                            key_txn)
    def metric_data(self, normalizer=None):
        """Returns a list containing the low level metric data for
        sending to the core application pertaining to the reporting
        period. This consists of tuple pairs where first is dictionary
        with name and scope keys with corresponding values, or integer
        identifier if metric had an entry in dictionary mapping metric
        (name, scope) as supplied from core application. The second is
        the list of accumulated metric data, the list always being of
        length 6.

        """

        if not self.__settings:
            return []

        result = []
        normalized_stats = {}

        # Metric Renaming and Re-Aggregation. After applying the metric
        # renaming rules, the metrics are re-aggregated to collapse the
        # metrics with same names after the renaming.

        if self.__settings.debug.log_raw_metric_data:
            _logger.info('Raw metric data for harvest of %r is %r.',
                    self.__settings.app_name,
                    list(six.iteritems(self.__stats_table)))

        if normalizer is not None:
            for key, value in six.iteritems(self.__stats_table):
                key = (normalizer(key[0])[0], key[1])
                stats = normalized_stats.get(key)
                if stats is None:
                    normalized_stats[key] = copy.copy(value)
                else:
                    stats.merge_stats(value)
        else:
            normalized_stats = self.__stats_table

        if self.__settings.debug.log_normalized_metric_data:
            _logger.info('Normalized metric data for harvest of %r is %r.',
                    self.__settings.app_name,
                    list(six.iteritems(normalized_stats)))

        for key, value in six.iteritems(normalized_stats):
            if key not in self.__metric_ids:
                key = dict(name=key[0], scope=key[1])
            else:
                key = self.__metric_ids[key]
            result.append((key, value))

        return result
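
# A hedged, standalone sketch of the "rename and re-aggregate" step performed
# above. The six-element field order used here (call count, total time,
# exclusive time, min, max, sum of squares) and the reaggregate() helper are
# assumptions for illustration only, not the agent's actual implementation.

def reaggregate(stats_table, normalizer):
    """stats_table maps (name, scope) -> [count, total, exclusive, min, max, sum_sq]."""
    normalized = {}
    for (name, scope), data in stats_table.items():
        key = (normalizer(name), scope)
        if key not in normalized:
            normalized[key] = list(data)
        else:
            merged = normalized[key]
            merged[0] += data[0]                  # call count
            merged[1] += data[1]                  # total time
            merged[2] += data[2]                  # exclusive time
            merged[3] = min(merged[3], data[3])   # min call time
            merged[4] = max(merged[4], data[4])   # max call time
            merged[5] += data[5]                  # sum of squares
    return normalized

# Two metrics that collapse to a single name after renaming are merged.
table = {
    ('WebTransaction/Uri/a', ''): [1, 0.2, 0.2, 0.2, 0.2, 0.04],
    ('WebTransaction/Uri/b', ''): [1, 0.4, 0.4, 0.4, 0.4, 0.16],
}
print(reaggregate(table, lambda name: 'WebTransaction/Uri/*'))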
Example #4
    def merge_metric_stats(self, snapshot, rollback=False):
        """Merges metric data from a snapshot. This is used when merging
        data from a single transaction into main stats engine. It would
        also be done if the sending of the metric data from the harvest
        failed and wanted to keep accumulating it for subsequent
        harvest.

        """

        if not self.__settings:
            return

        if rollback:
            _logger.debug('Performing rollback of metric data into '
                          'subsequent harvest period.')

        # Merge back data into any new data which has been
        # accumulated.

        for key, other in six.iteritems(snapshot.__stats_table):
            stats = self.__stats_table.get(key)
            if not stats:
                self.__stats_table[key] = copy.copy(other)
            else:
                stats.merge_stats(other)
Example #5
        def _callable_name():
            # This is pretty ugly and inefficient, but a stack
            # frame doesn't provide any information about the
            # original callable object. We thus need to try and
            # deduce what it is by searching through the stack
            # frame globals. This will still not work in many
            # cases, including lambdas, generator expressions,
            # and decorated attributes such as properties of
            # classes.

            try:
                if func_name in frame.f_globals:
                    if frame.f_globals[func_name].func_code is co:
                        return callable_name(frame.f_globals[func_name])

            except Exception:
                pass

            for name, obj in six.iteritems(frame.f_globals):
                try:
                    if obj.__dict__[func_name].func_code is co:
                        return callable_name(obj.__dict__[func_name])

                except Exception:
                    pass
Example #7
    def merge_metric_stats(self, snapshot, rollback=False):

        """Merges metric data from a snapshot. This is used when merging
        data from a single transaction into main stats engine. It would
        also be done if the sending of the metric data from the harvest
        failed and wanted to keep accumulating it for subsequent
        harvest.

        """

        if not self.__settings:
            return

        if rollback:
            _logger.debug('Performing rollback of metric data into '
                    'subsequent harvest period.')

        # Merge back data into any new data which has been
        # accumulated.

        for key, other in six.iteritems(snapshot.__stats_table):
            stats = self.__stats_table.get(key)
            if not stats:
                self.__stats_table[key] = copy.copy(other)
            else:
                stats.merge_stats(other)
    def metrics(self):
        """Returns an iterator over the set of value metrics. The items
        returned are a tuple consisting of the metric name and accumulated
        stats for the metric.

        """

        return six.iteritems(self.__stats_table)
Example #9
    def profile_data(self):

        # Generic profiling sessions have to wait for completion before
        # reporting data.

        if self.state == SessionState.RUNNING:
            return None

        # We prune the number of nodes sent if we are over the specified
        # limit. This is just to avoid having the response be too large
        # and get rejected by the data collector.

        settings = global_settings()
        self._prune_call_trees(settings.agent_limits.thread_profiler_nodes)

        flat_tree = {}
        thread_count = 0

        for category, bucket in six.iteritems(self.call_buckets):

            # Only flatten buckets that have data in them. No need to send
            # empty buckets.

            if bucket:
                flat_tree[category] = [x.flatten() for x in bucket.values()]
                thread_count += len(bucket)

        # Construct the actual final data for sending. The actual call
        # data is turned into JSON, compressed and then base64 encoded at
        # this point to cut its size.

        if settings.debug.log_thread_profile_payload:
            _logger.debug('Encoding thread profile data where '
                          'payload=%r.', flat_tree)

        json_call_tree = json_encode(flat_tree)

        level = settings.agent_limits.data_compression_level
        level = level or zlib.Z_DEFAULT_COMPRESSION

        encoded_tree = base64.standard_b64encode(
            zlib.compress(six.b(json_call_tree), level))

        if six.PY3:
            encoded_tree = encoded_tree.decode('Latin-1')

        profile = [[
            self.profile_id, self.start_time_s * 1000,
            (self.actual_stop_time_s or time.time()) * 1000, self.sample_count,
            encoded_tree, thread_count, 0, None
        ]]

        # Reset the data structures to default.

        self.reset_profile_data()
        return profile
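
# A hedged, standalone sketch of the encoding pipeline used above
# (JSON -> zlib -> base64), plus the reverse direction for inspecting a
# payload. The tree content is made up, and json.dumps/json.loads stand in
# for the agent's own json_encode helper.

import base64
import json
import zlib

def encode_call_tree(flat_tree, level=zlib.Z_DEFAULT_COMPRESSION):
    payload = json.dumps(flat_tree)
    compressed = zlib.compress(payload.encode('utf-8'), level)
    return base64.standard_b64encode(compressed).decode('latin-1')

def decode_call_tree(encoded_tree):
    compressed = base64.standard_b64decode(encoded_tree)
    return json.loads(zlib.decompress(compressed))

tree = {'REQUEST': [['module', 'func', 12]]}
encoded = encode_call_tree(tree)
assert decode_call_tree(encoded) == tree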
Example #10
def patch_loguru_logger(logger):
    if hasattr(logger, "_core"):
        if not hasattr(logger._core, "_nr_instrumented"):
            logger.add(_nr_log_forwarder, format="{message}")
            logger._core._nr_instrumented = True
    elif not hasattr(logger, "_nr_instrumented"):
        for _, handler in six.iteritems(logger._handlers):
            if handler._writer is _nr_log_forwarder:
                logger._nr_instrumented = True
                return

        logger.add(_nr_log_forwarder, format="{message}")
        logger._nr_instrumented = True
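
# A hedged usage sketch, assuming loguru is installed and that the
# _nr_log_forwarder sink referenced above is defined elsewhere in this
# module. The _nr_instrumented guard makes repeated patching a no-op.

from loguru import logger

patch_loguru_logger(logger)
patch_loguru_logger(logger)   # second call does nothing thanks to the guard
logger.info("hello")          # now also forwarded to _nr_log_forwarder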
Example #11
    def _check_log_attributes(expected, captured, mismatches):
        for key, value in six.iteritems(expected):
            if hasattr(captured, key):
                captured_value = getattr(captured, key, None)
            elif key in captured.attributes:
                captured_value = captured.attributes[key]
            else:
                mismatches.append("key: %s, value:<%s><%s>" %
                                  (key, value, getattr(captured, key, None)))
                return False

            if value is not None:
                if value != captured_value:
                    mismatches.append("key: %s, value:<%s><%s>" %
                                      (key, value, captured_value))
                    return False

        return True
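
# A minimal illustration with a stand-in "captured" record. In the agent's
# test suite the captured object comes from the log recording machinery, so
# FakeRecord and the attribute values below are invented for this sketch.

class FakeRecord(object):
    def __init__(self, attributes, **fields):
        self.attributes = attributes
        for name, value in fields.items():
            setattr(self, name, value)

mismatches = []
record = FakeRecord({'trace.id': 'abc123'}, message='hello', level='INFO')
ok = _check_log_attributes({'message': 'hello', 'trace.id': 'abc123'},
                           record, mismatches)
print(ok, mismatches)   # True, []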
Example #12
def create_session(license_key, app_name, linked_applications, environment,
                   settings):

    _global_settings = global_settings()

    if _global_settings.developer_mode:
        session = DeveloperModeSession.create_session(license_key, app_name,
                                                      linked_applications,
                                                      environment, settings)
    else:
        session = ApplicationSession.create_session(license_key, app_name,
                                                    linked_applications,
                                                    environment, settings)

    # When session creation is unsuccessful None is returned. We need to catch
    # that and return None. Session creation can fail if the data collector is
    # down or if the configuration is wrong, such as having capture_params set
    # to true in high security mode.

    if session is None:
        return None

    # We now need to send up the final merged configuration using the
    # agent_settings() method. We must make sure we pass the
    # configuration through global_settings_dump() to strip/mask any
    # sensitive settings. We also convert any values which are not
    # strings or numerics to strings before sending to avoid problems
    # with the UI interpreting the values strangely if sent as native
    # types.

    application_settings = global_settings_dump(session.configuration)

    for key, value in list(six.iteritems(application_settings)):
        if not isinstance(key, six.string_types):
            del application_settings[key]

        if (not isinstance(value, six.string_types)
                and not isinstance(value, float)
                and not isinstance(value, six.integer_types)):
            application_settings[key] = repr(value)

    session.agent_settings(application_settings)

    return session
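
# A standalone sketch of the serialisation rule applied to
# application_settings above: non-string keys are dropped and any value that
# is not a string, float, or integer is replaced by its repr(). The helper
# name and sample settings are invented; unlike the loop above, this builds
# a new dict rather than mutating in place.

def serialize_settings(settings):
    result = {}
    for key, value in settings.items():
        if not isinstance(key, str):
            continue                      # drop non-string keys
        if isinstance(value, (str, float, int)):
            result[key] = value
        else:
            result[key] = repr(value)     # e.g. lists, dicts, None
    return result

sample = {'app_name': 'My App', 'ignored_params': ['password', 'token'],
          'proxy_user': None, 1234: 'dropped'}
print(serialize_settings(sample))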
Example #13
def create_session(license_key, app_name, linked_applications,
        environment, settings):

    _global_settings = global_settings()

    if _global_settings.developer_mode:
        session = DeveloperModeSession.create_session(license_key, app_name,
                linked_applications, environment, settings)
    else:
        session = ApplicationSession.create_session(license_key, app_name,
                linked_applications, environment, settings)

    # When session creation is unsuccessful None is returned. We need to catch
    # that and return None. Session creation can fail if the data collector is
    # down or if the configuration is wrong, such as having capture_params set
    # to true in high security mode.

    if session is None:
        return None

    # We now need to send up the final merged configuration using the
    # agent_settings() method. We must make sure we pass the
    # configuration through global_settings_dump() to strip/mask any
    # sensitive settings. We also convert any values which are not
    # strings or numerics to strings before sending to avoid problems
    # with the UI interpreting the values strangely if sent as native
    # types.

    application_settings = global_settings_dump(session.configuration)

    for key, value in list(six.iteritems(application_settings)):
        if not isinstance(key, six.string_types):
            del application_settings[key]

        if (not isinstance(value, six.string_types) and
                not isinstance(value, float) and
                not isinstance(value, six.integer_types)):
            application_settings[key] = repr(value)

    session.agent_settings(application_settings)

    return session
    def profile_data(self):

        # Generic profiling sessions have to wait for completion before
        # reporting data.
        #
        # X-ray profile sessions can send partial profile data on every harvest.

        if ((self.profiler_type == SessionType.GENERIC) and
                (self.state == SessionState.RUNNING)):
            return None

        # We prune the number of nodes sent if we are over the specified
        # limit. This is just to avoid having the response be too large
        # and get rejected by the data collector.

        settings = global_settings()
        self._prune_call_trees(settings.agent_limits.thread_profiler_nodes)

        flat_tree = {}
        thread_count = 0

        for category, bucket in six.iteritems(self.call_buckets):

            # Only flatten buckets that have data in them. No need to send
            # empty buckets.

            if bucket:
                flat_tree[category] = [x.flatten() for x in bucket.values()]
                thread_count += len(bucket)

        # If no profile data was captured for an x-ray session return None
        # instead of sending an encoded empty data-structure. For a generic
        # profiler continue to send an empty tree. This can happen on a system
        # that uses green threads (coroutines), so sending an empty tree marks
        # the end of a profile session. If we don't send anything then the UI
        # times out after a very long time (~15mins) which is frustrating for
        # the customer.

        if (thread_count == 0) and (self.profiler_type == SessionType.XRAY):
            return None

        # Construct the actual final data for sending. The actual call
        # data is turned into JSON, compressed and then base64 encoded at
        # this point to cut its size.

        if settings.debug.log_thread_profile_payload:
            _logger.debug('Encoding thread profile data where '
                    'payload=%r.', flat_tree)

        json_call_tree = json_encode(flat_tree)

        level = settings.agent_limits.data_compression_level
        level = level or zlib.Z_DEFAULT_COMPRESSION

        encoded_tree = base64.standard_b64encode(
                zlib.compress(six.b(json_call_tree), level))

        if six.PY3:
            encoded_tree = encoded_tree.decode('Latin-1')

        profile = [[self.profile_id, self.start_time_s * 1000,
            (self.actual_stop_time_s or time.time()) * 1000, self.sample_count,
            encoded_tree, thread_count, 0, self.xray_id]]

        # Reset the data structures to default. For x-ray profile sessions we
        # report the partial call tree at every harvest cycle. It is required
        # to reset the data structures to avoid aggregating the call trees
        # across harvest cycles.

        self.reset_profile_data()
        return profile
Example #15
def create_session(license_key, app_name, linked_applications,
        environment, settings):

    _global_settings = global_settings()

    if _global_settings.developer_mode:
        session = DeveloperModeSession.create_session(license_key, app_name,
                linked_applications, environment, settings)
    else:
        session = ApplicationSession.create_session(license_key, app_name,
                linked_applications, environment, settings)

    # When session creation is unsuccessful None is returned. We need to catch
    # that and return None. Session creation can fail if the data collector is
    # down or if the configuration is wrong, such as having capture_params set
    # to true in high security mode.

    if session is None:
        return None

    # We now need to send up the final merged configuration using the
    # agent_settings() method. We must make sure we pass the
    # configuration through global_settings_dump() to strip/mask any
    # sensitive settings. We also convert any values which are not
    # strings or numerics to strings before sending to avoid problems
    # with the UI interpreting the values strangely if sent as native
    # types.

    application_settings = global_settings_dump(session.configuration)

    for key, value in list(six.iteritems(application_settings)):
        if not isinstance(key, six.string_types):
            del application_settings[key]

        if (not isinstance(value, six.string_types) and
                not isinstance(value, float) and
                not isinstance(value, six.integer_types)):
            application_settings[key] = repr(value)

    try:
        session.agent_settings(application_settings)

    except NetworkInterfaceException:
        # The reason for errors of this type has already been logged.
        # No matter what the error, we just pass back None. The upper
        # layer will deal with not being successful.

        _logger.warning('Agent registration failed due to error in '
                'uploading agent settings. Registration should retry '
                'automatically.')

        pass

    except Exception:
        # Any other errors are going to be unexpected and likely will
        # indicate an issue with the implementation of the agent.

        _logger.exception('Unexpected exception when attempting to '
                'update agent settings with the data collector. Please '
                'report this problem to New Relic support for further '
                'investigation.')

        _logger.warning('Agent registration failed due to error in '
                'uploading agent settings. Registration should retry '
                'automatically.')

        pass

    else:
        return session
Example #16
    def profile_data(self):

        # Generic profiling sessions have to wait for completion before
        # reporting data.
        #
        # X-ray profile sessions can send partial profile data on every harvest.

        if ((self.profiler_type == SessionType.GENERIC) and
                (self.state == SessionState.RUNNING)):
            return None

        # We prune the number of nodes sent if we are over the specified
        # limit. This is just to avoid having the response be too large
        # and get rejected by the data collector.

        settings = global_settings()
        self._prune_call_trees(settings.agent_limits.thread_profiler_nodes)

        flat_tree = {}
        thread_count = 0

        for category, bucket in six.iteritems(self.call_buckets):

            # Only flatten buckets that have data in them. No need to send
            # empty buckets.

            if bucket:
                flat_tree[category] = [x.flatten() for x in bucket.values()]
                thread_count += len(bucket)

        # If no profile data was captured, return None instead of sending an
        # encoded empty data structure.

        if thread_count == 0:
            return None

        # Construct the actual final data for sending. The actual call
        # data is turned into JSON, compressed and then base64 encoded at
        # this point to cut its size.

        _logger.debug('Returning partial thread profiling data '
                'for %d transactions with name %r and xray ID of '
                '%r over a period of %.2f seconds and %d samples.',
                self.transaction_count, self.key_txn, self.xray_id,
                time.time()-self.start_time_s, self.sample_count)

        if settings.debug.log_thread_profile_payload:
            _logger.debug('Encoding thread profile data where '
                    'payload=%r.', flat_tree)

        json_call_tree = simplejson.dumps(flat_tree, ensure_ascii=True,
                encoding='Latin-1', namedtuple_as_object=False)
        encoded_tree = base64.standard_b64encode(
                zlib.compress(six.b(json_call_tree)))

        profile = [[self.profile_id, self.start_time_s*1000,
            (self.actual_stop_time_s or time.time()) * 1000, self.sample_count,
            encoded_tree, thread_count, 0, self.xray_id]]

        # Reset the data structures to default. For x-ray profile sessions we
        # report the partial call tree at every harvest cycle. It is required
        # to reset the data structures to avoid aggregating the call trees
        # across harvest cycles.

        self.reset_profile_data()
        return profile
    def profile_data(self):

        # Generic profiling sessions have to wait for completion before
        # reporting data.
        #
        # X-ray profile sessions can send partial profile data on every harvest.

        if ((self.profiler_type == SessionType.GENERIC)
                and (self.state == SessionState.RUNNING)):
            return None

        # We prune the number of nodes sent if we are over the specified
        # limit. This is just to avoid having the response be too large
        # and get rejected by the data collector.

        settings = global_settings()
        self._prune_call_trees(settings.agent_limits.thread_profiler_nodes)

        flat_tree = {}
        thread_count = 0

        for category, bucket in six.iteritems(self.call_buckets):

            # Only flatten buckets that have data in them. No need to send
            # empty buckets.

            if bucket:
                flat_tree[category] = [x.flatten() for x in bucket.values()]
                thread_count += len(bucket)

        # If no profile data was captured, return None instead of sending an
        # encoded empty data structure.

        if thread_count == 0:
            return None

        # Construct the actual final data for sending. The actual call
        # data is turned into JSON, compressed and then base64 encoded at
        # this point to cut its size.

        _logger.debug(
            'Returning partial thread profiling data '
            'for %d transactions with name %r and xray ID of '
            '%r over a period of %.2f seconds and %d samples.',
            self.transaction_count, self.key_txn, self.xray_id,
            time.time() - self.start_time_s, self.sample_count)

        if settings.debug.log_thread_profile_payload:
            _logger.debug('Encoding thread profile data where '
                          'payload=%r.', flat_tree)

        json_call_tree = simplejson.dumps(flat_tree,
                                          ensure_ascii=True,
                                          encoding='Latin-1',
                                          namedtuple_as_object=False)
        encoded_tree = base64.standard_b64encode(
            zlib.compress(six.b(json_call_tree)))

        profile = [[
            self.profile_id, self.start_time_s * 1000,
            (self.actual_stop_time_s or time.time()) * 1000, self.sample_count,
            encoded_tree, thread_count, 0, self.xray_id
        ]]

        # Reset the data structures to default. For x-ray profile sessions we
        # report the partial call tree at every harvest cycle. It is required
        # to reset the data structures to avoid aggregating the call trees
        # across harvest cycles.

        self.reset_profile_data()
        return profile
    def profile_data(self):

        # Generic profiling sessions have to wait for completion before
        # reporting data.
        #
        # X-ray profile sessions can send partial profile data on every harvest.

        if ((self.profiler_type == SessionType.GENERIC)
                and (self.state == SessionState.RUNNING)):
            return None

        # We prune the number of nodes sent if we are over the specified
        # limit. This is just to avoid having the response be too large
        # and get rejected by the data collector.

        settings = global_settings()
        self._prune_call_trees(settings.agent_limits.thread_profiler_nodes)

        flat_tree = {}
        thread_count = 0

        for category, bucket in six.iteritems(self.call_buckets):

            # Only flatten buckets that have data in them. No need to send
            # empty buckets.

            if bucket:
                flat_tree[category] = [x.flatten() for x in bucket.values()]
                thread_count += len(bucket)

        # If no profile data was captured for an x-ray session return None
        # instead of sending an encoded empty data-structure. For a generic
        # profiler continue to send an empty tree. This can happen on a system
        # that uses green threads (coroutines), so sending an empty tree marks
        # the end of a profile session. If we don't send anything then the UI
        # times out after a very long time (~15mins) which is frustrating for
        # the customer.

        if (thread_count == 0) and (self.profiler_type == SessionType.XRAY):
            return None

        # Construct the actual final data for sending. The actual call
        # data is turned into JSON, compressed and then base64 encoded at
        # this point to cut its size.

        if settings.debug.log_thread_profile_payload:
            _logger.debug('Encoding thread profile data where '
                          'payload=%r.', flat_tree)

        json_call_tree = json_encode(flat_tree)

        level = settings.agent_limits.data_compression_level
        level = level or zlib.Z_DEFAULT_COMPRESSION

        encoded_tree = base64.standard_b64encode(
            zlib.compress(six.b(json_call_tree), level))

        if six.PY3:
            encoded_tree = encoded_tree.decode('Latin-1')

        profile = [[
            self.profile_id, self.start_time_s * 1000,
            (self.actual_stop_time_s or time.time()) * 1000, self.sample_count,
            encoded_tree, thread_count, 0, self.xray_id
        ]]

        # Reset the data structures to default. For x-ray profile sessions we
        # report the partial call tree at every harvest cycle. It is required
        # to reset the data structures to avoid aggregating the call trees
        # across harvest cycles.

        self.reset_profile_data()
        return profile
def environment_settings():
    """Returns an array of arrays of environment settings

    """

    env = []

    # Agent information.

    env.append(('Agent Version', '.'.join(map(str, newrelic.version_info))))

    if 'NEW_RELIC_ADMIN_COMMAND' in os.environ:
        env.append(('Admin Command', os.environ['NEW_RELIC_ADMIN_COMMAND']))
        del os.environ['NEW_RELIC_ADMIN_COMMAND']

    # System information.

    env.append(('Arch', platform.machine()))
    env.append(('OS', platform.system()))
    env.append(('OS version', platform.release()))
    env.append(('CPU Count', cpu_count()))
    env.append(('System Memory', memory_total()))

    # Python information.

    env.append(('Python Program Name', sys.argv[0]))

    env.append(('Python Executable', sys.executable))

    env.append(('Python Home', os.environ.get('PYTHONHOME', '')))
    env.append(('Python Path', os.environ.get('PYTHONPATH', '')))

    env.append(('Python Prefix', sys.prefix))
    env.append(('Python Exec Prefix', sys.exec_prefix))

    env.append(('Python Version', sys.version))
    env.append(('Python Platform', sys.platform))

    env.append(('Python Max Unicode', sys.maxunicode))

    # Extensions information.

    extensions = []

    if 'newrelic.core._thread_utilization' in sys.modules:
        extensions.append('newrelic.core._thread_utilization')

    if 'newrelic.packages.simplejson._speedups' in sys.modules:
        extensions.append('newrelic.packages.simplejson._speedups')

    env.append(('Compiled Extensions', ', '.join(extensions)))

    # Dispatcher information.

    dispatcher = []

    if not dispatcher and 'mod_wsgi' in sys.modules:
        mod_wsgi = sys.modules['mod_wsgi']
        if hasattr(mod_wsgi, 'process_group'):
            if mod_wsgi.process_group == '':
                dispatcher.append(('Dispatcher', 'Apache/mod_wsgi (embedded)'))
            else:
                dispatcher.append(('Dispatcher', 'Apache/mod_wsgi (daemon)'))
        else:
            dispatcher.append(('Dispatcher', 'Apache/mod_wsgi'))
        if hasattr(mod_wsgi, 'version'):
            dispatcher.append(('Dispatcher Version', str(mod_wsgi.version)))

    if not dispatcher and 'uwsgi' in sys.modules:
        dispatcher.append(('Dispatcher', 'uWSGI'))
        uwsgi = sys.modules['uwsgi']
        if hasattr(uwsgi, 'version'):
            dispatcher.append(('Dispatcher Version', uwsgi.version))

    if not dispatcher and 'flup.server.fcgi' in sys.modules:
        dispatcher.append(('Dispatcher', 'flup/fastcgi (threaded)'))

    if not dispatcher and 'flup.server.fcgi_fork' in sys.modules:
        dispatcher.append(('Dispatcher', 'flup/fastcgi (prefork)'))

    if not dispatcher and 'flup.server.scgi' in sys.modules:
        dispatcher.append(('Dispatcher', 'flup/scgi (threaded)'))

    if not dispatcher and 'flup.server.scgi_fork' in sys.modules:
        dispatcher.append(('Dispatcher', 'flup/scgi (prefork)'))

    if not dispatcher and 'flup.server.ajp' in sys.modules:
        dispatcher.append(('Dispatcher', 'flup/ajp (threaded)'))

    if not dispatcher and 'flup.server.ajp_fork' in sys.modules:
        dispatcher.append(('Dispatcher', 'flup/ajp (forking)'))

    if not dispatcher and 'flup.server.cgi' in sys.modules:
        dispatcher.append(('Dispatcher', 'flup/cgi'))

    if not dispatcher and 'tornado' in sys.modules:
        dispatcher.append(('Dispatcher', 'tornado'))
        tornado = sys.modules['tornado']
        if hasattr(tornado, 'version_info'):
            dispatcher.append(
                ('Dispatcher Version', str(tornado.version_info)))

    if not dispatcher and 'gunicorn' in sys.modules:
        if 'gunicorn.workers.ggevent' in sys.modules:
            dispatcher.append(('Dispatcher', 'gunicorn (gevent)'))
        elif 'gunicorn.workers.geventlet' in sys.modules:
            dispatcher.append(('Dispatcher', 'gunicorn (eventlet)'))
        else:
            dispatcher.append(('Dispatcher', 'gunicorn'))
        gunicorn = sys.modules['gunicorn']
        if hasattr(gunicorn, '__version__'):
            dispatcher.append(('Dispatcher Version', gunicorn.__version__))

    env.extend(dispatcher)

    # Module information.

    plugins = []

    for name, module in list(six.iteritems(sys.modules)):
        if name.startswith('newrelic.hooks.'):
            plugins.append(name)

        elif name.find('.') == -1 and hasattr(module, '__file__'):
            try:
                if 'pkg_resources' in sys.modules:
                    version = pkg_resources.get_distribution(name).version
                    if version:
                        name = '%s (%s)' % (name, version)
            except Exception:
                pass
            plugins.append(name)

    env.append(('Plugin List', plugins))

    return env
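
# A hedged sketch of the version lookup used for the plugin list above,
# assuming setuptools (pkg_resources) is installed. Not every module name
# maps to a distribution of the same name, hence the broad fallback.

def annotate_with_version(name):
    """Return 'name (version)' when a matching distribution is installed."""
    try:
        import pkg_resources
        version = pkg_resources.get_distribution(name).version
        if version:
            return '%s (%s)' % (name, version)
    except Exception:
        pass
    return name

print(annotate_with_version('pip'))          # e.g. 'pip (23.2.1)'
print(annotate_with_version('not-a-dist'))   # falls back to the bare name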
Example #20
def create_session(license_key, app_name, linked_applications,
        environment, settings):

    _global_settings = global_settings()

    if _global_settings.developer_mode:
        session = DeveloperModeSession.create_session(license_key, app_name,
                linked_applications, environment, settings)
    else:
        session = ApplicationSession.create_session(license_key, app_name,
                linked_applications, environment, settings)

    # When session creation is unsuccessful None is returned. We need to catch
    # that and return None. Session creation can fail if the data collector is
    # down or if the configuration is wrong, such as having capture_params set
    # to true in high security mode.

    if session is None:
        return None

    # We now need to send up the final merged configuration using the
    # agent_settings() method. We must make sure we pass the
    # configuration through global_settings_dump() to strip/mask any
    # sensitive settings. We also convert any values which are not
    # strings or numerics to strings before sending to avoid problems
    # with the UI interpreting the values strangely if sent as native
    # types.

    application_settings = global_settings_dump(session.configuration)

    for key, value in list(six.iteritems(application_settings)):
        if not isinstance(key, six.string_types):
            del application_settings[key]

        if (not isinstance(value, six.string_types) and
                not isinstance(value, float) and
                not isinstance(value, six.integer_types)):
            application_settings[key] = repr(value)

    try:
        session.agent_settings(application_settings)

    except NetworkInterfaceException:
        # The reason for errors of this type has already been logged.
        # No matter what the error, we just pass back None. The upper
        # layer will deal with not being successful.

        _logger.warning('Agent registration failed due to error in '
                'uploading agent settings. Registration should retry '
                'automatically.')

        pass

    except Exception:
        # Any other errors are going to be unexpected and likely will
        # indicate an issue with the implementation of the agent.

        _logger.exception('Unexpected exception when attempting to '
                'update agent settings with the data collector. Please '
                'report this problem to New Relic support for further '
                'investigation.')

        _logger.warning('Agent registration failed due to error in '
                'uploading agent settings. Registration should retry '
                'automatically.')

        pass

    else:
        return session
    def merge_other_stats(self, snapshot, merge_traces=True,
            merge_errors=True, merge_sql=True):

        """Merges non metric data from a snapshot. This would only be
        used when merging data from a single transaction into main
        stats engine. It is assumed the snapshot has newer data and
        that any existing data takes precedence where what should be
        collected is not otherwised based on time.

        """

        if not self.__settings:
            return

        settings = self.__settings

        # Append snapshot error details at end to maintain time
        # based order and then trim at maximum to be kept.

        if merge_errors:
            maximum = settings.agent_limits.errors_per_harvest
            self.__transaction_errors.extend(snapshot.__transaction_errors)
            self.__transaction_errors = self.__transaction_errors[:maximum]

        # Add sql traces to the set of existing entries. If over
        # the limit of how many to collect, only merge in if already
        # seen the specific SQL.

        if merge_sql:
            maximum = settings.agent_limits.slow_sql_data
            for key, other in six.iteritems(snapshot.__sql_stats_table):
                stats = self.__sql_stats_table.get(key)
                if not stats:
                    if len(self.__sql_stats_table) < maximum:
                        self.__sql_stats_table[key] = copy.copy(other)
                else:
                    stats.merge_stats(other)

        # Restore original slow transaction if slower than any newer slow
        # transaction. Also append any saved transactions corresponding to
        # browser and xray traces, trimming them at the maximum to be kept.

        if merge_traces:

            # Limit number of browser traces to the limit (10)
            # FIXME - snapshot.__browser_transactions has only one element. So
            # we can use the following code:
            #
            # maximum = settings.agent_limits.browser_transactions
            # if len(self.__browser_transactions) < maximum:
            #     self.__browser_transactions.extend(
            #                               snapshot.__browser_transactions)

            maximum = settings.agent_limits.browser_transactions
            self.__browser_transactions.extend(snapshot.__browser_transactions)
            self.__browser_transactions = self.__browser_transactions[:maximum]

            # Limit number of xray traces to the limit (10)
            # Spill over traces after the limit should have no x-ray ids. This
            # qualifies the trace to be considered for slow transaction.

            maximum = settings.agent_limits.xray_transactions
            self.__xray_transactions.extend(snapshot.__xray_transactions)
            for txn in self.__xray_transactions[maximum:]:
                txn.xray_id = None
            self.__xray_transactions = self.__xray_transactions[:maximum]

            transaction = snapshot.__slow_transaction

            # If the transaction has an xray_id then it does not qualify to
            # be considered for slow transaction.  This is because in the Core
            # app, there is logic to NOT show TTs with xray ids in the
            # WebTransactions tab. If a TT has xray_id it is only shown under
            # the xray page.

            xray_id = getattr(transaction, 'xray_id', None)
            if transaction and xray_id is None:
                name = transaction.path
                duration = transaction.duration

                slowest = 0
                if self.__slow_transaction:
                    slowest = self.__slow_transaction.duration
                if name in self.__slow_transaction_map:
                    slowest = max(self.__slow_transaction_map[name], slowest)

                if duration > slowest:
                    # We are going to replace the prior slow
                    # transaction. We need to be a bit tricky here. If
                    # we are overriding an existing slow transaction for
                    # a different name, then we need to restore in the
                    # transaction map what the previous slowest duration
                    # was for that, or remove it if there wasn't one.
                    # This is so we do not incorrectly suppress it given
                    # that it was never actually reported as the slowest
                    # transaction.

                    if self.__slow_transaction:
                        if self.__slow_transaction.path != name:
                            if self.__slow_transaction_old_duration:
                                self.__slow_transaction_map[
                                        self.__slow_transaction.path] = (
                                        self.__slow_transaction_old_duration)
                            else:
                                del self.__slow_transaction_map[
                                        self.__slow_transaction.path]

                    if name in self.__slow_transaction_map:
                        self.__slow_transaction_old_duration = (
                                self.__slow_transaction_map[name])
                    else:
                        self.__slow_transaction_old_duration = None

                    self.__slow_transaction = transaction
                    self.__slow_transaction_map[name] = duration
Example #22
def global_settings_dump(settings_object=None, serializable=False):
    """This returns dictionary of global settings flattened into a single
    key namespace rather than nested hierarchy. This is used to send the
    global settings configuration back to core application.

    """

    if settings_object is None:
        settings_object = _settings

    settings = flatten_settings(settings_object)

    # Strip out any sensitive settings. The license key has already
    # been sent, so there is no point sending it again.

    del settings['license_key']
    del settings['api_key']
    del settings['encoding_key']
    del settings['js_agent_loader']
    del settings['js_agent_file']

    # If proxy credentials are included in the settings, we obfuscate
    # them before sending, rather than deleting.

    obfuscated = '****'

    if settings['proxy_user'] is not None:
        settings['proxy_user'] = obfuscated

    if settings['proxy_pass'] is not None:
        settings['proxy_pass'] = obfuscated

    # For the case of proxy_host we have to do a bit more work as it
    # could be a URI which includes the username and password within
    # it. What we do here is parse the value and if identified as a
    # URI, we recompose it with the obfuscated username and password.

    proxy_host = settings['proxy_host']

    if proxy_host:
        components = urlparse.urlparse(proxy_host)

        if components.scheme:

            netloc = create_obfuscated_netloc(components.username,
                                              components.password,
                                              components.hostname, obfuscated)

            if components.port:
                uri = '%s://%s:%s%s' % (components.scheme, netloc,
                                        components.port, components.path)
            else:
                uri = '%s://%s%s' % (components.scheme, netloc,
                                     components.path)

            settings['proxy_host'] = uri

    if serializable:
        for key, value in list(six.iteritems(settings)):
            if not isinstance(key, six.string_types):
                del settings[key]

            if (not isinstance(value, six.string_types)
                    and not isinstance(value, float)
                    and not isinstance(value, six.integer_types)):
                settings[key] = repr(value)

    return settings
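
# A hedged, standalone sketch of the proxy_host handling described above:
# parse the URI, rebuild the network location with masked credentials, and
# keep scheme, host, port, and path intact. create_obfuscated_netloc is the
# agent's own helper; the version below is a simplified stand-in.

try:
    from urllib.parse import urlparse      # Python 3
except ImportError:
    from urlparse import urlparse          # Python 2

def obfuscate_proxy_host(proxy_host, obfuscated='****'):
    components = urlparse(proxy_host)
    if not components.scheme:
        return proxy_host                   # bare host, nothing to mask

    host = components.hostname or ''
    if components.username:
        netloc = '%s:%s@%s' % (obfuscated, obfuscated, host)
    else:
        netloc = host

    if components.port:
        return '%s://%s:%s%s' % (components.scheme, netloc,
                                 components.port, components.path)
    return '%s://%s%s' % (components.scheme, netloc, components.path)

print(obfuscate_proxy_host('http://user:secret@proxy.example.com:8080'))
# -> http://****:****@proxy.example.com:8080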
Example #23
def environment_settings():
    """Returns an array of arrays of environment settings

    """

    env = []

    # Agent information.

    env.append(('Agent Version', '.'.join(map(str, newrelic.version_info))))

    if 'NEW_RELIC_ADMIN_COMMAND' in os.environ:
        env.append(('Admin Command', os.environ['NEW_RELIC_ADMIN_COMMAND']))
        del os.environ['NEW_RELIC_ADMIN_COMMAND']

    # System information.

    env.append(('Arch', platform.machine()))
    env.append(('OS', platform.system()))
    env.append(('OS version', platform.release()))
    env.append(('CPU Count', cpu_count()))
    env.append(('System Memory', memory_total()))

    # Python information.

    env.append(('Python Program Name', sys.argv[0]))

    env.append(('Python Executable', sys.executable))

    env.append(('Python Home', os.environ.get('PYTHONHOME', '')))
    env.append(('Python Path', os.environ.get('PYTHONPATH', '')))

    env.append(('Python Prefix', sys.prefix))
    env.append(('Python Exec Prefix', sys.exec_prefix))

    env.append(('Python Version', sys.version))
    env.append(('Python Platform', sys.platform))

    env.append(('Python Max Unicode', sys.maxunicode))

    # Extensions information.

    extensions = []

    if 'newrelic.core._thread_utilization' in sys.modules:
        extensions.append('newrelic.core._thread_utilization')

    if 'newrelic.packages.simplejson._speedups' in sys.modules:
        extensions.append('newrelic.packages.simplejson._speedups')

    env.append(('Compiled Extensions', ', '.join(extensions)))

    # Dispatcher information.

    dispatcher = []

    if not dispatcher and 'mod_wsgi' in sys.modules:
        mod_wsgi = sys.modules['mod_wsgi']
        if hasattr(mod_wsgi, 'process_group'):
            if mod_wsgi.process_group == '':
                dispatcher.append(('Dispatcher', 'Apache/mod_wsgi (embedded)'))
            else:
                dispatcher.append(('Dispatcher', 'Apache/mod_wsgi (daemon)'))
        else:
            dispatcher.append(('Dispatcher', 'Apache/mod_wsgi'))
        if hasattr(mod_wsgi, 'version'):
            dispatcher.append(('Dispatcher Version', str(mod_wsgi.version)))

    if not dispatcher and 'uwsgi' in sys.modules:
        dispatcher.append(('Dispatcher', 'uWSGI'))
        uwsgi = sys.modules['uwsgi']
        if hasattr(uwsgi, 'version'):
            dispatcher.append(('Dispatcher Version', uwsgi.version))

    if not dispatcher and 'flup.server.fcgi' in sys.modules:
        dispatcher.append(('Dispatcher', 'flup/fastcgi (threaded)'))

    if not dispatcher and 'flup.server.fcgi_fork' in sys.modules:
        dispatcher.append(('Dispatcher', 'flup/fastcgi (prefork)'))

    if not dispatcher and 'flup.server.scgi' in sys.modules:
        dispatcher.append(('Dispatcher', 'flup/scgi (threaded)'))

    if not dispatcher and 'flup.server.scgi_fork' in sys.modules:
        dispatcher.append(('Dispatcher', 'flup/scgi (prefork)'))

    if not dispatcher and 'flup.server.ajp' in sys.modules:
        dispatcher.append(('Dispatcher', 'flup/ajp (threaded)'))

    if not dispatcher and 'flup.server.ajp_fork' in sys.modules:
        dispatcher.append(('Dispatcher', 'flup/ajp (forking)'))

    if not dispatcher and 'flup.server.cgi' in sys.modules:
        dispatcher.append(('Dispatcher', 'flup/cgi'))

    if not dispatcher and 'tornado' in sys.modules:
        dispatcher.append(('Dispatcher', 'tornado'))
        tornado = sys.modules['tornado']
        if hasattr(tornado, 'version_info'):
            dispatcher.append(('Dispatcher Version',
                               str(tornado.version_info)))

    if not dispatcher and 'gunicorn' in sys.modules:
        if 'gunicorn.workers.ggevent' in sys.modules:
            dispatcher.append(('Dispatcher', 'gunicorn (gevent)'))
        elif 'gunicorn.workers.geventlet' in sys.modules:
            dispatcher.append(('Dispatcher', 'gunicorn (eventlet)'))
        else:
            dispatcher.append(('Dispatcher', 'gunicorn'))
        gunicorn = sys.modules['gunicorn']
        if hasattr(gunicorn, '__version__'):
            dispatcher.append(('Dispatcher Version', gunicorn.__version__))

    env.extend(dispatcher)

    # Module information.

    plugins = []

    for name, module in list(six.iteritems(sys.modules)):
        if name.startswith('newrelic.hooks.'):
            plugins.append(name)

        elif name.find('.') == -1 and hasattr(module, '__file__'):
            try:
                if 'pkg_resources' in sys.modules:
                    version = pkg_resources.get_distribution(name).version
                    if version:
                        name = '%s (%s)' % (name, version)
            except Exception:
                pass
            plugins.append(name)

    env.append(('Plugin List', plugins))

    return env
Example #24
    def __exit__(self, exc, value, tb):

        # Bail out if the transaction is not enabled.

        if not self.enabled:
            return

        # Ensure that we are actually back at the top of the
        # transaction call stack. If not, assume that it is an
        # instrumentation error and return in the hope that
        # things will recover later.

        if not isinstance(self.current_node, Sentinel):
            _logger.error('Transaction ended but current_node is not Sentinel.'
                    ' Current node is %r. Report this issue to New Relic '
                    'support.\n%s', self.current_node, ''.join(
                    traceback.format_stack()[:-1]))

            return

        # Mark as stopped and drop the transaction from
        # thread/coroutine local storage.
        #
        # Note that we validate the saved transaction ID
        # against that for the current transaction object
        # to protect against situations where a copy was
        # made of the transaction object for some reason.
        # Such a copy when garbage collected could trigger
        # this function and cause a deadlock if it occurs
        # while the original transaction was being recorded.

        self._state = self.STATE_STOPPED

        if self._transaction_id != id(self):
            return

        if not self._settings:
            return

        if not self._dead:
            try:
                self.drop_transaction()
            except:  # Catch all
                _logger.exception('Unable to drop transaction.')
                raise

        # Record error if one was registered.

        if exc is not None and value is not None and tb is not None:
            self.record_exception(exc, value, tb)

        # Record the end time for transaction and then
        # calculate the duration.

        if not self.stopped:
            self.end_time = time.time()

        # Calculate transaction duration

        duration = self.end_time - self.start_time

        # Calculate response time. Calculation depends on whether
        # a web response was sent back.

        if self.last_byte_time == 0.0:
            response_time = duration
        else:
            response_time = self.last_byte_time - self.start_time

        # Calculate overall user time.

        if not self._cpu_user_time_end:
            self._cpu_user_time_end = os.times()[0]

        if duration and self._cpu_user_time_end:
            self._cpu_user_time_value = (self._cpu_user_time_end -
                    self._cpu_user_time_start)

        # Calculate thread utilisation factor. Note that even if
        # we are tracking thread utilization we skip calculation
        # if duration is zero. Under normal circumstances this
        # should not occur but may if the system clock is wound
        # backwards and duration was squashed to zero due to the
        # request appearing to finish before it started. It may
        # also occur if true response time came in under the
        # resolution of the clock being used, but that is highly
        # unlikely as the overhead of the agent itself should
        # always ensure that that is hard to achieve.

        if self._utilization_tracker:
            self._utilization_tracker.exit_transaction()
            if self._thread_utilization_start is not None and duration > 0.0:
                if not self._thread_utilization_end:
                    self._thread_utilization_end = (
                            self._utilization_tracker.utilization_count())
                self._thread_utilization_value = (
                        self._thread_utilization_end -
                        self._thread_utilization_start) / duration

        # Derive generated values from the raw data. The
        # dummy root node has exclusive time of children
        # as negative number. Add our own duration to get
        # our own exclusive time.

        root = self.current_node
        children = root.children

        exclusive = duration + root.exclusive

        # Calculate total time.
        #
        # Because we do not track activity on threads, and we currently
        # don't allocate waiting time in the IOLoop to separate segments
        # (like External or Datastore), for right now, our total_time is
        # equal to the duration of the transaction.

        self.total_time = duration

        # Construct final root node of transaction trace.
        # Freeze path in case not already done. This will
        # construct our path.

        self._freeze_path()

        if self.background_task:
            transaction_type = 'OtherTransaction'
        else:
            transaction_type = 'WebTransaction'

        group = self._group

        if group is None:
            if self.background_task:
                group = 'Python'
            else:
                group = 'Uri'

        if self.response_code != 0:
            self._response_properties['STATUS'] = str(self.response_code)

        # _sent_end should already be set by this point, but in case it
        # isn't, set it now before we record the custom metrics.

        if self._sent_start:
            if not self._sent_end:
                self._sent_end = time.time()

        if not self.background_task:
            self.record_custom_metric('Python/WSGI/Input/Bytes',
                               self._bytes_read)
            self.record_custom_metric('Python/WSGI/Input/Time',
                               self.read_duration)
            self.record_custom_metric('Python/WSGI/Input/Calls/read',
                               self._calls_read)
            self.record_custom_metric('Python/WSGI/Input/Calls/readline',
                               self._calls_readline)
            self.record_custom_metric('Python/WSGI/Input/Calls/readlines',
                               self._calls_readlines)

            self.record_custom_metric('Python/WSGI/Output/Bytes',
                               self._bytes_sent)
            self.record_custom_metric('Python/WSGI/Output/Time',
                               self.sent_duration)
            self.record_custom_metric('Python/WSGI/Output/Calls/yield',
                               self._calls_yield)
            self.record_custom_metric('Python/WSGI/Output/Calls/write',
                               self._calls_write)

        # Record supportability metrics for api calls

        for key, value in six.iteritems(self._transaction_metrics):
            self.record_custom_metric(key, {'count': value})

        if self._frameworks:
            for framework, version in self._frameworks:
                self.record_custom_metric('Python/Framework/%s/%s' %
                    (framework, version), 1)

        node = newrelic.core.transaction_node.TransactionNode(
                settings=self._settings,
                path=self.path,
                type=transaction_type,
                group=group,
                base_name=self._name,
                name_for_metric=self.name_for_metric,
                port=self._port,
                request_uri=self._request_uri,
                response_code=self.response_code,
                queue_start=self.queue_start,
                start_time=self.start_time,
                end_time=self.end_time,
                last_byte_time=self.last_byte_time,
                total_time=self.total_time,
                response_time=response_time,
                duration=duration,
                exclusive=exclusive,
                children=tuple(children),
                errors=tuple(self._errors),
                slow_sql=tuple(self._slow_sql),
                custom_events=self._custom_events,
                apdex_t=self.apdex,
                suppress_apdex=self.suppress_apdex,
                custom_metrics=self._custom_metrics,
                guid=self.guid,
                cpu_time=self._cpu_user_time_value,
                suppress_transaction_trace=self.suppress_transaction_trace,
                client_cross_process_id=self.client_cross_process_id,
                referring_transaction_guid=self.referring_transaction_guid,
                record_tt=self.record_tt,
                synthetics_resource_id=self.synthetics_resource_id,
                synthetics_job_id=self.synthetics_job_id,
                synthetics_monitor_id=self.synthetics_monitor_id,
                synthetics_header=self.synthetics_header,
                is_part_of_cat=self.is_part_of_cat,
                trip_id=self.trip_id,
                path_hash=self.path_hash,
                referring_path_hash=self._referring_path_hash,
                alternate_path_hashes=self.alternate_path_hashes,
                trace_intrinsics=self.trace_intrinsics,
                agent_attributes=self.agent_attributes,
                user_attributes=self.user_attributes,
        )

        # Clear settings as we are all done and don't need it
        # anymore.

        self._settings = None
        self.enabled = False

        # Unless we are ignoring the transaction, record it. We
        # need to lock the profile samples and replace it with
        # an empty list just in case the thread profiler kicks
        # in just as we are trying to record the transaction.
        # If we don't, when processing the samples, addition of
        # new samples can cause an error.

        if not self.ignore_transaction:
            profile_samples = []

            if self._profile_samples:
                with self._transaction_lock:
                    profile_samples = self._profile_samples
                    self._profile_samples = deque()

            self._application.record_transaction(node,
                    (self.background_task, profile_samples))
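
# A hedged sketch of the derived timing values computed in __exit__ above,
# with invented timestamps. Response time falls back to the full duration
# when no last-byte time was recorded, and the thread utilisation factor is
# the utilisation delta divided by the duration. derived_timings() is a
# hypothetical helper, not part of the agent.

def derived_timings(start_time, end_time, last_byte_time,
                    utilization_start, utilization_end):
    duration = end_time - start_time
    if last_byte_time == 0.0:
        response_time = duration
    else:
        response_time = last_byte_time - start_time
    utilization = None
    if duration > 0.0 and utilization_start is not None:
        utilization = (utilization_end - utilization_start) / duration
    return duration, response_time, utilization

# A 0.5s transaction whose last byte went out at 0.4s and whose thread
# utilisation count advanced by 0.6 over its lifetime (rounding aside):
print(derived_timings(100.0, 100.5, 100.4, 2.0, 2.6))
# -> (0.5, 0.4, 1.2)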