def _profiler_loop(self):
        """Infinite loop that wakes up periodically to collect stack traces,
        merge it into call tree if necessary, finally update the state of all
        the active profile sessions.

        """

        settings = global_settings()

        overhead_threshold = settings.agent_limits.xray_profile_overhead

        while True:

            # If X-Ray profilers are not suspended and at least one X-Ray
            # session is active, collect_stack_traces() will also add the
            # stack traces to the transaction object.

            start = time.time()

            include_xrays = ((not self._xray_suspended)
                             and any(six.itervalues(self.application_xrays)))

            for category, stack in collect_stack_traces(
                    self.profile_agent_code, include_xrays):

                # Merge the stack trace into the call tree only for the
                # full profile session. X-Ray profiles are merged at the
                # time the transaction exits.

                if self.full_profile_session:
                    self.full_profile_session.update_call_tree(category, stack)

            self.update_profile_sessions()

            # Stop the profiler thread if there are no profile sessions.

            if ((self.full_profile_session is None)
                    and (not any(six.itervalues(self.application_xrays)))):
                self._profiler_thread_running = False
                return

            # Dynamically adjust the sample period based on the overhead
            # of thread profiling when an X-Ray session is active.

            if not self._xray_suspended:
                overhead = time.time() - start

                with self._lock:
                    aggregation_time = self._aggregation_time
                    self._aggregation_time = 0.0

                overhead += aggregation_time

                delay = overhead / self.sample_period_s / overhead_threshold
                delay = min((max(1.0, delay) * self.sample_period_s), 5.0)

                self._profiler_shutdown.wait(delay)

            else:
                self._profiler_shutdown.wait(self.sample_period_s)
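
A standalone sketch of the adaptive-delay arithmetic at the bottom of the loop, with overhead, sample_period_s and overhead_threshold named as in the code; the helper function itself and the example inputs are hypothetical:

def compute_delay(overhead, sample_period_s, overhead_threshold, cap=5.0):
    # When overhead is within budget the ratio clamps to 1.0 and the
    # normal sample period is used; otherwise the period stretches
    # proportionally, capped at `cap` seconds.
    ratio = overhead / sample_period_s / overhead_threshold
    return min(max(1.0, ratio) * sample_period_s, cap)

# e.g. 0.2 ms of overhead against a 100 ms period and a 5% budget
# stays within budget, so the full 100 ms wait is used.
assert compute_delay(0.0002, 0.1, 0.05) == 0.1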
Example #3
    def _harvest_flexible(self, shutdown=False):
        if not self._harvest_shutdown.is_set():
            event_harvest_config = self.global_settings().event_harvest_config

            self._scheduler.enter(
                event_harvest_config.report_period_ms / 1000.0, 1,
                self._harvest_flexible, ())
            _logger.debug("Commencing harvest[flexible] of application data.")
        elif not shutdown:
            return
        else:
            _logger.debug(
                "Commencing final harvest[flexible] of application data.")

        self._flexible_harvest_count += 1
        self._last_flexible_harvest = time.time()

        for application in list(six.itervalues(self._applications)):
            try:
                application.harvest(shutdown=False, flexible=True)
            except Exception:
                _logger.exception("Failed to harvest data for %s." %
                                  application.name)

        self._flexible_harvest_duration = \
            time.time() - self._last_flexible_harvest

        _logger.debug(
            "Completed harvest[flexible] of application data in %.2f seconds.",
            self._flexible_harvest_duration)
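
These harvest methods all follow the same idiom: a task re-enters itself into a sched.scheduler until a shutdown event is set. A minimal standalone sketch of that idiom, assuming a 5-second period and illustrative names rather than the agent's own:

import sched
import threading
import time

shutdown = threading.Event()

# Using the event's wait() as the delay function means any pending
# delay is cut short as soon as shutdown is signalled.
scheduler = sched.scheduler(time.time, shutdown.wait)

def harvest():
    # Re-enter ourselves before doing the work so the cadence holds;
    # once shutdown is set we stop rescheduling and the queue drains.
    if not shutdown.is_set():
        scheduler.enter(5.0, 1, harvest, ())
    print('harvesting at %.1f' % time.time())

scheduler.enter(5.0, 1, harvest, ())
scheduler.run()  # returns once shutdown.set() lets the queue empty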
    def __call__(self, *args, **kwargs):
        self._nr_wrapped(*args, **kwargs)
        handler = self.__instance.handler
        for name in six.itervalues(self.__instance.callmap):
            if hasattr(handler, name):
                setattr(handler, name,
                        MethodWrapper(getattr(handler, name), priority=6))
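
A sketch of the same wrap-by-name idiom in isolation, with a hypothetical traced() decorator standing in for the agent's MethodWrapper:

import functools

import six

def traced(method):
    # Hypothetical stand-in for MethodWrapper: log the call, then
    # delegate to the original bound method.
    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        print('calling %s' % method.__name__)
        return method(*args, **kwargs)
    return wrapper

class Handler(object):
    def on_open(self):
        return 'opened'

callmap = {'open': 'on_open'}  # event name -> handler method name

handler = Handler()
for name in six.itervalues(callmap):
    if hasattr(handler, name):
        setattr(handler, name, traced(getattr(handler, name)))

assert handler.on_open() == 'opened'  # prints "calling on_open" first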
Example #5
    def _harvest_flexible(self, shutdown=False):
        # TODO: flexible harvest of Application data.
        if not self._harvest_shutdown.isSet():
            # TODO: while shutdown is not signalled, re-enter the harvest
            # task into the scheduler; this recursive scheduling repeats
            # until the shutdown event is set.
            event_harvest_config = self.global_settings().event_harvest_config

            self._scheduler.enter(
                event_harvest_config.report_period_ms / 1000.0, 1,
                self._harvest_flexible, ())
            _logger.debug('Commencing flexible harvest of application data.')
        elif not shutdown:
            return
        else:
            _logger.debug(
                'Commencing final flexible harvest of application data.')

        self._flexible_harvest_count += 1
        self._last_flexible_harvest = time.time()

        for application in list(six.itervalues(self._applications)):
            # TODO: harvest data for each application.
            try:
                application.harvest(shutdown=False, flexible=True)
            except Exception:
                _logger.exception('Failed to harvest data '
                                  'for %s.' % application.name)

        self._flexible_harvest_duration = \
            time.time() - self._last_flexible_harvest

        _logger.debug(
            'Completed flexible harvest of application data in %.2f '
            'seconds.', self._flexible_harvest_duration)
Example #6
    def _harvest_default(self):
        shutdown = self._harvest_shutdown.isSet()

        if shutdown:
            _logger.debug('Commencing default harvest of application data and '
                          'forcing a shutdown at the same time.')
        else:
            self._scheduler.enter(60.0, 2, self._harvest_default, ())
            _logger.debug('Commencing default harvest of application data.')

        self._default_harvest_count += 1
        self._last_default_harvest = time.time()

        for application in list(six.itervalues(self._applications)):
            try:
                application.harvest(shutdown, flexible=False)
            except Exception:
                _logger.exception('Failed to harvest data '
                                  'for %s.' % application.name)

        self._default_harvest_duration = \
                time.time() - self._last_default_harvest

        _logger.debug(
            'Completed default harvest of application data in %.2f '
            'seconds.', self._default_harvest_duration)
Example #7
    def _harvest_flexible(self):
        if not self._harvest_shutdown.isSet():
            event_harvest_config = self.global_settings().event_harvest_config

            self._scheduler.enter(
                event_harvest_config.report_period_ms / 1000.0, 1,
                self._harvest_flexible, ())

        _logger.debug('Commencing flexible harvest of application data.')

        self._flexible_harvest_count += 1
        self._last_flexible_harvest = time.time()

        for application in list(six.itervalues(self._applications)):
            try:
                application.harvest(shutdown=False, flexible=True)
            except Exception:
                _logger.exception('Failed to harvest data '
                                  'for %s.' % application.name)

        self._flexible_harvest_duration = \
                time.time() - self._last_flexible_harvest

        _logger.debug(
            'Completed flexible harvest of application data in %.2f '
            'seconds.', self._flexible_harvest_duration)
Example #8
    def __call__(self, *args, **kwargs):
        self._nr_wrapped(*args, **kwargs)
        handler = self.__instance.handler
        for name in six.itervalues(self.__instance.callmap):
            if hasattr(handler, name):
                setattr(handler, name, MethodWrapper(
                        getattr(handler, name)))
    def _harvest_default(self, shutdown=False):
        if not self._harvest_shutdown.isSet():
            self._scheduler.enter(60.0, 2, self._harvest_default, ())
            _logger.debug('Commencing harvest[default] of application data.')
        elif not shutdown:
            return
        else:
            _logger.debug(
                'Commencing final harvest[default] of application data.')

        self._default_harvest_count += 1
        self._last_default_harvest = time.time()

        for application in list(six.itervalues(self._applications)):
            try:
                application.harvest(shutdown, flexible=False)
            except Exception:
                _logger.exception('Failed to harvest data '
                                  'for %s.' % application.name)

        self._default_harvest_duration = \
                time.time() - self._last_default_harvest

        _logger.debug(
            'Completed harvest[default] of application data in %.2f '
            'seconds.', self._default_harvest_duration)
Example #10
    def register_data_source(self,
                             source,
                             application=None,
                             name=None,
                             settings=None,
                             **properties):
        """Registers the specified data source.

        """

        _logger.debug('Register data source with agent %r.',
                      (source, application, name, settings, properties))

        with self._lock:
            # Remember the data sources in case we need them later.

            self._data_sources.setdefault(application, []).append(
                (source, name, settings, properties))

            if application is None:
                # Bind to any applications that already exist.

                for application in list(six.itervalues(self._applications)):
                    application.register_data_source(source, name, settings,
                                                     **properties)

            else:
                # Bind to specific application if it exists.

                instance = self._applications.get(application)

                if instance is not None:
                    instance.register_data_source(source, name, settings,
                                                  **properties)
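
The bookkeeping above hinges on dict.setdefault() grouping pending sources per application key, with None meaning "bind to every application". The same idiom in isolation, with a hypothetical remember() helper:

data_sources = {}

def remember(source, application=None, name=None, settings=None,
             **properties):
    # One list of pending sources per application key; the None key
    # collects sources that should bind to every application.
    data_sources.setdefault(application, []).append(
        (source, name, settings, properties))

remember('src-a', name='CPU Sampler')
remember('src-b', application='My App', name='Disk Sampler', count=10)

assert data_sources[None] == [('src-a', 'CPU Sampler', None, {})]
assert data_sources['My App'] == [('src-b', 'Disk Sampler', None,
                                   {'count': 10})]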
Example #11
    def register_data_source(self, source, application=None,
                name=None, settings=None, **properties):
        """Registers the specified data source.

        """

        _logger.debug('Register data source with agent %r.',
                (source, application, name, settings, properties))

        with self._lock:
            # Remember the data sources in case we need them later.

            self._data_sources.setdefault(application, []).append(
                    (source, name, settings, properties))

            if application is None:
                # Bind to any applications that already exist.

                for application in list(six.itervalues(self._applications)):
                    application.register_data_source(source, name,
                            settings, **properties)

            else:
                # Bind to specific application if it exists.

                instance = self._applications.get(application)

                if instance is not None:
                    instance.register_data_source(source, name,
                            settings, **properties)
Example #12
    def _run_harvest(self, shutdown=False):
        # This isn't going to maintain order of applications
        # such that oldest is always done first. A new one could
        # come in earlier once added and upset the overall
        # timing. The data collector should cope with this
        # though.

        if shutdown:
            _logger.debug('Commencing harvest of all application data and '
                    'forcing a shutdown at the same time.')
        else:
            _logger.debug('Commencing harvest of all application data.')

        self._harvest_count += 1
        self._last_harvest = time.time()

        for application in list(six.itervalues(self._applications)):
            try:
                application.harvest(shutdown)

            except Exception:
                _logger.exception('Failed to harvest data '
                                  'for %s.' % application.name)

        self._harvest_duration = time.time() - self._last_harvest

        _logger.debug('Completed harvest of all application data in %.2f '
                'seconds.', self._harvest_duration)
Example #13
    def _run_harvest(self, shutdown=False):
        # This isn't going to maintain order of applications
        # such that oldest is always done first. A new one could
        # come in earlier once added and upset the overall
        # timing. The data collector should cope with this
        # though.

        if shutdown:
            _logger.debug('Commencing harvest of all application data and '
                          'forcing a shutdown at the same time.')
        else:
            _logger.debug('Commencing harvest of all application data.')

        self._harvest_count += 1
        self._last_harvest = time.time()

        for application in list(six.itervalues(self._applications)):
            try:
                application.harvest(shutdown)

            except Exception:
                _logger.exception('Failed to harvest data '
                                  'for %s.' % application.name)

        self._harvest_duration = time.time() - self._last_harvest

        _logger.debug(
            'Completed harvest of all application data in %.2f '
            'seconds.', self._harvest_duration)
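
Wrapping six.itervalues() in list() here takes a snapshot, so the harvest loop survives another thread registering or removing an application mid-iteration. A minimal illustration with made-up application names:

import six

applications = {'app-1': 'App(one)', 'app-2': 'App(two)'}

# Iterating the live values view while the dict grows would raise
# RuntimeError ("dictionary changed size during iteration"), so the
# harvest code iterates over a snapshot instead.
for app in list(six.itervalues(applications)):
    applications.setdefault('app-3', 'App(three)')  # safe: we hold a copy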
Example #14
    def slow_sql_data(self):

        if not self.__settings:
            return []

        if not self.__sql_stats_table:
            return []

        maximum = self.__settings.agent_limits.slow_sql_data

        slow_sql_nodes = sorted(six.itervalues(self.__sql_stats_table),
                key=lambda x: x.max_call_time)[-maximum:]

        result = []

        for node in slow_sql_nodes:

            params = {}

            if node.slow_sql_node.stack_trace:
                params['backtrace'] = node.slow_sql_node.stack_trace

            explain_plan = node.slow_sql_node.explain_plan

            if explain_plan:
                params['explain_plan'] = explain_plan

            json_data = json_encode(params)

            params_data = base64.standard_b64encode(
                    zlib.compress(six.b(json_data)))

            if six.PY3:
                params_data = params_data.decode('Latin-1')

            # Limit the length of any SQL that is reported back.

            limit = self.__settings.agent_limits.sql_query_length_maximum

            sql = node.slow_sql_node.formatted[:limit]

            data = [node.slow_sql_node.path,
                    node.slow_sql_node.request_uri,
                    node.slow_sql_node.identifier,
                    sql,
                    node.slow_sql_node.metric,
                    node.call_count,
                    node.total_call_time * 1000,
                    node.min_call_time * 1000,
                    node.max_call_time * 1000,
                    params_data]

            result.append(data)

        return result
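
The params payload built above is JSON, zlib-compressed, base64-encoded, and decoded to text on Python 3. A round-trip sketch using only the standard library, with json.dumps standing in for the agent's json_encode:

import base64
import json
import zlib

params = {'backtrace': ['frame-1', 'frame-2']}

# Encode the way slow_sql_data() does.
payload = base64.standard_b64encode(
    zlib.compress(json.dumps(params).encode('latin-1')))
payload = payload.decode('latin-1')  # bytes -> str on Python 3

# The collector-side inverse recovers the original params.
decoded = json.loads(zlib.decompress(
    base64.standard_b64decode(payload)).decode('latin-1'))
assert decoded == params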
Example #15
    def slow_sql_data(self):

        if not self.__settings:
            return []

        if not self.__sql_stats_table:
            return []

        maximum = self.__settings.agent_limits.slow_sql_data

        slow_sql_nodes = sorted(six.itervalues(self.__sql_stats_table),
                                key=lambda x: x.max_call_time)[-maximum:]

        result = []

        for node in slow_sql_nodes:

            params = {}

            if node.slow_sql_node.stack_trace:
                params['backtrace'] = node.slow_sql_node.stack_trace

            explain_plan = node.slow_sql_node.explain_plan

            if explain_plan:
                params['explain_plan'] = explain_plan

            json_data = json_encode(params)

            params_data = base64.standard_b64encode(
                zlib.compress(six.b(json_data)))

            if six.PY3:
                params_data = params_data.decode('Latin-1')

            # Limit the length of any SQL that is reported back.

            limit = self.__settings.agent_limits.sql_query_length_maximum

            sql = node.slow_sql_node.formatted[:limit]

            data = [
                node.slow_sql_node.path, node.slow_sql_node.request_uri,
                node.slow_sql_node.identifier, sql, node.slow_sql_node.metric,
                node.call_count, node.total_call_time * 1000,
                node.min_call_time * 1000, node.max_call_time * 1000,
                params_data
            ]

            result.append(data)

        return result
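
Every variant selects the slowest statements the same way: sort ascending by max_call_time, then slice off the last `maximum` entries. The same selection in isolation, with made-up timings:

maximum = 3
call_times = {'q1': 0.9, 'q2': 0.1, 'q3': 2.5, 'q4': 1.2}

# Ascending sort, then keep the tail: the `maximum` slowest queries,
# mirroring the sorted(...)[-maximum:] slice above.
slowest = sorted(call_times.items(), key=lambda kv: kv[1])[-maximum:]
assert [k for k, _ in slowest] == ['q1', 'q4', 'q3']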
    def slow_sql_data(self):

        if not self.__settings:
            return []

        if not self.__sql_stats_table:
            return []

        maximum = self.__settings.agent_limits.slow_sql_data

        slow_sql_nodes = sorted(six.itervalues(self.__sql_stats_table),
                key=lambda x: x.max_call_time)[-maximum:]

        result = []

        for node in slow_sql_nodes:

            params = {}

            if node.slow_sql_node.stack_trace:
                params['backtrace'] = node.slow_sql_node.stack_trace

            explain_plan = node.slow_sql_node.explain_plan

            if explain_plan:
                params['explain_plan'] = explain_plan

            json_data = simplejson.dumps(params, ensure_ascii=True,
                    encoding='Latin-1', namedtuple_as_object=False,
                    default=lambda o: list(iter(o)))

            params_data = base64.standard_b64encode(
                    zlib.compress(six.b(json_data)))

            data = [node.slow_sql_node.path,
                    node.slow_sql_node.request_uri,
                    node.slow_sql_node.identifier,
                    node.slow_sql_node.formatted,
                    node.slow_sql_node.metric,
                    node.call_count,
                    node.total_call_time * 1000,
                    node.min_call_time * 1000,
                    node.max_call_time * 1000,
                    params_data]

            result.append(data)

        return result
Example #17
    def slow_sql_data(self, connections):

        _logger.debug('Generating slow SQL data.')

        if not self.__settings:
            return []

        if not self.__sql_stats_table:
            return []

        if not self.__settings.slow_sql.enabled:
            return []

        maximum = self.__settings.agent_limits.slow_sql_data

        slow_sql_nodes = sorted(six.itervalues(self.__sql_stats_table),
                key=lambda x: x.max_call_time)[-maximum:]

        result = []

        for stats_node in slow_sql_nodes:

            params = {}

            slow_sql_node = stats_node.slow_sql_node

            if slow_sql_node.stack_trace:
                params['backtrace'] = slow_sql_node.stack_trace

            explain_plan_data = explain_plan(connections,
                    slow_sql_node.statement,
                    slow_sql_node.connect_params,
                    slow_sql_node.cursor_params,
                    slow_sql_node.sql_parameters,
                    slow_sql_node.execute_params,
                    slow_sql_node.sql_format)

            if explain_plan_data:
                params['explain_plan'] = explain_plan_data

            json_data = json_encode(params)

            params_data = base64.standard_b64encode(
                    zlib.compress(six.b(json_data)))

            if six.PY3:
                params_data = params_data.decode('Latin-1')

            # Limit the length of any SQL that is reported back.

            limit = self.__settings.agent_limits.sql_query_length_maximum

            sql = slow_sql_node.formatted[:limit]

            data = [slow_sql_node.path,
                    slow_sql_node.request_uri,
                    slow_sql_node.identifier,
                    sql,
                    slow_sql_node.metric,
                    stats_node.call_count,
                    stats_node.total_call_time * 1000,
                    stats_node.min_call_time * 1000,
                    stats_node.max_call_time * 1000,
                    params_data]

            result.append(data)

        return result