Code Example #1
def test_payload_metadata_arn(serverless_application, arn_set):

    # If the session object gathers the arn from the settings object before the
    # lambda handler records it there, then this test will fail.

    settings = global_settings()
    original_metadata = settings.aws_lambda_metadata.copy()

    arn = None
    if arn_set:
        arn = 'arrrrrrrrrrRrrrrrrrn'

    settings.aws_lambda_metadata.update({
        'arn': arn,
        'function_version': '$LATEST'
    })

    class Context(object):
        invoked_function_arn = arn

    @validate_serverless_metadata(exact_metadata={'arn': arn})
    @lambda_handler(application=serverless_application)
    def handler(event, context):
        assert settings.aws_lambda_metadata['arn'] == arn
        return {}

    try:
        handler({}, Context)
    finally:
        settings.aws_lambda_metadata = original_metadata
Code Example #2
def collector_url(server=None):
    """Returns the URL for talking to the data collector. When no server
    'host:port' is specified then the main data collector host and port are
    taken from the agent configuration. When a server is explicitly passed,
    it is the secondary data collector to which subsequent requests in an
    agent session should be sent.

    """

    settings = global_settings()

    url = '%s://%s/agent_listener/invoke_raw_method'

    scheme = settings.ssl and 'https' or 'http'

    if not server:
        # When pulling port from agent configuration it should only be
        # set when testing against a local data collector. For staging
        # and production should not be set and would default to port 80
        # or 443 based on scheme name in URL and we don't explicitly
        # add the ports.

        if settings.port:
            server = '%s:%d' % (settings.host, settings.port)
        else:
            server = '%s' % settings.host

    return url % (scheme, server)
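As a standalone sketch of the string interpolation above (the ssl, host, and port values here are hypothetical; in the agent they come from global_settings()):

ssl = True
host = 'collector.newrelic.com'
port = None

url = '%s://%s/agent_listener/invoke_raw_method'
scheme = ssl and 'https' or 'http'
server = '%s:%d' % (host, port) if port else '%s' % host

print(url % (scheme, server))
# https://collector.newrelic.com/agent_listener/invoke_raw_method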
Code Example #3
def test_bad_value_in_env_var():
    settings = global_settings()
    assert settings.utilization.logical_processors == 0

    local_config, = AgentProtocol._connect_payload('', [], [], settings)
    util_conf = local_config['utilization'].get('config')
    assert util_conf == {'hostname': 'env-hostname', 'total_ram_mib': 98765}
Code Example #4
def _nr_log_forwarder(message_instance):
    transaction = current_transaction()
    record = message_instance.record
    message = record.get("_nr_original_message", record["message"])

    if transaction:
        settings = transaction.settings
    else:
        settings = global_settings()

    # Only proceed if application logging is enabled
    if settings and settings.application_logging and settings.application_logging.enabled:
        level = record["level"]
        level_name = "UNKNOWN" if not level else (level.name or "UNKNOWN")

        if settings.application_logging.metrics and settings.application_logging.metrics.enabled:
            if transaction:
                transaction.record_custom_metric("Logging/lines", {"count": 1})
                transaction.record_custom_metric(
                    "Logging/lines/%s" % level_name, {"count": 1})
            else:
                application = application_instance(activate=False)
                if application and application.enabled:
                    application.record_custom_metric("Logging/lines",
                                                     {"count": 1})
                    application.record_custom_metric(
                        "Logging/lines/%s" % level_name, {"count": 1})

        if settings.application_logging.forwarding and settings.application_logging.forwarding.enabled:
            try:
                record_log_event(message, level_name,
                                 int(record["time"].timestamp()))
            except Exception:
                pass
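The `message_instance` argument matches the shape loguru passes to a sink callable, so wiring this forwarder up might look like the following sketch (assuming the loguru package; loguru sinks receive a Message object whose .record dict carries the "message", "level", and "time" keys read above):

from loguru import logger

# Register the forwarder as a loguru sink (hypothetical wiring; the agent
# presumably installs this through its own loguru instrumentation hooks).
logger.add(_nr_log_forwarder, format="{message}")
logger.info("hello world")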
Code Example #5
def test_billing_hostname_with_set_in_ini_not_in_env():
    settings = global_settings()
    assert settings.utilization.billing_hostname == 'file-hostname'

    local_config, = AgentProtocol._connect_payload('', [], [], settings)
    util_conf = local_config['utilization'].get('config')
    assert util_conf == {'hostname': 'file-hostname'}
Code Example #6
    def time_metrics(self, stats, root, parent):
        settings = global_settings()

        if 'database.instrumentation.r1' in settings.feature_flag:
            return self.time_metrics_r1(stats, root, parent)

        return self.time_metrics_r2(stats, root, parent)
Code Example #7
def proxy_server():
    """Returns the dictionary of proxy server settings to be supplied to
    the 'requests' library when making requests.

    """

    settings = global_settings()

    # Require that both proxy host and proxy port are set to work.

    if not settings.proxy_host or not settings.proxy_port:
        return

    # The agent configuration only provides a means of setting one proxy,
    # so we assume that it is set correctly depending on whether an SSL
    # connection is requested or not.

    scheme = settings.ssl and 'https' or 'http'
    proxy = '%s:%d' % (settings.proxy_host, settings.proxy_port)

    # Encode the proxy user name and password into the proxy server value
    # as requests library will strip it out of there and use that.

    if settings.proxy_user is not None and settings.proxy_pass is not None:
        proxy = 'http://%s:%s@%s' % (settings.proxy_user,
                settings.proxy_pass, proxy)

    return { scheme: proxy }
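For illustration, with hypothetical settings (ssl enabled, a proxy at proxy.internal:8080, credentials set), the logic above builds the following mapping:

ssl = True
proxy_host, proxy_port = 'proxy.internal', 8080
proxy_user, proxy_pass = 'user', 'secret'

scheme = ssl and 'https' or 'http'
proxy = '%s:%d' % (proxy_host, proxy_port)
if proxy_user is not None and proxy_pass is not None:
    proxy = 'http://%s:%s@%s' % (proxy_user, proxy_pass, proxy)

print({scheme: proxy})
# {'https': 'http://user:secret@proxy.internal:8080'}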
Code Example #8
    def __init__(self, sql, database=None):
        self._operation = None
        self._target = None
        self._uncommented = None
        self._obfuscated = None
        self._normalized = None
        self._identifier = None

        if isinstance(sql, six.binary_type):
            try:
                sql = sql.decode('utf-8')
            except UnicodeError as e:
                settings = global_settings()
                if settings.debug.log_explain_plan_queries:
                    _logger.debug('An error occurred while decoding sql '
                                  'statement: %s' % e.reason)

                self._operation = ''
                self._target = ''
                self._uncommented = ''
                self._obfuscated = ''
                self._normalized = ''

        self.sql = sql
        self.database = database
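The bytes-handling branch above can be exercised standalone; a minimal sketch with hypothetical inputs showing what the decode succeeds and fails on:

sql = b'SELECT * FROM caf\xc3\xa9'
print(sql.decode('utf-8'))           # SELECT * FROM café

try:
    b'\xff\xfeSELECT 1'.decode('utf-8')
except UnicodeError as e:
    print(e.reason)                  # invalid start byte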
Code Example #9
def api_request_kwargs():
    settings = global_settings()
    api_key = settings.api_key or "NO API KEY WAS SET IN AGENT CONFIGURATION"

    proxy_scheme = settings.proxy_scheme
    proxy_host = settings.proxy_host
    proxy_port = settings.proxy_port
    proxy_user = settings.proxy_user
    proxy_pass = settings.proxy_pass

    if proxy_scheme is None:
        proxy_scheme = "https"

    timeout = settings.agent_limits.data_collector_timeout

    proxies = proxy_details(proxy_scheme, proxy_host, proxy_port, proxy_user,
                            proxy_pass)

    cert_loc = settings.ca_bundle_path
    if cert_loc is None:
        cert_loc = certs.where()

    if settings.debug.disable_certificate_validation:
        cert_loc = False

    headers = {"X-Api-Key": api_key}

    return {
        'proxies': proxies,
        'headers': headers,
        'timeout': timeout,
        'verify': cert_loc,
    }
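The returned dictionary lines up with keyword arguments accepted by the 'requests' library, so a caller might use it like this sketch (the URL is hypothetical):

import requests

kwargs = api_request_kwargs()
response = requests.post('https://api.newrelic.com/v2/example.json',
                         json={}, **kwargs)
response.raise_for_status()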
Code Example #10
    def _profiler_loop(self):
        """Infinite loop that wakes up periodically to collect stack traces,
        merge them into the call tree if necessary, and finally update the
        state of all the active profile sessions.

        """

        settings = global_settings()

        overhead_threshold = settings.agent_limits.xray_profile_overhead

        while True:

            # If x-ray profilers are not suspended and at least one x-ray
            # session is active it'll cause collect_stack_traces() to add
            # the stack_traces to the txn obj.

            start = time.time()

            include_xrays = ((not self._xray_suspended)
                             and any(six.itervalues(self.application_xrays)))

            for category, stack in collect_stack_traces(
                    self.profile_agent_code, include_xrays):

                # Merge the stack_trace to the call tree only for
                # full_profile_session. X-ray profiles will be merged at
                # the time of exiting the transaction.

                if self.full_profile_session:
                    self.full_profile_session.update_call_tree(category, stack)

            self.update_profile_sessions()

            # Stop the profiler thread if there are no profile sessions.

            if ((self.full_profile_session is None)
                    and (not any(six.itervalues(self.application_xrays)))):
                self._profiler_thread_running = False
                return

            # Adjust the sample period dynamically based on the overhead of
            # doing thread profiling if this is an X-Ray session.

            if not self._xray_suspended:
                overhead = time.time() - start

                with self._lock:
                    aggregation_time = self._aggregation_time
                    self._aggregation_time = 0.0

                overhead += aggregation_time

                delay = overhead / self.sample_period_s / overhead_threshold
                delay = min((max(1.0, delay) * self.sample_period_s), 5.0)

                self._profiler_shutdown.wait(delay)

            else:
                self._profiler_shutdown.wait(self.sample_period_s)
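As a worked example of the adaptive delay at the bottom of the loop, assume hypothetical numbers: 0.02s of measured overhead, a 0.1s sample period, and an overhead threshold of 0.05 (5%):

overhead = 0.02
sample_period_s = 0.1
overhead_threshold = 0.05

delay = overhead / sample_period_s / overhead_threshold   # 4.0
delay = min((max(1.0, delay) * sample_period_s), 5.0)     # 0.4
print(delay)  # the profiler backs off to a 0.4 second wait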
Code Example #11
def local_config(args):
    import os
    import sys
    import logging

    if len(args) == 0:
        usage('local-config')
        sys.exit(1)

    from newrelic.config import initialize
    from newrelic.core.config import global_settings

    if len(args) >= 2:
        log_file = args[1]
    else:
        log_file = '/tmp/python-agent-test.log'

    log_level = logging.DEBUG

    try:
        os.unlink(log_file)
    except Exception:
        pass

    config_file = args[0]
    environment = os.environ.get('NEW_RELIC_ENVIRONMENT')

    if config_file == '-':
        config_file = os.environ.get('NEW_RELIC_CONFIG_FILE')

    initialize(config_file, environment, ignore_errors=False,
            log_file=log_file, log_level=log_level)

    for key, value in sorted(global_settings()):
        print('%s = %r' % (key, value))
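Called directly, the command function might be driven like this sketch (the config-file and log-file paths are hypothetical; this mirrors what the newrelic-admin 'local-config' subcommand named in usage() above would invoke):

# Prints every global setting as 'key = value' after initializing the agent.
local_config(['newrelic.ini', '/tmp/agent.log'])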
Code Example #12
File: console.py Project: Mause/table_select_web
    def do_interpreter(self):
        """
        When enabled in the configuration file, will startup up an embedded
        interactive Python interpreter. Invoke 'exit()' or 'quit()' to
        escape the interpreter session."""

        enabled = False

        _settings = global_settings()

        if not _settings.console.allow_interpreter_cmd:
            print >> self.stdout, 'Sorry, the embedded Python ' \
                    'interpreter is disabled.'
            return

        locals = {}

        locals['stdin'] = self.stdin
        locals['stdout'] = self.stdout

        console = EmbeddedConsole(locals)

        console.stdin = self.stdin
        console.stdout = self.stdout

        acquire_console(self)

        try:
            console.interact()
        except SystemExit:
            pass
        finally:
            release_console()
Code Example #13
File: data_collector.py Project: GbalsaC/bitnamiP
def collector_url(server=None):
    """Returns the URL for talking to the data collector. When no server
    'host:port' is specified then the main data collector host and port are
    taken from the agent configuration. When a server is explicitly passed,
    it is the secondary data collector to which subsequent requests in an
    agent session should be sent.

    """

    settings = global_settings()

    url = '%s://%s/agent_listener/invoke_raw_method'

    scheme = settings.ssl and 'https' or 'http'

    if not server or settings.port:
        # When pulling port from agent configuration it should only be
        # set when testing against a local data collector. For staging
        # and production should not be set and would default to port 80
        # or 443 based on scheme name in URL and we don't explicitly
        # add the ports.

        if settings.port:
            server = '%s:%d' % (settings.host, settings.port)
        else:
            server = '%s' % settings.host

    return url % (scheme, server)
Code Example #14
def test_remote_config_fixups_hsm_disabled(local_settings, server_settings):
    assert 'high_security' in local_settings
    assert local_settings['high_security'] is False

    assert u'high_security' not in server_settings

    agent_config = server_settings['agent_config']

    original_capture_params = agent_config['capture_params']
    original_record_sql = agent_config['transaction_tracer.record_sql']
    original_strip_messages = agent_config['strip_exception_messages.enabled']
    original_custom_events = agent_config['custom_insights_events.enabled']

    _settings = global_settings()
    settings = override_generic_settings(_settings, local_settings)(
        AgentProtocol._apply_high_security_mode_fixups)(server_settings,
                                                        _settings)

    agent_config = settings['agent_config']

    assert u'high_security' not in settings

    assert agent_config['capture_params'] == original_capture_params
    assert agent_config['transaction_tracer.record_sql'] == original_record_sql
    assert agent_config['strip_exception_messages.enabled'] == \
        original_strip_messages
    assert agent_config['custom_insights_events.enabled'] == \
        original_custom_events
Code Example #15
def test_remote_config_fixups_hsm_disabled(local_settings, server_settings):
    assert "high_security" in local_settings
    assert local_settings["high_security"] is False

    assert "high_security" not in server_settings

    agent_config = server_settings["agent_config"]

    original_capture_params = agent_config["capture_params"]
    original_record_sql = agent_config["transaction_tracer.record_sql"]
    original_strip_messages = agent_config["strip_exception_messages.enabled"]
    original_custom_events = agent_config["custom_insights_events.enabled"]
    original_log_forwarding = agent_config["application_logging.forwarding.enabled"]

    _settings = global_settings()
    settings = override_generic_settings(_settings, local_settings)(AgentProtocol._apply_high_security_mode_fixups)(
        server_settings, _settings
    )

    agent_config = settings["agent_config"]

    assert "high_security" not in settings

    assert agent_config["capture_params"] == original_capture_params
    assert agent_config["transaction_tracer.record_sql"] == original_record_sql
    assert agent_config["strip_exception_messages.enabled"] == original_strip_messages
    assert agent_config["custom_insights_events.enabled"] == original_custom_events
    assert agent_config["application_logging.forwarding.enabled"] == original_log_forwarding
Code Example #16
def initialize_usage(usage_name, args):
    import os
    import sys
    import logging

    if len(args) == 0:
        usage(usage_name)
        sys.exit(1)

    from newrelic.config import initialize
    from newrelic.core.config import global_settings

    log_level = logging.DEBUG
    log_file = get_log_file_path(args)
    try:
        os.unlink(log_file)
    except Exception:
        pass

    config_file = args[0]
    environment = os.environ.get('NEW_RELIC_ENVIRONMENT')

    if config_file == '-':
        config_file = os.environ.get('NEW_RELIC_CONFIG_FILE')

    initialize(config_file, environment, ignore_errors=False,
               log_file=log_file, log_level=log_level)

    return global_settings()
Code Example #17
def test_remote_config_fixups_hsm_enabled(local_settings, server_settings):
    assert "high_security" in local_settings
    assert local_settings["high_security"] is True

    assert "high_security" in server_settings

    _settings = global_settings()
    settings = override_generic_settings(_settings, local_settings)(AgentProtocol._apply_high_security_mode_fixups)(
        server_settings, _settings
    )

    agent_config = settings["agent_config"]

    assert "high_security" not in settings
    assert "capture_params" not in settings
    assert "transaction_tracer.record_sql" not in settings
    assert "strip_exception_messages.enabled" not in settings
    assert "custom_insights_events.enabled" not in settings
    assert "application_logging.forwarding.enabled" not in settings

    assert "capture_params" not in agent_config
    assert "transaction_tracer.record_sql" not in agent_config
    assert "strip_exception_messages.enabled" not in agent_config
    assert "custom_insights_events.enabled" not in agent_config
    assert "application_logging.forwarding.enabled" not in agent_config
Code Example #18
def test_middleware(nr_enabled, aiohttp_app, middleware, metric):
    @asyncio.coroutine
    def fetch():
        resp = yield from aiohttp_app.client.request('GET', '/coro')
        assert resp.status == 200
        text = yield from resp.text()
        assert "Hello Aiohttp!" in text
        return resp

    def _test():
        aiohttp_app.loop.run_until_complete(fetch())

    if nr_enabled:
        scoped_metrics = [
            ('Function/_target_application:index', 1),
            (metric, 1),
        ]

        rollup_metrics = [
            ('Function/_target_application:index', 1),
            (metric, 1),
            ('Python/Framework/aiohttp/%s' % aiohttp.__version__, 1),
        ]

        _test = validate_transaction_metrics(
            '_target_application:index',
            scoped_metrics=scoped_metrics,
            rollup_metrics=rollup_metrics)(_test)
    else:
        settings = global_settings()

        _test = override_generic_settings(settings, {'enabled': False})(_test)

    _test()
Code Example #19
File: console.py Project: DmitryMaxim18/4Docker
    def do_interpreter(self):
        """
        When enabled in the configuration file, will start up an embedded
        interactive Python interpreter. Invoke 'exit()' or 'quit()' to
        escape the interpreter session."""

        enabled = False

        _settings = global_settings()

        if not _settings.console.allow_interpreter_cmd:
            print('Sorry, the embedded Python ' \
                    'interpreter is disabled.', file=self.stdout)
            return

        locals = {}

        locals['stdin'] = self.stdin
        locals['stdout'] = self.stdout

        console = EmbeddedConsole(locals)

        console.stdin = self.stdin
        console.stdout = self.stdout

        acquire_console(self)

        try:
            console.interact()
        except SystemExit:
            pass
        finally:
            release_console()
Code Example #20
def test_billing_hostname_from_env_vars():
    settings = global_settings()
    assert settings.utilization.billing_hostname == 'env-hostname'

    local_config, = AgentProtocol._connect_payload('', [], [], settings)
    util_conf = local_config['utilization'].get('config')
    assert util_conf == {'hostname': 'env-hostname'}
Code Example #22
    def active_threads(self):
        """Returns an iterator over all current stack frames for all
        active threads in the process. The result for each is a tuple
        consisting of the thread identifier, a categorisation of the
        type of thread, and the stack frame. Note that we actually treat
        any greenlets as threads as well. In that case the thread ID is
        the id() of the greenlet.

        This is in this class for convenience as it needs to access the
        currently active transactions to categorise transaction threads
        as being for web transactions or background tasks.

        """
        # TODO: return information about the active threads

        # First yield up those for real Python threads.
        # TODO: sys._current_frames() returns a dict mapping each thread
        # identifier to the topmost stack frame currently active in that
        # thread at the time the function is called
        for thread_id, frame in sys._current_frames().items():
            trace = self._cache.get(thread_id)
            transaction = trace and trace.transaction
            if transaction is not None:
                if transaction.background_task:  # TODO: background queue task, not a web transaction
                    yield transaction, thread_id, 'BACKGROUND', frame
                else:
                    yield transaction, thread_id, 'REQUEST', frame  # TODO: web transaction
            else:
                # Note that there may not always be a thread object.
                # This is because thread could have been created direct
                # against the thread module rather than via the high
                # level threading module. Categorise anything we can't
                # obtain a name for as being 'OTHER'.

                thread = threading._active.get(thread_id)
                if thread is not None and thread.getName().startswith('NR-'):
                    yield None, thread_id, 'AGENT', frame
                else:
                    yield None, thread_id, 'OTHER', frame

        # Now yield up those corresponding to greenlets. Right now only
        # doing this for greenlets in which any active transactions are
        # running. We don't have a way of knowing what non transaction
        # threads are running.

        debug = global_settings().debug

        # TODO: coroutine profiling is enabled
        if debug.enable_coroutine_profiling:
            for thread_id, trace in self._cache.items():
                transaction = trace.transaction
                if transaction and transaction._greenlet is not None:
                    gr = transaction._greenlet()
                    # TODO: need to fully understand this part; study the
                    # greenlet framework in detail
                    if gr and gr.gr_frame is not None:
                        if transaction.background_task:
                            yield (transaction, thread_id, 'BACKGROUND',
                                   gr.gr_frame)
                        else:
                            yield (transaction, thread_id, 'REQUEST',
                                   gr.gr_frame)
Code Example #23
    def profile_data(self):
        """Returns the profile data once the thread profiling session has
        finished, otherwise returns None. The data structure returned is
        in a form suitable for sending back to the data collector.

        """

        # Profiling session not finished.

        if self._profiler_thread.isAlive() and not self._xray_txns:
            return None

        call_data = {}
        thread_count = 0

        # We prune the number of nodes sent if we are over the specified
        # limit. This is just to avoid having the response be too large
        # and get rejected by the data collector.

        settings = global_settings()

        self._prune_call_trees(settings.agent_limits.thread_profiler_nodes)

        for thread_category, bucket in self._call_buckets.items():
            if bucket:
                call_data[thread_category] = bucket.values()
                thread_count += len(bucket)

        # If no profile data was captured, return None instead of sending
        # an encoded empty data structure.

        if thread_count == 0:
            return None

        # Construct the actual final data for sending. The actual call
        # data is turned into JSON, compressed and then base64 encoded at
        # this point to cut its size.

        json_data = simplejson.dumps(call_data, ensure_ascii=True,
                encoding='Latin-1', default=lambda o: o.jsonable(),
                namedtuple_as_object=False)
        encoded_data = base64.standard_b64encode(zlib.compress(json_data))

        if self._xray_txns:
            xray_obj = self._xray_txns.values()[0]
            xray_id = xray_obj.xray_id
        else:
            xray_id = None

        profile = [[self.profile_id, self._start_time*1000,
                self._stop_time*1000, self._sample_count, encoded_data,
                thread_count, 0, xray_id]]

        # If xray session is running send partial call tree and clear the
        # data-structures.
        if self._xray_txns:
            self._reset_call_buckets()

        return profile
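The compress-then-encode step in the middle can be reproduced standalone; a sketch with the simplejson call swapped for the stdlib json module and a toy call bucket:

import base64, json, zlib

call_data = {'REQUEST': [['root', 1]]}
json_data = json.dumps(call_data, ensure_ascii=True)
encoded_data = base64.standard_b64encode(zlib.compress(json_data.encode('latin-1')))
print(encoded_data)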
Code Example #24
def test_bad_value_in_env_var():
    settings = global_settings()
    assert settings.utilization.logical_processors == 0

    local_config, = ApplicationSession._create_connect_payload(
        '', [], [], newrelic.core.config.global_settings_dump())
    util_conf = local_config['utilization'].get('config')
    assert util_conf == {'hostname': 'env-hostname', 'total_ram_mib': 98765}
Code Example #25
def test_finalize_application_settings(server_settings):
    settings = global_settings()

    finalize_application_settings(server_side_config=server_settings,
                                  settings=settings)

    # hostname set in ini_file and not in env vars
    assert settings.utilization.billing_hostname == 'file-hostname'
Code Example #26
File: database_utils.py Project: GbalsaC/bitnamiP
    def __init__(self, maximum=4):
        self.connections = []
        self.maximum = maximum

        settings = global_settings()

        if settings.debug.log_explain_plan_queries:
            _logger.debug('Creating SQL connections cache %r.', self)
Code Example #27
def test_billing_hostname_precedence():
    # ini-file takes precedence over env vars
    settings = global_settings()
    assert settings.utilization.billing_hostname == 'file-hostname'

    local_config, = AgentProtocol._connect_payload('', [], [], settings)
    util_conf = local_config['utilization'].get('config')
    assert util_conf == {'hostname': 'file-hostname'}
Code Example #29
def test_billing_hostname_with_set_in_ini_not_in_env():
    settings = global_settings()
    assert settings.utilization.billing_hostname == 'file-hostname'

    local_config, = ApplicationSession._create_connect_payload(
        '', [], [], newrelic.core.config.global_settings_dump())
    util_conf = local_config['utilization'].get('config')
    assert util_conf == {'hostname': 'file-hostname'}
Code Example #30
def preconnect():
    url = collector_url()
    license_key = global_settings().license_key
    try:
        result = send_request(None, url, 'preconnect', license_key)
        assert result
    except ForceAgentDisconnect:
        # If the license key is invalid, we should see a force agent disconnect
        pass
Code Example #31
    def _profiler_loop(self):
        """Infinite loop that wakes up periodically to collect stack traces,
        merge them into the call tree if necessary, and finally update the
        state of all the active profile sessions.

        """

        settings = global_settings()

        overhead_threshold = settings.agent_limits.xray_profile_overhead

        while True:

            # If x-ray profilers are not suspended and at least one x-ray
            # session is active it'll cause collect_stack_traces() to add
            # the stack_traces to the txn obj.

            start = time.time()

            include_xrays = (not self._xray_suspended) and any(six.itervalues(self.application_xrays))

            for category, stack in collect_stack_traces(self.profile_agent_code, include_xrays):

                # Merge the stack_trace to the call tree only for
                # full_profile_session. X-ray profiles will be merged at
                # the time of exiting the transaction.

                if self.full_profile_session:
                    self.full_profile_session.update_call_tree(category, stack)

            self.update_profile_sessions()

            # Stop the profiler thread if there are no profile sessions.

            if (self.full_profile_session is None) and (not any(six.itervalues(self.application_xrays))):
                self._profiler_thread_running = False
                return

            # Adjust the sample period dynamically based on the overhead of
            # doing thread profiling if this is an X-Ray session.

            if not self._xray_suspended:
                overhead = time.time() - start

                with self._lock:
                    aggregation_time = self._aggregation_time
                    self._aggregation_time = 0.0

                overhead += aggregation_time

                delay = overhead / self.sample_period_s / overhead_threshold
                delay = min((max(1.0, delay) * self.sample_period_s), 5.0)

                self._profiler_shutdown.wait(delay)

            else:
                self._profiler_shutdown.wait(self.sample_period_s)
Code Example #32
def test_full_uri_connect():
    # An exception will be raised here if there's a problem with the response
    AgentProtocol.connect(
        "Python Agent Test (test_full_uri_payloads)",
        [],
        [],
        global_settings(),
        client_cls=FullUriClient,
    )
Code Example #33
def test_connection_type_metric_assumes_ssl(ssl_setting):
    settings = global_settings()

    @override_generic_settings(settings, {'ssl': ssl_setting})
    def _test():
        metric = connection_type(None)
        assert metric.endswith('/https')

    _test()
Code Example #34
def test_billing_hostname_with_blank_ini_file_no_env():
    settings = global_settings()
    assert settings.utilization.billing_hostname is None

    # if no utilization config settings are set, the 'config' section is not in
    # the payload at all
    local_config, = AgentProtocol._connect_payload('', [], [], settings)
    util_conf = local_config['utilization'].get('config')
    assert util_conf is None
Code Example #35
def test_billing_hostname_precedence():
    # ini-file takes precedence over env vars
    settings = global_settings()
    assert settings.utilization.billing_hostname == 'file-hostname'

    local_config, = ApplicationSession._create_connect_payload(
        '', [], [], newrelic.core.config.global_settings_dump())
    util_conf = local_config['utilization'].get('config')
    assert util_conf == {'hostname': 'file-hostname'}
Code Example #36
def test_error_exception(method, uri, metric_name, error, status, nr_enabled,
                         aiohttp_app):
    @asyncio.coroutine
    def fetch():
        resp = yield from aiohttp_app.client.request(
            method, uri, headers={'content-type': 'text/plain'})
        assert resp.status == status

    required_attrs = list(BASE_REQUIRED_ATTRS)
    forgone_attrs = list(BASE_FORGONE_ATTRS)

    if nr_enabled:
        errors = []
        if error:
            errors.append(error)

        @validate_transaction_errors(errors=errors)
        @validate_transaction_metrics(
            metric_name,
            scoped_metrics=[
                ('Function/%s' % metric_name, 1),
            ],
            rollup_metrics=[
                ('Function/%s' % metric_name, 1),
                ('Python/Framework/aiohttp/%s' % aiohttp.__version__, 1),
            ],
        )
        @validate_transaction_event_attributes(
            required_params={
                'agent': required_attrs,
                'user': [],
                'intrinsic': [],
            },
            forgone_params={
                'agent': forgone_attrs,
                'user': [],
                'intrinsic': [],
            },
            exact_attrs={
                'agent': {
                    'response.status': str(status),
                },
                'user': {},
                'intrinsic': {},
            },
        )
        @override_ignore_status_codes([404])
        def _test():
            aiohttp_app.loop.run_until_complete(fetch())
    else:
        settings = global_settings()

        @override_generic_settings(settings, {'enabled': False})
        def _test():
            aiohttp_app.loop.run_until_complete(fetch())

    _test()
Code Example #37
File: database_utils.py Project: GbalsaC/bitnamiP
    def cleanup(self):
        settings = global_settings()

        if settings.debug.log_explain_plan_queries:
            _logger.debug('Cleaning up SQL connections cache %r.', self)

        for key, connection in self.connections:
            connection.cleanup()

        self.connections = []
Code Example #38
def create_session(license_key, app_name, linked_applications,
        environment, settings):

    _global_settings = global_settings()

    if _global_settings.developer_mode:
        return DeveloperModeSession.create_session(license_key, app_name,
                linked_applications, environment, settings)

    return ApplicationSession.create_session(license_key, app_name,
            linked_applications, environment, settings)
Code Example #39
    def active_threads(self):
        """Returns an iterator over all current stack frames for all
        active threads in the process. The result for each is a tuple
        consisting of the thread identifier, a categorisation of the
        type of thread, and the stack frame. Note that we actually treat
        any greenlets as threads as well. In that case the thread ID is
        the id() of the greenlet.

        This is in this class for convenience as it needs to access the
        currently active transactions to categorise transaction threads
        as being for web transactions or background tasks.

        """

        # First yield up those for real Python threads.

        for thread_id, frame in sys._current_frames().items():
            transaction = self._cache.get(thread_id)
            if transaction is not None:
                if transaction.background_task:
                    yield transaction, thread_id, 'BACKGROUND', frame
                else:
                    yield transaction, thread_id, 'REQUEST', frame
            else:
                # Note that there may not always be a thread object.
                # This is because thread could have been created direct
                # against the thread module rather than via the high
                # level threading module. Categorise anything we can't
                # obtain a name for as being 'OTHER'.

                thread = threading._active.get(thread_id)
                if thread is not None and thread.getName().startswith('NR-'):
                    yield None, thread_id, 'AGENT', frame
                else:
                    yield None, thread_id, 'OTHER', frame

        # Now yield up those corresponding to greenlets. Right now only
        # doing this for greenlets in which any active transactions are
        # running. We don't have a way of knowing what non transaction
        # threads are running.

        debug = global_settings().debug

        if debug.enable_coroutine_profiling:
            for thread_id, transaction in self._cache.items():
                if transaction._greenlet is not None:
                    gr = transaction._greenlet()
                    if gr and gr.gr_frame is not None:
                        if transaction.background_task:
                            yield (transaction, thread_id,
                                    'BACKGROUND', gr.gr_frame)
                        else:
                            yield (transaction, thread_id,
                                    'REQUEST', gr.gr_frame)
Code Example #40
File: data_collector.py Project: GbalsaC/bitnamiP
def _requests_proxy_scheme_workaround(wrapped, instance, args, kwargs):
    def _params(connection, *args, **kwargs):
        return connection

    pool, connection = instance, _params(*args, **kwargs)

    settings = global_settings()

    if pool.proxy and pool.proxy.scheme == 'https':
        if settings.proxy_scheme in (None, 'https'):
            return connection

    return wrapped(*args, **kwargs)
Code Example #41
File: database_utils.py Project: GbalsaC/bitnamiP
    def connection(self, database, args, kwargs):
        key = (database.client, args, kwargs)

        connection = None

        settings = global_settings()

        for i, item in enumerate(self.connections):
            if item[0] == key:
                connection = item[1]

                # Move to back of list so we know which is the
                # most recently used all the time.

                item = self.connections.pop(i)
                self.connections.append(item)

                break

        if connection is None:
            # If we are at the maximum number of connections to
            # keep hold of, pop the one which has been used the
            # longest amount of time.

            if len(self.connections) == self.maximum:
                connection = self.connections.pop(0)[1]

                internal_metric('Supportability/DatabaseUtils/Counts/'
                                'drop_database_connection', 1)

                if settings.debug.log_explain_plan_queries:
                    _logger.debug('Drop database connection for %r as '
                            'reached maximum of %r.',
                            connection.database.client, self.maximum)

                connection.cleanup()

            connection = SQLConnection(database,
                    database.connect(*args, **kwargs))

            self.connections.append((key, connection))

            internal_metric('Supportability/DatabaseUtils/Counts/'
                            'create_database_connection', 1)

            if settings.debug.log_explain_plan_queries:
                _logger.debug('Created database connection for %r.',
                        database.client)

        return connection
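The list manipulation above amounts to an LRU discipline: hits are moved to the back, and when the cache is full the entry at the front (the least recently used) is evicted. A minimal standalone sketch of that policy, with hypothetical names:

cache, maximum = [], 4

def lookup(key, make_value):
    for i, item in enumerate(cache):
        if item[0] == key:
            cache.append(cache.pop(i))   # move hit to the back (most recent)
            return cache[-1][1]
    if len(cache) == maximum:
        cache.pop(0)                     # evict the least recently used
    value = make_value()
    cache.append((key, value))
    return value

first = lookup('db1', lambda: 'conn-1')  # miss: created and cached
again = lookup('db1', lambda: 'conn-2')  # hit: moved to the back, reused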
Code Example #42
File: database_utils.py Project: GbalsaC/bitnamiP
    def cleanup(self):
        settings = global_settings()

        if settings.debug.log_explain_plan_queries:
            _logger.debug('Cleanup database connection for %r.',
                    self.database)

        try:
            self.connection.rollback()
        except (AttributeError, self.database.NotSupportedError):
            pass

        self.connection.close()
Code Example #43
File: database_utils.py Project: GbalsaC/bitnamiP
    def cursor(self, args=(), kwargs={}):
        key = (args, frozenset(kwargs.items()))

        cursor = self.cursors.get(key)

        if cursor is None:
            settings = global_settings()

            if settings.debug.log_explain_plan_queries:
                _logger.debug('Created database cursor for %r.',
                        self.database.client)

            cursor = self.connection.cursor(*args, **kwargs)
            self.cursors[key] = cursor

        return cursor
Code Example #44
File: data_collector.py Project: tasnim07/BlackFly
def connection_type(proxies):
    """Returns a string describing the connection type for use in metrics.

    """

    settings = global_settings()

    ssl = settings.ssl

    request_scheme = ssl and 'https' or 'http'

    if proxies is None:
        return 'direct/%s' % request_scheme

    proxy_scheme = proxies['http'].split('://')[0]

    return '%s-proxy/%s' % (proxy_scheme, request_scheme)
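A standalone illustration of the two metric shapes (hypothetical values; ssl and the proxies mapping normally come from the settings and proxy_server()):

ssl = True
request_scheme = ssl and 'https' or 'http'

print('direct/%s' % request_scheme)                      # direct/https

proxies = {'http': 'http://user:secret@proxy.internal:8080'}
proxy_scheme = proxies['http'].split('://')[0]
print('%s-proxy/%s' % (proxy_scheme, request_scheme))    # http-proxy/https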
Code Example #45
File: data_collector.py Project: GbalsaC/bitnamiP
    def send_request(cls, session, url, method, license_key,
            agent_run_id=None, payload=()):

        assert method in _developer_mode_responses

        # Create fake details for the request being made so that we
        # can use the same audit logging functionality.

        params = {}
        headers = {}

        if not license_key:
            license_key = 'NO LICENSE KEY WAS SET IN AGENT CONFIGURATION'

        params['method'] = method
        params['license_key'] = license_key
        params['protocol_version'] = '14'
        params['marshal_format'] = 'json'

        if agent_run_id:
            params['run_id'] = str(agent_run_id)

        headers['User-Agent'] = USER_AGENT
        headers['Content-Encoding'] = 'identity'

        data = json_encode(payload)

        log_id = _log_request(url, params, headers, data)

        # Now create the fake responses so the agent still runs okay.

        result = _developer_mode_responses[method]

        if method == 'connect':
            settings = global_settings()
            if settings.high_security:
                result = dict(result)
                result['high_security'] = True

        # Even though they are always fake responses, still log them.

        if log_id is not None:
            _log_response(log_id, dict(return_value=result))

        return result
Code Example #46
    def slow_sql_node(self, stats, root):
        settings = global_settings()

        product = self.product
        operation = self.operation or 'other'
        target = self.target

        if 'database.instrumentation.r1' in settings.feature_flag:
            if operation in ('select', 'update', 'insert', 'delete'):
                if target:
                    name = 'Database/%s/%s' % (target, operation)
                else:
                    name = 'Database/%s' % operation
            elif operation in ('show',):
                name = 'Database/%s' % operation
            else:
                name = 'Database/other/sql'
        else:
            if target:
                name = 'Datastore/statement/%s/%s/%s' % (product, target,
                        operation)
            else:
                name = 'Datastore/operation/%s/%s' % (product, operation)

        request_uri = ''
        if root.type == 'WebTransaction':
            request_uri = root.request_uri

        # Note that we do not limit the length of the SQL at this
        # point as we will need the whole SQL query when doing an
        # explain plan. Only limit the length when sending the
        # formatted SQL up to the data collector.

        return SlowSqlNode(duration=self.duration, path=root.path,
                request_uri=request_uri, sql=self.sql,
                sql_format=self.sql_format, metric=name,
                dbapi2_module=self.dbapi2_module,
                stack_trace=self.stack_trace,
                connect_params=self.connect_params,
                cursor_params=self.cursor_params,
                sql_parameters=self.sql_parameters,
                execute_params=self.execute_params)
Code Example #47
File: data_collector.py Project: uceo/uceo-2015
def create_session(license_key, app_name, linked_applications,
        environment, settings):

    _global_settings = global_settings()

    if _global_settings.developer_mode:
        session = DeveloperModeSession.create_session(license_key, app_name,
                linked_applications, environment, settings)
    else:
        session = ApplicationSession.create_session(license_key, app_name,
                linked_applications, environment, settings)

    # When session creation is unsuccessful None is returned. We need to catch
    # that and return None. Session creation can fail if the data collector is
    # down or if the configuration is wrong, such as having capture_params set
    # to true in high security mode.

    if session is None:
        return None

    # We now need to send up the final merged configuration using the
    # agent_settings() method. We must make sure we pass the
    # configuration through global_settings_dump() to strip/mask any
    # sensitive settings. We also convert values which are not strings or
    # numerics to strings before sending to avoid problems with the UI
    # interpreting the values strangely if sent as native types.

    application_settings = global_settings_dump(session.configuration)

    for key, value in list(six.iteritems(application_settings)):
        if not isinstance(key, six.string_types):
            del application_settings[key]

        if (not isinstance(value, six.string_types) and
                not isinstance(value, float) and
                not isinstance(value, six.integer_types)):
            application_settings[key] = repr(value)

    session.agent_settings(application_settings)

    return session
Code Example #48
File: data_collector.py Project: GbalsaC/bitnamiP
def proxy_server():
    """Returns the dictionary of proxy server settings to be supplied to
    the 'requests' library when making requests.

    """

    # For backward compatibility from when using requests prior to 2.0.0,
    # we take the proxy_scheme as not being set to mean that we should
    # derive it from whether SSL is being used. This will still be overridden
    # if the proxy scheme was defined as part of proxy URL in proxy_host.

    settings = global_settings()

    ssl = settings.ssl
    proxy_scheme = settings.proxy_scheme

    if proxy_scheme is None:
        proxy_scheme = ssl and 'https' or 'http'

    return proxy_details(proxy_scheme, settings.proxy_host,
            settings.proxy_port, settings.proxy_user, settings.proxy_pass)
Code Example #49
def _obfuscate_explain_plan_postgresql(columns, rows, mask=None):
    settings = global_settings()

    if mask is None:
        mask = (settings.debug.explain_plan_obfuscation == 'simple')

    # Only deal with where we get back the one expected column. If we
    # get more than one column just ignore the whole explain plan. Need
    # to confirm whether we would always definitely only get one column.
    # The reason we do this is that swapping the value of quoted strings
    # could result in the collapsing of multiple rows and in that case
    # not sure what we would do with values from any other columns.

    if len(columns) != 1:
        return None

    # We need to join together all the separate rows of the explain plan
    # back together again. This is because an embedded newline within
    # any text quoted from the original SQL can result in that line of
    # the explain plan being split across multiple rows.

    text = '\n'.join(item[0] for item in rows)

    # Now need to perform the replacements on the complete text of the
    # explain plan.

    text = _obfuscate_explain_plan_postgresql_substitute(text, mask)

    # The mask option dictates whether we use the slightly more aggressive
    # obfuscation and simply mask out any line preceded by a label.

    if mask:
        text = _explain_plan_postgresql_re_2.sub('\g<label>?', text)

    # Now regenerate the list of rows by splitting again on newline.

    rows = [(_,) for _ in text.split('\n')]

    return columns, rows
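The join/split round trip is the load-bearing part of the function above; a sketch with a stub standing in for the real regex substitutions:

rows = [('Seq Scan on users',), ("  Filter: (name = 'ab",), ("cd')",)]

# Rejoin rows that were split by embedded newlines inside quoted text.
text = '\n'.join(item[0] for item in rows)

# The real code applies the obfuscation substitutions to 'text' here.

# Regenerate the row tuples by splitting on newline again.
rows = [(_,) for _ in text.split('\n')]
print(rows)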
Code Example #50
File: application.py Project: Mause/table_select_web
    def validate_process(self):
        """Logs a warning message if called in a process different to
        where the application was registered. Only logs a message the
        first time this is detected for current active session.

        """

        process_id = os.getpid()

        # Detect where potentially trying to record any data in a
        # process different to where the harvest thread was created.
        # Note that this only works for the case where a session had
        # been activated prior to the process being forked.

        if self._process_id and process_id != self._process_id:
            _logger.warning('Attempt to reactivate application or record '
                    'transactions in a process different to where the '
                    'agent was already registered for application %r. No '
                    'data will be reported for this process with pid of '
                    '%d. Registration of the agent for this application '
                    'occurred in process with pid %d. If no data at all '
                    'is being reported for your application, then please '
                    'report this problem to New Relic support for further '
                    'investigation.', self._app_name, process_id,
                    self._process_id)

            settings = global_settings()

            if settings.debug.log_agent_initialization:
                _logger.info('Process validation check was triggered '
                        'from: %r', ''.join(traceback.format_stack()[:-1]))
            else:
                _logger.debug('Process validation check was triggered '
                        'from: %r', ''.join(traceback.format_stack()[:-1]))

            # We now zero out the process ID so we know we have already
            # generated a warning message.

            self._process_id = 0
Code Example #51
    def trace_node(self, stats, root, connections):
        settings = global_settings()

        product = self.product
        operation = self.operation or 'other'
        target = self.target

        if 'database.instrumentation.r1' in settings.feature_flag:
            if operation in ('select', 'update', 'insert', 'delete'):
                if target:
                    name = 'Database/%s/%s' % (target, operation)
                else:
                    name = 'Database/%s' % operation
            elif operation in ('show',):
                name = 'Database/%s' % operation
            else:
                name = 'Database/other/sql'
        else:
            if target:
                name = 'Datastore/statement/%s/%s/%s' % (product, target,
                        operation)
            else:
                name = 'Datastore/operation/%s/%s' % (product, operation)

        name = root.string_table.cache(name)

        start_time = newrelic.core.trace_node.node_start_time(root, self)
        end_time = newrelic.core.trace_node.node_end_time(root, self)

        children = []

        root.trace_node_count += 1

        params = {}

        sql = self.formatted

        if sql:
            # Limit the length of any SQL that is reported back.

            limit = root.settings.agent_limits.sql_query_length_maximum

            params['sql'] = root.string_table.cache(sql[:limit])

            if self.stack_trace:
                params['backtrace'] = [root.string_table.cache(x) for x in
                        self.stack_trace]

            # Only perform an explain plan if this node ended up being
            # flagged to have an explain plan. This is applied when cap
            # on number of explain plans for whole harvest period is
            # applied across all transaction traces just prior to the
            # transaction traces being generated.

            if getattr(self, 'generate_explain_plan', None):
                explain_plan_data = explain_plan(connections,
                        self.statement, self.connect_params,
                        self.cursor_params, self.sql_parameters,
                        self.execute_params, self.sql_format)

                if explain_plan_data:
                    params['explain_plan'] = explain_plan_data

        return newrelic.core.trace_node.TraceNode(start_time=start_time,
                end_time=end_time, name=name, params=params, children=children,
                label=None)
Code Example #52
def _explain_plan(connections, sql, database, connect_params, cursor_params,
        sql_parameters, execute_params):

    query = '%s %s' % (database.explain_query, sql)

    settings = global_settings()

    if settings.debug.log_explain_plan_queries:
        _logger.debug('Executing explain plan for %r on %r.', query,
                database.client)

    try:
        args, kwargs = connect_params
        connection = connections.connection(database, args, kwargs)

        if cursor_params is not None:
            args, kwargs = cursor_params
            cursor = connection.cursor(args, kwargs)
        else:
            cursor = connection.cursor()

        if execute_params is not None:
            args, kwargs = execute_params
        else:
            args, kwargs = ((), {})

        # If sql_parameters is None then args would need
        # to be an empty sequence. Don't pass it just in
        # case it wasn't for some reason, and only supply
        # kwargs. Right now the only time we believe that
        # passing in further params is needed is with
        # oursql cursor execute() method, which has
        # proprietary arguments outside of the DBAPI2
        # specification.

        if sql_parameters is not None:
            cursor.execute(query, sql_parameters, *args, **kwargs)
        else:
            cursor.execute(query, **kwargs)

        columns = []

        if cursor.description:
            for column in cursor.description:
                columns.append(column[0])

        rows = cursor.fetchall()

        if not columns and not rows:
            return None

        return (columns, rows)

    except Exception:
        if settings.debug.log_explain_plan_queries:
            _logger.exception('Error occurred when executing explain '
                    'plan for %r on %r where cursor_params=%r and '
                    'execute_params=%r.', query, database.client,
                    cursor_params, execute_params)

    return None
Code Example #53
File: data_collector.py Project: GbalsaC/bitnamiP
def create_session(license_key, app_name, linked_applications,
        environment, settings):

    _global_settings = global_settings()

    if _global_settings.developer_mode:
        session = DeveloperModeSession.create_session(license_key, app_name,
                linked_applications, environment, settings)
    else:
        session = ApplicationSession.create_session(license_key, app_name,
                linked_applications, environment, settings)

    # When session creation is unsuccessful None is returned. We need to catch
    # that and return None. Session creation can fail if the data collector is
    # down or if the configuration is wrong, such as having capture_params set
    # to true in high security mode.

    if session is None:
        return None

    # We now need to send up the final merged configuration using the
    # agent_settings() method. We must make sure we pass the
    # configuration through global_settings_dump() to strip/mask any
    # sensitive settings. We also convert values which are not strings or
    # numerics to strings before sending to avoid problems with the UI
    # interpreting the values strangely if sent as native types.

    application_settings = global_settings_dump(session.configuration)

    for key, value in list(six.iteritems(application_settings)):
        if not isinstance(key, six.string_types):
            del application_settings[key]

        if (not isinstance(value, six.string_types) and
                not isinstance(value, float) and
                not isinstance(value, six.integer_types)):
            application_settings[key] = repr(value)

    try:
        session.agent_settings(application_settings)

    except NetworkInterfaceException:
        # The reason for errors of this type have already been logged.
        # No matter what the error we just pass back None. The upper
        # layer will deal with not being successful.

        _logger.warning('Agent registration failed due to error in '
                'uploading agent settings. Registration should retry '
                'automatically.')

        pass

    except Exception:
        # Any other errors are going to be unexpected and likely will
        # indicate an issue with the implementation of the agent.

        _logger.exception('Unexpected exception when attempting to '
                'update agent settings with the data collector. Please '
                'report this problem to New Relic support for further '
                'investigation.')

        _logger.warning('Agent registration failed due to error in '
                'uploading agent settings. Registration should retry '
                'automatically.')

        pass

    else:
        return session
Code Example #54
File: data_collector.py Project: GbalsaC/bitnamiP
def _log_request(url, params, headers, data):
    settings = global_settings()

    if not settings.audit_log_file:
        return

    global _audit_log_fp

    if not _audit_log_fp:
        log_file = settings.audit_log_file
        try:
            _audit_log_fp = open(log_file, 'a')
        except Exception:
            _logger.exception('Unable to open audit log file %r.', log_file)
            settings.audit_log_file = None
            return

    global _audit_log_id

    _audit_log_id += 1

    print('TIME: %r' % time.strftime('%Y-%m-%d %H:%M:%S',
            time.localtime()), file=_audit_log_fp)
    print(file=_audit_log_fp)
    print('ID: %r' % _audit_log_id, file=_audit_log_fp)
    print(file=_audit_log_fp)
    print('PID: %r' % os.getpid(), file=_audit_log_fp)
    print(file=_audit_log_fp)
    print('URL: %r' % url, file=_audit_log_fp)
    print(file=_audit_log_fp)
    print('PARAMS: %r' % params, file=_audit_log_fp)
    print(file=_audit_log_fp)
    print('HEADERS: %r' % headers, file=_audit_log_fp)
    print(file=_audit_log_fp)
    print('DATA:', end=' ', file=_audit_log_fp)

    if headers.get('Content-Encoding') == 'deflate':
        data = zlib.decompress(data)

    object_from_json = json_decode(data)

    pprint(object_from_json, stream=_audit_log_fp)

    if params.get('method') == 'transaction_sample_data':
        for i, sample in enumerate(object_from_json[1]):
            field_as_json = unpack_field(sample[4])
            print(file=_audit_log_fp)
            print('DATA[1][%d][4]:' % i, end=' ', file=_audit_log_fp)
            pprint(field_as_json, stream=_audit_log_fp)

    elif params.get('method') == 'profile_data':
        for i, sample in enumerate(object_from_json[1]):
            field_as_json = unpack_field(sample[4])
            print(file=_audit_log_fp)
            print('DATA[1][%d][4]:' % i, end=' ', file=_audit_log_fp)
            pprint(field_as_json, stream=_audit_log_fp)

    elif params.get('method') == 'sql_trace_data':
        for i, sample in enumerate(object_from_json[0]):
            field_as_json = unpack_field(sample[9])
            print(file=_audit_log_fp)
            print('DATA[0][%d][9]:' % i, end=' ', file=_audit_log_fp)
            pprint(field_as_json, stream=_audit_log_fp)

    print(file=_audit_log_fp)
    print(78*'=', file=_audit_log_fp)
    print(file=_audit_log_fp)

    _audit_log_fp.flush()

    return _audit_log_id
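
A hedged usage sketch for the audit logger above: the log file path and the request payload are illustrative only, and the URL mirrors the collector_url() format from the earlier example.

# Hedged usage sketch: enable the audit log and record one request.
# The file path and payload are illustrative only.
settings = global_settings()
settings.audit_log_file = '/tmp/newrelic-audit.log'   # hypothetical path

log_id = _log_request(
        'https://collector.newrelic.com/agent_listener/invoke_raw_method',
        {'method': 'metric_data', 'protocol_version': '14'},
        {'Content-Encoding': 'identity'},
        '[]')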
コード例 #55
0
ファイル: data_collector.py プロジェクト: GbalsaC/bitnamiP
def send_request(session, url, method, license_key, agent_run_id=None,
            payload=()):
    """Constructs and sends a request to the data collector."""

    params = {}
    headers = {}
    config = {}

    settings = global_settings()

    start = time.time()

    # Validate that the license key was actually set and if not replace
    # it with a string which makes it more obvious it was not set.

    if not license_key:
        license_key = 'NO LICENSE KEY WAS SET IN AGENT CONFIGURATION'

    # The agent formats requests and is able to handle responses for
    # protocol version 14.

    params['method'] = method
    params['license_key'] = license_key
    params['protocol_version'] = '14'
    params['marshal_format'] = 'json'

    if agent_run_id:
        params['run_id'] = str(agent_run_id)

    headers['User-Agent'] = USER_AGENT
    headers['Content-Encoding'] = 'identity'

    # Set up definitions for proxy server in case that has been set.

    proxies = proxy_server()

    # At this time we use JSON content encoding for the data being sent.
    # If an error occurs when encoding the JSON, then it isn't likely
    # going to work later in a subsequent request with the same data,
    # even if aggregated with other data, so we need to log the details
    # and then flag that the data should be thrown away. We don't mind
    # being noisy in the log in this situation as it would indicate
    # a problem with the implementation of the agent.

    try:
        with InternalTrace('Supportability/Collector/JSON/Encode/%s' % method):
            data = json_encode(payload)

    except Exception:
        _logger.exception('Error encoding data for JSON payload for '
                'method %r with payload of %r. Please report this problem '
                'to New Relic support.', method, payload)

        raise DiscardDataForRequest(str(sys.exc_info()[1]))

    # Log details of call and/or payload for debugging. Use the JSON
    # encoded value so we know exactly what is being sent.

    if settings.debug.log_data_collector_payloads:
        _logger.debug('Calling data collector with url=%r, method=%r and '
                'payload=%r.', url, method, data)
    elif settings.debug.log_data_collector_calls:
        _logger.debug('Calling data collector with url=%r and method=%r.',
                url, method)

    # Compress the serialized JSON being sent as content if over 64KiB
    # in size. If less than 2MB in size compress for speed. If over
    # 2MB then compress for smallest size. This parallels what the Ruby
    # agent does.

    if len(data) > 64*1024:
        headers['Content-Encoding'] = 'deflate'
        level = 1 if len(data) < 2000000 else 9

        internal_metric('Supportability/Collector/ZLIB/Bytes/%s' % method,
                len(data))

        with InternalTrace('Supportability/Collector/ZLIB/Compress/'
                '%s' % method):
            data = zlib.compress(six.b(data), level)

    # If there is no requests session object provided for making
    # requests, create one now. We want to close it as soon as we
    # are done with it.

    auto_close_session = False

    if not session:
        session = requests.session()
        auto_close_session = True

    # The 'requests' library can raise a number of exception derived
    # from 'RequestException' before we even manage to get a connection
    # to the data collector.
    #
    # The data collector can then generate a number of different types
    # of HTTP errors for requests. These are:
    #
    # 400 Bad Request - For an incorrect method type or incorrectly
    # constructed parameters. We should not get this and if we do it
    # would likely indicate a problem with the implementation of the
    # agent.
    #
    # 413 Request Entity Too Large - Where the request content was too
    # large. The limits on number of nodes in slow transaction traces
    # should in general prevent this, but not everything has size limits
    # and so rogue data could still blow things out. Same data is not
    # going to work later on in a subsequent request, even if aggregated
    # with other data, so we need to log the details and then flag that
    # data should be thrown away.
    #
    # 415 Unsupported Media Type - This occurs when the JSON which was
    # sent can't be decoded by the data collector. If this is a true
    # problem with the JSON formatting, then sending again, even if
    # aggregated with other data, may not work, so we need to log the
    # details and then flag that data should be thrown away.
    #
    # 503 Service Unavailable - This occurs when data collector, or core
    # application is being restarted and not in state to be able to
    # accept requests. It should be a transient issue so should be able
    # to retain data and try again.

    internal_metric('Supportability/Collector/Output/Bytes/%s' % method,
            len(data))

    # If audit logging is enabled, log the request details.

    log_id = _log_request(url, params, headers, data)

    try:
        # The timeout value in the requests module is only on
        # the initial connection and doesn't apply to how long
        # it takes to get back a response.

        cert_loc = certs.where()

        if settings.debug.disable_certificate_validation:
            cert_loc = False

        timeout = settings.agent_limits.data_collector_timeout

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")

            r = session.post(url, params=params, headers=headers,
                    proxies=proxies, timeout=timeout, data=data,
                    verify=cert_loc)

        # Read the content now so we can force close the socket
        # connection if this is a transient session as quickly
        # as possible.

        content = r.content

    except requests.RequestException:
        if not settings.proxy_host or not settings.proxy_port:
            _logger.warning('Data collector is not contactable. This can be '
                    'because of a network issue or because of the data '
                    'collector being restarted. In the event that contact '
                    'cannot be made after a period of time then please '
                    'report this problem to New Relic support for further '
                    'investigation. The error raised was %r.',
                    sys.exc_info()[1])
        else:
            _logger.warning('Data collector is not contactable via the proxy '
                    'host %r on port %r with proxy user of %r. This can be '
                    'because of a network issue or because of the data '
                    'collector being restarted. In the event that contact '
                    'cannot be made after a period of time then please '
                    'report this problem to New Relic support for further '
                    'investigation. The error raised was %r.',
                    settings.proxy_host, settings.proxy_port,
                    settings.proxy_user, sys.exc_info()[1])

        raise RetryDataForRequest(str(sys.exc_info()[1]))

    finally:
        if auto_close_session:
            session.close()
            session = None

    if r.status_code != 200:
        _logger.debug('Received a non 200 HTTP response from the data '
                'collector where url=%r, method=%r, license_key=%r, '
                'agent_run_id=%r, params=%r, headers=%r, status_code=%r '
                'and content=%r.', url, method, license_key, agent_run_id,
                params, headers, r.status_code, content)

    if r.status_code == 400:
        _logger.error('Data collector is indicating that a bad '
                'request has been submitted for url %r, headers of %r, '
                'params of %r and payload of %r. Please report this '
                'problem to New Relic support.', url, headers, params,
                payload)

        raise DiscardDataForRequest()

    elif r.status_code == 413:
        _logger.warning('Data collector is indicating that a request for '
                'method %r was received where the request content size '
                'was over the maximum allowed size limit. The length of '
                'the request content was %d. If this keeps occurring on a '
                'regular basis, please report this problem to New Relic '
                'support for further investigation.', method, len(data))

        raise DiscardDataForRequest()

    elif r.status_code == 415:
        _logger.warning('Data collector is indicating that it was sent '
                'malformed JSON data for method %r. If this keeps occurring '
                'on a regular basis, please report this problem to New '
                'Relic support for further investigation.', method)

        if settings.debug.log_malformed_json_data:
            if headers['Content-Encoding'] == 'deflate':
                data = zlib.decompress(data)

            _logger.info('JSON data which was rejected by the data '
                    'collector was %r.', data)

        raise DiscardDataForRequest(content)

    elif r.status_code == 503:
        _logger.warning('Data collector is unavailable. This can be a '
                'transient issue because of the data collector or our '
                'core application being restarted. If the issue persists '
                'it can also be indicative of a problem with our servers. '
                'In the event that availability of our servers is not '
                'restored after a period of time then please report this '
                'problem to New Relic support for further investigation.')

        raise ServerIsUnavailable()

    elif r.status_code != 200:
        if not settings.proxy_host or not settings.proxy_port:
            _logger.warning('An unexpected HTTP response was received from '
                    'the data collector of %r for method %r. The payload for '
                    'the request was %r. If this issue persists then please '
                    'report this problem to New Relic support for further '
                    'investigation.', r.status_code, method, payload)
        else:
            _logger.warning('An unexpected HTTP response was received from '
                    'the data collector of %r for method %r while connecting '
                    'via proxy host %r on port %r with proxy user of %r. '
                    'The payload for the request was %r. If this issue '
                    'persists then please report this problem to New Relic '
                    'support for further investigation.', r.status_code,
                    method, settings.proxy_host, settings.proxy_port,
                    settings.proxy_user, payload)

        raise DiscardDataForRequest()

    # Log details of the response payload for debugging. Use the raw
    # content so we know what the original encoded value was.

    duration = time.time() - start

    if settings.debug.log_data_collector_payloads:
        _logger.debug('Valid response from data collector after %.2f '
                'seconds with content=%r.', duration, content)
    elif settings.debug.log_data_collector_calls:
        _logger.debug('Valid response from data collector after %.2f '
                'seconds.', duration)

    # If we got this far we should have a legitimate response from the
    # data collector. The response is JSON so we need to decode it.

    internal_metric('Supportability/Collector/Input/Bytes/%s' % method,
            len(content))

    try:
        with InternalTrace('Supportability/Collector/JSON/Decode/%s' % method):
            if six.PY3:
                content = content.decode('UTF-8')

            result = json_decode(content)

    except Exception:
        _logger.exception('Error decoding data for JSON payload for '
                'method %r with payload of %r. Please report this problem '
                'to New Relic support.', method, content)

        if settings.debug.log_malformed_json_data:
            _logger.info('JSON data received from data collector which '
                    'could not be decoded was %r.', content)

        raise DiscardDataForRequest(str(sys.exc_info()[1]))

    # The decoded JSON can be either for a successful response or an
    # error. A successful response has a 'return_value' element and on
    # error an 'exception' element.

    if log_id is not None:
        _log_response(log_id, result)

    if 'return_value' in result:
        return result['return_value']

    error_type = result['exception']['error_type']
    message = result['exception']['message']

    # Now we need to check for server side exceptions. The following
    # exceptions can occur for abnormal events.

    _logger.debug('Received an exception from the data collector where '
            'url=%r, method=%r, license_key=%r, agent_run_id=%r, params=%r, '
            'headers=%r, error_type=%r and message=%r', url, method,
            license_key, agent_run_id, params, headers, error_type,
            message)

    if error_type == 'NewRelic::Agent::LicenseException':
        _logger.error('Data collector is indicating that an incorrect '
                'license key has been supplied by the agent. The value '
                'which was used by the agent is %r. Please correct any '
                'problem with the license key or report this problem to '
                'New Relic support.', license_key)

        raise DiscardDataForRequest(message)

    elif error_type == 'NewRelic::Agent::PostTooBigException':
        _logger.warning('Core application is indicating that a request for '
                'method %r was received where the request content size '
                'was over the maximum allowed size limit. The length of '
                'the request content was %d. If this keeps occurring on a '
                'regular basis, please report this problem to New Relic '
                'support for further investigation.', method, len(data))

        raise DiscardDataForRequest(message)

    # Server side exceptions are also used to inform the agent to
    # perform certain actions, such as restarting when server side
    # configuration has changed for this application, or when the
    # agent is being disabled remotely for some reason.

    if error_type == 'NewRelic::Agent::ForceRestartException':
        _logger.info('An automatic internal agent restart has been '
                'requested by the data collector for the application '
                'where the agent run was %r. The reason given for the '
                'forced restart is %r.', agent_run_id, message)

        raise ForceAgentRestart(message)

    elif error_type == 'NewRelic::Agent::ForceDisconnectException':
        _logger.critical('Disconnection of the agent has been requested by '
                'the data collector for the application where the '
                'agent run was %r. The reason given for the forced '
                'disconnection is %r. Please contact New Relic support '
                'for further information.', agent_run_id, message)

        raise ForceAgentDisconnect(message)

    # We received an unexpected server side error we don't know what
    # to do with.

    _logger.warning('An unexpected server error was received from the '
            'data collector for method %r with payload of %r. The error '
            'was of type %r with message %r. If this issue persists '
            'then please report this problem to New Relic support for '
            'further investigation.', method, payload, error_type, message)

    raise DiscardDataForRequest(message)
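
To make the exception contract of send_request() concrete, here is a hedged caller-side sketch. harvest_once and the payload shape are hypothetical; only the exception names come from the code above.

# Hedged caller-side sketch; harvest_once and the payload shape are
# hypothetical, the exception names come from send_request() above.
def harvest_once(session, license_key, agent_run_id, payload):
    try:
        return send_request(session, collector_url(), 'metric_data',
                license_key, agent_run_id, payload)
    except RetryDataForRequest:
        # Transient failure (network error, 503): keep the data and
        # merge it into the next harvest cycle.
        return None
    except DiscardDataForRequest:
        # Permanent failure (400, 413, 415, server-side license or
        # size errors): the data is dropped rather than retried.
        return None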
コード例 #56
0
    def profile_data(self):

        # Generic profiling sessions have to wait for completion before
        # reporting data.
        #
        # Xray profile session can send partial profile data on every harvest.

        if ((self.profiler_type == SessionType.GENERIC) and
                (self.state == SessionState.RUNNING)):
            return None

        # We prune the number of nodes sent if we are over the specified
        # limit. This is just to avoid having the response be too large
        # and get rejected by the data collector.

        settings = global_settings()
        self._prune_call_trees(settings.agent_limits.thread_profiler_nodes)

        flat_tree = {}
        thread_count = 0

        for category, bucket in six.iteritems(self.call_buckets):

            # Only flatten buckets that have data in them. No need to send
            # empty buckets.

            if bucket:
                flat_tree[category] = [x.flatten() for x in bucket.values()]
                thread_count += len(bucket)

        # If no profile data was captured for an x-ray session, return
        # None instead of sending an encoded empty data structure. For a
        # generic profiler continue to send an empty tree. This can
        # happen on a system that uses green threads (coroutines), where
        # sending an empty tree marks the end of a profile session. If we
        # don't send anything then the UI times out after a very long
        # time (~15 mins), which is frustrating for the customer.

        if (thread_count == 0) and (self.profiler_type == SessionType.XRAY):
            return None

        # Construct the actual final data for sending. The call data is
        # turned into JSON, compressed and then base64 encoded at this
        # point to cut its size.

        if settings.debug.log_thread_profile_payload:
            _logger.debug('Encoding thread profile data where '
                    'payload=%r.', flat_tree)

        json_call_tree = json_encode(flat_tree)

        level = settings.agent_limits.data_compression_level
        level = level or zlib.Z_DEFAULT_COMPRESSION

        encoded_tree = base64.standard_b64encode(
                zlib.compress(six.b(json_call_tree), level))

        if six.PY3:
            encoded_tree = encoded_tree.decode('Latin-1')

        profile = [[self.profile_id, self.start_time_s * 1000,
            (self.actual_stop_time_s or time.time()) * 1000, self.sample_count,
            encoded_tree, thread_count, 0, self.xray_id]]

        # Reset the data structures to their defaults. For xray profile
        # sessions we report the partial call tree at every harvest
        # cycle, so the data structures must be reset to avoid
        # aggregating call trees across harvest cycles.

        self.reset_profile_data()
        return profile
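
Since the payload built above is JSON that has been zlib-compressed and base64 encoded, reversing the encoding is straightforward. A hedged helper, not part of the agent (decode_profile_tree is hypothetical; json_decode is the same helper used in the examples above):

import base64
import zlib

# Hedged helper reversing the encoding above: base64 decode, zlib
# decompress, then JSON decode. Assumes the JSON text is Latin-1/ASCII,
# as produced by the code above.
def decode_profile_tree(encoded_tree):
    packed = base64.standard_b64decode(encoded_tree)
    json_call_tree = zlib.decompress(packed).decode('Latin-1')
    return json_decode(json_call_tree)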
コード例 #57
0
    def profile_data(self):

        # Generic profiling sessions have to wait for completion before
        # reporting data.
        #
        # Xray profile session can send partial profile data on every harvest. 

        if ((self.profiler_type == SessionType.GENERIC) and 
                (self.state == SessionState.RUNNING)):
            return None

        # We prune the number of nodes sent if we are over the specified
        # limit. This is just to avoid having the response be too large
        # and get rejected by the data collector.

        settings = global_settings()
        self._prune_call_trees(settings.agent_limits.thread_profiler_nodes)

        flat_tree = {}
        thread_count = 0

        for category, bucket in self.call_buckets.items():

            # Only flatten buckets that have data in them. No need to send
            # empty buckets.

            if bucket:
                flat_tree[category] = [x.flatten() for x in bucket.values()]
                thread_count += len(bucket)

        # If no profile data was captured, return None instead of sending
        # an encoded empty data structure.

        if thread_count == 0:
            return None

        # Construct the actual final data for sending. The call data is
        # turned into JSON, compressed and then base64 encoded at this
        # point to cut its size.

        _logger.debug('Returning partial thread profiling data '
                'for %d transactions with name %r and xray ID of '
                '%r over a period of %.2f seconds and %d samples.',
                self.transaction_count, self.key_txn, self.xray_id,
                time.time()-self.start_time_s, self.sample_count)

        if settings.debug.log_thread_profile_payload:
            _logger.debug('Encoding thread profile data where '
                    'payload=%r.', flat_tree)

        json_call_tree = simplejson.dumps(flat_tree, ensure_ascii=True,
                encoding='Latin-1', namedtuple_as_object=False)
        encoded_tree = base64.standard_b64encode(zlib.compress(json_call_tree))

        profile = [[self.profile_id, self.start_time_s*1000,
            (self.actual_stop_time_s or time.time()) * 1000, self.sample_count,
            encoded_tree, thread_count, 0, self.xray_id]]

        # Reset the data structures to their defaults. For xray profile
        # sessions we report the partial call tree at every harvest
        # cycle, so the data structures must be reset to avoid
        # aggregating call trees across harvest cycles.

        self.reset_profile_data()
        return profile
コード例 #58
0
ファイル: application.py プロジェクト: Mause/table_select_web
    def connect_to_data_collector(self):
        """Performs the actual registration of the application with the
        data collector if no current active session.

        """

        if self._agent_shutdown:
            return

        if self._active_session:
            return

        if self._detect_deadlock:
            imp.acquire_lock()
            self._deadlock_event.set()
            imp.release_lock()

        # Register the application with the data collector. Any errors
        # that occur will be dealt with by create_session(). The result
        # will either be a session object or None. In the event of a
        # failure to register we will try again, gradually backing off
        # for longer and longer periods as we retry. The retry interval
        # will be capped at 300 seconds.

        retries = [(15, False, False), (15, False, False),
                   (30, False, False), (60, True, False),
                   (120, False, False), (300, False, True),]

        try:
            while not self._active_session:

                self._active_session = create_session(None, self._app_name,
                        self.linked_applications, environment_settings(),
                        global_settings_dump())

                # We were successful, but first need to make sure we do
                # not have any problems with the agent normalization
                # rules provided by the data collector. These could blow
                # up when being compiled if the patterns are broken or
                # use text which conflicts with extensions in Python's
                # regular expression syntax.

                if self._active_session:
                    configuration = self._active_session.configuration

                    try:
                        settings = global_settings()

                        if settings.debug.log_normalization_rules:
                            _logger.info('The URL normalization rules for '
                                    '%r are %r.', self._app_name,
                                     configuration.url_rules)
                            _logger.info('The metric normalization rules '
                                    'for %r are %r.', self._app_name,
                                     configuration.metric_name_rules)
                            _logger.info('The transaction normalization '
                                    'rules for %r are %r.', self._app_name,
                                     configuration.transaction_name_rules)

                        self._rules_engine['url'] = RulesEngine(
                                configuration.url_rules)
                        self._rules_engine['metric'] = RulesEngine(
                                configuration.metric_name_rules)
                        self._rules_engine['transaction'] = RulesEngine(
                                configuration.transaction_name_rules)

                    except Exception:
                        _logger.exception('The agent normalization rules '
                                'received from the data collector could not '
                                'be compiled properly by the agent due to a '
                                'syntactical error or other problem. Please '
                                'report this to New Relic support for '
                                'investigation.')

                        # For good measure, in this situation we explicitly
                        # shut down the session so that the data collector
                        # will record this, ignoring any error in doing so.
                        # Then we discard the session so we go into a retry
                        # loop, on the presumption that the issue with the
                        # URL rules will be fixed.

                        try:
                            self._active_session.shutdown_session()
                        except Exception:
                            pass

                        self._active_session = None

                # Were we successful? If not, go into the retry loop,
                # logging warnings or errors as per the schedule
                # associated with the retry intervals.

                if not self._active_session:
                    if retries:
                        timeout, warning, error = retries.pop(0)

                        if warning:
                            _logger.warning('Registration of the application '
                                    '%r with the data collector failed after '
                                    'multiple attempts. Check the prior log '
                                    'entries and remedy any issue as '
                                    'necessary, or if the problem persists, '
                                    'report this problem to New Relic '
                                    'support for further investigation.',
                                    self._app_name)

                        elif error:
                            _logger.error('Registration of the application '
                                    '%r with the data collector failed after '
                                    'further additional attempts. Please '
                                    'report this problem to New Relic support '
                                    'for further investigation.',
                                    self._app_name)

                    else:
                        timeout = 300

                    _logger.debug('Retrying registration of the application '
                            '%r with the data collector after a further %d '
                            'seconds.', self._app_name, timeout)

                    time.sleep(timeout)

                    continue

                # Ensure we have cleared out any cached data from a
                # prior agent run for this application.

                configuration = self._active_session.configuration

                with self._stats_lock:
                    self._stats_engine.reset_stats(configuration)

                with self._stats_custom_lock:
                    self._stats_custom_engine.reset_stats(configuration)

                # Record an initial start time for the reporting period and
                # clear record of last transaction processed.

                self._period_start = time.time()

                self._transaction_count = 0
                self._last_transaction = 0.0

                # Clear any prior count of harvest merges due to failures.

                self._merge_count = 0

                # Flag that the session activation has completed to
                # anyone who has been waiting through calling the
                # wait_for_session_activation() method.

                self._connected_event.set()

        except Exception:
            # If an exception occurs after the agent has been flagged for
            # shutdown then we ignore the error. This is because all
            # sorts of weird errors can occur when the main thread starts
            # destroying objects while this background thread registering
            # the application is still running.

            if not self._agent_shutdown:
                _logger.exception('Unexpected exception when registering '
                        'agent with the data collector. If this problem '
                        'persists, please report this problem to New Relic '
                        'support for further investigation.')
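
The retry schedule above can be expressed as a small generator. A hedged sketch (registration_backoff is hypothetical) that yields the fixed steps, then stays at the 300 second cap; per the flags in the retries list, the warning is logged at the 60 second step and the error at the 300 second step:

# Hedged sketch of the back-off schedule used above: fixed steps,
# then capped at 300 seconds.
def registration_backoff():
    for timeout in (15, 15, 30, 60, 120, 300):
        yield timeout
    while True:
        yield 300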
コード例 #59
0
ファイル: data_collector.py プロジェクト: GbalsaC/bitnamiP
    def create_session(cls, license_key, app_name, linked_applications,
            environment, settings):

        """Registers the agent for the specified application with the data
        collector and retrieves the server side configuration. Returns a
        session object if successful through which subsequent calls to the
        data collector are made. If unsuccessful then None is returned.

        """

        start = time.time()

        # If no license key was provided in the call, fall back to using
        # the one from the agent configuration file or environment
        # variables. Flag an error if the result still seems invalid.

        if not license_key:
            license_key = global_settings().license_key

        if not license_key:
            _logger.error('A valid account license key cannot be found. '
                'Has a license key been specified in the agent configuration '
                'file or via the NEW_RELIC_LICENSE_KEY environment variable?')

        try:
            # First we need to ask the primary data collector which of
            # the many data collector instances we should use for this
            # agent run.

            _logger.debug('Connecting to data collector to register agent '
                    'with license_key=%r, app_name=%r, '
                    'linked_applications=%r, environment=%r and settings=%r.',
                    license_key, app_name, linked_applications, environment,
                    settings)

            url = collector_url()
            redirect_host = cls.send_request(None, url, 'get_redirect_host',
                    license_key)

            # Then we perform a connect to the actual data collector host
            # we need to use. All communications after this point should go
            # to the secondary data collector.
            #
            # We use the global requests session object for now, as the
            # harvests for different applications are all done in turn.
            # We will need to change this if we use multiple threads, as
            # we currently force the session object to maintain only a
            # single connection to ensure that keep alive is effective.

            app_names = [app_name] + linked_applications

            local_config = {}

            local_config['host'] = socket.gethostname()
            local_config['pid'] = os.getpid()
            local_config['language'] = 'python'
            local_config['app_name'] = app_names
            local_config['identifier'] = ','.join(app_names)
            local_config['agent_version'] = version
            local_config['environment'] = environment

            connect_settings = {}
            security_settings = {}

            connect_settings['browser_monitoring.loader'] = (
                    settings['browser_monitoring.loader'])
            connect_settings['browser_monitoring.debug'] = (
                    settings['browser_monitoring.debug'])

            security_settings['capture_params'] = settings['capture_params']
            security_settings['transaction_tracer'] = {}
            security_settings['transaction_tracer']['record_sql'] = (
                    settings['transaction_tracer.record_sql'])

            local_config['settings'] = connect_settings
            local_config['security_settings'] = security_settings

            local_config['high_security'] = settings['high_security']
            local_config['labels'] = settings['labels']

            display_name = settings['process_host.display_name']

            if display_name is None:
                local_config['display_name'] = local_config['host']
            else:
                local_config['display_name'] = display_name

            payload = (local_config,)

            url = collector_url(redirect_host)
            server_config = cls.send_request(None, url, 'connect',
                    license_key, None, payload)

            # Apply High Security Mode to server_config, so the local
            # security settings won't get overwritten when we overlay
            # the server settings on top of them.

            server_config = apply_high_security_mode_fixups(settings,
                    server_config)

            # The agent configuration for the application is constructed
            # by taking a snapshot of the locally constructed
            # configuration and overlaying it with that from the server.

            application_config = create_settings_snapshot(server_config)

        except NetworkInterfaceException:
            # The reasons for errors of this type have already been
            # logged. Whatever the error, we just pass back None. The
            # upper layer needs to count how many times this has failed
            # and escalate things with a more severe error.

            pass

        except Exception:
            # Any other errors are going to be unexpected and will
            # likely indicate an issue with the implementation of the
            # agent.

            _logger.exception('Unexpected exception when attempting to '
                    'register the agent with the data collector. Please '
                    'report this problem to New Relic support for further '
                    'investigation.')

        else:
            # Everything fine so we create the session object through which
            # subsequent communication with data collector will be done.

            session = cls(url, license_key, application_config)

            duration = time.time() - start

            # Log successful agent registration and any server side messages.

            _logger.info('Successfully registered New Relic Python agent '
                    'where app_name=%r, pid=%r, redirect_host=%r and '
                    'agent_run_id=%r, in %.2f seconds.', app_name,
                    os.getpid(), redirect_host, session.agent_run_id,
                    duration)

            if getattr(application_config, 'high_security', False):
                _logger.info('High Security Mode is being applied to all '
                        'communications between the agent and the data '
                        'collector for this session.')

            logger_func_mapping = {
                'ERROR': _logger.error,
                'WARN': _logger.warning,
                'INFO': _logger.info,
                'VERBOSE': _logger.debug,
            }

            if 'messages' in server_config:
                for item in server_config['messages']:
                    message = item['message']
                    level = item['level']
                    logger_func = logger_func_mapping.get(level, None)
                    if logger_func:
                        logger_func('%s', message)

            return session
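
Condensing the two-step handshake performed by create_session() into a hedged, minimal sketch: the license key is a placeholder and local_config is trimmed to a few representative fields; ApplicationSession and the payload shape mirror the code above.

import os
import socket

# Hedged sketch of create_session()'s two-step handshake. The license
# key is a placeholder; local_config is trimmed to representative fields.
license_key = 'LICENSE_KEY'

local_config = {
    'host': socket.gethostname(),
    'pid': os.getpid(),
    'language': 'python',
    'app_name': ['My Application'],
}

# Step 1: ask the primary data collector which instance to use.
redirect_host = ApplicationSession.send_request(
        None, collector_url(), 'get_redirect_host', license_key)

# Step 2: register against the secondary collector it nominated.
server_config = ApplicationSession.send_request(
        None, collector_url(redirect_host), 'connect',
        license_key, None, (local_config,))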