Example No. 1
def test_passes_parent_sampling_decision_in_sampling_context(
    sentry_init, parent_sampling_decision
):
    sentry_init(traces_sample_rate=1.0)

    sentry_trace_header = (
        "12312012123120121231201212312012-1121201211212012-{sampled}".format(
            sampled=int(parent_sampling_decision)
        )
    )

    transaction = Transaction.continue_from_headers(
        headers={"sentry-trace": sentry_trace_header}, name="dogpark"
    )
    spy = mock.Mock(wraps=transaction)
    start_transaction(transaction=spy)

    # there's only one call (so index at 0) and kwargs are always last in a call
    # tuple (so index at -1)
    sampling_context = spy._set_initial_sampling_decision.mock_calls[0][-1][
        "sampling_context"
    ]
    assert "parent_sampled" in sampling_context
    # because we passed in a spy, attribute access requires unwrapping
    assert sampling_context["parent_sampled"]._mock_wraps is parent_sampling_decision
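The assertion above leans on how `unittest.mock` records calls: each entry in `mock_calls` behaves like a `(name, args, kwargs)` tuple, so the keyword arguments are always the last element. A minimal standalone sketch (the mock and its call below are made up for illustration):

from unittest import mock

m = mock.Mock()
m.do_thing(1, 2, keyword="value")

# Each mock_calls entry unpacks as (name, args, kwargs),
# which is why indexing with [0][-1] reaches the kwargs dict.
name, args, kwargs = m.mock_calls[0]
assert kwargs == {"keyword": "value"}
assert m.mock_calls[0][-1] == {"keyword": "value"}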
Example No. 2
        def sentry_patched_perform_job(self, job, *args, **kwargs):
            # type: (Any, Job, *Queue, **Any) -> bool
            hub = Hub.current
            integration = hub.get_integration(RqIntegration)

            if integration is None:
                return old_perform_job(self, job, *args, **kwargs)

            client = hub.client
            assert client is not None

            with hub.push_scope() as scope:
                scope.clear_breadcrumbs()
                scope.add_event_processor(_make_event_processor(weakref.ref(job)))

                transaction = Transaction.continue_from_headers(
                    job.meta.get("_sentry_trace_headers") or {},
                    op="rq.task",
                    name="unknown RQ task",
                )

                with capture_internal_exceptions():
                    transaction.name = job.func_name

                with hub.start_transaction(transaction):
                    rv = old_perform_job(self, job, *args, **kwargs)

            if self.is_horse:
                # We're inside of a forked process and RQ is
                # about to call `os._exit`. Make sure that our
                # events get sent out.
                client.flush()

            return rv
Example No. 3
    def _inner(*args, **kwargs):
        # type: (*Any, **Any) -> Any
        hub = Hub.current
        if hub.get_integration(CeleryIntegration) is None:
            return f(*args, **kwargs)

        with hub.push_scope() as scope:
            scope._name = "celery"
            scope.clear_breadcrumbs()
            scope.add_event_processor(
                _make_event_processor(task, *args, **kwargs))

            transaction = Transaction.continue_from_headers(
                args[3].get("headers") or {},
                op="celery.task",
                name="unknown celery task",
            )

            # Could possibly use a better hook than this one
            transaction.set_status("ok")

            with capture_internal_exceptions():
                # Celery task objects are not a thing to be trusted. Even
                # something such as attribute access can fail.
                transaction.name = task.name

            with hub.start_transaction(transaction):
                return f(*args, **kwargs)
Example No. 4
    def wrapper(*args, **kwargs):
        flow_id = data = None

        try:
            if isinstance(args[0], (str, bytes)):
                try:
                    data = json.loads(args[0])

                except json.JSONDecodeError:
                    pass

            elif isinstance(args[0], dict):
                data = args[0]

            if data:
                flow_id = data.get('metadata', {}).get('flow_id')

        except IndexError:
            pass

        transaction = Transaction.continue_from_headers(
            {'sentry-trace': flow_id},
            op='message_handler',
            name=transaction_name or f'{func.__module__}.{func.__name__}',
        )
        with Hub.current.start_transaction(transaction):
            result = func(*args, **kwargs)

        _logger.debug(
            "Logged message handling with trace_id=%s, span_id=%s, id=%s",
            transaction.trace_id, transaction.span_id, last_event_id())
        return result
Example No. 5
def _handle_request_impl(self):
    # type: (RequestHandler) -> Generator[None, None, None]
    hub = Hub.current
    integration = hub.get_integration(TornadoIntegration)

    if integration is None:
        # Bail out early so this generator-based context manager does not
        # fall through to the second yield below when the integration is off.
        yield
        return

    weak_handler = weakref.ref(self)

    with Hub(hub) as hub:
        with hub.configure_scope() as scope:
            scope.clear_breadcrumbs()
            processor = _make_event_processor(weak_handler)  # type: ignore
            scope.add_event_processor(processor)

        transaction = Transaction.continue_from_headers(
            self.request.headers,
            op="http.server",
            # Like with all other integrations, this is our
            # fallback transaction in case there is no route.
            # sentry_urldispatcher_resolve is responsible for
            # setting a transaction name later.
            name="generic Tornado request",
        )

        with hub.start_transaction(
            transaction, custom_sampling_context={"tornado_request": self.request}
        ):
            yield
Example No. 6
def test_continue_from_headers(sentry_init, capture_events, sampled,
                               sample_rate):
    """
    Ensure data is actually passed along via headers, and that they are read
    correctly.
    """
    sentry_init(traces_sample_rate=sample_rate)
    events = capture_events()

    # make a parent transaction (normally this would be in a different service)
    with start_transaction(
            name="hi",
            sampled=True if sample_rate == 0 else None) as parent_transaction:
        with start_span() as old_span:
            old_span.sampled = sampled
            headers = dict(
                Hub.current.iter_trace_propagation_headers(old_span))
            tracestate = parent_transaction._sentry_tracestate

    # child transaction, to prove that we can read 'sentry-trace' and
    # `tracestate` header data correctly
    child_transaction = Transaction.continue_from_headers(headers,
                                                          name="WRONG")
    assert child_transaction is not None
    assert child_transaction.parent_sampled == sampled
    assert child_transaction.trace_id == old_span.trace_id
    assert child_transaction.same_process_as_parent is False
    assert child_transaction.parent_span_id == old_span.span_id
    assert child_transaction.span_id != old_span.span_id
    assert child_transaction._sentry_tracestate == tracestate

    # add child transaction to the scope, to show that the captured message will
    # be tagged with the trace id (since it happens while the transaction is
    # open)
    with start_transaction(child_transaction):
        with configure_scope() as scope:
            # change the transaction name from "WRONG" to make sure the change
            # is reflected in the final data
            scope.transaction = "ho"
        capture_message("hello")

    # in this case the child transaction won't be captured
    if sampled is False or (sample_rate == 0 and sampled is None):
        trace1, message = events

        assert trace1["transaction"] == "hi"
    else:
        trace1, message, trace2 = events

        assert trace1["transaction"] == "hi"
        assert trace2["transaction"] == "ho"

        assert (trace1["contexts"]["trace"]["trace_id"] ==
                trace2["contexts"]["trace"]["trace_id"] ==
                child_transaction.trace_id ==
                message["contexts"]["trace"]["trace_id"])

    assert message["message"] == "hello"
Example No. 7
    def sentry_handler(event, context, *args, **kwargs):
        # type: (Any, Any, *Any, **Any) -> Any
        hub = Hub.current
        integration = hub.get_integration(AwsLambdaIntegration)
        if integration is None:
            return handler(event, context, *args, **kwargs)

        # If an integration is there, a client has to be there.
        client = hub.client  # type: Any
        configured_time = context.get_remaining_time_in_millis()

        with hub.push_scope() as scope:
            with capture_internal_exceptions():
                scope.clear_breadcrumbs()
                scope.add_event_processor(
                    _make_request_event_processor(event, context,
                                                  configured_time))
                scope.set_tag("aws_region",
                              context.invoked_function_arn.split(":")[3])

                timeout_thread = None
                # Starting the Timeout thread only if the configured time is greater than Timeout warning
                # buffer and timeout_warning parameter is set True.
                if (integration.timeout_warning
                        and configured_time > TIMEOUT_WARNING_BUFFER):
                    waiting_time = (configured_time -
                                    TIMEOUT_WARNING_BUFFER) / MILLIS_TO_SECONDS

                    timeout_thread = TimeoutThread(
                        waiting_time,
                        configured_time / MILLIS_TO_SECONDS,
                    )

                    # Starting the thread to raise timeout warning exception
                    timeout_thread.start()

            headers = event.get("headers", {})
            transaction = Transaction.continue_from_headers(
                headers, op="serverless.function", name=context.function_name)
            with hub.start_transaction(transaction):
                try:
                    return handler(event, context, *args, **kwargs)
                except Exception:
                    exc_info = sys.exc_info()
                    event, hint = event_from_exception(
                        exc_info,
                        client_options=client.options,
                        mechanism={
                            "type": "aws_lambda",
                            "handled": False
                        },
                    )
                    hub.capture_event(event, hint=hint)
                    reraise(*exc_info)
                finally:
                    if timeout_thread:
                        timeout_thread.stop()
Example No. 8
def test_continue_from_headers(sentry_init, capture_events, sampled, sample_rate):
    sentry_init(traces_sample_rate=sample_rate)
    events = capture_events()

    # make a parent transaction (normally this would be in a different service)
    with start_transaction(name="hi", sampled=True if sample_rate == 0 else None):
        with start_span() as old_span:
            old_span.sampled = sampled
            headers = dict(Hub.current.iter_trace_propagation_headers(old_span))

    # test that the sampling decision is getting encoded in the header correctly
    header = headers["sentry-trace"]
    if sampled is True:
        assert header.endswith("-1")
    if sampled is False:
        assert header.endswith("-0")
    if sampled is None:
        assert header.endswith("-")

    # child transaction, to prove that we can read 'sentry-trace' header data
    # correctly
    transaction = Transaction.continue_from_headers(headers, name="WRONG")
    assert transaction is not None
    assert transaction.parent_sampled == sampled
    assert transaction.trace_id == old_span.trace_id
    assert transaction.same_process_as_parent is False
    assert transaction.parent_span_id == old_span.span_id
    assert transaction.span_id != old_span.span_id

    # add child transaction to the scope, to show that the captured message will
    # be tagged with the trace id (since it happens while the transaction is
    # open)
    with start_transaction(transaction):
        with configure_scope() as scope:
            scope.transaction = "ho"
        capture_message("hello")

    if sampled is False or (sample_rate == 0 and sampled is None):
        trace1, message = events

        assert trace1["transaction"] == "hi"
    else:
        trace1, message, trace2 = events

        assert trace1["transaction"] == "hi"
        assert trace2["transaction"] == "ho"

        assert (
            trace1["contexts"]["trace"]["trace_id"]
            == trace2["contexts"]["trace"]["trace_id"]
            == transaction.trace_id
            == message["contexts"]["trace"]["trace_id"]
        )

    assert message["message"] == "hello"
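For readers skimming the assertions above: the `sentry-trace` header exchanged in these tests is a single dash-separated string made of the 32-character hex trace id, the 16-character hex parent span id, and a sampling flag that is "1", "0", or empty when the parent deferred the decision. A minimal sketch of that layout (the hex ids below reuse the placeholder values from Example No. 1; they are not real ids):

def build_sentry_trace(trace_id, span_id, sampled):
    # "" (deferred), "0" (drop) or "1" (keep), matching the endswith() checks above.
    flag = "" if sampled is None else str(int(sampled))
    return "{}-{}-{}".format(trace_id, span_id, flag)

trace_id = "12312012123120121231201212312012"   # placeholder 32-char hex trace id
parent_span_id = "1121201211212012"             # placeholder 16-char hex span id

assert build_sentry_trace(trace_id, parent_span_id, True).endswith("-1")
assert build_sentry_trace(trace_id, parent_span_id, False).endswith("-0")
assert build_sentry_trace(trace_id, parent_span_id, None).endswith("-")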
Example No. 9
    async def _run_app(self, scope, callback):
        # type: (Any, Any) -> Any
        is_recursive_asgi_middleware = _asgi_middleware_applied.get(False)

        if is_recursive_asgi_middleware:
            try:
                return await callback()
            except Exception as exc:
                _capture_exception(Hub.current, exc)
                raise exc from None

        _asgi_middleware_applied.set(True)
        try:
            hub = Hub(Hub.current)
            with auto_session_tracking(hub, session_mode="request"):
                with hub:
                    with hub.configure_scope() as sentry_scope:
                        sentry_scope.clear_breadcrumbs()
                        sentry_scope._name = "asgi"
                        processor = partial(self.event_processor,
                                            asgi_scope=scope)
                        sentry_scope.add_event_processor(processor)

                    ty = scope["type"]

                    if ty in ("http", "websocket"):
                        transaction = Transaction.continue_from_headers(
                            self._get_headers(scope),
                            op="{}.server".format(ty),
                        )
                    else:
                        transaction = Transaction(op="asgi.server")

                    transaction.name = _DEFAULT_TRANSACTION_NAME
                    transaction.set_tag("asgi.type", ty)

                    with hub.start_transaction(
                            transaction,
                            custom_sampling_context={"asgi_scope": scope}):
                        # XXX: Would be cool to have correct span status, but we
                        # would have to wrap send(). That is a bit hard to do with
                        # the current abstraction over ASGI 2/3.
                        try:
                            return await callback()
                        except Exception as exc:
                            _capture_exception(hub, exc)
                            raise exc from None
        finally:
            _asgi_middleware_applied.set(False)
Example No. 10
def test_continue_from_headers(sentry_init, capture_events, sampled):
    sentry_init(traces_sample_rate=1.0, traceparent_v2=True)
    events = capture_events()

    with start_transaction(name="hi"):
        with start_span() as old_span:
            old_span.sampled = sampled
            headers = dict(Hub.current.iter_trace_propagation_headers())

    header = headers["sentry-trace"]
    if sampled is True:
        assert header.endswith("-1")
    if sampled is False:
        assert header.endswith("-0")
    if sampled is None:
        assert header.endswith("-")

    transaction = Transaction.continue_from_headers(headers, name="WRONG")
    assert transaction is not None
    assert transaction.sampled == sampled
    assert transaction.trace_id == old_span.trace_id
    assert transaction.same_process_as_parent is False
    assert transaction.parent_span_id == old_span.span_id
    assert transaction.span_id != old_span.span_id

    with start_transaction(transaction):
        with configure_scope() as scope:
            scope.transaction = "ho"
        capture_message("hello")

    if sampled is False:
        trace1, message = events

        assert trace1["transaction"] == "hi"
    else:
        trace1, message, trace2 = events

        assert trace1["transaction"] == "hi"
        assert trace2["transaction"] == "ho"

        assert (
            trace1["contexts"]["trace"]["trace_id"]
            == trace2["contexts"]["trace"]["trace_id"]
            == transaction.trace_id
            == message["contexts"]["trace"]["trace_id"]
        )

    assert message["message"] == "hello"
Example No. 11
    async def wrap(*args, **kwargs):
        with Hub(Hub.current) as hub:
            with hub.push_scope() as scope:
                scope.add_event_processor(_process_ws)
                for key, builder in ext._scopes.items():
                    scope.set_extra(key, await builder())

                txn = Transaction.continue_from_headers(
                    current.request._scope["headers"], op="websocket.server")
                txn.set_tag("asgi.type", "websocket")

                with hub.start_transaction(txn):
                    try:
                        return await dispatch_method(*args, **kwargs)
                    except Exception as exc:
                        _capture_exception(hub, exc)
                        raise
Example No. 12
    def _inner(*args, **kwargs):
        # type: (*Any, **Any) -> Any
        hub = Hub.current
        if hub.get_integration(CeleryIntegration) is None:
            return f(*args, **kwargs)

        with hub.push_scope() as scope:
            scope._name = "celery"
            scope.clear_breadcrumbs()
            scope.add_event_processor(
                _make_event_processor(task, *args, **kwargs))

            transaction = None

            # Celery task objects are not a thing to be trusted. Even
            # something such as attribute access can fail.
            with capture_internal_exceptions():
                transaction = Transaction.continue_from_headers(
                    args[3].get("headers") or {},
                    op="celery.task",
                    name="unknown celery task",
                )

                transaction.name = task.name
                transaction.set_status("ok")

            if transaction is None:
                return f(*args, **kwargs)

            with hub.start_transaction(
                transaction,
                custom_sampling_context={
                    "celery_job": {
                        "task": task.name,
                        # for some reason, args[1] is a list if non-empty but a
                        # tuple if empty
                        "args": list(args[1]),
                        "kwargs": args[2],
                    }
                },
            ):
                return f(*args, **kwargs)
Example No. 13
        async def sentry_app_handle(self, request, *args, **kwargs):
            # type: (Any, Request, *Any, **Any) -> Any
            hub = Hub.current
            if hub.get_integration(AioHttpIntegration) is None:
                return await old_handle(self, request, *args, **kwargs)

            weak_request = weakref.ref(request)

            with Hub(hub) as hub:
                # Scope data will not leak between requests because aiohttp
                # create a task to wrap each request.
                with hub.configure_scope() as scope:
                    scope.clear_breadcrumbs()
                    scope.add_event_processor(
                        _make_request_processor(weak_request))

                transaction = Transaction.continue_from_headers(
                    request.headers,
                    op="http.server",
                    # If this transaction name makes it to the UI, AIOHTTP's
                    # URL resolver did not find a route or died trying.
                    name="generic AIOHTTP request",
                )
                with hub.start_transaction(
                        transaction,
                        custom_sampling_context={"aiohttp_request": request}):
                    try:
                        response = await old_handle(self, request)
                    except HTTPException as e:
                        transaction.set_http_status(e.status_code)
                        raise
                    except (asyncio.CancelledError, ConnectionResetError):
                        transaction.set_status("cancelled")
                        raise
                    except Exception:
                        # This will probably map to a 500 but seems like we
                        # have no way to tell. Do not set span status.
                        reraise(*_capture_exception(hub))

                    transaction.set_http_status(response.status)
                    return response
Example No. 14
    async def middleware(self, handler: ASGIApp, request: Request,
                         receive: Receive, send: Send):  # type: ignore
        """Capture exceptions to Sentry."""
        hub = Hub(Hub.current)
        with hub.configure_scope() as scope:
            scope.clear_breadcrumbs()
            scope._name = "muffin"
            self.current_scope.set(scope)
            scope.add_event_processor(
                partial(self.processData, request=request))

            with hub.start_transaction(
                    Transaction.continue_from_headers(
                        request.headers, op=f"{request.scope['type']}.muffin"),
                    custom_sampling_context={'asgi_scope': scope}):
                try:
                    return await handler(request, receive, send)

                except Exception as exc:
                    if type(exc) not in self.cfg.ignore_errors:
                        hub.capture_exception(exc)
                    raise exc from None
Example No. 15
    def sentry_func(functionhandler, event, *args, **kwargs):
        # type: (Any, Any, *Any, **Any) -> Any

        hub = Hub.current
        integration = hub.get_integration(GcpIntegration)
        if integration is None:
            return func(functionhandler, event, *args, **kwargs)

        # If an integration is there, a client has to be there.
        client = hub.client  # type: Any

        configured_time = environ.get("FUNCTION_TIMEOUT_SEC")
        if not configured_time:
            logger.debug(
                "The configured timeout could not be fetched from Cloud Functions configuration."
            )
            return func(functionhandler, event, *args, **kwargs)

        configured_time = int(configured_time)

        initial_time = datetime.utcnow()

        with hub.push_scope() as scope:
            with capture_internal_exceptions():
                scope.clear_breadcrumbs()
                scope.add_event_processor(
                    _make_request_event_processor(event, configured_time, initial_time)
                )
                scope.set_tag("gcp_region", environ.get("FUNCTION_REGION"))
                if (
                    integration.timeout_warning
                    and configured_time > TIMEOUT_WARNING_BUFFER
                ):
                    waiting_time = configured_time - TIMEOUT_WARNING_BUFFER

                    timeout_thread = TimeoutThread(waiting_time, configured_time)

                    # Starting the thread to raise timeout warning exception
                    timeout_thread.start()

            headers = {}
            if hasattr(event, "headers"):
                headers = event.headers
            transaction = Transaction.continue_from_headers(
                headers, op="serverless.function", name=environ.get("FUNCTION_NAME", "")
            )
            with hub.start_transaction(transaction):
                try:
                    return func(functionhandler, event, *args, **kwargs)
                except Exception:
                    exc_info = sys.exc_info()
                    event, hint = event_from_exception(
                        exc_info,
                        client_options=client.options,
                        mechanism={"type": "gcp", "handled": False},
                    )
                    hub.capture_event(event, hint=hint)
                    reraise(*exc_info)
                finally:
                    # Flush out the event queue
                    hub.flush()
Example No. 16
    def sentry_handler(aws_event, aws_context, *args, **kwargs):
        # type: (Any, Any, *Any, **Any) -> Any

        # Per https://docs.aws.amazon.com/lambda/latest/dg/python-handler.html,
        # `event` here is *likely* a dictionary, but also might be a number of
        # other types (str, int, float, None).
        #
        # In some cases, it is a list (if the user is batch-invoking their
        # function, for example), in which case we'll use the first entry as a
        # representative from which to try pulling request data. (Presumably it
        # will be the same for all events in the list, since they're all hitting
        # the lambda in the same request.)

        if isinstance(aws_event, list):
            request_data = aws_event[0]
            batch_size = len(aws_event)
        else:
            request_data = aws_event
            batch_size = 1

        if not isinstance(request_data, dict):
            # If we're not dealing with a dictionary, we won't be able to get
            # headers, path, http method, etc in any case, so it's fine that
            # this is empty
            request_data = {}

        hub = Hub.current
        integration = hub.get_integration(AwsLambdaIntegration)
        if integration is None:
            return handler(aws_event, aws_context, *args, **kwargs)

        # If an integration is there, a client has to be there.
        client = hub.client  # type: Any
        configured_time = aws_context.get_remaining_time_in_millis()

        with hub.push_scope() as scope:
            with capture_internal_exceptions():
                scope.clear_breadcrumbs()
                scope.add_event_processor(
                    _make_request_event_processor(request_data, aws_context,
                                                  configured_time))
                scope.set_tag("aws_region",
                              aws_context.invoked_function_arn.split(":")[3])
                if batch_size > 1:
                    scope.set_tag("batch_request", True)
                    scope.set_tag("batch_size", batch_size)

                timeout_thread = None
                # Starting the Timeout thread only if the configured time is greater than Timeout warning
                # buffer and timeout_warning parameter is set True.
                if (integration.timeout_warning
                        and configured_time > TIMEOUT_WARNING_BUFFER):
                    waiting_time = (configured_time -
                                    TIMEOUT_WARNING_BUFFER) / MILLIS_TO_SECONDS

                    timeout_thread = TimeoutThread(
                        waiting_time,
                        configured_time / MILLIS_TO_SECONDS,
                    )

                    # Starting the thread to raise timeout warning exception
                    timeout_thread.start()

            headers = request_data.get("headers", {})
            transaction = Transaction.continue_from_headers(
                headers,
                op="serverless.function",
                name=aws_context.function_name)
            with hub.start_transaction(
                    transaction,
                    custom_sampling_context={
                        "aws_event": aws_event,
                        "aws_context": aws_context,
                    },
            ):
                try:
                    return handler(aws_event, aws_context, *args, **kwargs)
                except Exception:
                    exc_info = sys.exc_info()
                    sentry_event, hint = event_from_exception(
                        exc_info,
                        client_options=client.options,
                        mechanism={
                            "type": "aws_lambda",
                            "handled": False
                        },
                    )
                    hub.capture_event(sentry_event, hint=hint)
                    reraise(*exc_info)
                finally:
                    if timeout_thread:
                        timeout_thread.stop()
Example No. 17
def _start_transaction(**kwargs):
    transaction = Transaction.continue_from_headers(
        dict(Hub.current.iter_trace_propagation_headers()), **kwargs
    )
    transaction.same_process_as_parent = True
    return sentry_sdk.start_transaction(transaction)
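Taken together, the examples follow one pattern: the upstream service serializes its trace state into propagation headers, and the downstream service resumes that trace with `Transaction.continue_from_headers` before starting its own transaction. A hedged, minimal sketch of that round trip, assuming only the sentry_sdk calls already shown above; the helper names and the op/name strings are illustrative, not part of the SDK:

import sentry_sdk
from sentry_sdk import Hub
from sentry_sdk.tracing import Transaction

def outgoing_trace_headers():
    # Upstream service: capture the current trace as propagation headers,
    # e.g. to attach to an outgoing HTTP request or a job's metadata.
    return dict(Hub.current.iter_trace_propagation_headers())

def handle_with_continued_trace(headers, do_work):
    # Downstream service: resume the trace carried in the headers and run the
    # work inside a transaction, so both services share one trace id.
    transaction = Transaction.continue_from_headers(
        headers, op="function", name="fallback transaction name"
    )
    with sentry_sdk.start_transaction(transaction):
        return do_work()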