def setup_integrations(integrations, with_defaults=True):
    # type: (List[Integration], bool) -> Dict[str, Integration]
    """Given a list of integration instances this installs them all.

    When `with_defaults` is set to `True` then all default integrations
    are added unless they were already provided before.

    Returns the mapping of integration identifier -> installed instance.
    """
    # Index the user-provided integrations by identifier; later duplicates
    # (same identifier) override earlier ones, matching dict() semantics.
    integrations = dict(
        (integration.identifier, integration)
        for integration in integrations or ()
    )

    logger.debug("Setting up integrations (with default = %s)", with_defaults)

    if with_defaults:
        for integration_cls in iter_default_integrations():
            # User-supplied instances take precedence over defaults.
            if integration_cls.identifier not in integrations:
                instance = integration_cls()
                integrations[instance.identifier] = instance

    for identifier, integration in iteritems(integrations):  # type: ignore
        # Installation is process-global; the lock guards the shared
        # _installed_integrations set against concurrent setup.
        with _installer_lock:
            if identifier not in _installed_integrations:
                logger.debug(
                    "Setting up previously not enabled integration %s",
                    identifier,
                )
                try:
                    type(integration).setup_once()
                except NotImplementedError:
                    if getattr(integration, "install", None) is not None:
                        # `Logger.warn` is a deprecated alias in the stdlib
                        # logging module; use `warning` instead.
                        logger.warning(
                            "Integration %s: The install method is "
                            "deprecated. Use `setup_once`.",
                            identifier,
                        )
                        integration.install()
                    else:
                        raise
                _installed_integrations.add(identifier)

    for identifier in integrations:
        logger.debug("Enabling integration %s", identifier)

    return integrations
def kill(self):
    # type: () -> None
    """Terminate the transport's background worker without draining it."""
    logger.debug("Killing HTTP transport")
    self._worker.kill()
def sentry_func(functionhandler, event, *args, **kwargs):
    # type: (Any, Any, *Any, **Any) -> Any
    """Wrapper around a GCP Cloud Function handler that reports errors and
    performance data to Sentry, then delegates to the wrapped `func`.

    Falls through to the bare handler when the integration is not enabled
    or the function timeout cannot be determined from the environment.
    """
    hub = Hub.current
    integration = hub.get_integration(GcpIntegration)
    if integration is None:
        return func(functionhandler, event, *args, **kwargs)

    # If an integration is there, a client has to be there.
    client = hub.client  # type: Any

    configured_time = environ.get("FUNCTION_TIMEOUT_SEC")
    if not configured_time:
        logger.debug(
            "The configured timeout could not be fetched from Cloud Functions configuration."
        )
        return func(functionhandler, event, *args, **kwargs)

    configured_time = int(configured_time)

    initial_time = datetime.utcnow()

    with hub.push_scope() as scope:
        with capture_internal_exceptions():
            scope.clear_breadcrumbs()
            scope.add_event_processor(
                _make_request_event_processor(event, configured_time, initial_time)
            )
            scope.set_tag("gcp_region", environ.get("FUNCTION_REGION"))
            if (
                integration.timeout_warning
                and configured_time > TIMEOUT_WARNING_BUFFER
            ):
                waiting_time = configured_time - TIMEOUT_WARNING_BUFFER

                timeout_thread = TimeoutThread(waiting_time, configured_time)

                # Starting the thread to raise timeout warning exception
                timeout_thread.start()

        headers = {}
        if hasattr(event, "headers"):
            headers = event.headers
        transaction = Transaction.continue_from_headers(
            headers, op="serverless.function", name=environ.get("FUNCTION_NAME", "")
        )
        with hub.start_transaction(transaction):
            try:
                return func(functionhandler, event, *args, **kwargs)
            except Exception:
                exc_info = sys.exc_info()
                # Bug fix: the original rebound the incoming `event`
                # argument with the Sentry event payload here; keep the
                # two distinct so the handler argument is never shadowed.
                sentry_event, hint = event_from_exception(
                    exc_info,
                    client_options=client.options,
                    mechanism={"type": "gcp", "handled": False},
                )
                hub.capture_event(sentry_event, hint=hint)
                reraise(*exc_info)
            finally:
                # Flush out the event queue
                hub.flush()
def kill(self):
    """Forcefully stop the worker backing this HTTP transport."""
    logger.debug("Killing HTTP transport")
    self._worker.kill()
def shutdown(self, timeout, callback=None):
    """Shut the transport down, draining for up to `timeout` seconds.

    A non-positive timeout skips draining and kills the worker outright.
    """
    logger.debug("Shutting down HTTP transport orderly")
    if timeout > 0:
        self._worker.shutdown(timeout, callback)
    else:
        self._worker.kill()
def flush(self, timeout, callback=None):
    # type: (float, Optional[Any]) -> None
    """Flush pending events through the worker, waiting at most `timeout`."""
    logger.debug("Flushing HTTP transport")
    # A non-positive timeout means there is nothing useful to wait for.
    if timeout <= 0:
        return
    self._worker.flush(timeout, callback)
def _set_initial_sampling_decision(self, sampling_context):
    # type: (SamplingContext) -> None
    """
    Sets the transaction's sampling decision, according to the following
    precedence rules:

    1. If a sampling decision is passed to `start_transaction`
    (`start_transaction(name: "my transaction", sampled: True)`), that
    decision will be used, regardless of anything else

    2. If `traces_sampler` is defined, its decision will be used. It can
    choose to keep or ignore any parent sampling decision, or use the
    sampling context data to make its own decision or to choose a sample
    rate for the transaction.

    3. If `traces_sampler` is not defined, but there's a parent sampling
    decision, the parent sampling decision will be used.

    4. If `traces_sampler` is not defined and there's no parent sampling
    decision, `traces_sample_rate` will be used.
    """

    hub = self.hub or sentry_sdk.Hub.current
    client = hub.client
    # If there is no client, there are no options either.
    options = (client and client.options) or {}

    # Human-readable label used by all the log messages below.
    transaction_description = "{op}transaction <{name}>".format(
        op=("<" + self.op + "> " if self.op else ""), name=self.name)

    # nothing to do if there's no client or if tracing is disabled
    if not client or not has_tracing_enabled(options):
        self.sampled = False
        return

    # if the user has forced a sampling decision by passing a `sampled`
    # value when starting the transaction, go with that
    if self.sampled is not None:
        return

    # we would have bailed already if neither `traces_sampler` nor
    # `traces_sample_rate` were defined, so one of these should work; prefer
    # the hook if so
    sample_rate = (
        options["traces_sampler"](sampling_context) if callable(
            options.get("traces_sampler")) else (
                # default inheritance behavior
                sampling_context["parent_sampled"]
                if sampling_context["parent_sampled"] is not None else
                options["traces_sample_rate"]))

    # Since this is coming from the user (or from a function provided by the
    # user), who knows what we might get. (The only valid values are
    # booleans or numbers between 0 and 1.)
    if not _is_valid_sample_rate(sample_rate):
        logger.warning(
            "[Tracing] Discarding {transaction_description} because of invalid sample rate."
            .format(transaction_description=transaction_description, ))
        self.sampled = False
        return

    # if the function returned 0 (or false), or if `traces_sample_rate` is
    # 0, it's a sign the transaction should be dropped
    if not sample_rate:
        logger.debug(
            "[Tracing] Discarding {transaction_description} because {reason}"
            .format(
                transaction_description=transaction_description,
                reason=("traces_sampler returned 0 or False" if callable(
                    options.get("traces_sampler")) else
                        "traces_sample_rate is set to 0"),
            ))
        self.sampled = False
        return

    # Now we roll the dice. random.random is inclusive of 0, but not of 1,
    # so strict < is safe here. In case sample_rate is a boolean, cast it
    # to a float (True becomes 1.0 and False becomes 0.0)
    self.sampled = random.random() < float(sample_rate)

    if self.sampled:
        logger.debug("[Tracing] Starting {transaction_description}".format(
            transaction_description=transaction_description, ))
    else:
        logger.debug(
            "[Tracing] Discarding {transaction_description} because it's not included in the random sample (sampling rate = {sample_rate})"
            .format(
                transaction_description=transaction_description,
                sample_rate=float(sample_rate),
            ))
def kill(self):
    """No-op kill: this transport owns no background worker to stop."""
    logger.debug("Kill requested: not needed")
def flush(self, timeout, callback=None):
    # type: (float, Optional[Any]) -> None
    """No-op flush: nothing is ever buffered by this transport."""
    logger.debug("Flush requested: not needed")
def capture_envelope(self, envelope):
    """Accept an envelope; only the call itself is logged, nothing is sent."""
    logger.debug("capture_envelope")
def flush(self, timeout, callback=None):
    """Flush buffered events via the worker within `timeout` seconds."""
    logger.debug("Flushing HTTP transport")
    # Only wait on the worker when there is a positive time budget.
    if timeout <= 0:
        return
    self._worker.flush(timeout, callback)
def on_full_queue(self, callback):
    # type: (Optional[Any]) -> None
    """Invoked when the worker queue is full; the event is simply dropped."""
    logger.debug("background worker queue full, dropping event")
def _capture_internal_exception(self, exc_info):
    """Record an exception that most likely stems from a bug in the SDK
    itself, logging it at debug level rather than surfacing it."""
    logger.debug("Internal error in sentry_sdk", exc_info=exc_info)
def finish(self, hub=None):
    # type: (Optional[sentry_sdk.Hub]) -> Optional[str]
    """Finish the transaction and send it as an event via the hub's client.

    Returns the captured event id, or None when the transaction is
    discarded (already finished, not sampled, or no client available).
    """
    if self.timestamp is not None:
        # This transaction is already finished, ignore.
        return None

    hub = hub or self.hub or sentry_sdk.Hub.current
    client = hub.client

    # This is a de facto proxy for checking if sampled = False
    if self._span_recorder is None:
        logger.debug("Discarding transaction because sampled = False")

        # This is not entirely accurate because discards here are not
        # exclusively based on sample rate but also traces sampler, but
        # we handle this the same here.
        if client and client.transport:
            client.transport.record_lost_event("sample_rate",
                                               data_category="transaction")

        return None

    if client is None:
        # We have no client and therefore nowhere to send this transaction.
        return None

    if not self.name:
        logger.warning(
            "Transaction has no name, falling back to `<unlabeled transaction>`."
        )
        self.name = "<unlabeled transaction>"

    # Stamps self.timestamp and runs span-level finish bookkeeping.
    Span.finish(self, hub)

    if not self.sampled:
        # At this point a `sampled = None` should have already been resolved
        # to a concrete decision.
        if self.sampled is None:
            logger.warning(
                "Discarding transaction without sampling decision.")
        return None

    # Only spans that were actually finished are serialized.
    finished_spans = [
        span.to_json() for span in self._span_recorder.spans
        if span.timestamp is not None
    ]

    # we do this to break the circular reference of transaction -> span
    # recorder -> span -> containing transaction (which is where we started)
    # before either the spans or the transaction goes out of scope and has
    # to be garbage collected
    self._span_recorder = None

    return hub.capture_event({
        "type": "transaction",
        "transaction": self.name,
        "contexts": {
            "trace": self.get_trace_context()
        },
        "tags": self._tags,
        "timestamp": self.timestamp,
        "start_timestamp": self.start_timestamp,
        "spans": finished_spans,
    })
def _shutdown():
    # type: () -> None
    """atexit hook: close the main hub's client, if one is installed."""
    logger.debug("atexit: got shutdown signal")
    client = Hub.main.client
    if client is None:
        return
    logger.debug("atexit: shutting down client")
    client.close(shutdown_callback=self.callback)
def flush(self, timeout, callback=None):
    """Block until the background worker has drained its queue, waiting at
    most `timeout` seconds; no-op when the worker is dead or timeout <= 0."""
    logger.debug("background worker got flush request")
    with self._lock:
        should_wait = self.is_alive and timeout > 0.0
        if should_wait:
            self._wait_flush(timeout, callback)
    logger.debug("background worker flushed")