Code example #1
def process_profile(profile: MutableMapping[str, Any], **kwargs: Any) -> None:
    if profile["platform"] == "cocoa":
        if not _validate_ios_profile(profile=profile):
            return None
        profile = _symbolicate(profile=profile)

    profile = _normalize(profile=profile)

    global processed_profiles_publisher

    if processed_profiles_publisher is None:
        config = settings.KAFKA_TOPICS[settings.KAFKA_PROFILES]
        processed_profiles_publisher = KafkaPublisher(
            kafka_config.get_kafka_producer_cluster_options(config["cluster"])
        )

    processed_profiles_publisher.publish(
        "processed-profiles",
        json.dumps(profile),
    )
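
A minimal standalone sketch of the lazy, module-level publisher pattern used above, written directly against confluent-kafka instead of Sentry's KafkaPublisher wrapper; the broker address, topic name and the use of confluent-kafka here are assumptions for illustration only.

import json
from typing import Any, Dict, Optional

from confluent_kafka import Producer

_producer: Optional[Producer] = None


def publish_processed_profile(profile: Dict[str, Any]) -> None:
    global _producer
    if _producer is None:
        # Created once per process, on first publish, so importing the module
        # does not open a Kafka connection.
        _producer = Producer({"bootstrap.servers": "localhost:9092"})
    _producer.produce("processed-profiles", json.dumps(profile).encode("utf-8"))
    _producer.poll(0)  # serve delivery callbacks without blocking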
Code example #2
File: api.py  Project: zhangdinet/sentry
import base64
import logging

from django.conf import settings

from sentry.relay.config import get_project_config
from sentry.utils.pubsub import KafkaPublisher, QueuedPublisherService  # import path assumed
from sentry.web.client_config import get_client_config

logger = logging.getLogger("sentry")
minidumps_logger = logging.getLogger("sentry.minidumps")

# Transparent 1x1 gif
# See http://probablyprogramming.com/2009/03/15/the-tiniest-gif-ever
PIXEL = base64.b64decode("R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs=")

PROTOCOL_VERSIONS = frozenset(("2.0", "3", "4", "5", "6", "7"))

kafka_publisher = (
    QueuedPublisherService(
        KafkaPublisher(
            getattr(settings, "KAFKA_RAW_EVENTS_PUBLISHER_CONNECTION", None), asynchronous=False
        )
    )
    if getattr(settings, "KAFKA_RAW_EVENTS_PUBLISHER_ENABLED", False)
    else None
)


def allow_cors_options(func):
    """
    Decorator that adds automatic handling of OPTIONS requests for CORS.

    If the request is an OPTIONS request (i.e. a CORS pre-flight), construct an
    OK (200) response in which we explicitly allow the caller and add the custom
    headers that we support. For other requests, just add the appropriate CORS
    headers.
    """
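
The body of the decorator is cut off above. As a hypothetical sketch only (not Sentry's actual implementation), a decorator with the behaviour the docstring describes could look roughly like this in plain Django; the header names and values are assumptions.

from functools import wraps

from django.http import HttpResponse


def allow_cors_options_sketch(func):
    @wraps(func)
    def wrapped(request, *args, **kwargs):
        if request.method == "OPTIONS":
            # Answer the pre-flight request directly with a 200.
            response = HttpResponse(status=200)
            response["Access-Control-Max-Age"] = "3600"
        else:
            response = func(request, *args, **kwargs)
        # Headers added to every response; the exact values are assumptions.
        response["Access-Control-Allow-Origin"] = "*"
        response["Access-Control-Allow-Methods"] = "GET, POST, OPTIONS"
        response["Access-Control-Allow-Headers"] = "X-Sentry-Auth, Content-Type"
        return response

    return wrapped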
Code example #3
def track_outcome(
    org_id,
    project_id,
    key_id,
    outcome,
    reason=None,
    timestamp=None,
    event_id=None,
    category=None,
    quantity=None,
):
    """
    This is a central point to track org/project counters per incoming event.
    NB: This should only ever be called once per incoming event, which means
    it should only be called at the point we know the final outcome for the
    event (invalid, rate_limited, accepted, discarded, etc.)

    This increments all the relevant legacy RedisTSDB counters, as well as
    sending a single metric event to Kafka which can be used to reconstruct the
    counters with SnubaTSDB.
    """
    global outcomes_publisher
    if outcomes_publisher is None:
        outcomes_publisher = KafkaPublisher(settings.KAFKA_CLUSTERS[outcomes["cluster"]])

    if quantity is None:
        quantity = 1

    assert isinstance(org_id, six.integer_types)
    assert isinstance(project_id, six.integer_types)
    assert isinstance(key_id, (type(None), six.integer_types))
    assert isinstance(outcome, Outcome)
    assert isinstance(timestamp, (type(None), datetime))
    assert isinstance(category, (type(None), DataCategory))
    assert isinstance(quantity, int)

    timestamp = timestamp or to_datetime(time.time())

    tsdb_in_consumer = decide_tsdb_in_consumer()

    if not tsdb_in_consumer:
        increment_list = list(
            tsdb_increments_from_outcome(
                org_id=org_id,
                project_id=project_id,
                key_id=key_id,
                outcome=outcome,
                reason=reason,
                category=category,
            )
        )

        if increment_list:
            tsdb.incr_multi(increment_list, timestamp=timestamp)

        if project_id and event_id:
            mark_tsdb_incremented(project_id, event_id)

    # Send a snuba metrics payload.
    outcomes_publisher.publish(
        outcomes["topic"],
        json.dumps(
            {
                "timestamp": timestamp,
                "org_id": org_id,
                "project_id": project_id,
                "key_id": key_id,
                "outcome": outcome.value,
                "reason": reason,
                "event_id": event_id,
                "category": category,
                "quantity": quantity,
            }
        ),
    )

    metrics.incr(
        "events.outcomes",
        skip_internal=True,
        tags={"outcome": outcome.name.lower(), "reason": reason},
    )
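
A hypothetical call site for this version, recording that a single error event was accepted. The argument values are invented, and Outcome.ACCEPTED / DataCategory.ERROR are assumed to be members of the enums asserted against above.

track_outcome(
    org_id=1,
    project_id=42,
    key_id=None,
    outcome=Outcome.ACCEPTED,
    reason=None,
    timestamp=None,          # defaults to "now"
    event_id="0123456789abcdef0123456789abcdef",
    category=DataCategory.ERROR,
    quantity=1,
)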
Code example #4
File: outcomes.py  Project: viteksafronov/sentry
def track_outcome(org_id,
                  project_id,
                  key_id,
                  outcome,
                  reason=None,
                  timestamp=None,
                  event_id=None):
    """
    This is a central point to track org/project counters per incoming event.
    NB: This should only ever be called once per incoming event, which means
    it should only be called at the point we know the final outcome for the
    event (invalid, rate_limited, accepted, discarded, etc.)

    This increments all the relevant legacy RedisTSDB counters, as well as
    sending a single metric event to Kafka which can be used to reconstruct the
    counters with SnubaTSDB.
    """
    global outcomes_publisher
    if outcomes_publisher is None:
        outcomes_publisher = QueuedPublisherService(
            KafkaPublisher(settings.KAFKA_CLUSTERS[outcomes["cluster"]]))

    assert isinstance(org_id, six.integer_types)
    assert isinstance(project_id, six.integer_types)
    assert isinstance(key_id, (type(None), six.integer_types))
    assert isinstance(outcome, Outcome)
    assert isinstance(timestamp, (type(None), datetime))

    timestamp = timestamp or to_datetime(time.time())
    increment_list = []
    if outcome != Outcome.INVALID:
        # This simply preserves old behavior. We never counted invalid events
        # (too large, duplicate, CORS) toward regular `received` counts.
        increment_list.extend([
            (tsdb.models.project_total_received, project_id),
            (tsdb.models.organization_total_received, org_id),
            (tsdb.models.key_total_received, key_id),
        ])

    if outcome == Outcome.FILTERED:
        increment_list.extend([
            (tsdb.models.project_total_blacklisted, project_id),
            (tsdb.models.organization_total_blacklisted, org_id),
            (tsdb.models.key_total_blacklisted, key_id),
        ])
    elif outcome == Outcome.RATE_LIMITED:
        increment_list.extend([
            (tsdb.models.project_total_rejected, project_id),
            (tsdb.models.organization_total_rejected, org_id),
            (tsdb.models.key_total_rejected, key_id),
        ])

    if reason in FILTER_STAT_KEYS_TO_VALUES:
        increment_list.append((FILTER_STAT_KEYS_TO_VALUES[reason], project_id))

    increment_list = [(model, key) for model, key in increment_list
                      if key is not None]
    if increment_list:
        tsdb.incr_multi(increment_list, timestamp=timestamp)

    # Send a snuba metrics payload.
    outcomes_publisher.publish(
        outcomes["topic"],
        json.dumps({
            "timestamp": timestamp,
            "org_id": org_id,
            "project_id": project_id,
            "key_id": key_id,
            "outcome": outcome.value,
            "reason": reason,
            "event_id": event_id,
        }),
    )

    metrics.incr(
        "events.outcomes",
        skip_internal=True,
        tags={
            "outcome": outcome.name.lower(),
            "reason": reason
        },
    )
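
Before tsdb.incr_multi runs, the list comprehension above drops every (model, key) pair whose key is None. A tiny standalone sketch of that step, with stand-in model names:

increments = [
    ("project_total_received", 42),
    ("organization_total_received", 1),
    ("key_total_received", None),  # event arrived without a DSN key
]

# Counters keyed by a missing id are silently skipped.
increments = [(model, key) for model, key in increments if key is not None]
assert increments == [
    ("project_total_received", 42),
    ("organization_total_received", 1),
]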
Code example #5
def track_outcome(
    org_id,
    project_id,
    key_id,
    outcome,
    reason=None,
    timestamp=None,
    event_id=None,
    category=None,
    quantity=None,
):
    """
    This is a central point to track org/project counters per incoming event.
    NB: This should only ever be called once per incoming event, which means
    it should only be called at the point we know the final outcome for the
    event (invalid, rate_limited, accepted, discarded, etc.)

    This sends the "outcome" message to Kafka which is used by Snuba to serve
    data for SnubaTSDB and RedisSnubaTSDB, such as # of rate-limited/filtered
    events.
    """
    global outcomes_publisher
    if outcomes_publisher is None:
        cluster_name = outcomes["cluster"]
        outcomes_publisher = KafkaPublisher(
            kafka_config.get_kafka_producer_cluster_options(cluster_name))

    if quantity is None:
        quantity = 1

    assert isinstance(org_id, int)
    assert isinstance(project_id, int)
    assert isinstance(key_id, (type(None), int))
    assert isinstance(outcome, Outcome)
    assert isinstance(timestamp, (type(None), datetime))
    assert isinstance(category, (type(None), DataCategory))
    assert isinstance(quantity, int)

    timestamp = timestamp or to_datetime(time.time())

    # Send a snuba metrics payload.
    outcomes_publisher.publish(
        outcomes["topic"],
        json.dumps({
            "timestamp": timestamp,
            "org_id": org_id,
            "project_id": project_id,
            "key_id": key_id,
            "outcome": outcome.value,
            "reason": reason,
            "event_id": event_id,
            "category": category,
            "quantity": quantity,
        }),
    )

    metrics.incr(
        "events.outcomes",
        skip_internal=True,
        tags={
            "outcome": outcome.name.lower(),
            "reason": reason,
            "category":
            category.api_name() if category is not None else "null",
        },
    )
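
For illustration only, the dict handed to json.dumps above might carry values like the following for a rate-limited error event. Every concrete value here is invented, Outcome.RATE_LIMITED and DataCategory.ERROR are assumed enum members, and the actual enum and timestamp encodings on the wire are whatever Sentry's JSON helper produces.

example_payload = {
    "timestamp": "2021-06-01T12:00:00.000000Z",  # invented; the real code passes a datetime
    "org_id": 1,
    "project_id": 42,
    "key_id": None,
    "outcome": Outcome.RATE_LIMITED.value,       # assumed member of the Outcome enum
    "reason": "project_quota",                   # invented reason string
    "event_id": "0123456789abcdef0123456789abcdef",
    "category": DataCategory.ERROR,              # assumed member of DataCategory
    "quantity": 1,
}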
Code example #6
File: event_manager.py  Project: omares/sentry
def track_outcome(org_id,
                  project_id,
                  key_id,
                  outcome,
                  reason=None,
                  timestamp=None):
    """
    This is a central point to track org/project counters per incoming event.
    NB: This should only ever be called once per incoming event, which means
    it should only be called at the point we know the final outcome for the
    event (invalid, rate_limited, accepted, discarded, etc.)

    This increments all the relevant legacy RedisTSDB counters, as well as
    sending a single metric event to Kafka which can be used to reconstruct the
    counters with SnubaTSDB.
    """
    global outcomes_publisher
    if outcomes_publisher is None:
        outcomes_publisher = QueuedPublisherService(
            KafkaPublisher(settings.KAFKA_CLUSTERS[outcomes['cluster']]))

    timestamp = timestamp or to_datetime(time.time())
    increment_list = []
    if outcome != 'invalid':
        # This simply preserves old behavior. We never counted invalid events
        # (too large, duplicate, CORS) toward regular `received` counts.
        increment_list.extend([
            (tsdb.models.project_total_received, project_id),
            (tsdb.models.organization_total_received, org_id),
            (tsdb.models.key_total_received, key_id),
        ])

    if outcome == 'filtered':
        increment_list.extend([
            (tsdb.models.project_total_blacklisted, project_id),
            (tsdb.models.organization_total_blacklisted, org_id),
            (tsdb.models.key_total_blacklisted, key_id),
        ])
    elif outcome == 'rate_limited':
        increment_list.extend([
            (tsdb.models.project_total_rejected, project_id),
            (tsdb.models.organization_total_rejected, org_id),
            (tsdb.models.key_total_rejected, key_id),
        ])

    if reason in FILTER_STAT_KEYS_TO_VALUES:
        increment_list.append((FILTER_STAT_KEYS_TO_VALUES[reason], project_id))

    increment_list = [(model, key) for model, key in increment_list
                      if key is not None]
    if increment_list:
        tsdb.incr_multi(increment_list, timestamp=timestamp)

    # Send a snuba metrics payload.
    if random.random() <= options.get('snuba.track-outcomes-sample-rate'):
        outcomes_publisher.publish(
            outcomes['topic'],
            json.dumps({
                'timestamp': timestamp,
                'org_id': org_id,
                'project_id': project_id,
                'key_id': key_id,
                'outcome': outcome,
                'reason': reason,
            }))

    metrics.incr(
        'events.outcomes',
        skip_internal=True,
        tags={
            'outcome': outcome,
            'reason': reason,
        },
    )
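
This older version also gates the Kafka publish behind an option-controlled sample rate. A standalone sketch of that sampling gate; the helper name and the example rate are made up.

import random


def should_publish(sample_rate):
    # Expects a float in [0.0, 1.0]; 1.0 publishes every outcome and values
    # near 0.0 publish almost none.
    return random.random() <= sample_rate


if should_publish(0.1):
    # Reached for roughly one call in ten.
    pass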
Code example #7
def track_outcome(
    org_id: int,
    project_id: int,
    key_id: Optional[int],
    outcome: Outcome,
    reason: Optional[str] = None,
    timestamp: Optional[datetime] = None,
    event_id: Optional[str] = None,
    category: Optional[DataCategory] = None,
    quantity: Optional[int] = None,
) -> None:
    """
    This is a central point to track org/project counters per incoming event.
    NB: This should only ever be called once per incoming event, which means
    it should only be called at the point we know the final outcome for the
    event (invalid, rate_limited, accepted, discarded, etc.)

    This sends the "outcome" message to Kafka which is used by Snuba to serve
    data for SnubaTSDB and RedisSnubaTSDB, such as # of rate-limited/filtered
    events.
    """
    global outcomes_publisher
    global billing_publisher

    if quantity is None:
        quantity = 1

    assert isinstance(org_id, int)
    assert isinstance(project_id, int)
    assert isinstance(key_id, (type(None), int))
    assert isinstance(outcome, Outcome)
    assert isinstance(timestamp, (type(None), datetime))
    assert isinstance(category, (type(None), DataCategory))
    assert isinstance(quantity, int)

    outcomes_config = settings.KAFKA_TOPICS[settings.KAFKA_OUTCOMES]
    billing_config = settings.KAFKA_TOPICS.get(settings.KAFKA_OUTCOMES_BILLING) or outcomes_config

    # Create a second producer instance only if the cluster differs. Otherwise,
    # reuse the same producer and just send to the other topic.
    if outcome.is_billing() and billing_config["cluster"] != outcomes_config["cluster"]:
        if billing_publisher is None:
            cluster_name = billing_config["cluster"]
            billing_publisher = KafkaPublisher(
                kafka_config.get_kafka_producer_cluster_options(cluster_name)
            )
        publisher = billing_publisher
    else:
        if outcomes_publisher is None:
            cluster_name = outcomes_config["cluster"]
            outcomes_publisher = KafkaPublisher(
                kafka_config.get_kafka_producer_cluster_options(cluster_name)
            )
        publisher = outcomes_publisher

    timestamp = timestamp or to_datetime(time.time())

    # Send billing outcomes to a dedicated topic if there is a separate
    # configuration for it. Otherwise, fall back to the regular outcomes topic.
    # This does NOT switch the producer, if both topics are on the same cluster.
    #
    # In Sentry, there is no significant difference between the classes of
    # outcome. In Sentry SaaS, they have elevated stability requirements as they
    # are used for spike protection and quota enforcement.
    topic_name = billing_config["topic"] if outcome.is_billing() else outcomes_config["topic"]

    # Send a snuba metrics payload.
    publisher.publish(
        topic_name,
        json.dumps(
            {
                "timestamp": timestamp,
                "org_id": org_id,
                "project_id": project_id,
                "key_id": key_id,
                "outcome": outcome.value,
                "reason": reason,
                "event_id": event_id,
                "category": category,
                "quantity": quantity,
            }
        ),
    )

    metrics.incr(
        "events.outcomes",
        skip_internal=True,
        tags={
            "outcome": outcome.name.lower(),
            "reason": reason,
            "category": category.api_name() if category is not None else "null",
            "topic": topic_name,
        },
    )
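
A standalone sketch of the routing decision made in this final version, with the Kafka specifics stripped out; the config shapes and all names here are assumptions.

def choose_route(is_billing, outcomes_config, billing_config):
    # Billing outcomes always use the billing topic (which falls back to the
    # regular outcomes topic when no separate billing config exists); a second
    # producer is only needed when that topic lives on a different cluster.
    config = billing_config if is_billing else outcomes_config
    return config["cluster"], config["topic"]


outcomes_config = {"cluster": "default", "topic": "outcomes"}
billing_config = {"cluster": "default", "topic": "outcomes-billing"}

# Same cluster, different topic: one producer suffices, routed by topic name.
assert choose_route(True, outcomes_config, billing_config) == ("default", "outcomes-billing")
assert choose_route(False, outcomes_config, billing_config) == ("default", "outcomes")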