Example no. 1
import base64
import logging

from django.conf import settings

from sentry.relay.config import get_project_config
from sentry.utils.pubsub import KafkaPublisher, QueuedPublisherService  # assumed import path
from sentry.web.client_config import get_client_config

logger = logging.getLogger("sentry")
minidumps_logger = logging.getLogger("sentry.minidumps")

# Transparent 1x1 gif
# See http://probablyprogramming.com/2009/03/15/the-tiniest-gif-ever
PIXEL = base64.b64decode("R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs=")

PROTOCOL_VERSIONS = frozenset(("2.0", "3", "4", "5", "6", "7"))

kafka_publisher = (
    QueuedPublisherService(
        KafkaPublisher(
            getattr(settings, "KAFKA_RAW_EVENTS_PUBLISHER_CONNECTION", None), asynchronous=False
        )
    )
    if getattr(settings, "KAFKA_RAW_EVENTS_PUBLISHER_ENABLED", False)
    else None
)


def allow_cors_options(func):
    """
    Decorator that adds automatic handling of OPTIONS requests for CORS

    If the request is OPTIONS (i.e. a CORS pre-flight request), construct an
    OK (200) response in which we explicitly allow the caller and add the
    custom headers that we support. For other requests, just add the
    appropriate CORS headers.
    """
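
The excerpt ends before the decorator body. A minimal sketch of what such a
wrapper might look like, assuming Django's HttpResponse and illustrative
header values (this is not the actual Sentry implementation):

from functools import wraps

from django.http import HttpResponse


def allow_cors_options(func):
    @wraps(func)
    def wrapped(request, *args, **kwargs):
        if request.method == "OPTIONS":
            # Pre-flight: answer 200 ourselves instead of calling the view.
            response = HttpResponse(status=200)
        else:
            response = func(request, *args, **kwargs)
        # Echo the caller's origin back and advertise the headers and
        # methods we accept (illustrative values).
        response["Access-Control-Allow-Origin"] = request.META.get("HTTP_ORIGIN", "*")
        response["Access-Control-Allow-Headers"] = "X-Sentry-Auth, Content-Type"
        response["Access-Control-Allow-Methods"] = "GET, POST, OPTIONS"
        return response

    return wrapped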
Example no. 2
# Module-level context this excerpt relies on (assumed import paths; the
# Outcome enum, the `outcomes` topic config, and FILTER_STAT_KEYS_TO_VALUES
# are module globals not shown here):
import time
from datetime import datetime

import six
from django.conf import settings

from sentry import tsdb
from sentry.utils import json, metrics
from sentry.utils.dates import to_datetime
from sentry.utils.pubsub import KafkaPublisher, QueuedPublisherService

outcomes_publisher = None

def track_outcome(org_id,
                  project_id,
                  key_id,
                  outcome,
                  reason=None,
                  timestamp=None,
                  event_id=None):
    """
    This is a central point to track org/project counters per incoming event.
    NB: This should only ever be called once per incoming event, which means
    it should only be called at the point we know the final outcome for the
    event (invalid, rate_limited, accepted, discarded, etc.)

    This increments all the relevant legacy RedisTSDB counters, as well as
    sending a single metric event to Kafka which can be used to reconstruct the
    counters with SnubaTSDB.
    """
    global outcomes_publisher
    if outcomes_publisher is None:
        outcomes_publisher = QueuedPublisherService(
            KafkaPublisher(settings.KAFKA_CLUSTERS[outcomes["cluster"]]))

    assert isinstance(org_id, six.integer_types)
    assert isinstance(project_id, six.integer_types)
    assert isinstance(key_id, (type(None), six.integer_types))
    assert isinstance(outcome, Outcome)
    assert isinstance(timestamp, (type(None), datetime))

    timestamp = timestamp or to_datetime(time.time())
    increment_list = []
    if outcome != Outcome.INVALID:
        # This simply preserves old behavior. We never counted invalid events
        # (too large, duplicate, CORS) toward regular `received` counts.
        increment_list.extend([
            (tsdb.models.project_total_received, project_id),
            (tsdb.models.organization_total_received, org_id),
            (tsdb.models.key_total_received, key_id),
        ])

    if outcome == Outcome.FILTERED:
        increment_list.extend([
            (tsdb.models.project_total_blacklisted, project_id),
            (tsdb.models.organization_total_blacklisted, org_id),
            (tsdb.models.key_total_blacklisted, key_id),
        ])
    elif outcome == Outcome.RATE_LIMITED:
        increment_list.extend([
            (tsdb.models.project_total_rejected, project_id),
            (tsdb.models.organization_total_rejected, org_id),
            (tsdb.models.key_total_rejected, key_id),
        ])

    if reason in FILTER_STAT_KEYS_TO_VALUES:
        increment_list.append((FILTER_STAT_KEYS_TO_VALUES[reason], project_id))

    increment_list = [(model, key) for model, key in increment_list
                      if key is not None]
    if increment_list:
        tsdb.incr_multi(increment_list, timestamp=timestamp)

    # Send a snuba metrics payload.
    outcomes_publisher.publish(
        outcomes["topic"],
        json.dumps({
            "timestamp": timestamp,
            "org_id": org_id,
            "project_id": project_id,
            "key_id": key_id,
            "outcome": outcome.value,
            "reason": reason,
            "event_id": event_id,
        }),
    )

    metrics.incr(
        "events.outcomes",
        skip_internal=True,
        tags={
            "outcome": outcome.name.lower(),
            "reason": reason
        },
    )
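
An illustrative call for this variant; the IDs are made up, Outcome is the
enum asserted above, and the reason string is hypothetical:

# Illustrative only: made-up IDs and a hypothetical filter reason.
track_outcome(
    org_id=1,
    project_id=42,
    key_id=None,              # key-scoped counters are skipped when None
    outcome=Outcome.FILTERED,
    reason="browser-extensions",
)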
Example no. 3
def track_outcome(org_id,
                  project_id,
                  key_id,
                  outcome,
                  reason=None,
                  timestamp=None,
                  event_id=None):
    """
    This is a central point to track org/project counters per incoming event.
    NB: This should only ever be called once per incoming event, which means
    it should only be called at the point we know the final outcome for the
    event (invalid, rate_limited, accepted, discarded, etc.)

    This increments all the relevant legacy RedisTSDB counters, as well as
    sending a single metric event to Kafka which can be used to reconstruct the
    counters with SnubaTSDB.
    """
    global outcomes_publisher
    if outcomes_publisher is None:
        outcomes_publisher = QueuedPublisherService(
            KafkaPublisher(settings.KAFKA_CLUSTERS[outcomes["cluster"]]))

    assert isinstance(org_id, six.integer_types)
    assert isinstance(project_id, six.integer_types)
    assert isinstance(key_id, (type(None), six.integer_types))
    assert isinstance(outcome, Outcome)
    assert isinstance(timestamp, (type(None), datetime))

    timestamp = timestamp or to_datetime(time.time())

    tsdb_in_consumer = decide_tsdb_in_consumer()

    if not tsdb_in_consumer:
        increment_list = list(
            tsdb_increments_from_outcome(org_id=org_id,
                                         project_id=project_id,
                                         key_id=key_id,
                                         outcome=outcome,
                                         reason=reason))

        if increment_list:
            tsdb.incr_multi(increment_list, timestamp=timestamp)

        if project_id and event_id:
            mark_tsdb_incremented(project_id, event_id)

    # Send a snuba metrics payload.
    outcomes_publisher.publish(
        outcomes["topic"],
        json.dumps({
            "timestamp": timestamp,
            "org_id": org_id,
            "project_id": project_id,
            "key_id": key_id,
            "outcome": outcome.value,
            "reason": reason,
            "event_id": event_id,
        }),
    )

    metrics.incr(
        "events.outcomes",
        skip_internal=True,
        tags={
            "outcome": outcome.name.lower(),
            "reason": reason
        },
    )
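
The helpers decide_tsdb_in_consumer, mark_tsdb_incremented, and
tsdb_increments_from_outcome are not shown in this excerpt. A sketch of what
tsdb_increments_from_outcome plausibly yields, reconstructed from the inline
logic of example no. 2 (the actual helper may differ in detail):

def tsdb_increments_from_outcome(org_id, project_id, key_id, outcome, reason):
    # Reconstruction of the inline logic from example no. 2, not the real
    # Sentry helper.
    pairs = []
    if outcome != Outcome.INVALID:
        # Invalid events (too large, duplicate, CORS) were never counted
        # toward regular `received` totals.
        pairs += [
            (tsdb.models.project_total_received, project_id),
            (tsdb.models.organization_total_received, org_id),
            (tsdb.models.key_total_received, key_id),
        ]
    if outcome == Outcome.FILTERED:
        pairs += [
            (tsdb.models.project_total_blacklisted, project_id),
            (tsdb.models.organization_total_blacklisted, org_id),
            (tsdb.models.key_total_blacklisted, key_id),
        ]
    elif outcome == Outcome.RATE_LIMITED:
        pairs += [
            (tsdb.models.project_total_rejected, project_id),
            (tsdb.models.organization_total_rejected, org_id),
            (tsdb.models.key_total_rejected, key_id),
        ]
    if reason in FILTER_STAT_KEYS_TO_VALUES:
        pairs.append((FILTER_STAT_KEYS_TO_VALUES[reason], project_id))
    # Drop entries with a missing key, mirroring the None filter the inline
    # version applied before tsdb.incr_multi.
    return ((model, key) for model, key in pairs if key is not None)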
Example no. 4
def track_outcome(org_id,
                  project_id,
                  key_id,
                  outcome,
                  reason=None,
                  timestamp=None):
    """
    This is a central point to track org/project counters per incoming event.
    NB: This should only ever be called once per incoming event, which means
    it should only be called at the point we know the final outcome for the
    event (invalid, rate_limited, accepted, discarded, etc.)

    This increments all the relevant legacy RedisTSDB counters, as well as
    sending a single metric event to Kafka which can be used to reconstruct the
    counters with SnubaTSDB.
    """
    global outcomes_publisher
    if outcomes_publisher is None:
        outcomes_publisher = QueuedPublisherService(
            KafkaPublisher(settings.KAFKA_CLUSTERS[outcomes['cluster']]))

    timestamp = timestamp or to_datetime(time.time())
    increment_list = []
    if outcome != 'invalid':
        # This simply preserves old behavior. We never counted invalid events
        # (too large, duplicate, CORS) toward regular `received` counts.
        increment_list.extend([
            (tsdb.models.project_total_received, project_id),
            (tsdb.models.organization_total_received, org_id),
            (tsdb.models.key_total_received, key_id),
        ])

    if outcome == 'filtered':
        increment_list.extend([
            (tsdb.models.project_total_blacklisted, project_id),
            (tsdb.models.organization_total_blacklisted, org_id),
            (tsdb.models.key_total_blacklisted, key_id),
        ])
    elif outcome == 'rate_limited':
        increment_list.extend([
            (tsdb.models.project_total_rejected, project_id),
            (tsdb.models.organization_total_rejected, org_id),
            (tsdb.models.key_total_rejected, key_id),
        ])

    if reason in FILTER_STAT_KEYS_TO_VALUES:
        increment_list.append((FILTER_STAT_KEYS_TO_VALUES[reason], project_id))

    increment_list = [(model, key) for model, key in increment_list
                      if key is not None]
    if increment_list:
        tsdb.incr_multi(increment_list, timestamp=timestamp)

    # Send a snuba metrics payload.
    if random.random() <= options.get('snuba.track-outcomes-sample-rate'):
        outcomes_publisher.publish(
            outcomes['topic'],
            json.dumps({
                'timestamp': timestamp,
                'org_id': org_id,
                'project_id': project_id,
                'key_id': key_id,
                'outcome': outcome,
                'reason': reason,
            }))

    metrics.incr(
        'events.outcomes',
        skip_internal=True,
        tags={
            'outcome': outcome,
            'reason': reason,
        },
    )
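
In this older variant the outcome is a plain string rather than an Outcome
enum, and the Kafka publish is sampled through the
snuba.track-outcomes-sample-rate option. An illustrative call with made-up
values:

# Illustrative only: the IDs are made up and the reason is hypothetical.
track_outcome(
    org_id=1,
    project_id=42,
    key_id=7,
    outcome='rate_limited',
    reason='project_quota',
)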
Example no. 5
import base64
import logging
from functools import wraps

from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect

from sentry.utils.http import is_same_domain  # assumed source of this helper
from sentry.utils.pubsub import KafkaPublisher, QueuedPublisherService  # assumed path
from sentry.utils.safe import safe_execute
from sentry.web.helpers import render_to_response

logger = logging.getLogger('sentry')
minidumps_logger = logging.getLogger('sentry.minidumps')

# Transparent 1x1 gif
# See http://probablyprogramming.com/2009/03/15/the-tiniest-gif-ever
PIXEL = base64.b64decode('R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs=')

PROTOCOL_VERSIONS = frozenset(('2.0', '3', '4', '5', '6', '7'))

kafka_publisher = QueuedPublisherService(
    KafkaPublisher(
        getattr(
            settings,
            'KAFKA_RAW_EVENTS_PUBLISHER_CONNECTION',
            None),
        asynchronous=False)
) if getattr(settings, 'KAFKA_RAW_EVENTS_PUBLISHER_ENABLED', False) else None


def api(func):
    @wraps(func)
    def wrapped(request, *args, **kwargs):
        data = func(request, *args, **kwargs)
        if request.is_ajax():
            response = HttpResponse(data)
            response['Content-Type'] = 'application/json'
        else:
            ref = request.META.get('HTTP_REFERER')
            if ref is None or not is_same_domain(ref, request.build_absolute_uri()):
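                # The excerpt cuts off here. Plausible completion (an
                # assumption, not the actual Sentry code): fall back to a
                # safe same-domain URL when the referrer is missing or
                # foreign, then redirect non-AJAX callers there.
                ref = '/'
            return HttpResponseRedirect(ref)
        return response

    return wrapped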
Example no. 6
def track_outcome(org_id, project_id, key_id, outcome, reason=None, timestamp=None, event_id=None):
    """
    This is a central point to track org/project counters per incoming event.
    NB: This should only ever be called once per incoming event, which means
    it should only be called at the point we know the final outcome for the
    event (invalid, rate_limited, accepted, discarded, etc.)

    This increments all the relevant legacy RedisTSDB counters, as well as
    sending a single metric event to Kafka which can be used to reconstruct the
    counters with SnubaTSDB.
    """
    global outcomes_publisher
    if outcomes_publisher is None:
        outcomes_publisher = QueuedPublisherService(
            KafkaPublisher(
                settings.KAFKA_CLUSTERS[outcomes['cluster']]
            )
        )

    assert isinstance(org_id, six.integer_types)
    assert isinstance(project_id, six.integer_types)
    assert isinstance(key_id, (type(None), six.integer_types))
    assert isinstance(outcome, Outcome)
    assert isinstance(timestamp, (type(None), datetime))

    timestamp = timestamp or to_datetime(time.time())
    increment_list = []
    if outcome != Outcome.INVALID:
        # This simply preserves old behavior. We never counted invalid events
        # (too large, duplicate, CORS) toward regular `received` counts.
        increment_list.extend([
            (tsdb.models.project_total_received, project_id),
            (tsdb.models.organization_total_received, org_id),
            (tsdb.models.key_total_received, key_id),
        ])

    if outcome == Outcome.FILTERED:
        increment_list.extend([
            (tsdb.models.project_total_blacklisted, project_id),
            (tsdb.models.organization_total_blacklisted, org_id),
            (tsdb.models.key_total_blacklisted, key_id),
        ])
    elif outcome == Outcome.RATE_LIMITED:
        increment_list.extend([
            (tsdb.models.project_total_rejected, project_id),
            (tsdb.models.organization_total_rejected, org_id),
            (tsdb.models.key_total_rejected, key_id),
        ])

    if reason in FILTER_STAT_KEYS_TO_VALUES:
        increment_list.append((FILTER_STAT_KEYS_TO_VALUES[reason], project_id))

    increment_list = [(model, key) for model, key in increment_list if key is not None]
    if increment_list:
        tsdb.incr_multi(increment_list, timestamp=timestamp)

    # Send a snuba metrics payload.
    if random.random() <= options.get('snuba.track-outcomes-sample-rate'):
        outcomes_publisher.publish(
            outcomes['topic'],
            json.dumps({
                'timestamp': timestamp,
                'org_id': org_id,
                'project_id': project_id,
                'key_id': key_id,
                'outcome': outcome.value,
                'reason': reason,
                'event_id': event_id,
            })
        )

    metrics.incr(
        'events.outcomes',
        skip_internal=True,
        tags={
            'outcome': outcome.name.lower(),
            'reason': reason,
        },
    )
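
For reference, the JSON that lands on the outcomes topic has roughly this
shape; the values below are illustrative, the reason and event_id are
hypothetical, and the Outcome enum is assumed to serialize to a small
integer via .value:

{
    'timestamp': '2019-01-01T00:00:00.000000Z',
    'org_id': 1,
    'project_id': 42,
    'key_id': 7,
    'outcome': 2,
    'reason': 'project_quota',
    'event_id': 'aabbccddeeff00112233445566778899',
}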