Example #1
File: plugin.py Project: overquota/sentry
def reprocess_minidump(data):
    project = Project.objects.get_from_cache(id=data['project'])

    minidump_is_reprocessed_cache_key = minidump_reprocessed_cache_key_for_event(
        data)
    if default_cache.get(minidump_is_reprocessed_cache_key):
        return

    minidump = get_attached_minidump(data)

    if not minidump:
        logger.error("Missing minidump for minidump event")
        return

    request_id_cache_key = request_id_cache_key_for_event(data)

    response = run_symbolicator(project=project,
                                request_id_cache_key=request_id_cache_key,
                                create_task=create_minidump_task,
                                minidump=make_buffered_slice_reader(
                                    minidump.data, None))

    if handle_symbolicator_response_status(data, response):
        merge_symbolicator_minidump_response(data, response)

    event_cache_key = cache_key_for_event(data)
    default_cache.set(event_cache_key, dict(data), 3600)
    default_cache.set(minidump_is_reprocessed_cache_key, True, 3600)

    return data
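
Example #1's early exits all return without a value, while the success path returns the mutated payload, so a caller can tell "nothing to do" apart from "reprocessed". A minimal, hypothetical call site (event_payload and the surrounding task wiring are illustrative, not taken from Sentry):

data = reprocess_minidump(event_payload)
if data is None:
    # Already reprocessed or no minidump attached; nothing to save.
    return
# On success, data is the updated event payload, already cached for an
# hour under cache_key_for_event(data).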
Example #2
File: plugin.py Project: yaoqi/sentry
def reprocess_minidump(data):
    project = Project.objects.get_from_cache(id=data['project'])

    minidump_is_reprocessed_cache_key = minidump_reprocessed_cache_key_for_event(data)
    if default_cache.get(minidump_is_reprocessed_cache_key):
        return

    if not _is_symbolicator_enabled(project, data):
        rv = reprocess_minidump_with_cfi(data)
        default_cache.set(minidump_is_reprocessed_cache_key, True, 3600)
        return rv

    minidump = get_attached_minidump(data)

    if not minidump:
        logger.error("Missing minidump for minidump event")
        return

    request_id_cache_key = request_id_cache_key_for_event(data)

    response = run_symbolicator(
        project=project,
        request_id_cache_key=request_id_cache_key,
        create_task=create_minidump_task,
        minidump=make_buffered_slice_reader(minidump.data, None)
    )

    if handle_symbolicator_response_status(data, response):
        merge_symbolicator_minidump_response(data, response)

    event_cache_key = cache_key_for_event(data)
    default_cache.set(event_cache_key, dict(data), 3600)
    default_cache.set(minidump_is_reprocessed_cache_key, True, 3600)

    return data
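
Example #2 differs from Example #1 only in the _is_symbolicator_enabled branch, which falls back to in-process CFI reprocessing (Examples #3 and #4) when Symbolicator is disabled for the project. The helper itself appears nowhere in this listing; below is a rough, hypothetical sketch of such a feature gate, where the option names are assumptions and not Sentry's actual settings:

from sentry import options  # assumed import path

def _is_symbolicator_enabled(project, data):
    # Hypothetical sketch only; the real gate lives elsewhere in Sentry
    # and the option names here are assumptions.
    if options.get("symbolicator.enabled"):  # assumed global switch
        return True
    # Assumed per-project opt-in flag.
    return bool(project.get_option("sentry:symbolicator-enabled", False))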
Example #3
File: cfi.py Project: getsentry/sentry
def reprocess_minidump_with_cfi(data):
    """Reprocesses a minidump event if CFI(call frame information) is available
    and viable. The event is only processed if there are stack traces that
    contain scanned frames.
    """

    handle = ThreadProcessingHandle(data)

    # Check stacktrace caches first and skip all threads that do not need CFI.
    # This applies either if a thread is trusted (i.e. it does not contain
    # scanned frames) or if its stack trace can be fetched from the cache.
    threads = {}
    for tid, thread in handle.iter_threads():
        if not thread.needs_cfi:
            continue

        if thread.load_from_cache():
            if thread.apply_to_event():
                handle.indicate_change()
            continue

        threads[tid] = thread

    if not threads:
        return handle.result()

    # Check if we have a minidump to reprocess
    cache_key = cache_key_for_event(data)
    attachments = attachment_cache.get(cache_key) or []
    minidump = next((a for a in attachments if a.type == MINIDUMP_ATTACHMENT_TYPE), None)
    if not minidump:
        return handle.result()

    # Determine modules loaded into the process during the crash
    debug_ids = [module.debug_id for module in handle.iter_modules()]
    if not debug_ids:
        return handle.result()

    # Load CFI caches for all loaded modules (even unreferenced ones)
    project = Project.objects.get_from_cache(id=data['project'])
    cficaches = ProjectDebugFile.difcache.get_cficaches(project, debug_ids)
    if not cficaches:
        return handle.result()

    # Reprocess the minidump with CFI
    cfi_map = FrameInfoMap.new()
    for debug_id, cficache in six.iteritems(cficaches):
        cfi_map.add(debug_id, cficache)
    state = process_minidump(minidump.data, cfi=cfi_map)

    # Merge existing stack traces with new ones from the minidump
    for minidump_thread in state.threads():
        thread = threads.get(minidump_thread.thread_id)
        if thread:
            thread.load_from_minidump(minidump_thread)
            thread.save_to_cache()
            if thread.apply_to_event():
                handle.indicate_change()

    return handle.result()
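
The repeated return handle.result() exits rely on a contract the example only implies: ThreadProcessingHandle.result() presumably returns the mutated event payload once indicate_change() has been called, and None otherwise, so callers can skip a no-op save. A short caller sketch under that assumption, reusing default_cache and cache_key_for_event from the examples above:

# Assumes result() is None when no thread changed (inferred from the
# indicate_change() calls above, not stated in the example).
new_data = reprocess_minidump_with_cfi(data)
if new_data is not None:
    # Persist the updated payload, mirroring the hour-long caching used
    # by the reprocess_minidump examples above.
    default_cache.set(cache_key_for_event(new_data), dict(new_data), 3600)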
Example #4
File: cfi.py Project: zhouhuiquan/sentry
def reprocess_minidump_with_cfi(data):
    """Reprocesses a minidump event if CFI(call frame information) is available
    and viable. The event is only processed if there are stack traces that
    contain scanned frames.
    """

    handle = ThreadProcessingHandle(data)

    # Check stacktrace caches first and skip all threads that do not need CFI.
    # This applies either if a thread is trusted (i.e. it does not contain
    # scanned frames) or if its stack trace can be fetched from the cache.
    threads = {}
    for tid, thread in handle.iter_threads():
        if not thread.needs_cfi:
            continue

        if thread.load_from_cache():
            if thread.apply_to_event():
                handle.indicate_change()
            continue

        threads[tid] = thread

    if not threads:
        return handle.result()

    # Check if we have a minidump to reprocess
    cache_key = cache_key_for_event(data)
    attachments = attachment_cache.get(cache_key) or []
    minidump = next((a for a in attachments if a.type == MINIDUMP_ATTACHMENT_TYPE), None)
    if not minidump:
        return handle.result()

    # Determine modules loaded into the process during the crash
    debug_ids = [module.id for module in handle.iter_modules()]
    if not debug_ids:
        return handle.result()

    # Load CFI caches for all loaded modules (even unreferenced ones)
    project = Project.objects.get_from_cache(id=data['project'])
    cficaches = ProjectDebugFile.difcache.get_cficaches(project, debug_ids)
    if not cficaches:
        return handle.result()

    # Reprocess the minidump with CFI
    cfi_map = FrameInfoMap.new()
    for debug_id, cficache in six.iteritems(cficaches):
        cfi_map.add(debug_id, cficache)
    state = process_minidump(minidump.data, cfi=cfi_map)

    # Merge existing stack traces with new ones from the minidump
    for minidump_thread in state.threads():
        thread = threads.get(minidump_thread.thread_id)
        if thread:
            thread.load_from_minidump(minidump_thread)
            thread.save_to_cache()
            if thread.apply_to_event():
                handle.indicate_change()

    return handle.result()
Example #5
    def process_message(self, message):
        message = msgpack.unpackb(message.value(), use_list=False)
        payload = message["payload"]
        start_time = float(message["start_time"])
        event_id = message["event_id"]
        project_id = message["project_id"]
        remote_addr = message.get("remote_addr")

        # check that we haven't already processed this event (a previous instance of the forwarder
        # died before it could commit the event queue offset)
        deduplication_key = "ev:{}:{}".format(project_id, event_id)
        if cache.get(deduplication_key) is not None:
            logger.warning(
                "pre-process-forwarder detected a duplicated event"
                " with id:%s for project:%s.",
                event_id,
                project_id,
            )
            return True  # message already processed, do not reprocess

        try:
            project = Project.objects.get_from_cache(id=project_id)
        except Project.DoesNotExist:
            logger.error("Project for ingested event does not exist: %s",
                         project_id)
            return True

        # Parse the JSON payload. This is required to compute the cache key and
        # call process_event. The payload will be put into Kafka raw, to avoid
        # serializing it again.
        # XXX: Do not use CanonicalKeyDict here. This may break preprocess_event
        # which assumes that data passed in is a raw dictionary.
        data = json.loads(payload)

        cache_timeout = 3600
        cache_key = cache_key_for_event(data)
        default_cache.set(cache_key, data, cache_timeout)

        # Preprocess this event, which spawns either process_event or
        # save_event. Pass data explicitly to avoid fetching it again from the
        # cache.
        preprocess_event(cache_key=cache_key,
                         data=data,
                         start_time=start_time,
                         event_id=event_id)

        # remember for 1 hour that we saved this event (deduplication protection)
        cache.set(deduplication_key, "", 3600)

        # emit event_accepted once everything is done
        event_accepted.send_robust(ip=remote_addr,
                                   data=data,
                                   project=project,
                                   sender=self.process_message)

        # Return *something* so that it counts against batch size
        return True
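
The deduplication in Example #5 guards against at-least-once Kafka delivery: a short-lived cache entry keyed by project and event marks the work as done in case a previous forwarder died before committing its offset. The same pattern, reduced to a minimal self-contained sketch (the cache object and handler callable are assumed, Django-cache style):

def handle_once(cache, project_id, event_id, handler):
    # Sketch of the cache-based deduplication used above; not part of
    # the original example.
    key = "ev:{}:{}".format(project_id, event_id)
    if cache.get(key) is not None:
        return True  # already processed, do not reprocess
    handler()
    cache.set(key, "", 3600)  # remember for 1 hour
    return True  # return something so it counts against batch size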
Example #6
def reprocess_minidump(data):
    project = Project.objects.get_from_cache(id=data['project'])

    minidump_is_reprocessed_cache_key = minidump_reprocessed_cache_key_for_event(
        data)
    if default_cache.get(minidump_is_reprocessed_cache_key):
        return

    if not _is_symbolicator_enabled(project, data):
        rv = reprocess_minidump_with_cfi(data)
        default_cache.set(minidump_is_reprocessed_cache_key, True, 3600)
        return rv

    minidump = get_attached_minidump(data)

    if not minidump:
        logger.error("Missing minidump for minidump event")
        return

    request_id_cache_key = request_id_cache_key_for_event(data)

    response = run_symbolicator(project=project,
                                request_id_cache_key=request_id_cache_key,
                                create_task=create_minidump_task,
                                minidump=make_buffered_slice_reader(
                                    minidump.data, None))

    if not response:
        handle_symbolication_failed(
            SymbolicationFailed(type=EventError.NATIVE_SYMBOLICATOR_FAILED),
            data=data,
        )
        default_cache.set(minidump_is_reprocessed_cache_key, True, 3600)
        return

    merge_symbolicator_minidump_response(data, response)

    event_cache_key = cache_key_for_event(data)
    default_cache.set(event_cache_key, dict(data), 3600)
    default_cache.set(minidump_is_reprocessed_cache_key, True, 3600)

    return data
Example #7
def get_attached_minidump(data):
    cache_key = cache_key_for_event(data)
    attachments = attachment_cache.get(cache_key) or []
    return next((a for a in attachments if a.type == MINIDUMP_ATTACHMENT_TYPE),
                None)
Example #8
def get_attached_minidump(data):
    cache_key = cache_key_for_event(data)
    attachments = attachment_cache.get(cache_key) or []
    return next((a for a in attachments if a.type == MINIDUMP_ATTACHMENT_TYPE), None)
Example #9
def get_event_attachment(data, attachment_type):
    cache_key = cache_key_for_event(data)
    attachments = attachment_cache.get(cache_key) or []
    return next((a for a in attachments if a.type == attachment_type), None)
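
Examples #7 and #8 are the minidump-specific form of the generic lookup in Example #9; with get_event_attachment available, the earlier helper reduces to a one-liner (assuming MINIDUMP_ATTACHMENT_TYPE is in scope):

def get_attached_minidump(data):
    # Equivalent to Examples #7 and #8, expressed via the generic helper.
    return get_event_attachment(data, MINIDUMP_ATTACHMENT_TYPE)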