Example #1
def reprocess_minidump(data):
    project = Project.objects.get_from_cache(id=data['project'])

    minidump_is_reprocessed_cache_key = minidump_reprocessed_cache_key_for_event(data)
    if default_cache.get(minidump_is_reprocessed_cache_key):
        return

    minidump = get_attached_minidump(data)

    if not minidump:
        logger.error("Missing minidump for minidump event")
        return

    request_id_cache_key = request_id_cache_key_for_event(data)

    response = run_symbolicator(
        project=project,
        request_id_cache_key=request_id_cache_key,
        create_task=create_minidump_task,
        minidump=make_buffered_slice_reader(minidump.data, None),
    )

    if handle_symbolicator_response_status(data, response):
        merge_symbolicator_minidump_response(data, response)

    event_cache_key = cache_key_for_event(data)
    default_cache.set(event_cache_key, dict(data), 3600)
    default_cache.set(minidump_is_reprocessed_cache_key, True, 3600)

    return data
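
The pattern worth noting in this version is the cache-based idempotency guard: a per-event flag is checked on entry and set on every exit path, so a duplicate task delivery becomes a no-op. Below is a minimal runnable sketch of that guard with a plain dict standing in for `default_cache`; all names are hypothetical and TTL handling is omitted.

_cache = {}


def process_once(event_id, work):
    """Run `work` at most once per event id, mirroring the guard above."""
    guard_key = 'minidump:reprocessed:%s' % event_id
    if _cache.get(guard_key):
        return None  # already handled; a duplicate delivery is a no-op
    result = work()
    _cache[guard_key] = True  # real code: default_cache.set(key, True, 3600)
    return result


assert process_once('ev1', lambda: 'done') == 'done'
assert process_once('ev1', lambda: 'done') is None  # second run short-circuits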
Example #2
def reprocess_minidump(data):
    project = Project.objects.get_from_cache(id=data['project'])

    minidump_is_reprocessed_cache_key = minidump_reprocessed_cache_key_for_event(data)
    if default_cache.get(minidump_is_reprocessed_cache_key):
        return

    if not _is_symbolicator_enabled(project, data):
        rv = reprocess_minidump_with_cfi(data)
        default_cache.set(minidump_is_reprocessed_cache_key, True, 3600)
        return rv

    minidump = get_attached_minidump(data)

    if not minidump:
        logger.error("Missing minidump for minidump event")
        return

    request_id_cache_key = request_id_cache_key_for_event(data)

    response = run_symbolicator(
        project=project,
        request_id_cache_key=request_id_cache_key,
        create_task=create_minidump_task,
        minidump=make_buffered_slice_reader(minidump.data, None)
    )

    if handle_symbolicator_response_status(data, response):
        merge_symbolicator_minidump_response(data, response)

    event_cache_key = cache_key_for_event(data)
    default_cache.set(event_cache_key, dict(data), 3600)
    default_cache.set(minidump_is_reprocessed_cache_key, True, 3600)

    return data
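
This version adds a fallback: when symbolicator is not enabled for the project, the event is reprocessed locally with CFI, and the reprocessed flag is still set so neither path runs twice. A sketch of that gate, with hypothetical stand-ins for `_is_symbolicator_enabled`, the symbolicator path, and `reprocess_minidump_with_cfi`:

def reprocess(data, symbolicator_enabled, remote, local, mark_done):
    """Dispatch to the remote or local path; both mark the event as done."""
    rv = remote(data) if symbolicator_enabled else local(data)
    mark_done(data)
    return rv


done = []
reprocess({}, False, remote=lambda d: d, local=lambda d: d,
          mark_done=lambda d: done.append(True))
assert done  # the fallback path still set the reprocessed flag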
Example #3
def reprocess_minidump(data):
    project = Project.objects.get_from_cache(id=data['project'])

    minidump_is_reprocessed_cache_key = minidump_reprocessed_cache_key_for_event(data)
    if default_cache.get(minidump_is_reprocessed_cache_key):
        return

    if not _is_symbolicator_enabled(project, data):
        rv = reprocess_minidump_with_cfi(data)
        default_cache.set(minidump_is_reprocessed_cache_key, True, 3600)
        return rv

    minidump = get_attached_minidump(data)

    if not minidump:
        logger.error("Missing minidump for minidump event")
        return

    request_id_cache_key = request_id_cache_key_for_event(data)

    response = run_symbolicator(
        project=project,
        request_id_cache_key=request_id_cache_key,
        create_task=create_minidump_task,
        minidump=make_buffered_slice_reader(minidump.data, None),
    )

    if not response:
        handle_symbolication_failed(
            SymbolicationFailed(type=EventError.NATIVE_SYMBOLICATOR_FAILED),
            data=data,
        )
        default_cache.set(minidump_is_reprocessed_cache_key, True, 3600)
        return

    merge_symbolicator_minidump_response(data, response)

    event_cache_key = cache_key_for_event(data)
    default_cache.set(event_cache_key, dict(data), 3600)
    default_cache.set(minidump_is_reprocessed_cache_key, True, 3600)

    return data
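
Unlike the two variants above, this version handles a falsy symbolicator response explicitly: it records a symbolication error on the event and still sets the reprocessed flag, so a permanently failing event is not retried forever. A sketch of that failure path; the names are hypothetical and the string stands in for the real `EventError.NATIVE_SYMBOLICATOR_FAILED` constant.

def record_symbolication_failure(data):
    """Attach a structured error entry to the event payload."""
    data.setdefault('errors', []).append({'type': 'symbolicator_failed'})


event = {'project': 42}
record_symbolication_failure(event)
assert event['errors'] == [{'type': 'symbolicator_failed'}]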
Example #4
    def run_symbolicator(self, processing_task):
        # TODO(markus): Make this work with minidumps. An unprocessed minidump
        # event will not contain unsymbolicated frames, because the minidump
        # upload already happened in store.
        # It will also presumably not contain images, so `self.available` will
        # already be `False`.

        if not self.available:
            return

        request_id_cache_key = request_id_cache_key_for_event(self.data)

        stacktraces = []
        processable_stacktraces = []
        has_frames = False

        for stacktrace_info, pf_list in processing_task.iter_processable_stacktraces():
            registers = stacktrace_info.stacktrace.get('registers') or {}

            # The filtering condition of this list comprehension is copied
            # from `iter_processable_frames`.
            #
            # We cannot reuse `iter_processable_frames` because the
            # symbolicator currently expects a list of stacktraces, not
            # flat frames.
            #
            # Right now we can't even filter out frames (e.g. using a frame
            # cache locally). The stacktraces have to be as complete as
            # possible because the symbolicator assumes the first frame of
            # a stacktrace to be the crashing frame. That assumption can
            # already be violated when the SDK chops off frames, though
            # that is unlikely in practice.
            pf_list = [pf for pf in reversed(pf_list) if pf.processor == self]

            frames = []

            for pf in pf_list:
                frame = {'instruction_addr': pf['instruction_addr']}
                if pf.get('trust') is not None:
                    frame['trust'] = pf['trust']
                frames.append(frame)
                has_frames = True

            stacktraces.append({'registers': registers, 'frames': frames})

            processable_stacktraces.append(pf_list)

        if not has_frames:
            return

        rv = run_symbolicator(
            project=self.project,
            request_id_cache_key=request_id_cache_key,
            stacktraces=stacktraces,
            modules=self.images,
            signal=self.signal,
        )

        if not rv:
            handle_symbolication_failed(
                SymbolicationFailed(type=EventError.NATIVE_SYMBOLICATOR_FAILED),
                data=self.data,
            )
            return

        # TODO(markus): Set signal and os context from symbolicator response,
        # for minidumps

        assert len(self.images) == len(rv['modules']), (self.images, rv)

        for image, complete_image in zip(self.images, rv['modules']):
            merge_symbolicator_image(
                image, complete_image, self.sdk_info,
                lambda e: handle_symbolication_failed(e, data=self.data))

        assert len(stacktraces) == len(rv['stacktraces'])

        for pf_list, symbolicated_stacktrace in zip(processable_stacktraces,
                                                    rv['stacktraces']):
            for symbolicated_frame in symbolicated_stacktrace.get('frames') or ():
                pf = pf_list[symbolicated_frame['original_index']]
                pf.data['symbolicator_match'].append(symbolicated_frame)
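
A point the comment block above makes implicitly: the frames sent to symbolication are deliberately minimal, carrying only `instruction_addr` plus `trust` when the SDK reported one, and each stacktrace is kept whole because the first frame is assumed to be the crashing frame. A sketch of the resulting `stacktraces` payload shape, with made-up register values and addresses:

# Shape of the `stacktraces` argument assembled above; the addresses and
# register values are invented for illustration only.
example_stacktraces = [
    {
        'registers': {'rip': '0x7f2a00001000', 'rsp': '0x7ffd50001000'},
        'frames': [
            {'instruction_addr': '0x7f2a00001000', 'trust': 'context'},
            {'instruction_addr': '0x7f2a00000f40'},  # no 'trust' reported
        ],
    },
]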
Example #5
    def run_symbolicator(self, processing_task):
        # TODO(markus): Make this work with minidumps. An unprocessed minidump
        # event will not contain unsymbolicated frames, because the minidump
        # upload already happened in store.
        # It will also presumably not contain images, so `self.available` will
        # already be `False`.

        if not self.available:
            return

        request_id_cache_key = request_id_cache_key_for_event(self.data)

        stacktraces = []
        processable_stacktraces = []
        has_frames = False

        for stacktrace_info, pf_list in processing_task.iter_processable_stacktraces():
            registers = stacktrace_info.stacktrace.get('registers') or {}

            # The filtering condition of this list comprehension is copied
            # from `iter_processable_frames`.
            #
            # We cannot reuse `iter_processable_frames` because the
            # symbolicator currently expects a list of stacktraces, not
            # flat frames.
            #
            # Right now we can't even filter out frames (e.g. using a frame
            # cache locally). The stacktraces have to be as complete as
            # possible because the symbolicator assumes the first frame of
            # a stacktrace to be the crashing frame. That assumption can
            # already be violated when the SDK chops off frames, though
            # that is unlikely in practice.
            pf_list = [
                pf for pf in reversed(pf_list)
                if pf.processor == self
            ]

            frames = []

            for pf in pf_list:
                frame = {'instruction_addr': pf['instruction_addr']}
                if pf.get('trust') is not None:
                    frame['trust'] = pf['trust']
                frames.append(frame)
                has_frames = True

            stacktraces.append({
                'registers': registers,
                'frames': frames
            })

            processable_stacktraces.append(pf_list)

        if not has_frames:
            return

        rv = run_symbolicator(
            project=self.project,
            request_id_cache_key=request_id_cache_key,
            stacktraces=stacktraces,
            modules=self.images,
            signal=self.signal,
        )

        if not handle_symbolicator_response_status(self.data, rv):
            return

        # TODO(markus): Set signal and os context from symbolicator response,
        # for minidumps

        assert len(self.images) == len(rv['modules']), (self.images, rv)

        for image, complete_image in zip(self.images, rv['modules']):
            merge_symbolicator_image(
                image, complete_image, self.sdk_info,
                lambda e: handle_symbolication_failed(e, data=self.data)
            )

        assert len(stacktraces) == len(rv['stacktraces'])

        for pf_list, symbolicated_stacktrace in zip(
            processable_stacktraces,
            rv['stacktraces']
        ):
            for symbolicated_frame in symbolicated_stacktrace.get('frames') or ():
                pf = pf_list[symbolicated_frame['original_index']]
                pf.data['symbolicator_match'].append(symbolicated_frame)
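
The difference from the previous example is that failure handling is folded into `handle_symbolicator_response_status`, which both records the failure and signals, through its return value, whether the response is safe to merge. A minimal sketch of that contract, with hypothetical names and a stand-in error type string:

def handle_response_status(data, response):
    """Return True when `response` is usable; record an error otherwise."""
    if not response:
        data.setdefault('errors', []).append({'type': 'symbolicator_failed'})
        return False
    return True


event = {}
assert handle_response_status(event, {'modules': []})
assert not handle_response_status(event, None)
assert event['errors']  # the failure was recorded on the event payload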
Example #6
    def run_symbolicator(self, processing_task):
        # TODO(markus): Make this work with minidumps. An unprocessed minidump
        # event will not contain unsymbolicated frames, because the minidump
        # upload already happened in store.
        # It will also presumably not contain images, so `self.available` will
        # already be `False`.

        if not self.available:
            return

        request_id_cache_key = request_id_cache_key_for_event(self.data)

        stacktraces = []
        processable_stacktraces = []
        for stacktrace_info, pf_list in processing_task.iter_processable_stacktraces():
            registers = stacktrace_info.stacktrace.get('registers') or {}

            # The filtering condition of this list comprehension is copied
            # from `iter_processable_frames`.
            #
            # We cannot reuse `iter_processable_frames` because the
            # symbolicator currently expects a list of stacktraces, not
            # flat frames.
            #
            # Right now we can't even filter out frames (e.g. using a frame
            # cache locally). The stacktraces have to be as complete as
            # possible because the symbolicator assumes the first frame of
            # a stacktrace to be the crashing frame. That assumption can
            # already be violated when the SDK chops off frames, though
            # that is unlikely in practice.
            pf_list = [pf for pf in reversed(pf_list) if pf.processor == self]

            frames = []

            for pf in pf_list:
                frame = {'instruction_addr': pf['instruction_addr']}
                if pf.get('trust') is not None:
                    frame['trust'] = pf['trust']
                frames.append(frame)

            stacktraces.append({'registers': registers, 'frames': frames})

            processable_stacktraces.append(pf_list)

        rv = run_symbolicator(
            stacktraces=stacktraces,
            modules=self.images,
            project=self.project,
            arch=self.arch,
            signal=self.signal,
            request_id_cache_key=request_id_cache_key,
        )
        if not rv:
            errors = self._handle_symbolication_failed(
                SymbolicationFailed(type=EventError.NATIVE_SYMBOLICATOR_FAILED))
            self.data.setdefault('errors', []).extend(errors)
            return

        # TODO(markus): Set signal and os context from symbolicator response,
        # for minidumps

        assert len(self.images) == len(rv['modules']), (self.images, rv)

        for image, fetched_debug_file in zip(self.images, rv['modules']):
            status = fetched_debug_file.pop('status')
            # Set image data from symbolicator as symbolicator might know more
            # than the SDK, especially for minidumps
            if fetched_debug_file.get('arch') == 'unknown':
                fetched_debug_file.pop('arch')
            image.update(fetched_debug_file)

            if status in ('found', 'unused'):
                continue
            elif status == 'missing_debug_file':
                error = SymbolicationFailed(
                    type=EventError.NATIVE_MISSING_DSYM)
            elif status == 'malformed_debug_file':
                error = SymbolicationFailed(type=EventError.NATIVE_BAD_DSYM)
            elif status == 'too_large':
                error = SymbolicationFailed(type=EventError.FETCH_TOO_LARGE)
            elif status == 'fetching_failed':
                error = SymbolicationFailed(
                    type=EventError.FETCH_GENERIC_ERROR)
            elif status == 'other':
                error = SymbolicationFailed(type=EventError.UNKNOWN_ERROR)
            else:
                logger.error("Unknown status: %s", status)
                continue

            error.image_arch = image['arch']
            error.image_path = image['code_file']
            error.image_name = image_name(image['code_file'])
            error.image_uuid = image['debug_id']
            self.data.setdefault('errors', []).extend(
                self._handle_symbolication_failed(error))

        assert len(stacktraces) == len(rv['stacktraces'])

        for pf_list, symbolicated_stacktrace in zip(processable_stacktraces,
                                                    rv['stacktraces']):
            for symbolicated_frame in symbolicated_stacktrace.get('frames') or ():
                pf = pf_list[symbolicated_frame['original_index']]
                pf.data['symbolicator_match'].append(symbolicated_frame)
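
The `if`/`elif` chain above is a straight mapping from symbolicator module statuses to event error types, and could equally be written as a lookup table. A sketch with plain strings standing in for the `EventError` constants used above ('found' and 'unused' produce no error):

STATUS_TO_ERROR = {
    'missing_debug_file': 'native_missing_dsym',
    'malformed_debug_file': 'native_bad_dsym',
    'too_large': 'fetch_too_large',
    'fetching_failed': 'fetch_generic_error',
    'other': 'unknown_error',
}


def error_type_for(status):
    """Return the error type for a module status, or None when there is none."""
    if status in ('found', 'unused'):
        return None
    # the original code logs unknown statuses and moves on, hence .get()
    return STATUS_TO_ERROR.get(status)


assert error_type_for('found') is None
assert error_type_for('too_large') == 'fetch_too_large'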