Example #1
def handle_symbolicator_response_status(event_data, response_json):
    if not response_json:
        error = SymbolicationFailed(type=EventError.NATIVE_INTERNAL_FAILURE)
    elif response_json['status'] == 'completed':
        return True
    elif response_json['status'] == 'failed':
        error = SymbolicationFailed(
            message=response_json.get('message') or None,
            type=EventError.NATIVE_SYMBOLICATOR_FAILED)
    else:
        logger.error('Unexpected symbolicator status: %s',
                     response_json['status'])
        error = SymbolicationFailed(type=EventError.NATIVE_INTERNAL_FAILURE)

    handle_symbolication_failed(error, data=event_data)
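
For reference, a minimal self-contained sketch of the status dispatch above. The `classify_response` helper and the stubbed payloads are hypothetical stand-ins, not part of the Sentry codebase, which records `SymbolicationFailed` errors with `EventError` types instead of returning strings.

def classify_response(response_json):
    # Hypothetical helper mirroring the branches in Example #1 without
    # Sentry's SymbolicationFailed / EventError types.
    if not response_json:
        return 'internal_failure'      # empty response -> internal failure
    status = response_json.get('status')
    if status == 'completed':
        return 'ok'                    # symbolication succeeded, nothing to record
    if status == 'failed':
        return 'symbolicator_failed'   # symbolicator reported its own failure
    return 'internal_failure'          # unexpected statuses are logged and treated as internal

assert classify_response(None) == 'internal_failure'
assert classify_response({'status': 'completed'}) == 'ok'
assert classify_response({'status': 'failed', 'message': 'missing CFI'}) == 'symbolicator_failed'
assert classify_response({'status': 'bogus'}) == 'internal_failure'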
Example #2
def handle_symbolicator_status(status, image, sdk_info,
                               handle_symbolication_failed):
    if status in ('found', 'unused'):
        return
    elif status in (
            'missing_debug_file',  # TODO(markus): Legacy key. Remove after next deploy
            'missing'):
        package = image.get('code_file')
        if not package or is_known_third_party(package, sdk_info=sdk_info):
            return

        if is_optional_package(package, sdk_info=sdk_info):
            error = SymbolicationFailed(
                type=EventError.NATIVE_MISSING_OPTIONALLY_BUNDLED_DSYM)
        else:
            error = SymbolicationFailed(type=EventError.NATIVE_MISSING_DSYM)
    elif status in (
            'malformed_debug_file',  # TODO(markus): Legacy key. Remove after next deploy
            'malformed'):
        error = SymbolicationFailed(type=EventError.NATIVE_BAD_DSYM)
    elif status == 'too_large':
        error = SymbolicationFailed(type=EventError.FETCH_TOO_LARGE)
    elif status == 'fetching_failed':
        error = SymbolicationFailed(type=EventError.FETCH_GENERIC_ERROR)
    elif status == 'other':
        error = SymbolicationFailed(type=EventError.UNKNOWN_ERROR)
    else:
        logger.error("Unknown status: %s", status)
        return

    error.image_arch = image.get('arch')
    error.image_path = image.get('code_file')
    error.image_name = image_name(image.get('code_file'))
    error.image_uuid = image.get('debug_id')
    handle_symbolication_failed(error)
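
A hedged sketch of the image-status mapping in Example #2, using plain strings instead of `EventError` constants; the table below, including how the legacy keys collapse onto their modern counterparts, is an illustration only.

# Hypothetical mapping mirroring Example #2; values stand in for EventError constants.
STATUS_TO_ERROR = {
    'missing': 'missing_dsym',
    'missing_debug_file': 'missing_dsym',        # legacy key
    'malformed': 'bad_dsym',
    'malformed_debug_file': 'bad_dsym',          # legacy key
    'too_large': 'fetch_too_large',
    'fetching_failed': 'fetch_generic_error',
    'other': 'unknown_error',
}

def classify_image_status(status):
    if status in ('found', 'unused'):
        return None                               # healthy image, nothing to report
    return STATUS_TO_ERROR.get(status)            # unknown statuses yield None (logged upstream)

assert classify_image_status('unused') is None
assert classify_image_status('missing_debug_file') == 'missing_dsym'
assert classify_image_status('fetching_failed') == 'fetch_generic_error'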
Example #3
def reprocess_minidump(data):
    project = Project.objects.get_from_cache(id=data['project'])

    minidump_is_reprocessed_cache_key = minidump_reprocessed_cache_key_for_event(
        data)
    if default_cache.get(minidump_is_reprocessed_cache_key):
        return

    if not _is_symbolicator_enabled(project, data):
        rv = reprocess_minidump_with_cfi(data)
        default_cache.set(minidump_is_reprocessed_cache_key, True, 3600)
        return rv

    minidump = get_attached_minidump(data)

    if not minidump:
        logger.error("Missing minidump for minidump event")
        return

    request_id_cache_key = request_id_cache_key_for_event(data)

    response = run_symbolicator(project=project,
                                request_id_cache_key=request_id_cache_key,
                                create_task=create_minidump_task,
                                minidump=make_buffered_slice_reader(
                                    minidump.data, None))

    if not response:
        handle_symbolication_failed(
            SymbolicationFailed(type=EventError.NATIVE_SYMBOLICATOR_FAILED),
            data=data,
        )
        default_cache.set(minidump_is_reprocessed_cache_key, True, 3600)
        return

    merge_symbolicator_minidump_response(data, response)

    event_cache_key = cache_key_for_event(data)
    default_cache.set(event_cache_key, dict(data), 3600)
    default_cache.set(minidump_is_reprocessed_cache_key, True, 3600)

    return data
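
A small self-contained sketch of the idempotence guard used in Example #3; `FakeCache` and `reprocess_once` are hypothetical stand-ins for Sentry's `default_cache` and the reprocessing task, and TTL handling is omitted.

class FakeCache(object):
    # Hypothetical in-memory stand-in for default_cache; the real cache honours TTLs.
    def __init__(self):
        self._store = {}

    def get(self, key):
        return self._store.get(key)

    def set(self, key, value, ttl):
        self._store[key] = value

cache = FakeCache()

def reprocess_once(event_id, do_work):
    key = 'minidump:reprocessed:%s' % event_id
    if cache.get(key):
        return None                  # already reprocessed, skip the expensive work
    result = do_work()
    cache.set(key, True, 3600)
    return result

calls = []
reprocess_once('abc123', lambda: calls.append('work'))
reprocess_once('abc123', lambda: calls.append('work'))
assert calls == ['work']             # the second call was short-circuited by the cache key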
Example #4
    def run_symbolicator(self, processing_task):
        # TODO(markus): Make this work with minidumps. An unprocessed minidump
        # event will not contain unsymbolicated frames, because the minidump
        # upload already happened in store.
        # It will also presumably not contain images, so `self.available` will
        # already be `False`.

        if not self.available:
            return

        request_id_cache_key = request_id_cache_key_for_event(self.data)

        stacktraces = []
        processable_stacktraces = []
        has_frames = False

        for stacktrace_info, pf_list in processing_task.iter_processable_stacktraces():
            registers = stacktrace_info.stacktrace.get('registers') or {}

            # The filtering condition of this list comprehension is copied
            # from `iter_processable_frames`.
            #
            # We cannot reuse `iter_processable_frames` because the
            # symbolicator currently expects a list of stacktraces, not
            # flat frames.
            #
            # Right now we can't even filter out frames (e.g. using a frame
            # cache locally). The stacktraces have to be as complete as
            # possible because the symbolicator assumes the first frame of
            # a stacktrace to be the crashing frame. This assumption may
            # already be violated because the SDK might chop off frames
            # (though that is unlikely).
            pf_list = [pf for pf in reversed(pf_list) if pf.processor == self]

            frames = []

            for pf in pf_list:
                frame = {'instruction_addr': pf['instruction_addr']}
                if pf.get('trust') is not None:
                    frame['trust'] = pf['trust']
                frames.append(frame)
                has_frames = True

            stacktraces.append({'registers': registers, 'frames': frames})

            processable_stacktraces.append(pf_list)

        if not has_frames:
            return

        rv = run_symbolicator(project=self.project,
                              request_id_cache_key=request_id_cache_key,
                              stacktraces=stacktraces,
                              modules=self.images,
                              signal=self.signal)

        if not rv:
            handle_symbolication_failed(
                SymbolicationFailed(
                    type=EventError.NATIVE_SYMBOLICATOR_FAILED),
                data=self.data,
            )
            return

        # TODO(markus): Set signal and os context from symbolicator response,
        # for minidumps

        assert len(self.images) == len(rv['modules']), (self.images, rv)

        for image, complete_image in zip(self.images, rv['modules']):
            merge_symbolicator_image(
                image, complete_image, self.sdk_info,
                lambda e: handle_symbolication_failed(e, data=self.data))

        assert len(stacktraces) == len(rv['stacktraces'])

        for pf_list, symbolicated_stacktrace in zip(processable_stacktraces,
                                                    rv['stacktraces']):
            for symbolicated_frame in symbolicated_stacktrace.get('frames') or ():
                pf = pf_list[symbolicated_frame['original_index']]
                pf.data['symbolicator_match'].append(symbolicated_frame)
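
A hedged sketch of the request payload built in Example #4: each frame keeps only its instruction address (and trust, when known), and the frames are reversed because, per the comment in the example, the symbolicator assumes the first frame is the crashing one. `build_symbolicator_stacktrace` is a hypothetical helper, not part of the original code.

def build_symbolicator_stacktrace(registers, frames):
    # Hypothetical helper reproducing the payload shape assembled in Example #4.
    payload_frames = []
    for frame in reversed(frames):                     # crashing frame goes first
        entry = {'instruction_addr': frame['instruction_addr']}
        if frame.get('trust') is not None:
            entry['trust'] = frame['trust']
        payload_frames.append(entry)
    return {'registers': registers or {}, 'frames': payload_frames}

stacktrace = build_symbolicator_stacktrace(
    {'rip': '0x1000'},
    [{'instruction_addr': '0x2000'},
     {'instruction_addr': '0x3000', 'trust': 'scan'}],
)
assert stacktrace['frames'][0] == {'instruction_addr': '0x3000', 'trust': 'scan'}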
Example #5
def merge_symbolicator_minidump_response(data, response):
    sdk_info = get_sdk_from_event(data)

    data['platform'] = 'native'
    if response.get('crashed') is not None:
        data['level'] = 'fatal' if response['crashed'] else 'info'

    if response.get('timestamp'):
        data['timestamp'] = float(response['timestamp'])

    if response.get('system_info'):
        merge_symbolicator_minidump_system_info(data, response['system_info'])

    images = []
    set_path(data, 'debug_meta', 'images', value=images)

    for complete_image in response['modules']:
        image = {}
        merge_symbolicator_image(
            image, complete_image, sdk_info,
            lambda e: handle_symbolication_failed(e, data=data))
        images.append(image)

    # Extract the crash reason and related info
    data_exception = get_path(data, 'exception', 'values', 0)
    exc_value = (
        'Assertion Error: %s' % response.get('assertion')
        if response.get('assertion')
        else 'Fatal Error: %s' % response.get('crash_reason')
    )
    data_exception['value'] = exc_value
    data_exception['type'] = response.get('crash_reason')

    data_threads = []
    if response['stacktraces']:
        data['threads'] = {'values': data_threads}
    else:
        error = SymbolicationFailed(message='minidump has no thread list',
                                    type=EventError.NATIVE_SYMBOLICATOR_FAILED)
        handle_symbolication_failed(error, data=data)

    for complete_stacktrace in response['stacktraces']:
        is_requesting = complete_stacktrace.get('is_requesting')
        thread_id = complete_stacktrace.get('thread_id')

        data_thread = {
            'id': thread_id,
            'crashed': is_requesting,
        }
        data_threads.append(data_thread)

        if is_requesting:
            data_exception['thread_id'] = thread_id
            data_stacktrace = data_exception.setdefault('stacktrace', {})
            # Make exemption specifically for unreal portable callstacks
            # TODO(markus): Allow overriding stacktrace more generically
            # (without looking into unreal context) once we no longer parse
            # minidump in the endpoint (right now we can't distinguish that
            # from user json).
            if (data_stacktrace.get('frames')
                    and is_unreal_exception_stacktrace(data)):
                continue
            data_stacktrace['frames'] = []
        else:
            data_thread['stacktrace'] = data_stacktrace = {'frames': []}

        if complete_stacktrace.get('registers'):
            data_stacktrace['registers'] = complete_stacktrace['registers']

        for complete_frame in reversed(complete_stacktrace['frames']):
            new_frame = {}
            merge_symbolicated_frame(new_frame, complete_frame)
            data_stacktrace['frames'].append(new_frame)
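
A self-contained sketch of the thread-merging step in Example #5: every symbolicated stacktrace becomes a thread entry, and the one flagged `is_requesting` also fills the exception's stacktrace. `merge_threads` is a hypothetical helper and omits the Unreal special case and register handling.

def merge_threads(stacktraces):
    # Hypothetical condensation of the loop in Example #5.
    exception, threads = {}, []
    for st in stacktraces:
        thread = {'id': st.get('thread_id'), 'crashed': st.get('is_requesting')}
        threads.append(thread)
        frames = list(reversed(st.get('frames') or []))    # symbolicator order -> event order
        if st.get('is_requesting'):
            exception['thread_id'] = st.get('thread_id')
            exception['stacktrace'] = {'frames': frames}
        else:
            thread['stacktrace'] = {'frames': frames}
    return exception, threads

exc, threads = merge_threads([
    {'thread_id': 0, 'is_requesting': True, 'frames': [{'function': 'crash_here'}]},
    {'thread_id': 1, 'is_requesting': False, 'frames': []},
])
assert exc['thread_id'] == 0 and threads[0]['crashed'] is True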
Example #6
    def run_symbolicator(self, processing_task):
        # TODO(markus): Make this work with minidumps. An unprocessed minidump
        # event will not contain unsymbolicated frames, because the minidump
        # upload already happened in store.
        # It will also presumably not contain images, so `self.available` will
        # already be `False`.

        if not self.available:
            return

        request_id_cache_key = request_id_cache_key_for_event(self.data)

        stacktraces = []
        processable_stacktraces = []
        for stacktrace_info, pf_list in processing_task.iter_processable_stacktraces():
            registers = stacktrace_info.stacktrace.get('registers') or {}

            # The filtering condition of this list comprehension is copied
            # from `iter_processable_frames`.
            #
            # We cannot reuse `iter_processable_frames` because the
            # symbolicator currently expects a list of stacktraces, not
            # flat frames.
            #
            # Right now we can't even filter out frames (e.g. using a frame
            # cache locally). The stacktraces have to be as complete as
            # possible because the symbolicator assumes the first frame of
            # a stacktrace to be the crashing frame. This assumption may
            # already be violated because the SDK might chop off frames
            # (though that is unlikely).
            pf_list = [pf for pf in reversed(pf_list) if pf.processor == self]

            frames = []

            for pf in pf_list:
                frame = {'instruction_addr': pf['instruction_addr']}
                if pf.get('trust') is not None:
                    frame['trust'] = pf['trust']
                frames.append(frame)

            stacktraces.append({'registers': registers, 'frames': frames})

            processable_stacktraces.append(pf_list)

        rv = run_symbolicator(stacktraces=stacktraces,
                              modules=self.images,
                              project=self.project,
                              arch=self.arch,
                              signal=self.signal,
                              request_id_cache_key=request_id_cache_key)
        if not rv:
            self.data.setdefault('errors', []).extend(
                self._handle_symbolication_failed(SymbolicationFailed(
                    type=EventError.NATIVE_SYMBOLICATOR_FAILED)))
            return

        # TODO(markus): Set signal and os context from symbolicator response,
        # for minidumps

        assert len(self.images) == len(rv['modules']), (self.images, rv)

        for image, fetched_debug_file in zip(self.images, rv['modules']):
            status = fetched_debug_file.pop('status')
            # Set image data from symbolicator as symbolicator might know more
            # than the SDK, especially for minidumps
            if fetched_debug_file.get('arch') == 'unknown':
                fetched_debug_file.pop('arch')
            image.update(fetched_debug_file)

            if status in ('found', 'unused'):
                continue
            elif status == 'missing_debug_file':
                error = SymbolicationFailed(
                    type=EventError.NATIVE_MISSING_DSYM)
            elif status == 'malformed_debug_file':
                error = SymbolicationFailed(type=EventError.NATIVE_BAD_DSYM)
            elif status == 'too_large':
                error = SymbolicationFailed(type=EventError.FETCH_TOO_LARGE)
            elif status == 'fetching_failed':
                error = SymbolicationFailed(
                    type=EventError.FETCH_GENERIC_ERROR)
            elif status == 'other':
                error = SymbolicationFailed(type=EventError.UNKNOWN_ERROR)
            else:
                logger.error("Unknown status: %s", status)
                continue

            error.image_arch = image['arch']
            error.image_path = image['code_file']
            error.image_name = image_name(image['code_file'])
            error.image_uuid = image['debug_id']
            self.data.setdefault('errors', []).extend(
                self._handle_symbolication_failed(error))

        assert len(stacktraces) == len(rv['stacktraces'])

        for pf_list, symbolicated_stacktrace in zip(processable_stacktraces,
                                                    rv['stacktraces']):
            for symbolicated_frame in symbolicated_stacktrace.get('frames') or ():
                pf = pf_list[symbolicated_frame['original_index']]
                pf.data['symbolicator_match'].append(symbolicated_frame)