def handle_symbolicator_status(status, image, sdk_info, handle_symbolication_failed):
    """Map a per-image symbolicator ``status`` to a ``SymbolicationFailed``
    error, annotate it with image metadata and report it via the
    ``handle_symbolication_failed`` callback.

    Statuses ``'found'`` and ``'unused'`` are not errors and are ignored.
    Unknown statuses are logged and ignored as well.
    """
    if status in ('found', 'unused'):
        return

    if status in (
        'missing_debug_file',  # TODO(markus): Legacy key. Remove after next deploy
        'missing',
    ):
        code_file = image.get('code_file')
        # Missing images for well-known third-party packages are expected
        # and not worth reporting.
        if not code_file or is_known_third_party(code_file, sdk_info=sdk_info):
            return

        if is_optional_package(code_file, sdk_info=sdk_info):
            error_type = EventError.NATIVE_MISSING_OPTIONALLY_BUNDLED_DSYM
        else:
            error_type = EventError.NATIVE_MISSING_DSYM
        error = SymbolicationFailed(type=error_type)
    elif status in (
        'malformed_debug_file',  # TODO(markus): Legacy key. Remove after next deploy
        'malformed',
    ):
        error = SymbolicationFailed(type=EventError.NATIVE_BAD_DSYM)
    elif status == 'too_large':
        error = SymbolicationFailed(type=EventError.FETCH_TOO_LARGE)
    elif status == 'fetching_failed':
        error = SymbolicationFailed(type=EventError.FETCH_GENERIC_ERROR)
    elif status == 'other':
        error = SymbolicationFailed(type=EventError.UNKNOWN_ERROR)
    else:
        logger.error("Unknown status: %s", status)
        return

    # Attach image metadata so the error can be rendered with context.
    error.image_arch = image.get('arch')
    error.image_path = image.get('code_file')
    error.image_name = image_name(image.get('code_file'))
    error.image_uuid = image.get('debug_id')
    handle_symbolication_failed(error)
def process_frame(self, processable_frame, processing_task):
    """Symbolicate a single native frame.

    Returns a 3-tuple ``(new_frames, raw_frames, errors)``:

    * ``new_frames`` — the symbolicated frames (one raw frame may expand
      into several, e.g. for inlined functions), ``[]`` to drop a
      scan-trusted frame that could not be symbolicated, or ``None`` to
      keep the raw frame unchanged.
    * ``raw_frames`` — always the single original frame (copied).
    * ``errors`` — symbolication errors collected for this frame.
    """
    frame = processable_frame.frame
    raw_frame = dict(frame)
    errors = []

    # Ensure that package is set in the raw frame, mapped from the
    # debug_images array in the payload. Grouping and UI can use this path
    # to infer in_app and exclude frames from grouping.
    if raw_frame.get('package') is None:
        obj = processable_frame.data['obj']
        raw_frame['package'] = obj and obj.code_file or None

    if processable_frame.cache_value is None:
        # Construct a raw frame that is used by the symbolizer
        # backend. We only assemble the bare minimum we need here.
        instruction_addr = processable_frame.data['instruction_addr']
        debug_id = processable_frame.data['debug_id']
        if debug_id is not None:
            # Track which debug files were needed for this event.
            self.difs_referenced.add(debug_id)

        try:
            symbolicated_frames = self.sym.symbolize_frame(
                instruction_addr,
                self.sdk_info,
                symbolserver_match=processable_frame.data['symbolserver_match'],
                symbolicator_match=processable_frame.data.get('symbolicator_match'),
                trust=raw_frame.get('trust'),
            )
            if not symbolicated_frames:
                # Frames found by stack scanning are unreliable; drop them
                # entirely when symbolication yields nothing. Otherwise
                # keep the raw frame (None means "no replacement").
                if raw_frame.get('trust') == 'scan':
                    return [], [raw_frame], []
                else:
                    return None, [raw_frame], []
        except SymbolicationFailed as e:
            errors = []
            handle_symbolication_failed(e, data=self.data, errors=errors)
            return [raw_frame], [raw_frame], errors

        _ignored = None  # Used to be in_app
        processable_frame.set_cache_value([_ignored, symbolicated_frames])

    else:  # processable_frame.cache_value is present
        _ignored, symbolicated_frames = processable_frame.cache_value

    # Frame-level platform wins over event-level platform.
    platform = raw_frame.get('platform') or self.data.get('platform')

    new_frames = []
    for sfrm in symbolicated_frames:
        # Each symbolicated frame inherits the raw frame's fields and is
        # overlaid with the symbolication result.
        new_frame = dict(raw_frame)
        merge_symbolicated_frame(new_frame, sfrm, platform=platform)
        new_frames.append(new_frame)

    return new_frames, [raw_frame], []
def process_frame(self, processable_frame, processing_task):
    """Symbolicate one native frame.

    Returns ``(new_frames, raw_frames, errors)`` where ``new_frames`` is a
    list of symbolicated frames, ``[]`` to drop a scan-trusted frame, or
    ``None`` to keep the raw frame; ``raw_frames`` always holds the single
    copied original frame.
    """
    base = dict(processable_frame.frame)

    # Map package from the debug_images array when the SDK did not set it;
    # grouping and the UI use this path to infer in_app and to exclude
    # frames from grouping.
    if base.get('package') is None:
        image = processable_frame.data['obj']
        base['package'] = image and image.code_file or None

    cached = processable_frame.cache_value
    if cached is not None:
        _ignored, sym_frames = cached
    else:
        addr = processable_frame.data['instruction_addr']
        debug_id = processable_frame.data['debug_id']
        if debug_id is not None:
            # Record which debug files this event referenced.
            self.difs_referenced.add(debug_id)

        try:
            sym_frames = self.sym.symbolize_frame(
                addr,
                self.sdk_info,
                symbolserver_match=processable_frame.data['symbolserver_match'],
                symbolicator_match=processable_frame.data.get('symbolicator_match'),
                trust=base.get('trust'),
            )
        except SymbolicationFailed as exc:
            failure_errors = []
            handle_symbolication_failed(exc, data=self.data, errors=failure_errors)
            return [base], [base], failure_errors

        if not sym_frames:
            # Stack-scanned frames are unreliable: drop them ([]) when
            # symbolication finds nothing; otherwise keep the raw frame
            # unchanged (None).
            replacement = [] if base.get('trust') == 'scan' else None
            return replacement, [base], []

        # First slot used to be in_app; kept as None for cache layout.
        processable_frame.set_cache_value([None, sym_frames])

    platform = base.get('platform') or self.data.get('platform')

    def _overlay(sym):
        # Overlay the symbolication result onto a copy of the raw frame.
        merged = dict(base)
        merge_symbolicated_frame(merged, sym, platform=platform)
        return merged

    return [_overlay(sym) for sym in sym_frames], [base], []
def handle_symbolicator_response_status(event_data, response_json):
    """Validate a symbolicator response.

    Returns ``True`` when the response completed successfully; otherwise
    records an appropriate error on ``event_data`` via
    ``handle_symbolication_failed`` and returns ``None``.
    """
    if not response_json:
        # Empty/missing response: treat as an internal failure.
        failure = SymbolicationFailed(type=EventError.NATIVE_INTERNAL_FAILURE)
    else:
        status = response_json['status']
        if status == 'completed':
            return True
        if status == 'failed':
            failure = SymbolicationFailed(
                message=response_json.get('message') or None,
                type=EventError.NATIVE_SYMBOLICATOR_FAILED,
            )
        else:
            logger.error('Unexpected symbolicator status: %s', status)
            failure = SymbolicationFailed(type=EventError.NATIVE_INTERNAL_FAILURE)

    handle_symbolication_failed(failure, data=event_data)
def reprocess_minidump(data):
    """Reprocess a minidump event, either locally (CFI) or through the
    symbolicator service, depending on project configuration.

    Uses a cache key to make reprocessing idempotent; returns the updated
    event data (or the CFI result), or ``None`` when nothing was done or
    processing failed.
    """
    cache_timeout = 3600
    project = Project.objects.get_from_cache(id=data['project'])
    reprocessed_key = minidump_reprocessed_cache_key_for_event(data)

    def _mark_reprocessed():
        # Remember for an hour that this event has been handled.
        default_cache.set(reprocessed_key, True, cache_timeout)

    if default_cache.get(reprocessed_key):
        return

    if not _is_symbolicator_enabled(project, data):
        result = reprocess_minidump_with_cfi(data)
        _mark_reprocessed()
        return result

    minidump = get_attached_minidump(data)
    if not minidump:
        logger.error("Missing minidump for minidump event")
        return

    response = run_symbolicator(
        project=project,
        request_id_cache_key=request_id_cache_key_for_event(data),
        create_task=create_minidump_task,
        minidump=make_buffered_slice_reader(minidump.data, None),
    )
    if not response:
        handle_symbolication_failed(
            SymbolicationFailed(type=EventError.NATIVE_SYMBOLICATOR_FAILED),
            data=data,
        )
        _mark_reprocessed()
        return

    merge_symbolicator_minidump_response(data, response)

    default_cache.set(cache_key_for_event(data), dict(data), cache_timeout)
    _mark_reprocessed()
    return data
def merge_symbolicator_minidump_response(data, response):
    """Merge a symbolicator minidump response into the event payload.

    Replaces ``debug_meta.images`` with the merged module list and rebuilds
    ``threads.values`` from the response stacktraces. The crashing
    (``is_requesting``) thread's frames are written into the existing
    exception stacktrace; all other threads get fresh stacktraces.
    """
    sdk_info = get_sdk_from_event(data)

    # TODO(markus): Add OS context here when `merge_process_state_event` is no
    # longer called for symbolicator projects
    images = []
    set_path(data, 'debug_meta', 'images', value=images)
    for complete_image in response['modules']:
        image = {}
        merge_symbolicator_image(
            image, complete_image, sdk_info,
            lambda e: handle_symbolication_failed(e, data=data))
        images.append(image)

    data_threads = []
    data['threads'] = {'values': data_threads}
    data_exception = get_path(data, 'exception', 'values', 0)

    for complete_stacktrace in response['stacktraces']:
        is_requesting = complete_stacktrace.get('is_requesting')
        thread_id = complete_stacktrace.get('thread_id')

        data_thread = {
            'id': thread_id,
            'crashed': is_requesting,
        }
        data_threads.append(data_thread)

        if is_requesting:
            data_stacktrace = get_path(data_exception, 'stacktrace')
            assert isinstance(data_stacktrace, dict), data_stacktrace
            # Make exemption specifically for unreal portable callstacks
            # TODO(markus): Allow overriding stacktrace more generically
            # (without looking into unreal context) once we no longer parse
            # minidump in the endpoint (right now we can't distinguish that
            # from user json).
            # Fix: use .get() and rebind instead of indexing/`del [:]` —
            # an exception stacktrace dict without a 'frames' key used to
            # raise KeyError here. This matches the sibling implementation.
            if data_stacktrace.get('frames') and is_unreal_exception_stacktrace(data):
                continue
            data_stacktrace['frames'] = []
        else:
            data_thread['stacktrace'] = data_stacktrace = {'frames': []}

        if complete_stacktrace.get('registers'):
            data_stacktrace['registers'] = complete_stacktrace['registers']

        # Symbolicator returns frames callee-first; the event format
        # stores them caller-first.
        for complete_frame in reversed(complete_stacktrace['frames']):
            new_frame = {}
            merge_symbolicated_frame(new_frame, complete_frame)
            data_stacktrace['frames'].append(new_frame)
def merge_symbolicator_minidump_response(data, response):
    """Merge a symbolicator minidump response into the event payload:
    rebuild ``debug_meta.images`` and ``threads.values``, writing the
    crashing thread's frames into the existing exception stacktrace.
    """
    sdk_info = get_sdk_from_event(data)

    # TODO(markus): Add OS context here when `merge_process_state_event` is no
    # longer called for symbolicator projects
    images = []
    set_path(data, 'debug_meta', 'images', value=images)
    for raw_module in response['modules']:
        merged_image = {}
        merge_symbolicator_image(
            merged_image, raw_module, sdk_info,
            lambda e: handle_symbolication_failed(e, data=data))
        images.append(merged_image)

    threads = []
    data['threads'] = {'values': threads}
    exception = get_path(data, 'exception', 'values', 0)

    for stacktrace in response['stacktraces']:
        crashed = stacktrace.get('is_requesting')
        thread = {'id': stacktrace.get('thread_id'), 'crashed': crashed}
        threads.append(thread)

        if not crashed:
            # Regular thread: fresh stacktrace container.
            thread['stacktrace'] = target = {'frames': []}
        else:
            # Crashing thread: reuse the exception's stacktrace dict.
            target = get_path(exception, 'stacktrace')
            assert isinstance(target, dict), target
            # Make exemption specifically for unreal portable callstacks
            # TODO(markus): Allow overriding stacktrace more generically
            # (without looking into unreal context) once we no longer parse
            # minidump in the endpoint (right now we can't distinguish that
            # from user json).
            if target['frames'] and is_unreal_exception_stacktrace(data):
                continue
            target['frames'] = []

        if stacktrace.get('registers'):
            target['registers'] = stacktrace['registers']

        # Response frames are callee-first; the event stores caller-first.
        out_frames = target['frames']
        for raw_frame in reversed(stacktrace['frames']):
            converted = {}
            merge_symbolicated_frame(converted, raw_frame)
            out_frames.append(converted)
def run_symbolicator(self, processing_task):
    """Send all processable stacktraces of this event to the symbolicator
    service and record each returned frame on the matching processable
    frame's ``data['symbolicator_match']`` list (consumed later by
    ``process_frame``).

    Returns ``None`` on every path; the results are applied by mutating
    ``self.images`` and the processable frames in place.

    NOTE(review): this method shadows and calls the module-level
    ``run_symbolicator`` helper of the same name.
    """
    # TODO(markus): Make this work with minidumps. An unprocessed minidump
    # event will not contain unsymbolicated frames, because the minidump
    # upload already happened in store.
    # It will also presumably not contain images, so `self.available` will
    # already be `False`.
    if not self.available:
        return

    request_id_cache_key = request_id_cache_key_for_event(self.data)

    stacktraces = []
    processable_stacktraces = []
    has_frames = False

    for stacktrace_info, pf_list in processing_task.iter_processable_stacktraces():
        registers = stacktrace_info.stacktrace.get('registers') or {}

        # The filtering condition of this list comprehension is copied
        # from `iter_processable_frames`.
        #
        # We cannot reuse `iter_processable_frames` because the
        # symbolicator currently expects a list of stacktraces, not
        # flat frames.
        #
        # Right now we can't even filter out frames (e.g. using a frame
        # cache locally). The stacktraces have to be as complete as
        # possible because the symbolicator assumes the first frame of
        # a stacktrace to be the crashing frame. This assumption is
        # already violated because the SDK might chop off frames though
        # (which is less likely to be the case though).
        pf_list = [pf for pf in reversed(pf_list) if pf.processor == self]

        frames = []

        for pf in pf_list:
            frame = {'instruction_addr': pf['instruction_addr']}
            if pf.get('trust') is not None:
                frame['trust'] = pf['trust']
            frames.append(frame)
            has_frames = True

        # Append even when `frames` is empty so indices stay aligned with
        # the response (asserted below).
        stacktraces.append({'registers': registers, 'frames': frames})

        processable_stacktraces.append(pf_list)

    if not has_frames:
        return

    # Calls the module-level helper, not this method (name shadowing).
    rv = run_symbolicator(project=self.project,
                          request_id_cache_key=request_id_cache_key,
                          stacktraces=stacktraces,
                          modules=self.images,
                          signal=self.signal)
    if not rv:
        handle_symbolication_failed(
            SymbolicationFailed(
                type=EventError.NATIVE_SYMBOLICATOR_FAILED),
            data=self.data,
        )
        return

    # TODO(markus): Set signal and os context from symbolicator response,
    # for minidumps

    # The response must mirror the request one-to-one.
    assert len(self.images) == len(rv['modules']), (self.images, rv)
    for image, complete_image in zip(self.images, rv['modules']):
        merge_symbolicator_image(
            image, complete_image, self.sdk_info,
            lambda e: handle_symbolication_failed(e, data=self.data))

    assert len(stacktraces) == len(rv['stacktraces'])
    for pf_list, symbolicated_stacktrace in zip(processable_stacktraces,
                                                rv['stacktraces']):
        for symbolicated_frame in symbolicated_stacktrace.get('frames') or ():
            # `original_index` maps the response frame back to the request
            # frame within the same stacktrace.
            pf = pf_list[symbolicated_frame['original_index']]
            pf.data['symbolicator_match'].append(symbolicated_frame)
def merge_symbolicator_minidump_response(data, response):
    """Apply a full symbolicator minidump response to the event payload.

    Sets platform/level/timestamp/system info, rebuilds
    ``debug_meta.images`` and ``threads.values``, fills in the exception's
    type/value from the crash reason, and writes the crashing thread's
    frames into the exception stacktrace.
    """
    sdk_info = get_sdk_from_event(data)

    data['platform'] = 'native'
    if response.get('crashed') is not None:
        data['level'] = 'fatal' if response['crashed'] else 'info'
    if response.get('timestamp'):
        data['timestamp'] = float(response['timestamp'])
    if response.get('system_info'):
        merge_symbolicator_minidump_system_info(data, response['system_info'])

    images = []
    set_path(data, 'debug_meta', 'images', value=images)
    for complete_image in response['modules']:
        image = {}
        merge_symbolicator_image(
            image, complete_image, sdk_info,
            lambda e: handle_symbolication_failed(e, data=data))
        images.append(image)

    # Extract the crash reason and infos
    # NOTE(review): if the response has neither 'assertion' nor
    # 'crash_reason', this produces "Fatal Error: None" — confirm intended.
    # Also assumes `data` has an exception value at index 0 — TODO confirm.
    data_exception = get_path(data, 'exception', 'values', 0)
    exc_value = ('Assertion Error: %s' % response.get('assertion')
                 if response.get('assertion')
                 else 'Fatal Error: %s' % response.get('crash_reason'))
    data_exception['value'] = exc_value
    data_exception['type'] = response.get('crash_reason')

    data_threads = []
    if response['stacktraces']:
        data['threads'] = {'values': data_threads}
    else:
        # A minidump without any thread list is malformed; record the
        # failure on the event instead of raising.
        error = SymbolicationFailed(message='minidump has no thread list',
                                    type=EventError.NATIVE_SYMBOLICATOR_FAILED)
        handle_symbolication_failed(error, data=data)

    for complete_stacktrace in response['stacktraces']:
        is_requesting = complete_stacktrace.get('is_requesting')
        thread_id = complete_stacktrace.get('thread_id')

        data_thread = {
            'id': thread_id,
            'crashed': is_requesting,
        }
        data_threads.append(data_thread)

        if is_requesting:
            # Crashing thread: attach its id to the exception and write
            # frames into the exception's (possibly pre-existing)
            # stacktrace dict.
            data_exception['thread_id'] = thread_id
            data_stacktrace = data_exception.setdefault('stacktrace', {})
            # Make exemption specifically for unreal portable callstacks
            # TODO(markus): Allow overriding stacktrace more generically
            # (without looking into unreal context) once we no longer parse
            # minidump in the endpoint (right now we can't distinguish that
            # from user json).
            if data_stacktrace.get('frames') and is_unreal_exception_stacktrace(data):
                continue
            data_stacktrace['frames'] = []
        else:
            data_thread['stacktrace'] = data_stacktrace = {'frames': []}

        if complete_stacktrace.get('registers'):
            data_stacktrace['registers'] = complete_stacktrace['registers']

        # Response frames are callee-first; the event stores caller-first.
        for complete_frame in reversed(complete_stacktrace['frames']):
            new_frame = {}
            merge_symbolicated_frame(new_frame, complete_frame)
            data_stacktrace['frames'].append(new_frame)
def run_symbolicator(self, processing_task):
    """Submit every processable stacktrace of this event to the
    symbolicator service, then record each returned frame on the matching
    processable frame's ``data['symbolicator_match']`` list.

    Mutates ``self.images`` and the processable frames in place; always
    returns ``None``. (This method shadows and calls the module-level
    ``run_symbolicator`` helper.)
    """
    # TODO(markus): Make this work with minidumps. An unprocessed minidump
    # event will not contain unsymbolicated frames, because the minidump
    # upload already happened in store.
    # It will also presumably not contain images, so `self.available` will
    # already be `False`.
    if not self.available:
        return

    cache_key = request_id_cache_key_for_event(self.data)

    request_stacktraces = []
    tracked_pf_lists = []
    any_frames = False

    for stacktrace_info, pf_list in processing_task.iter_processable_stacktraces():
        registers = stacktrace_info.stacktrace.get('registers') or {}

        # The filtering condition below is copied from
        # `iter_processable_frames`, which we cannot reuse because the
        # symbolicator expects whole stacktraces rather than flat frames.
        #
        # We also cannot filter frames out (e.g. via a local frame cache):
        # stacktraces must stay as complete as possible because the
        # symbolicator treats the first frame as the crashing frame —
        # an assumption the SDK may already violate by chopping frames.
        own_frames = [pf for pf in reversed(pf_list) if pf.processor == self]

        payload_frames = []
        for pf in own_frames:
            entry = {'instruction_addr': pf['instruction_addr']}
            trust = pf.get('trust')
            if trust is not None:
                entry['trust'] = trust
            payload_frames.append(entry)
            any_frames = True

        # Append even when empty so indices line up with the response.
        request_stacktraces.append({'registers': registers,
                                    'frames': payload_frames})
        tracked_pf_lists.append(own_frames)

    if not any_frames:
        return

    # Module-level helper, not this method (name shadowing).
    response = run_symbolicator(
        project=self.project,
        request_id_cache_key=cache_key,
        stacktraces=request_stacktraces,
        modules=self.images,
        signal=self.signal,
    )
    if not handle_symbolicator_response_status(self.data, response):
        return

    # TODO(markus): Set signal and os context from symbolicator response,
    # for minidumps

    # The response must mirror the request one-to-one.
    assert len(self.images) == len(response['modules']), (self.images, response)
    for image, complete_image in zip(self.images, response['modules']):
        merge_symbolicator_image(
            image, complete_image, self.sdk_info,
            lambda e: handle_symbolication_failed(e, data=self.data))

    assert len(request_stacktraces) == len(response['stacktraces'])
    for own_frames, sym_stacktrace in zip(tracked_pf_lists,
                                          response['stacktraces']):
        for sym_frame in sym_stacktrace.get('frames') or ():
            # `original_index` maps back to the request frame order.
            target = own_frames[sym_frame['original_index']]
            target.data['symbolicator_match'].append(sym_frame)