def _pwrlevel_events_handler(self):
    """Handler function for GPU power level events"""
    self._pwrlevel_intervals_by_device = defaultdict(IntervalList)
    for device, events in self._pwrlevel_events_by_device.iteritems():
        for pwr_a, pwr_b in zip(events, events[1:]):
            interval = Interval(pwr_a.timestamp, pwr_b.timestamp)
            freq_interval = FreqInterval(device=device,
                                         frequency=pwr_a.data.freq,
                                         pwrlevel=pwr_a.data.pwrlevel,
                                         interval=interval)
            self._pwrlevel_intervals_by_device[device].append(freq_interval)
        # Close off the last power level up to the end of the trace.
        if events:
            last = events[-1]
            self._pwrlevel_intervals_by_device[device].append(
                FreqInterval(device=device,
                             frequency=last.data.freq,
                             pwrlevel=last.data.pwrlevel,
                             interval=Interval(last.timestamp,
                                               self._trace.duration)))
    return self._pwrlevel_intervals_by_device
def _buslevel_events_handler(self):
    """Handler function for GPU bus level events"""
    self._buslevel_intervals_by_device = defaultdict(IntervalList)
    for device, events in self._buslevel_events_by_device.iteritems():
        for bus_a, bus_b in zip(events, events[1:]):
            interval = Interval(bus_a.timestamp, bus_b.timestamp)
            bus_interval = BusLevelInterval(device=device,
                                            pwrlevel=bus_a.data.pwrlevel,
                                            bus=bus_a.data.bus,
                                            interval=interval)
            self._buslevel_intervals_by_device[device].append(bus_interval)
        # Close off the last bus level up to the end of the trace.
        if events:
            last = events[-1]
            self._buslevel_intervals_by_device[device].append(
                BusLevelInterval(device=device,
                                 pwrlevel=last.data.pwrlevel,
                                 bus=last.data.bus,
                                 interval=Interval(last.timestamp,
                                                   self._trace.duration)))
    return self._buslevel_intervals_by_device
def _pwrstate_events_handler(self):
    """Handler function for GPU power state events"""
    self._pwrstate_intervals_by_device = defaultdict(IntervalList)
    for device, events in self._pwrstate_events_by_device.iteritems():
        for pwr_a, pwr_b in zip(events, events[1:]):
            interval = Interval(pwr_a.timestamp, pwr_b.timestamp)
            state = BusyState.map(pwr_a.data.state) or BusyState.UNKNOWN
            idle_interval = IdleInterval(device=device,
                                         state=state,
                                         interval=interval)
            self._pwrstate_intervals_by_device[device].append(idle_interval)
        # Close off the last state up to the end of the trace.
        if events:
            last = events[-1]
            state = BusyState.map(last.data.state) or BusyState.UNKNOWN
            self._pwrstate_intervals_by_device[device].append(
                IdleInterval(device=device,
                             state=state,
                             interval=Interval(last.timestamp,
                                               self._trace.duration)))
    return self._pwrstate_intervals_by_device
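# Illustrative sketch (not part of the original handlers): assuming the
# IdleInterval objects built above expose `state` and `interval.duration`,
# per-device GPU busy residency could be summarized as below. The method
# name and the BusyState.BUSY comparison are assumptions for illustration.
def _example_gpu_busy_residency(self):
    """Return {device: fraction of covered time the GPU was busy}."""
    residency = {}
    for device, intervals in self._pwrstate_intervals_by_device.iteritems():
        total = sum(i.interval.duration for i in intervals)
        busy = sum(i.interval.duration for i in intervals
                   if i.state == BusyState.BUSY)
        residency[device] = (busy / total) if total else 0.0
    return residency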
def _clk_events_handler(self):
    """Handler function for clock enable/disable events"""
    self._clk_intervals_by_clock = defaultdict(IntervalList)
    for clock, events in self._clk_events_by_clock.iteritems():
        last_timestamp = self._trace.interval.start
        for clk_event in events:
            # Intentionally kept inverted, as we track when the change
            # occurs.
            state = ClockState.DISABLED \
                if clk_event.tracepoint == 'clk_disable' \
                else ClockState.ENABLED
            interval = Interval(last_timestamp, clk_event.timestamp)
            clk_interval = ClkInterval(clock, state, interval)
            self._clk_intervals_by_clock[clock].append(clk_interval)
            last_timestamp = clk_event.timestamp
        # Close off the last interval up to the end of the trace.
        if events:
            # Not a change, so the state carries through to the end.
            state = ClockState.ENABLED \
                if clk_event.tracepoint == 'clk_enable' \
                else ClockState.DISABLED
            end_interval = ClkInterval(
                clock=clock,
                state=state,
                interval=Interval(clk_event.timestamp,
                                  self._trace.duration))
            self._clk_intervals_by_clock[clock].append(end_interval)
    return self._clk_intervals_by_clock
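# Illustrative sketch (not part of the original handlers): assuming the
# ClkInterval objects above expose `state` and `interval.duration`, the
# total time each clock spent enabled could be computed like this. The
# method name is hypothetical.
def _example_clock_enabled_time(self):
    """Return {clock: total time spent in ClockState.ENABLED}."""
    enabled_time = {}
    for clock, intervals in self._clk_intervals_by_clock.iteritems():
        enabled_time[clock] = sum(ci.interval.duration
                                  for ci in intervals
                                  if ci.state == ClockState.ENABLED)
    return enabled_time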
def _cluster_idle_events_handler(self):
    """Handler function for cluster idle events"""
    self._cluster_idle_intervals_by_cluster = defaultdict(IntervalList)
    for cluster, events in \
            self._cluster_idle_events_by_cluster.iteritems():
        last_event = None
        for cluster_idle in events:
            tp = cluster_idle.tracepoint
            interval = Interval(
                last_event.timestamp if last_event
                else self._trace.interval.start,
                cluster_idle.timestamp)
            if tp == 'cluster_exit':
                # The interval leading up to an exit is idle time.
                idle_interval = IdleInterval(cluster=cluster,
                                             state=BusyState.IDLE,
                                             idx=cluster_idle.data.idx,
                                             interval=interval)
                self._cluster_idle_intervals_by_cluster[cluster].append(
                    idle_interval)
            elif tp == 'cluster_enter':
                # The interval leading up to an enter is busy time.
                idle_interval = IdleInterval(cluster=cluster,
                                             state=BusyState.BUSY,
                                             idx=-1,
                                             interval=interval)
                self._cluster_idle_intervals_by_cluster[cluster].append(
                    idle_interval)
            last_event = cluster_idle
        # Close off the last interval up to the end of the trace.
        if last_event:
            if last_event.tracepoint == 'cluster_exit':
                state = BusyState.BUSY
                idx = -1
            else:
                state = BusyState.IDLE
                idx = last_event.data.idx
            self._cluster_idle_intervals_by_cluster[cluster].append(
                IdleInterval(cluster=cluster,
                             state=state,
                             idx=idx,
                             interval=Interval(last_event.timestamp,
                                               self._trace.duration)))
    return self._cluster_idle_intervals_by_cluster
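# Illustrative sketch (not part of the original handlers): a per-cluster
# idle residency summary derived from the IdleInterval list built above.
# It assumes `state` and `interval.duration` attributes; the method name
# is hypothetical.
def _example_cluster_idle_residency(self):
    """Return {cluster: fraction of covered time spent idle}."""
    residency = {}
    for cluster, intervals in \
            self._cluster_idle_intervals_by_cluster.iteritems():
        total = sum(i.interval.duration for i in intervals)
        idle = sum(i.interval.duration for i in intervals
                   if i.state == BusyState.IDLE)
        residency[cluster] = (idle / total) if total else 0.0
    return residency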
def _switch_device_latency_handler(self):
    """Returns list of camera switch latencies"""
    self._switch_latencies = IntervalList()
    deliver_inputs = self._trace.android.event_intervals(
        "deliverInputEvent")

    def _preview_intervals():
        """Generator that yields intervals delimited by preview stops"""
        last_timestamp = self._trace.interval.start
        sp_events = self._trace.android.event_intervals(
            'doStopPreviewSync')
        if not sp_events:
            preview_events = self._trace.android.event_intervals(
                'AndroidCamera.startPreview')
            if preview_events:
                camera_task = preview_events[0].event.task
                sp_events = (
                    context for context in
                    self._trace.android.event_intervals('disconnect')
                    if context.event.task.pid == camera_task.pid)
        for sp_event in sp_events:
            yield Interval(last_timestamp, sp_event.interval.start)
            last_timestamp = sp_event.interval.start

    for interval in _preview_intervals():
        touch_events = deliver_inputs.slice(interval=interval,
                                            trimmed=False)
        if touch_events:
            # Use the last input event within this interval.
            start_ts = touch_events[-1].interval.start
            end_ts = start_ts
            post_touch_interval = Interval(start_ts, self._trace.duration)
            si_events = self._trace.android.event_intervals(
                name='StartPreviewThread', interval=post_touch_interval)
            if not si_events:
                si_events = self._trace.android.event_intervals(
                    name='AndroidCamera.startPreview',
                    interval=post_touch_interval)
            if si_events:
                end_ts = si_events[0].interval.end
            switch_interval = Interval(start=start_ts, end=end_ts)
            self._switch_latencies.append(
                CameraLatency("Camera Switch",
                              interval=switch_interval,
                              latency=switch_interval.duration))
    return self._switch_latencies
def _async_event_handler(self):
    """
    TODO: Track by cookie. This is rarely used.
    """
    last_timestamp = self._trace.interval.start
    last_event = None
    # Nested mapping: PID first, then cookie (each PID gets its own
    # per-cookie dict).
    counter_events_by_pid = defaultdict(lambda: defaultdict(EventList))
    try:
        while True:
            event = (yield)
            pid, cookie = event.data.pid, event.data.cookie
            tag = event.data.atrace_tag
            event_list = counter_events_by_pid[pid][cookie]
            if tag is AtraceTag.ASYNC_BEGIN:
                event_list.append(event)
            elif tag is AtraceTag.ASYNC_END and event_list:
                last_event = event_list.pop()
                last_timestamp = last_event.timestamp
                interval = Interval(last_timestamp, event.timestamp)
                context = Context(pid=pid,
                                  name=last_event.data.section_name,
                                  interval=interval,
                                  event=last_event)
                self._tmw_intervals_by_name[context.name].append(context)
            else:
                log.warn(
                    "Missing start marker {event}".format(event=event))
    except GeneratorExit:
        # Close off any begin markers that never saw an end marker.
        for pid, by_cookie in counter_events_by_pid.iteritems():
            for cookie, event_list in by_cookie.iteritems():
                for event in event_list:
                    last_timestamp = event.timestamp
                    interval = Interval(last_timestamp,
                                        self._trace.duration)
                    context = Context(pid=pid,
                                      name=event.data.section_name,
                                      interval=interval,
                                      event=event)
                    self._tmw_intervals_by_name[context.name].append(
                        context)
def _start_launch_time(self, launched_event):
    """
    Start time estimated as the first time we ever saw (i.e. scheduled
    on CPU) the launched task.
    """
    if launched_event:
        interval = Interval(0, launched_event.timestamp)
        return self._trace.cpu.task_intervals(
            task=launched_event.task,
            interval=interval)[0].interval.start
def _counter_handler(self):
    """Handler function for counter events"""
    last_timestamp = self._trace.interval.start
    last_value = -1.0
    last_event = None
    # Nested mapping: PID first, then counter name (each PID gets its
    # own per-counter dict).
    counter_events_by_pid = defaultdict(lambda: defaultdict(EventList))
    try:
        while True:
            event = (yield)
            pid = event.data.pid
            counter_name = event.data.counter_name
            event_list = counter_events_by_pid[pid][counter_name]
            if event_list:
                last_event = event_list.pop()
                last_timestamp = last_event.timestamp
                last_value = last_event.data.value
            event_list.append(event)
            interval = Interval(last_timestamp, event.timestamp)
            counter = Counter(pid=pid,
                              name=counter_name,
                              event=last_event,
                              value=last_value,
                              interval=interval)
            self._tmw_intervals_by_name[counter.name].append(counter)
    except GeneratorExit:
        # Close off the last value of each counter up to the end of
        # the trace.
        for pid, by_name in counter_events_by_pid.iteritems():
            for counter_name, event_list in by_name.iteritems():
                for event in event_list:
                    last_timestamp = event.timestamp
                    last_value = event.data.value
                    interval = Interval(last_timestamp,
                                        self._trace.duration)
                    counter = Counter(pid=pid,
                                      name=counter_name,
                                      event=event,
                                      value=last_value,
                                      interval=interval)
                    self._tmw_intervals_by_name[counter.name].append(
                        counter)
def _context_handler(self):
    """Handler function for context begin/end events"""
    last_timestamp = self._trace.interval.start
    last_event = None
    counter_events_by_pid = defaultdict(EventList)
    try:
        while True:
            event = (yield)
            pid = event.task.pid
            tag = event.data.atrace_tag
            if tag is AtraceTag.CONTEXT_BEGIN:
                counter_events_by_pid[pid].append(event)
            elif tag is AtraceTag.CONTEXT_END and \
                    counter_events_by_pid[pid]:
                last_event = counter_events_by_pid[pid].pop()
                last_timestamp = last_event.timestamp
                last_pid, last_name = \
                    last_event.data.pid, last_event.data.section_name
                interval = Interval(last_timestamp, event.timestamp)
                context = Context(pid=last_pid,
                                  name=last_name,
                                  interval=interval,
                                  event=last_event)
                self._tmw_intervals_by_name[last_name].append(context)
            else:
                log.warn(
                    "Missing start marker {event}".format(event=event))
    except GeneratorExit:
        # Close off any begin markers that never saw an end marker.
        for pid, event_list in counter_events_by_pid.iteritems():
            for event in event_list:
                last_timestamp = event.timestamp
                interval = Interval(last_timestamp, self._trace.duration)
                if event.data.atrace_tag is not AtraceTag.CONTEXT_END:
                    pid, name = event.data.pid, event.data.section_name
                    context = Context(pid=pid,
                                      name=name,
                                      interval=interval,
                                      event=event)
                    self._tmw_intervals_by_name[name].append(context)
def _freq_events_handler(self):
    """Handler function for clock frequency events"""
    self._freq_intervals_by_clock = defaultdict(IntervalList)
    for clock, events in self._freq_events_by_clock.iteritems():
        last_rate, last_timestamp = -1.0, self._trace.interval.start
        for freq_event in events:
            interval = Interval(last_timestamp, freq_event.timestamp)
            freq_interval = FreqInterval(clock, last_rate, interval)
            self._freq_intervals_by_clock[clock].append(freq_interval)
            last_rate = freq_event.data.state
            last_timestamp = freq_event.timestamp
        # Close off the last frequency up to the end of the trace.
        end_interval = FreqInterval(clock=clock,
                                    frequency=last_rate,
                                    interval=Interval(
                                        last_timestamp,
                                        self._trace.duration))
        self._freq_intervals_by_clock[clock].append(end_interval)
    return self._freq_intervals_by_clock
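# Illustrative sketch (not part of the original handlers): a time-weighted
# average frequency per clock derived from the FreqInterval list returned
# above. Intervals still carrying the initial placeholder rate of -1.0 are
# skipped. Assumes `frequency` and `interval.duration` attributes; the
# method name is hypothetical.
def _example_average_frequency(self, clock):
    """Return the time-weighted average frequency for `clock`."""
    intervals = [fi for fi in self._freq_intervals_by_clock[clock]
                 if fi.frequency >= 0]
    total = sum(fi.interval.duration for fi in intervals)
    if not total:
        return 0.0
    return sum(fi.frequency * fi.interval.duration
               for fi in intervals) / total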
def rendering_intervals(self, interval=None):
    """
    Group frames into rendering intervals; a gap of more than two vsync
    periods between consecutive frames starts a new group.
    """
    frames = self.frame_intervals(interval=interval)
    rendering_intervals = IntervalList()
    if not frames:
        return rendering_intervals
    slice_start = frames[0].interval.start
    for i, j in zip(frames[:-1], frames[1:]):
        if j.interval.start - i.interval.end > 2 * VSYNC:
            # Gap is large enough: close the current group of frames.
            ri = Rendering(interval=Interval(slice_start, i.interval.end))
            rendering_intervals.append(ri)
            slice_start = j.interval.start
    # Close off the final group of frames.
    rendering_intervals.append(
        Rendering(interval=Interval(slice_start,
                                    frames[-1].interval.end)))
    return rendering_intervals
def _shutter_lag_latency_handler(self):
    """Returns list of shutter lag latencies"""
    self._shutter_lag_latencies = IntervalList()
    deliver_inputs = self._trace.android.event_intervals(
        "deliverInputEvent")

    def _still_capture_intervals():
        """Generator that yields intervals when still images are captured"""
        last_timestamp = self._trace.interval.start
        for tp_event in self._trace.android.event_intervals(
                'doTakePictureAsync'):
            yield Interval(last_timestamp, tp_event.interval.start)
            last_timestamp = tp_event.interval.start

    for interval in _still_capture_intervals():
        touch_events = deliver_inputs.slice(interval=interval,
                                            trimmed=False)
        if touch_events:
            # Use the last input event within this interval.
            start_ts = touch_events[-1].interval.start
            end_ts = start_ts
            post_touch_interval = Interval(interval.end,
                                           self._trace.duration)
            si_events = self._trace.android.event_intervals(
                name='storeImage', interval=post_touch_interval)
            if si_events:
                end_ts = si_events[0].interval.end
            shutter_lag_interval = Interval(start=start_ts, end=end_ts)
            self._shutter_lag_latencies.append(
                CameraLatency("Shutter Lag",
                              interval=shutter_lag_interval,
                              latency=shutter_lag_interval.duration))
    return self._shutter_lag_latencies
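# Illustrative sketch (not part of the original handlers): a simple
# summary of the shutter-lag latencies computed above, assuming each
# CameraLatency exposes a `latency` attribute as passed to its
# constructor. The method name is hypothetical.
def _example_shutter_lag_summary(self):
    """Return (count, max, mean) of shutter-lag latencies."""
    latencies = [cl.latency for cl in self._shutter_lag_latencies]
    if not latencies:
        return 0, 0.0, 0.0
    return (len(latencies), max(latencies),
            sum(latencies) / len(latencies))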
def _bur_events_handler(self):
    """Handler function for bus update request events"""
    self._bur_intervals_by_dev = defaultdict(IntervalList)
    for device, events in self._bur_events_by_dev.iteritems():
        last_event = None
        for bur_event in events:
            interval = Interval(
                last_event.timestamp if last_event
                else self._trace.interval.start,
                bur_event.timestamp)
            state = BusState.BUSY \
                if (bur_event.data.active or bur_event.data.ib != 0) \
                else BusState.IDLE
            bur_interval = BusRequestInterval(
                device=device,
                state=state,
                master_slave=(bur_event.data.src, bur_event.data.dest),
                average_bw_GBps=(bur_event.data.ab / 1e9),
                instantaneous_bw_GBps=(bur_event.data.ib / 1e9),
                interval=interval)
            self._bur_intervals_by_dev[device].append(bur_interval)
            last_event = bur_event
        # Close off the last request up to the end of the trace.
        if last_event:
            state = BusState.BUSY if last_event.data.active \
                else BusState.IDLE
            self._bur_intervals_by_dev[device].append(
                BusRequestInterval(
                    device=device,
                    state=state,
                    master_slave=(last_event.data.src,
                                  last_event.data.dest),
                    average_bw_GBps=(last_event.data.ab / 1e9),
                    instantaneous_bw_GBps=(last_event.data.ib / 1e9),
                    interval=Interval(last_event.timestamp,
                                      self._trace.duration)))
    return self._bur_intervals_by_dev
def _thermal_events_handler(self):
    """Handler function for thermal events"""
    self._thermal_intervals_by_tsens = defaultdict(IntervalList)
    for tsens, events in self._thermal_events_by_tsens.iteritems():
        last_temp, last_timestamp, last_threshold = -1.0, 0.0, False
        current_threshold = False
        for thermal_event in events:
            current_temp, tracepoint = \
                thermal_event.data.temp, thermal_event.tracepoint
            if tracepoint == 'tsens_threshold_hit':
                current_threshold = True
            elif tracepoint == 'tsens_threshold_clear':
                current_threshold = False
            # Emit an interval when the temperature or threshold state
            # changes.
            if current_temp != last_temp or \
                    current_threshold != last_threshold:
                interval = Interval(last_timestamp,
                                    thermal_event.timestamp)
                thermal_interval = ThermalInterval(tsens, last_temp,
                                                   interval,
                                                   last_threshold)
                self._thermal_intervals_by_tsens[tsens].append(
                    thermal_interval)
                last_temp = current_temp
                last_timestamp = thermal_event.timestamp
                last_threshold = current_threshold
        # Close off the last reading up to the end of the trace.
        end_interval = ThermalInterval(
            tsens=tsens,
            temp=last_temp,
            interval=Interval(last_timestamp, self._trace.duration),
            mitigated=last_threshold)
        self._thermal_intervals_by_tsens[tsens].append(end_interval)
    return self._thermal_intervals_by_tsens
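# Illustrative sketch (not part of the original handlers): total time each
# sensor spent with its threshold hit (i.e. mitigated), derived from the
# ThermalInterval list built above. Assumes `mitigated` and
# `interval.duration` attributes; the method name is hypothetical.
def _example_mitigated_time(self):
    """Return {tsens: total time spent above the mitigation threshold}."""
    mitigated_time = {}
    for tsens, intervals in self._thermal_intervals_by_tsens.iteritems():
        mitigated_time[tsens] = sum(ti.interval.duration
                                    for ti in intervals if ti.mitigated)
    return mitigated_time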
def _end_launch_time(self, launched_event, next_launched_event=None):
    """
    End time estimated as the last `performTraversals` (screen update)
    that caused a `setTransactionState`.

    setTransactionState() is invoked to inform SurfaceFlinger of changes
    to the surface; changes can be layer_state_t and Display_state (see
    native/include/private/gui/LayerState.h). layer_state_t indicates
    changes in position/color/depth/size/alpha/crop, etc. Display_state
    indicates changes in orientation, etc.
    """
    end_time = None
    max_end_time = self._start_launch_time(next_launched_event) \
        if next_launched_event else self._trace.duration
    # Only consider the window after the launch.
    pl_interval = Interval(launched_event.timestamp, max_end_time)
    performTraversals = self.event_intervals(name=UI_THREAD_DRAW_NAMES,
                                             task=launched_event.task,
                                             interval=pl_interval,
                                             match_exact=False)
    last_end = max_end_time
    for pt_event in reversed(performTraversals):
        sts_interval = Interval(pt_event.interval.start, last_end)
        sts_events = self.event_intervals(name='setTransactionState',
                                          interval=sts_interval)
        # Ignore 'setTransactionState' caused by app close/focus switch
        # by checking for 'wmUpdateFocus'.
        wmuf_events = self.event_intervals(name='wmUpdateFocus',
                                           interval=sts_interval)
        if sts_events and not wmuf_events and \
                sts_interval.end != max_end_time:
            end_time = sts_interval.end
            break
        last_end = pt_event.interval.start
    return end_time
def app_launch_latencies(self, task=None):
    """Return launch latencies seen in trace"""
    launch_latencies = []
    launched_events = list(self.launched_app_events())
    launched_events.append(None)
    for curr_app_event, next_app_event in zip(launched_events,
                                              launched_events[1:]):
        event = curr_app_event.event
        next_event = next_app_event.event if next_app_event else None
        if task and event.task != task:
            continue
        start_time = self._start_launch_time(event)
        end_time = self._end_launch_time(event, next_event)
        if start_time is not None and end_time is not None:
            launch_interval = Interval(start_time, end_time)
            launch_latencies.append(
                LaunchLatency(task=event.task,
                              interval=launch_interval,
                              latency=launch_interval.duration))
    return launch_latencies
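# Illustrative usage sketch (not part of the original API): dump the
# launch latencies computed above, assuming LaunchLatency exposes `task`,
# `interval` and `latency` as passed to its constructor. The method name
# is hypothetical.
def _example_dump_launch_latencies(self):
    """Log one line per app launch seen in the trace."""
    for ll in self.app_launch_latencies():
        log.info("task={task} start={start} end={end} "
                 "latency={latency}".format(task=ll.task,
                                            start=ll.interval.start,
                                            end=ll.interval.end,
                                            latency=ll.latency))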
def _block_handler(self):
    """Handler function for block I/O request events"""
    last_timestamp = self._trace.interval.start
    last_event = None
    block_issue_events_by_sector = defaultdict(list)
    block_insert_events_by_sector = defaultdict(list)
    try:
        while True:
            event = (yield)
            tracepoint = event.tracepoint
            sector = event.data.sector
            if tracepoint == 'block_rq_issue':
                block_issue_events_by_sector[sector].append(event)
            elif tracepoint == 'block_rq_insert':
                block_insert_events_by_sector[sector].append(event)
            elif tracepoint == 'block_rq_complete':
                # TODO: [CHUK] validate this; currently assuming each
                # block I/O request per sector is serially queued. This
                # holds for the simple traces seen so far but may not
                # always hold.
                if block_issue_events_by_sector[sector]:
                    last_event = block_issue_events_by_sector[sector].pop()
                    last_timestamp = last_event.timestamp
                    io_type = event.data.rwbs.io_type
                    device = (event.data.dev_major, event.data.dev_minor)
                    interval = Interval(last_timestamp, event.timestamp)
                    block_io = IOInterval(io_type=io_type,
                                          task=last_event.task,
                                          device=device,
                                          sector=sector,
                                          errors=event.data.errors,
                                          numSectors=event.data.nr_sector,
                                          interval=interval,
                                          commands=event.data.rwbs.commands)
                    self._io_issue_intervals_by_op[io_type].append(block_io)
                if block_insert_events_by_sector[sector]:
                    last_event = block_insert_events_by_sector[sector].pop()
                    last_timestamp = last_event.timestamp
                    io_type = event.data.rwbs.io_type
                    device = (event.data.dev_major, event.data.dev_minor)
                    interval = Interval(last_timestamp, event.timestamp)
                    block_io = IOInterval(io_type=io_type,
                                          task=last_event.task,
                                          device=device,
                                          sector=sector,
                                          errors=event.data.errors,
                                          numSectors=event.data.nr_sector,
                                          interval=interval,
                                          commands=event.data.rwbs.commands)
                    self._io_insert_intervals_by_op[io_type].append(block_io)
            else:
                log.warn("Missing issue marker {event}".format(event=event))
    except GeneratorExit:
        # Close off any requests that never completed.
        def closure(dict_to_use, dest_dict):
            for sector, event_list in dict_to_use.iteritems():
                for event in event_list:
                    last_timestamp = event.timestamp
                    io_type = event.data.rwbs.io_type
                    device = (event.data.dev_major, event.data.dev_minor)
                    interval = Interval(last_timestamp,
                                        self._trace.duration)
                    block_io = IOInterval(io_type=io_type,
                                          task=event.task,
                                          device=device,
                                          sector=sector,
                                          errors=event.data.errors,
                                          numSectors=event.data.nr_sector,
                                          interval=interval,
                                          commands=event.data.rwbs.commands)
                    dest_dict[io_type].append(block_io)

        closure(block_issue_events_by_sector,
                self._io_issue_intervals_by_op)
        closure(block_insert_events_by_sector,
                self._io_insert_intervals_by_op)
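# Illustrative sketch (not part of the original handlers): aggregate the
# issue-to-complete time per I/O type from the IOInterval lists populated
# above. It assumes `_io_issue_intervals_by_op` behaves like the other
# per-key interval dictionaries and that IOInterval exposes
# `interval.duration`; the method name is hypothetical.
def _example_io_time_by_op(self):
    """Return {io_type: total issue-to-complete time}."""
    io_time = {}
    for io_type, intervals in self._io_issue_intervals_by_op.iteritems():
        io_time[io_type] = sum(io.interval.duration for io in intervals)
    return io_time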
def _input_latency_handler(self, irq_name):
    """Returns list of input latencies"""
    self._input_latencies = IntervalList()
    all_tasks = self._trace.cpu.task_intervals()
    all_aq_events = self.input_events()
    touch_irqs = IntervalList(
        filter_by_task(all_tasks, 'name', irq_name, 'any'))

    def _input_intervals():
        """
        Generator that yields intervals when discrete input event(s) are
        read & decoded by the Android `InputReader`.

        x__x__x____IR___ID_ID_ID___DI_SU__DI_SU__DI_SU______

        x  = multiple input IRQs (multi-touch translated by the Android
             Input Framework)
        IR = InputReader (reads/decodes multiple events at once)
        ID = InputDispatcher (dispatches each input event)
        DI = Deliver Input (the appropriate window consumes the input
             event)
        SU = SurfaceFlinger screen update due to the window handling the
             input event

        Note that the InputReader 'iq' counter is set to 1 whenever the
        InputReader has an event to process. This may be disabled on
        some systems.
        """
        last_timestamp = self._trace.interval.start
        for ir_event in filter_by_task(all_tasks, 'name', 'InputReader',
                                       'any'):
            yield Interval(last_timestamp, ir_event.interval.end)
            last_timestamp = ir_event.interval.end

    for interval in _input_intervals():
        irqs = touch_irqs.slice(interval=interval, trimmed=False)
        # Necessary as we may be interested in a different IRQ name.
        if irqs:
            # Use the longest IRQ.
            start_ts = max(
                irqs, key=lambda x: x.interval.duration).interval.start
            end_ts = start_ts
            post_ir_interval = Interval(start_ts, self._trace.duration)
            di_events = self.event_intervals(
                name=['deliverInputEvent', 'input'],
                interval=post_ir_interval)
            if di_events:
                # IMPORTANT: if InputDispatcher synthesizes multiple
                # events to the same application, we only parse the
                # first event and ignore the rest, because we cannot
                # heuristically determine the start of the next input
                # event to differentiate them.
                di_event = di_events[0]
                # Necessary in case a synthetic event is cancelled;
                # events are cancelled when they are no longer being
                # resynthesized (because the application or IME is
                # already handling them or dropping them entirely).
                # This is handled by only dumping input latencies when
                # the active input event queue length (aq) is > 1 for
                # the same task. For more details, see
                # https://android.googlesource.com/platform/frameworks/base.git/+
                # /f9e989d5f09e72f5c9a59d713521f37d3fdd93dd%5E!/
                # This returns the first interval when aq has pending
                # event(s).
                di_event_name = getattr(di_event, 'name', None)
                if di_event_name == 'input':
                    pfb_events = self.event_intervals(
                        name='doComposition',
                        interval=post_ir_interval)
                else:
                    aq_event = filter_by_task(
                        all_aq_events.slice(interval=post_ir_interval),
                        'pid', di_event.event.task.pid)
                    if aq_event and aq_event.value > 0:
                        post_di_start = aq_event.interval.start
                    else:
                        if aq_event:
                            continue  # an AQ event exists but is empty.
                        post_di_start = di_events[0].interval.start
                    post_di_interval = Interval(post_di_start,
                                                self._trace.duration)
                    pfb_events = self.event_intervals(
                        name='doComposition',
                        interval=post_di_interval)
                if pfb_events:
                    end_ts = pfb_events[0].interval.end
            if end_ts > start_ts and \
                    start_ts not in self._input_latencies._start_timestamps:
                input_interval = Interval(start=start_ts, end=end_ts)
                self._input_latencies.append(
                    InputLatency(interval=input_interval,
                                 latency=input_interval.duration))
    return self._input_latencies
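# Illustrative usage sketch (not part of the original API): a basic
# summary of the input latencies computed above, assuming each
# InputLatency exposes a `latency` attribute as passed to its
# constructor. The method name is hypothetical.
def _example_input_latency_summary(self, irq_name):
    """Return (count, min, max, mean) of input latencies for `irq_name`."""
    latencies = [il.latency
                 for il in self._input_latency_handler(irq_name)]
    if not latencies:
        return 0, 0.0, 0.0, 0.0
    return (len(latencies), min(latencies), max(latencies),
            sum(latencies) / len(latencies))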