def get_data(self, outfile):  # pylint: disable=R0914
    all_channels = [c.label for c in self.list_channels()]
    active_channels = [c.label for c in self.active_channels]
    active_indexes = [all_channels.index(ac) for ac in active_channels]

    num_of_ports = len(self.resistor_values)
    struct_format = '{}I'.format(num_of_ports * self.attributes_per_sample)
    not_a_full_row_seen = False
    self.raw_data_file = os.path.join(self.raw_output_directory, '0000000000')

    self.logger.debug('Parsing raw data file: {}'.format(self.raw_data_file))
    with open(self.raw_data_file, 'rb') as bfile:
        with csvwriter(outfile) as writer:
            writer.writerow(active_channels)
            while True:
                data = bfile.read(num_of_ports * self.bytes_per_sample)
                if data == b'':  # binary read returns bytes, not str
                    break
                try:
                    unpacked_data = struct.unpack(struct_format, data)
                    row = [unpacked_data[i] / 1000 for i in active_indexes]
                    writer.writerow(row)
                except struct.error:
                    if not_a_full_row_seen:
                        self.logger.warning('possibly misaligned caiman raw data, row contained {} bytes'
                                            .format(len(data)))
                        continue
                    else:
                        not_a_full_row_seen = True
    return MeasurementsCsv(outfile, self.active_channels, self.sample_rate_hz)
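# A minimal, self-contained sketch of the fixed-width decoding above, with
# fabricated numbers: assume (hypothetically) two ports and two attributes
# per sample, so each row unpacks as four native unsigned ints, scaled by
# 1000 as in get_data.
import struct

raw = struct.pack('4I', 5000, 12000, 5100, 11900)  # one fabricated sample row
row = [v / 1000 for v in struct.unpack('4I', raw)]
assert row == [5.0, 12.0, 5.1, 11.9]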
def get_data(self, output_file):
    temp_file = tempfile.mktemp()
    self.target.pull(self.on_target_file, temp_file)
    self.target.remove(self.on_target_file)

    with open(temp_file, 'r', newline='') as fh:
        reader = csv.reader(fh)
        headings = next(reader)

        # Figure out which columns from the collected csv we actually want
        select_columns = []
        for chan in self.active_channels:
            try:
                select_columns.append(headings.index(chan.name))
            except ValueError:
                raise HostError('Channel "{}" is not in {}'.format(chan.name, temp_file))

        with open(output_file, 'w', newline='') as wfh:
            write_headings = ['{}_{}'.format(c.site, c.kind)
                              for c in self.active_channels]
            writer = csv.writer(wfh)
            writer.writerow(write_headings)
            for row in reader:
                write_row = [row[c] for c in select_columns]
                writer.writerow(write_row)
    return MeasurementsCsv(output_file, self.active_channels)
def get_data(self, outfile):
    if self.process:
        raise RuntimeError('`get_data` called before `stop`')

    stdout, stderr = self.output

    with csvwriter(outfile) as writer:
        active_sites = [c.site for c in self.active_channels]

        # Write column headers
        row = []
        if 'output' in active_sites:
            row.append('output_power')
        if 'USB' in active_sites:
            row.append('USB_power')
        writer.writerow(row)

        # Write data
        for line in stdout.splitlines():
            # Each output line is a main_output, usb_output measurement pair.
            # (If our user only requested one channel we still collect both,
            # and just ignore one of them)
            output, usb = line.split()
            row = []
            if 'output' in active_sites:
                row.append(output)
            if 'USB' in active_sites:
                row.append(usb)
            writer.writerow(row)

    return MeasurementsCsv(outfile, self.active_channels, self.sample_rate_hz)
def _process_without_pandas(self, measurements_csv):
    per_frame_fps = []
    start_vsync, end_vsync = None, None
    frame_count = 0

    for frame_data in measurements_csv.iter_values():
        if frame_data.Flags_flags != 0:
            continue
        frame_count += 1

        if start_vsync is None:
            start_vsync = frame_data.Vsync_time_ns
        end_vsync = frame_data.Vsync_time_ns

        frame_time = frame_data.FrameCompleted_time_ns - frame_data.IntendedVsync_time_ns
        pff = 1e9 / frame_time
        if pff > self.drop_threshold:
            per_frame_fps.append([pff])

    if frame_count:
        duration = end_vsync - start_vsync
        fps = (1e9 * frame_count) / float(duration)
    else:
        duration = 0
        fps = 0

    csv_file = self._get_csv_file_name(measurements_csv.path)
    with csvwriter(csv_file) as writer:
        writer.writerow(['fps'])
        writer.writerows(per_frame_fps)

    return [DerivedMetric('fps', fps, 'fps'),
            DerivedMetric('total_frames', frame_count, 'frames'),
            MeasurementsCsv(csv_file)]
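# Worked numeric check of the two rates computed above, using fabricated
# timestamps: per-frame fps comes from FrameCompleted - IntendedVsync, while
# the overall fps uses the span between the first and last Vsync.
frame_time_ns = 20_000_000                       # 20 ms render time
pff = 1e9 / frame_time_ns                        # -> 50.0 fps for this frame

frame_count = 300
duration_ns = 5_000_000_000                      # 5 s between first/last vsync
fps = (1e9 * frame_count) / float(duration_ns)   # -> 60.0 fps overall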
def get_data(self, outfile):
    if self.keep_raw:
        self._raw_file = outfile + '.raw'
    self.collector.process_frames(self._raw_file)
    active_sites = [chan.label for chan in self.active_channels]
    self.collector.write_frames(outfile, columns=active_sites)
    return MeasurementsCsv(outfile, self.active_channels, self.sample_rate_hz)
def get_data(self, outfile):  # pylint: disable=R0914
    self.logger.debug("Parse data and compute consumed energy")
    self.parser.prepare(self.output_file_raw, self.output_file, self.output_file_figure)
    self.parser.parse_aep()
    self.parser.unprepare()
    skip_header = 1

    all_channels = [c.label for c in self.list_channels()]
    active_channels = [c.label for c in self.active_channels]
    active_indexes = [all_channels.index(ac) for ac in active_channels]

    with csvreader(self.output_file, delimiter=' ') as reader:
        with csvwriter(outfile) as writer:
            for row in reader:
                if skip_header == 1:
                    writer.writerow(active_channels)
                    skip_header = 0
                    continue
                if len(row) < len(active_channels):
                    continue
                # all data are in micro units (micro-seconds / micro-watts)
                new = [float(row[i]) / 1000000 for i in active_indexes]
                writer.writerow(new)

    self.output_fd_error.close()
    shutil.rmtree(self.output_directory)

    return MeasurementsCsv(outfile, self.active_channels, self.sample_rate_hz)
def get_data(self, outfile):
    raw_log_file = tempfile.mktemp()
    self.target.dump_logcat(raw_log_file)
    data = extract_netstats(raw_log_file)
    measurements = netstats_to_measurements(data)
    write_measurements_csv(measurements, outfile)
    os.remove(raw_log_file)
    return MeasurementsCsv(outfile, self.active_channels)
def get_data(self, outfile):
    active_sites = [c.site for c in self.active_channels]
    with open(outfile, 'w', newline='') as wfh:
        writer = csv.writer(wfh)
        writer.writerow([c.label for c in self.active_channels])  # headers
        for rec, rois in self.target.gem5stats.match_iter(active_sites,
                                                          [self.roi_label]):
            writer.writerow([float(rec[s]) for s in active_sites])
    return MeasurementsCsv(outfile, self.active_channels)
def get_data(self, outfile):  # pylint: disable=R0914
    tempdir = tempfile.mkdtemp(prefix='daq-raw-')
    self.execute('get_data', output_directory=tempdir)
    raw_file_map = {}
    for entry in os.listdir(tempdir):
        site = os.path.splitext(entry)[0]
        path = os.path.join(tempdir, entry)
        raw_file_map[site] = path
        self._raw_files.append(path)

    active_sites = unique([c.site for c in self.active_channels])
    file_handles = []
    try:
        site_readers = {}
        for site in active_sites:
            try:
                site_file = raw_file_map[site]
                fh = open(site_file, 'r', newline='')
                site_readers[site] = csv.reader(fh)
                file_handles.append(fh)
            except KeyError:
                message = 'Could not get DAQ trace for {}; Obtained traces are in {}'
                raise HostError(message.format(site, tempdir))

        # The first row is the headers
        channel_order = []
        for site, reader in site_readers.items():
            channel_order.extend(['{}_{}'.format(site, kind)
                                  for kind in next(reader)])

        def _read_next_rows():
            parts = []
            for reader in site_readers.values():
                try:
                    parts.extend(next(reader))
                except StopIteration:
                    # This site's trace is exhausted; pad its two columns.
                    parts.extend([None, None])
            return parts

        with open(outfile, 'w', newline='') as wfh:
            field_names = [c.label for c in self.active_channels]
            writer = csv.writer(wfh)
            writer.writerow(field_names)
            raw_row = _read_next_rows()
            while any(raw_row):
                row = [raw_row[channel_order.index(f)] for f in field_names]
                writer.writerow(row)
                raw_row = _read_next_rows()

        return MeasurementsCsv(outfile, self.active_channels, self.sample_rate_hz)
    finally:
        for fh in file_handles:
            fh.close()
def get_data(self, outfile):  # pylint: disable=R0914
    self.tempdir = tempfile.mkdtemp(prefix='daq-raw-')
    self.daq_client.get_data(self.tempdir)
    raw_file_map = {}
    for entry in os.listdir(self.tempdir):
        site = os.path.splitext(entry)[0]
        path = os.path.join(self.tempdir, entry)
        raw_file_map[site] = path
        self._raw_files.append(path)

    active_sites = unique([c.site for c in self.active_channels])
    file_handles = []
    try:
        site_readers = {}
        for site in active_sites:
            try:
                site_file = raw_file_map[site]
                reader, fh = create_reader(site_file)
                site_readers[site] = reader
                file_handles.append(fh)
            except KeyError:
                if not site.startswith("Time"):
                    message = 'Could not get DAQ trace for {}; Obtained traces are in {}'
                    raise HostError(message.format(site, self.tempdir))

        # The first row is the headers
        channel_order = ['Time_time']
        for site, reader in site_readers.items():
            channel_order.extend(['{}_{}'.format(site, kind)
                                  for kind in next(reader)])

        def _read_rows():
            row_iter = zip_longest(*site_readers.values(), fillvalue=(None, None))
            for raw_row in row_iter:
                raw_row = list(chain.from_iterable(raw_row))
                raw_row.insert(0, _read_rows.row_time_s)
                yield raw_row
                _read_rows.row_time_s += 1.0 / self.sample_rate_hz
        _read_rows.row_time_s = self.target_boottime_clock_at_start

        with csvwriter(outfile) as writer:
            field_names = [c.label for c in self.active_channels]
            writer.writerow(field_names)
            for raw_row in _read_rows():
                row = [raw_row[channel_order.index(f)] for f in field_names]
                writer.writerow(row)

        return MeasurementsCsv(outfile, self.active_channels, self.sample_rate_hz)
    finally:
        for fh in file_handles:
            fh.close()
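# Sketch of the reader-merging technique inside _read_rows, with in-memory
# iterators standing in for the per-site csv readers: zip_longest pads an
# exhausted site with (None, None) so a shorter trace does not truncate the
# others, and chain.from_iterable flattens the per-site pairs into one row.
from itertools import chain, zip_longest

site_a = iter([('0.10', '1.0'), ('0.11', '1.1')])
site_b = iter([('5.0', '9.0')])
for pair_row in zip_longest(site_a, site_b, fillvalue=(None, None)):
    print(list(chain.from_iterable(pair_row)))
# ['0.10', '1.0', '5.0', '9.0']
# ['0.11', '1.1', None, None]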
def get_data(self, outfile):
    active_sites = [c.site for c in self.active_channels]
    with open(outfile, 'w', newline='') as wfh:
        writer = csv.writer(wfh)
        writer.writerow([c.label for c in self.active_channels])  # headers
        sites_to_match = [self.site_mapping.get(s, s) for s in active_sites]
        for rec, rois in self.target.gem5stats.match_iter(sites_to_match,
                                                          [self.roi_label],
                                                          self._base_stats_dump):
            writer.writerow([rec[s] for s in active_sites])
    return MeasurementsCsv(outfile, self.active_channels, self.sample_rate_hz)
def _process_with_pandas(self, measurements_csv):
    data = pd.read_csv(measurements_csv.path)

    # filter out bogus frames.
    bogus_frames_filter = data.actual_present_time_us != 0x7fffffffffffffff
    actual_present_times = data.actual_present_time_us[bogus_frames_filter]
    actual_present_time_deltas = actual_present_times.diff().dropna()

    vsyncs_to_compose = actual_present_time_deltas.div(VSYNC_INTERVAL)
    # round to the nearest whole number of vsyncs per frame
    vsyncs_to_compose = vsyncs_to_compose.apply(lambda x: int(round(x, 0)))

    # drop values lower than drop_threshold FPS as real in-game frame
    # rate is unlikely to drop below that (except on loading screens
    # etc, which should not be factored in frame rate calculation).
    per_frame_fps = (1.0 / (vsyncs_to_compose.multiply(VSYNC_INTERVAL / 1e9)))
    keep_filter = per_frame_fps > self.drop_threshold
    filtered_vsyncs_to_compose = vsyncs_to_compose[keep_filter]
    per_frame_fps.name = 'fps'

    csv_file = self._get_csv_file_name(measurements_csv.path)
    per_frame_fps.to_csv(csv_file, index=False, header=True)

    if not filtered_vsyncs_to_compose.empty:
        fps = 0
        total_vsyncs = filtered_vsyncs_to_compose.sum()
        frame_count = filtered_vsyncs_to_compose.size

        if total_vsyncs:
            fps = 1e9 * frame_count / (VSYNC_INTERVAL * total_vsyncs)

        janks = self._calc_janks(filtered_vsyncs_to_compose)
        not_at_vsync = self._calc_not_at_vsync(vsyncs_to_compose)
    else:
        fps = 0
        frame_count = 0
        janks = 0
        not_at_vsync = 0

    janks_pc = 0 if frame_count == 0 else janks * 100 / frame_count

    return [DerivedMetric('fps', fps, 'fps'),
            DerivedMetric('total_frames', frame_count, 'frames'),
            MeasurementsCsv(csv_file),
            DerivedMetric('janks', janks, 'count'),
            DerivedMetric('janks_pc', janks_pc, 'percent'),
            DerivedMetric('missed_vsync', not_at_vsync, 'count')]
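# Worked numeric check of the pandas path above, assuming a 60 Hz panel
# (VSYNC_INTERVAL of 16666667 ns): a frame composed over two vsync periods
# runs at ~30 fps, and 100 frames spread over 110 vsyncs average ~54.5 fps.
VSYNC_INTERVAL = 16666667                                  # ns, assumed here
per_frame_fps = 1.0 / (2 * (VSYNC_INTERVAL / 1e9))         # -> ~30.0
frame_count, total_vsyncs = 100, 110
fps = 1e9 * frame_count / (VSYNC_INTERVAL * total_vsyncs)  # -> ~54.5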
def _process_with_pandas(self, measurements_csv):
    data = pd.read_csv(measurements_csv.path)
    data = data[data.Flags_flags == 0]
    frame_time = data.FrameCompleted_time_ns - data.IntendedVsync_time_ns
    per_frame_fps = (1e9 / frame_time)
    keep_filter = per_frame_fps > self.drop_threshold
    per_frame_fps = per_frame_fps[keep_filter]
    per_frame_fps.name = 'fps'

    frame_count = data.index.size
    if frame_count > 1:
        duration = data.Vsync_time_ns.iloc[-1] - data.Vsync_time_ns.iloc[0]
        fps = (1e9 * frame_count) / float(duration)
    else:
        duration = 0
        fps = 0

    csv_file = self._get_csv_file_name(measurements_csv.path)
    per_frame_fps.to_csv(csv_file, index=False, header=True)

    return [DerivedMetric('fps', fps, 'fps'),
            DerivedMetric('total_frames', frame_count, 'frames'),
            MeasurementsCsv(csv_file)]
def process(self, measurements_csv):
    if isinstance(measurements_csv, str):
        measurements_csv = MeasurementsCsv(measurements_csv)
    if pd is not None:
        return self._process_with_pandas(measurements_csv)
    return self._process_without_pandas(measurements_csv)
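# Hedged usage sketch for the dispatcher above. `DerivedFpsStats` is used
# here as a hypothetical stand-in for whichever concrete class hosts this
# `process` method, and `drop_threshold=5` is an assumed parameter: process
# accepts either a csv path or a MeasurementsCsv and returns DerivedMetric
# objects plus a per-frame fps MeasurementsCsv.
proc = DerivedFpsStats(drop_threshold=5)
for result in proc.process('/path/to/frames.csv'):
    print(result)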