def report_power_stats(trace_file, idle_state_names, core_names,
                       core_clusters, num_idle_states,
                       first_cluster_state=sys.maxint,
                       first_system_state=sys.maxint,
                       use_ratios=False, timeline_csv_file=None):
    # pylint: disable=too-many-locals
    """Parse a trace-cmd trace and produce power-state reports.

    Builds a lazy pipeline (trace events -> CPU power transitions ->
    power states -> per-core state samples) and feeds every
    ``(timestamp, states)`` sample to each active reporter.  Reporters
    are parallelism stats, power-state stats and, when
    ``timeline_csv_file`` is given, a power-state timeline CSV writer.

    Returns the list of non-empty reports produced by the reporters.
    """
    processor = PowerStateProcessor(
        core_clusters,
        num_idle_states=num_idle_states,
        first_cluster_state=first_cluster_state,
        first_system_state=first_system_state,
    )

    active_reporters = [
        ParallelStats(core_clusters, use_ratios),
        PowerStateStats(core_names, idle_state_names, use_ratios),
    ]
    if timeline_csv_file:
        active_reporters.append(
            PowerStateTimeline(timeline_csv_file, core_names, idle_state_names))

    # Nothing is consumed until the loop below drains the final stream.
    events = TraceCmdTrace().parse(trace_file,
                                   names=['cpu_idle', 'cpu_frequency'])
    transitions = stream_cpu_power_transitions(events)
    power_states = processor.process(transitions)

    for timestamp, states in gather_core_states(power_states):
        for rep in active_reporters:
            rep.update(timestamp, states)

    # Keep only reporters that actually produced something.
    return [r for r in (rep.report() for rep in active_reporters) if r]
def report_power_stats(trace_file, idle_state_names, core_names,
                       core_clusters, num_idle_states,
                       first_cluster_state=sys.maxint,
                       first_system_state=sys.maxint,
                       use_ratios=False):
    # pylint: disable=too-many-locals
    """Parse a trace-cmd trace and report parallelism and power-state stats.

    Streams the trace's ``cpu_idle``/``cpu_frequency`` events through the
    power-state processor and updates both statistics collectors with
    every per-core state sample.

    Returns a two-tuple ``(parallel_report, power_state_report)``.
    """
    processor = PowerStateProcessor(
        core_clusters,
        num_idle_states=num_idle_states,
        first_cluster_state=first_cluster_state,
        first_system_state=first_system_state,
    )
    par_stats = ParallelStats(core_clusters, use_ratios)
    ps_stats = PowerStateStats(core_names, idle_state_names, use_ratios)

    events = TraceCmdTrace().parse(trace_file,
                                   names=['cpu_idle', 'cpu_frequency'])
    sample_stream = gather_core_states(
        processor.process(stream_cpu_power_transitions(events)))

    for timestamp, states in sample_stream:
        par_stats.update(timestamp, states)
        ps_stats.update(timestamp, states)

    return (par_stats.report(), ps_stats.report())
def report_power_stats(trace_file, idle_state_names, core_names,
                       core_clusters, num_idle_states,
                       first_cluster_state=sys.maxint,
                       first_system_state=sys.maxint,
                       use_ratios=False, timeline_csv_file=None,
                       cpu_utilisation=None, max_freq_list=None,
                       start_marker_handling='error'):
    # pylint: disable=too-many-locals,too-many-branches
    """Parse a trace-cmd trace and produce power-state reports.

    ``start_marker_handling`` controls what happens when the trace has no
    START marker: ``'error'`` raises ``DeviceError``, ``'try'`` proceeds
    with a warning (statistics may be inaccurate), ``'ignore'`` never
    waits for a marker.  Any other value behaves like waiting for the
    marker unconditionally.

    Optional reporters: a power-state timeline CSV (``timeline_csv_file``)
    and a CPU utilisation timeline (``cpu_utilisation``; requires
    ``max_freq_list`` for normalisation, otherwise it is skipped with a
    warning).

    Returns the list of reports, one per reporter, in registration order
    (an individual report may be falsy if its reporter produced nothing).

    Raises:
        DeviceError: if ``start_marker_handling == 'error'`` and the
            trace contains no start marker.
    """
    trace = TraceCmdTrace(trace_file,
                          filter_markers=False,
                          names=['cpu_idle', 'cpu_frequency', 'print'])

    wait_for_start_marker = True
    if start_marker_handling == "error" and not trace.has_start_marker:
        raise DeviceError("Start marker was not found in the trace")
    elif start_marker_handling == "try":
        wait_for_start_marker = trace.has_start_marker
        if not wait_for_start_marker:
            logger.warning("Did not see a START marker in the trace, "
                           "state residency and parallelism statistics may be inaccurate.")
    elif start_marker_handling == "ignore":
        wait_for_start_marker = False

    ps_processor = PowerStateProcessor(core_clusters,
                                       num_idle_states=num_idle_states,
                                       first_cluster_state=first_cluster_state,
                                       first_system_state=first_system_state,
                                       wait_for_start_marker=wait_for_start_marker)
    reporters = [
        ParallelStats(core_clusters, use_ratios),
        PowerStateStats(core_names, idle_state_names, use_ratios)
    ]
    if timeline_csv_file:
        reporters.append(PowerStateTimeline(timeline_csv_file,
                                            core_names, idle_state_names))
    if cpu_utilisation:
        if max_freq_list:
            reporters.append(CpuUtilisationTimeline(cpu_utilisation,
                                                    core_names, max_freq_list))
        else:
            # FIX: this message was an unterminated string literal broken
            # across a physical line break; rejoined via implicit
            # concatenation.
            logger.warning('Maximum frequencies not found. Cannot normalise. '
                           'Skipping CPU Utilisation Timeline')

    event_stream = trace.parse()
    transition_stream = stream_cpu_power_transitions(event_stream)
    power_state_stream = ps_processor.process(transition_stream)
    core_state_stream = gather_core_states(power_state_stream)

    for timestamp, states in core_state_stream:
        for reporter in reporters:
            reporter.update(timestamp, states)

    if ps_processor.exceptions:
        logger.warning('There were errors while processing trace:')
        for e in ps_processor.exceptions:
            logger.warning(str(e))

    # Reports are appended unconditionally so callers can rely on
    # positional correspondence with the reporters above.
    reports = []
    for reporter in reporters:
        report = reporter.report()
        reports.append(report)
    return reports
def report_power_stats(trace_file, idle_state_names, core_names,
                       core_clusters, num_idle_states,
                       first_cluster_state=sys.maxint,
                       first_system_state=sys.maxint,
                       use_ratios=False, timeline_csv_file=None,
                       filter_trace=False, cpu_utilisation=None,
                       max_freq_list=None):
    # pylint: disable=too-many-locals
    """Parse a trace-cmd trace and produce power-state reports.

    When ``filter_trace`` is set the parser strips trace markers, so the
    processor does not wait for a start marker; otherwise it does.
    Optional reporters: power-state timeline CSV and CPU utilisation
    timeline (the latter needs ``max_freq_list`` for normalisation and
    is skipped with a warning without it).

    Returns the list of non-empty reports from the reporters.
    """
    parser = TraceCmdTrace(filter_markers=filter_trace)
    processor = PowerStateProcessor(
        core_clusters,
        num_idle_states=num_idle_states,
        first_cluster_state=first_cluster_state,
        first_system_state=first_system_state,
        wait_for_start_marker=not filter_trace,
    )

    outputs = [ParallelStats(core_clusters, use_ratios),
               PowerStateStats(core_names, idle_state_names, use_ratios)]
    if timeline_csv_file:
        outputs.append(PowerStateTimeline(timeline_csv_file,
                                          core_names, idle_state_names))
    if cpu_utilisation:
        if max_freq_list:
            outputs.append(CpuUtilisationTimeline(cpu_utilisation,
                                                  core_names, max_freq_list))
        else:
            logger.warning(
                'Maximum frequencies not found. Cannot normalise. Skipping CPU Utilisation Timeline'
            )

    raw_events = parser.parse(trace_file,
                              names=['cpu_idle', 'cpu_frequency', 'print'])
    samples = gather_core_states(
        processor.process(stream_cpu_power_transitions(raw_events)))

    for timestamp, states in samples:
        for out in outputs:
            out.update(timestamp, states)

    # Drop reporters that produced nothing.
    return list(filter(None, (out.report() for out in outputs)))
def report_power_stats(trace_file, idle_state_names, core_names,
                       core_clusters, num_idle_states,
                       first_cluster_state=sys.maxint,
                       first_system_state=sys.maxint,
                       use_ratios=False, timeline_csv_file=None,
                       filter_trace=False, cpu_utilisation=None,
                       max_freq_list=None):
    # pylint: disable=too-many-locals
    """Run the power-state processing pipeline over a trace-cmd trace.

    Feeds every per-core state sample to the active reporters
    (parallelism stats, power-state stats, plus optional power-state
    timeline and CPU utilisation timeline) and returns their non-empty
    reports as a list.
    """
    trace_parser = TraceCmdTrace(filter_markers=filter_trace)
    # A filtered trace has its markers removed, so only wait for the
    # start marker when markers are preserved.
    state_processor = PowerStateProcessor(
        core_clusters,
        num_idle_states=num_idle_states,
        first_cluster_state=first_cluster_state,
        first_system_state=first_system_state,
        wait_for_start_marker=not filter_trace,
    )

    sinks = [
        ParallelStats(core_clusters, use_ratios),
        PowerStateStats(core_names, idle_state_names, use_ratios),
    ]
    if timeline_csv_file:
        sinks.append(PowerStateTimeline(timeline_csv_file, core_names,
                                        idle_state_names))
    if cpu_utilisation:
        if not max_freq_list:
            logger.warning('Maximum frequencies not found. Cannot normalise. '
                           'Skipping CPU Utilisation Timeline')
        else:
            sinks.append(CpuUtilisationTimeline(cpu_utilisation, core_names,
                                                max_freq_list))

    events = trace_parser.parse(trace_file,
                                names=['cpu_idle', 'cpu_frequency', 'print'])
    core_state_samples = gather_core_states(
        state_processor.process(stream_cpu_power_transitions(events)))

    for ts, core_states in core_state_samples:
        for sink in sinks:
            sink.update(ts, core_states)

    return [rpt for rpt in (sink.report() for sink in sinks) if rpt]
def report_power_stats(trace_file, idle_state_names, core_names,
                       core_clusters, num_idle_states,
                       first_cluster_state=sys.maxint,
                       first_system_state=sys.maxint,
                       use_ratios=False, timeline_csv_file=None,
                       filter_trace=True):
    # pylint: disable=too-many-locals
    """Parse a trace-cmd trace and produce power-state reports.

    Streams ``cpu_idle``/``cpu_frequency`` events through the
    power-state processor, updating each consumer (parallelism stats,
    power-state stats and, when ``timeline_csv_file`` is given, a
    power-state timeline CSV) with every per-core state sample.

    Returns the non-empty reports produced by the consumers.
    """
    parser = TraceCmdTrace(filter_markers=filter_trace)
    processor = PowerStateProcessor(core_clusters,
                                    num_idle_states=num_idle_states,
                                    first_cluster_state=first_cluster_state,
                                    first_system_state=first_system_state)

    consumers = [
        ParallelStats(core_clusters, use_ratios),
        PowerStateStats(core_names, idle_state_names, use_ratios),
    ]
    if timeline_csv_file:
        consumers.append(PowerStateTimeline(timeline_csv_file, core_names,
                                            idle_state_names))

    raw_events = parser.parse(trace_file, names=['cpu_idle', 'cpu_frequency'])
    samples = gather_core_states(
        processor.process(stream_cpu_power_transitions(raw_events)))

    for ts, state_tuple in samples:
        for consumer in consumers:
            consumer.update(ts, state_tuple)

    results = []
    for consumer in consumers:
        produced = consumer.report()
        if produced:
            results.append(produced)
    return results
def report_power_stats(trace_file, idle_state_names, core_names,
                       core_clusters, num_idle_states,
                       first_cluster_state=sys.maxint,
                       first_system_state=sys.maxint,
                       use_ratios=False, timeline_csv_file=None,
                       cpu_utilisation=None, max_freq_list=None,
                       start_marker_handling='error',
                       transitions_csv_file=None, no_idle=False):
    # pylint: disable=too-many-locals,too-many-branches
    """Parse a trace-cmd trace and produce power-state reports.

    ``start_marker_handling`` controls behaviour when the trace has no
    START marker: ``'error'`` raises ``DeviceError``, ``'try'`` proceeds
    with a warning (statistics may be inaccurate), ``'ignore'`` never
    waits for a marker.  Any other value behaves like waiting for the
    marker unconditionally.

    Optional reporters: power-state timeline CSV, CPU utilisation
    timeline (requires ``max_freq_list``, otherwise skipped with a
    warning), and a power-state transitions CSV — when
    ``transitions_csv_file`` is given the transition stream is also
    recorded via ``record_state_transitions`` before processing.
    ``no_idle`` is forwarded to ``PowerStateProcessor``.

    Returns the list of reports, one per reporter, in registration order
    (an individual report may be falsy if its reporter produced nothing).

    Raises:
        DeviceError: if ``start_marker_handling == 'error'`` and the
            trace contains no start marker.
    """
    trace = TraceCmdTrace(trace_file,
                          filter_markers=False,
                          names=['cpu_idle', 'cpu_frequency', 'print'])

    wait_for_start_marker = True
    if start_marker_handling == "error" and not trace.has_start_marker:
        raise DeviceError("Start marker was not found in the trace")
    elif start_marker_handling == "try":
        wait_for_start_marker = trace.has_start_marker
        if not wait_for_start_marker:
            logger.warning("Did not see a START marker in the trace, "
                           "state residency and parallelism statistics may be inaccurate.")
    elif start_marker_handling == "ignore":
        wait_for_start_marker = False

    ps_processor = PowerStateProcessor(core_clusters,
                                       num_idle_states=num_idle_states,
                                       first_cluster_state=first_cluster_state,
                                       first_system_state=first_system_state,
                                       wait_for_start_marker=wait_for_start_marker,
                                       no_idle=no_idle)
    reporters = [
        ParallelStats(core_clusters, use_ratios),
        PowerStateStats(core_names, idle_state_names, use_ratios)
    ]
    if timeline_csv_file:
        reporters.append(PowerStateTimeline(timeline_csv_file,
                                            core_names, idle_state_names))
    if cpu_utilisation:
        if max_freq_list:
            reporters.append(CpuUtilisationTimeline(cpu_utilisation,
                                                    core_names, max_freq_list))
        else:
            # FIX: this message was an unterminated string literal broken
            # across a physical line break; rejoined via implicit
            # concatenation.
            logger.warning('Maximum frequencies not found. Cannot normalise. '
                           'Skipping CPU Utilisation Timeline')

    event_stream = trace.parse()
    transition_stream = stream_cpu_power_transitions(event_stream)

    if transitions_csv_file:
        trans_reporter = PowerStateTransitions(transitions_csv_file)
        reporters.append(trans_reporter)
        # Record each transition as it flows past, then process the
        # recorded stream.
        recorded_trans_stream = record_state_transitions(trans_reporter,
                                                         transition_stream)
        power_state_stream = ps_processor.process(recorded_trans_stream)
    else:
        power_state_stream = ps_processor.process(transition_stream)

    core_state_stream = gather_core_states(power_state_stream)

    for timestamp, states in core_state_stream:
        for reporter in reporters:
            reporter.update(timestamp, states)

    if ps_processor.exceptions:
        logger.warning('There were errors while processing trace:')
        for e in ps_processor.exceptions:
            logger.warning(str(e))

    # Reports are appended unconditionally so callers can rely on
    # positional correspondence with the reporters above.
    reports = []
    for reporter in reporters:
        report = reporter.report()
        reports.append(report)
    return reports