def __rtapp_reference(self, tid, base_idx):
    """Print the reference (base) metrics line for an rt-app test.

    Dumps the absolute per-cluster energy averages and, when present,
    the performance statistics of the base configuration.

    NOTE(review): assumes self.results["rtapp"][tid][base_idx] maps
    "energy"/"performance" to per-metric stats dicts keyed by "avg" --
    confirm against the results collection code.
    """
    _results = self.results["rtapp"]
    logging.debug("Test %s: compare against [%s] base", tid, base_idx)
    res_line = "{0:12s}: {1:22s} | ".format(tid, base_idx)

    # One absolute energy column per cluster, plus the total
    for cluster in ["LITTLE", "big", "Total"]:
        nrg_avg = _results[tid][base_idx]["energy"][cluster]["avg"]
        res_line += " {0:10.3f}".format(nrg_avg)
    res_line += " |"

    # Performance stats are optional: stop at energy when missing
    if "performance" not in _results[tid][base_idx].keys():
        print(res_line)
        return

    for metric in ["perf_avg", "slack_pct", "edp1", "edp2", "edp3"]:
        base_avg = _results[tid][base_idx]["performance"][metric]["avg"]
        logging.debug("idx: %s, base: %s", metric, base_avg)
        if metric == "perf_avg":
            res_line += " {0:s}".format(TestColors.rate(base_avg))
        elif metric == "slack_pct":
            # For slack a positive value is NOT good
            res_line += " {0:s}".format(
                TestColors.rate(base_avg, positive_is_good=False))
        elif "edp" in metric:
            res_line += " {0:10.2e}".format(base_avg)
    res_line += " |"
    print(res_line)
def __rtapp_compare(self, tid, base_idx, test_idx, formats):
    """Compare an rt-app test configuration against a base one, printing
    one report line with energy deltas and (optionally) performance stats.

    :param tid: test identifier
    :param base_idx: key of the baseline configuration
    :param test_idx: key of the configuration under comparison
    :param formats: iterable of output formats; 'absolute' selects raw
                    deltas instead of colored relative percentages
    """
    _results = self.results['rtapp']

    logging.debug('Test %s: compare %s with %s', tid, base_idx, test_idx)
    res_line = '{0:12s}: {1:20s} | '.format(tid, test_idx)

    # Dump all energy metrics
    for cpus in ['LITTLE', 'big', 'Total']:
        res_base = _results[tid][base_idx]['energy'][cpus]['avg']
        res_test = _results[tid][test_idx]['energy'][cpus]['avg']
        speedup_cnt = res_test - res_base
        if 'absolute' in formats:
            res_line += ' {0:10.2f}'.format(speedup_cnt)
        else:
            # Guard against a zero base energy average
            speedup_pct = 0
            if res_base != 0:
                speedup_pct = 100.0 * speedup_cnt / res_base
            # For energy a positive delta (more energy) is NOT good
            res_line += ' {0:s}'.format(
                TestColors.rate(speedup_pct, positive_is_good=False))
    res_line += ' |'

    # If available, dump also performance results
    if 'performance' not in _results[tid][base_idx].keys():
        print(res_line)
        return

    for pidx in ['perf_avg', 'slack_pct', 'edp1', 'edp2', 'edp3']:
        res_base = _results[tid][base_idx]['performance'][pidx]['avg']
        res_test = _results[tid][test_idx]['performance'][pidx]['avg']
        logging.debug('idx: %s, base: %s, test: %s',
                      pidx, res_base, res_test)
        if pidx in ['perf_avg']:
            res_line += ' {0:s}'.format(TestColors.rate(res_test))
            continue
        if pidx in ['slack_pct']:
            # For slack a positive value is NOT good
            res_line += ' {0:s}'.format(
                TestColors.rate(res_test, positive_is_good=False))
            continue
        # Compute difference base-vs-test
        if 'edp' in pidx:
            speedup_cnt = res_base - res_test
            # BUGFIX: was `if 'absolute':` — a non-empty string literal is
            # always truthy, so the relative branch below was unreachable
            # (and read a stale speedup_pct from the energy loop)
            if 'absolute' in formats:
                res_line += ' {0:10.2e}'.format(speedup_cnt)
            else:
                speedup_pct = 0
                if res_base != 0:
                    speedup_pct = 100.0 * speedup_cnt / res_base
                res_line += ' {0:s}'.format(TestColors.rate(speedup_pct))
            continue
    res_line += ' |'
    print(res_line)
def __rtapp_compare(self, tid, base_idx, test_idx, formats):
    """Compare an rt-app test configuration against a base one, printing
    one report line with energy deltas and (optionally) performance stats.

    :param tid: test identifier
    :param base_idx: key of the baseline configuration
    :param test_idx: key of the configuration under comparison
    :param formats: iterable of output formats; 'absolute' selects raw
                    deltas instead of colored relative percentages
    """
    _results = self.results['rtapp']

    self._log.debug('Test %s: compare %s with %s', tid, base_idx, test_idx)
    res_line = '{0:12s}: {1:20s} | '.format(tid, test_idx)

    # Dump all energy metrics
    for cpus in ['LITTLE', 'big', 'Total']:
        res_base = _results[tid][base_idx]['energy'][cpus]['avg']
        res_test = _results[tid][test_idx]['energy'][cpus]['avg']
        speedup_cnt = res_test - res_base
        if 'absolute' in formats:
            res_line += ' {0:10.2f}'.format(speedup_cnt)
        else:
            # Guard against a zero base energy average
            speedup_pct = 0
            if res_base != 0:
                speedup_pct = 100.0 * speedup_cnt / res_base
            # For energy a positive delta (more energy) is NOT good
            res_line += ' {0:s}'.format(
                TestColors.rate(speedup_pct, positive_is_good=False))
    res_line += ' |'

    # If available, dump also performance results
    if 'performance' not in _results[tid][base_idx].keys():
        print(res_line)
        return

    for pidx in ['perf_avg', 'slack_pct', 'edp1', 'edp2', 'edp3']:
        res_base = _results[tid][base_idx]['performance'][pidx]['avg']
        res_test = _results[tid][test_idx]['performance'][pidx]['avg']
        self._log.debug('idx: %s, base: %s, test: %s',
                        pidx, res_base, res_test)
        if pidx in ['perf_avg']:
            res_line += ' {0:s}'.format(TestColors.rate(res_test))
            continue
        if pidx in ['slack_pct']:
            # For slack a positive value is NOT good
            res_line += ' {0:s}'.format(
                TestColors.rate(res_test, positive_is_good=False))
            continue
        # Compute difference base-vs-test
        if 'edp' in pidx:
            speedup_cnt = res_base - res_test
            # BUGFIX: was `if 'absolute':` — a non-empty string literal is
            # always truthy, so the relative branch below was unreachable
            # (and read a stale speedup_pct from the energy loop)
            if 'absolute' in formats:
                res_line += ' {0:10.2e}'.format(speedup_cnt)
            else:
                speedup_pct = 0
                if res_base != 0:
                    speedup_pct = 100.0 * speedup_cnt / res_base
                res_line += ' {0:s}'.format(TestColors.rate(speedup_pct))
            continue
    res_line += ' |'
    print(res_line)
def __rtapp_compare(self, tid, base_idx, test_idx, formats):
    """Compare an rt-app test configuration against a base one, printing
    one report line with energy deltas and (optionally) performance stats.

    :param tid: test identifier
    :param base_idx: key of the baseline configuration
    :param test_idx: key of the configuration under comparison
    :param formats: iterable of output formats; 'absolute' selects raw
                    deltas instead of colored relative percentages
    """
    _results = self.results["rtapp"]

    logging.debug("Test %s: compare %s with %s", tid, base_idx, test_idx)
    res_line = "{0:12s}: {1:20s} | ".format(tid, test_idx)

    # Dump all energy metrics
    for cpus in ["LITTLE", "big", "Total"]:
        res_base = _results[tid][base_idx]["energy"][cpus]["avg"]
        res_test = _results[tid][test_idx]["energy"][cpus]["avg"]
        speedup_cnt = res_test - res_base
        if "absolute" in formats:
            res_line += " {0:10.2f}".format(speedup_cnt)
        else:
            # Guard against a zero base energy average
            speedup_pct = 0
            if res_base != 0:
                speedup_pct = 100.0 * speedup_cnt / res_base
            # For energy a positive delta (more energy) is NOT good
            res_line += " {0:s}".format(
                TestColors.rate(speedup_pct, positive_is_good=False))
    res_line += " |"

    # If available, dump also performance results
    if "performance" not in _results[tid][base_idx].keys():
        print(res_line)
        return

    for pidx in ["perf_avg", "slack_pct", "edp1", "edp2", "edp3"]:
        res_base = _results[tid][base_idx]["performance"][pidx]["avg"]
        res_test = _results[tid][test_idx]["performance"][pidx]["avg"]
        logging.debug("idx: %s, base: %s, test: %s",
                      pidx, res_base, res_test)
        if pidx in ["perf_avg"]:
            res_line += " {0:s}".format(TestColors.rate(res_test))
            continue
        if pidx in ["slack_pct"]:
            # For slack a positive value is NOT good
            res_line += " {0:s}".format(
                TestColors.rate(res_test, positive_is_good=False))
            continue
        # Compute difference base-vs-test
        if "edp" in pidx:
            speedup_cnt = res_base - res_test
            # BUGFIX: was `if "absolute":` — a non-empty string literal is
            # always truthy, so the relative branch below was unreachable
            # (and read a stale speedup_pct from the energy loop)
            if "absolute" in formats:
                res_line += " {0:10.2e}".format(speedup_cnt)
            else:
                speedup_pct = 0
                if res_base != 0:
                    speedup_pct = 100.0 * speedup_cnt / res_base
                res_line += " {0:s}".format(TestColors.rate(speedup_pct))
            continue
    res_line += " |"
    print(res_line)
def __rtapp_reference(self, tid, base_idx):
    """Print the reference (base) metrics line for an rt-app test.

    Dumps the absolute per-cluster energy averages and, when present,
    the performance statistics of the base configuration.

    NOTE(review): assumes self.results['rtapp'][tid][base_idx] maps
    'energy'/'performance' to per-metric stats dicts keyed by 'avg' --
    confirm against the results collection code.
    """
    _results = self.results['rtapp']
    logging.debug('Test %s: compare against [%s] base', tid, base_idx)
    res_line = '{0:12s}: {1:22s} | '.format(tid, base_idx)

    # One absolute energy column per cluster, plus the total
    for cluster in ['LITTLE', 'big', 'Total']:
        nrg_avg = _results[tid][base_idx]['energy'][cluster]['avg']
        res_line += ' {0:10.3f}'.format(nrg_avg)
    res_line += ' |'

    # Performance stats are optional: stop at energy when missing
    if 'performance' not in _results[tid][base_idx].keys():
        print(res_line)
        return

    for metric in ['perf_avg', 'slack_pct', 'edp1', 'edp2', 'edp3']:
        base_avg = _results[tid][base_idx]['performance'][metric]['avg']
        logging.debug('idx: %s, base: %s', metric, base_avg)
        if metric == 'perf_avg':
            res_line += ' {0:s}'.format(TestColors.rate(base_avg))
        elif metric == 'slack_pct':
            # For slack a positive value is NOT good
            res_line += ' {0:s}'.format(
                TestColors.rate(base_avg, positive_is_good=False))
        elif 'edp' in metric:
            res_line += ' {0:10.2e}'.format(base_avg)
    res_line += ' |'
    print(res_line)
def __rtapp_reference(self, tid, base_idx):
    """Print the reference (base) metrics line for an rt-app test.

    Dumps the absolute per-cluster energy averages and, when present,
    the performance statistics of the base configuration.

    NOTE(review): assumes self.results['rtapp'][tid][base_idx] maps
    'energy'/'performance' to per-metric stats dicts keyed by 'avg' --
    confirm against the results collection code.
    """
    _results = self.results['rtapp']
    self._log.debug('Test %s: compare against [%s] base', tid, base_idx)
    res_line = '{0:12s}: {1:22s} | '.format(tid, base_idx)

    # One absolute energy column per cluster, plus the total
    for cluster in ['LITTLE', 'big', 'Total']:
        nrg_avg = _results[tid][base_idx]['energy'][cluster]['avg']
        res_line += ' {0:10.3f}'.format(nrg_avg)
    res_line += ' |'

    # Performance stats are optional: stop at energy when missing
    if 'performance' not in _results[tid][base_idx].keys():
        print(res_line)
        return

    for metric in ['perf_avg', 'slack_pct', 'edp1', 'edp2', 'edp3']:
        base_avg = _results[tid][base_idx]['performance'][metric]['avg']
        self._log.debug('idx: %s, base: %s', metric, base_avg)
        if metric == 'perf_avg':
            res_line += ' {0:s}'.format(TestColors.rate(base_avg))
        elif metric == 'slack_pct':
            # For slack a positive value is NOT good
            res_line += ' {0:s}'.format(
                TestColors.rate(base_avg, positive_is_good=False))
        elif 'edp' in metric:
            res_line += ' {0:10.2e}'.format(base_avg)
    res_line += ' |'
    print(res_line)
def __default_compare(self, wtype, tid, base_idx, test_idx, formats):
    """Compare a test configuration against a base one for a generic
    workload type, printing one report line.

    Reports per-cluster energy deltas and, when available, performance
    deltas, either as absolute differences ('absolute' in formats) or as
    colored relative percentages.

    :param wtype: workload type key into self.results
    :param tid: test identifier
    :param base_idx: key of the baseline configuration
    :param test_idx: key of the configuration under comparison
    :param formats: iterable of output formats; 'absolute' selects raw
                    deltas instead of colored relative percentages
    """
    _results = self.results[wtype]

    logging.debug('Test %s: compare %s with %s', tid, base_idx, test_idx)
    res_comp = '{0:s} vs {1:s}'.format(test_idx, base_idx)
    res_line = '{0:8s}: {1:22s} | '.format(tid, res_comp)

    # Dump all energy metrics
    for cpus in ['LITTLE', 'big', 'Total']:
        # If either base or test has a 0 MAX energy, this means that
        # energy has not been collected
        base_max = _results[tid][base_idx]['energy'][cpus]['max']
        test_max = _results[tid][test_idx]['energy'][cpus]['max']
        if base_max == 0 or test_max == 0:
            res_line += ' {0:10s}'.format('NA')
            continue
        # Otherwise, report energy values
        res_base = _results[tid][base_idx]['energy'][cpus]['avg']
        res_test = _results[tid][test_idx]['energy'][cpus]['avg']
        speedup_cnt = res_test - res_base
        if 'absolute' in formats:
            res_line += ' {0:10.2f}'.format(speedup_cnt)
        else:
            # Guard against a zero base average (consistent with the
            # rt-app comparison path); a positive energy delta is NOT good
            speedup_pct = 0
            if res_base != 0:
                speedup_pct = 100.0 * speedup_cnt / res_base
            res_line += ' {0:s}'.format(
                TestColors.rate(speedup_pct, positive_is_good=False))
    res_line += ' |'

    # If available, dump also performance results
    if 'performance' not in _results[tid][base_idx].keys():
        print(res_line)
        return

    for pidx in ['perf_avg', 'ctime_avg', 'edp1', 'edp2', 'edp3']:
        res_base = _results[tid][base_idx]['performance'][pidx]['avg']
        res_test = _results[tid][test_idx]['performance'][pidx]['avg']
        logging.debug('idx: %s, base: %s, test: %s',
                      pidx, res_base, res_test)

        # Compute difference base-vs-test: for performance higher is
        # better, for completion time and EDP lower is better
        speedup_cnt = 0
        if res_base != 0:
            if pidx in ['perf_avg']:
                speedup_cnt = res_test - res_base
            else:
                speedup_cnt = res_base - res_test

        if 'absolute' in formats:
            # EDP values use scientific notation, the rest fixed point
            if 'edp' in pidx:
                res_line += ' {0:10.2e}'.format(speedup_cnt)
            else:
                res_line += ' {0:10.2f}'.format(speedup_cnt)
        else:
            # Compute the relative figure to rate
            speedup_pct = 0
            if res_base != 0:
                if pidx in ['perf_avg']:
                    # perf_avg deltas are reported as-is, not normalized
                    speedup_pct = speedup_cnt
                else:
                    speedup_pct = 100.0 * speedup_cnt / res_base
            res_line += ' {0:s}'.format(TestColors.rate(speedup_pct))
    res_line += ' |'
    print(res_line)
def __default_compare(self, wtype, tid, base_idx, test_idx, formats):
    """Compare a test configuration against a base one for a generic
    workload type, printing one report line.

    Reports per-cluster energy deltas and, when available, performance
    deltas, either as absolute differences ('absolute' in formats) or as
    colored relative percentages.

    :param wtype: workload type key into self.results
    :param tid: test identifier
    :param base_idx: key of the baseline configuration
    :param test_idx: key of the configuration under comparison
    :param formats: iterable of output formats; 'absolute' selects raw
                    deltas instead of colored relative percentages
    """
    _results = self.results[wtype]

    self._log.debug('Test %s: compare %s with %s', tid, base_idx, test_idx)
    res_comp = '{0:s} vs {1:s}'.format(test_idx, base_idx)
    res_line = '{0:8s}: {1:22s} | '.format(tid, res_comp)

    # Dump all energy metrics
    for cpus in ['LITTLE', 'big', 'Total']:
        # If either base or test has a 0 MAX energy, this means that
        # energy has not been collected
        base_max = _results[tid][base_idx]['energy'][cpus]['max']
        test_max = _results[tid][test_idx]['energy'][cpus]['max']
        if base_max == 0 or test_max == 0:
            res_line += ' {0:10s}'.format('NA')
            continue
        # Otherwise, report energy values
        res_base = _results[tid][base_idx]['energy'][cpus]['avg']
        res_test = _results[tid][test_idx]['energy'][cpus]['avg']
        speedup_cnt = res_test - res_base
        if 'absolute' in formats:
            res_line += ' {0:10.2f}'.format(speedup_cnt)
        else:
            # Guard against a zero base average (consistent with the
            # rt-app comparison path); a positive energy delta is NOT good
            speedup_pct = 0
            if res_base != 0:
                speedup_pct = 100.0 * speedup_cnt / res_base
            res_line += ' {0:s}'.format(
                TestColors.rate(speedup_pct, positive_is_good=False))
    res_line += ' |'

    # If available, dump also performance results
    if 'performance' not in _results[tid][base_idx].keys():
        print(res_line)
        return

    for pidx in ['perf_avg', 'ctime_avg', 'edp1', 'edp2', 'edp3']:
        res_base = _results[tid][base_idx]['performance'][pidx]['avg']
        res_test = _results[tid][test_idx]['performance'][pidx]['avg']
        self._log.debug('idx: %s, base: %s, test: %s',
                        pidx, res_base, res_test)

        # Compute difference base-vs-test: for performance higher is
        # better, for completion time and EDP lower is better
        speedup_cnt = 0
        if res_base != 0:
            if pidx in ['perf_avg']:
                speedup_cnt = res_test - res_base
            else:
                speedup_cnt = res_base - res_test

        if 'absolute' in formats:
            # EDP values use scientific notation, the rest fixed point
            if 'edp' in pidx:
                res_line += ' {0:10.2e}'.format(speedup_cnt)
            else:
                res_line += ' {0:10.2f}'.format(speedup_cnt)
        else:
            # Compute the relative figure to rate
            speedup_pct = 0
            if res_base != 0:
                if pidx in ['perf_avg']:
                    # perf_avg deltas are reported as-is, not normalized
                    speedup_pct = speedup_cnt
                else:
                    speedup_pct = 100.0 * speedup_cnt / res_base
            res_line += ' {0:s}'.format(TestColors.rate(speedup_pct))
    res_line += ' |'
    print(res_line)
def __default_compare(self, wtype, tid, base_idx, test_idx, formats):
    """Compare a test configuration against a base one for a generic
    workload type, printing one report line.

    Reports per-cluster energy deltas and, when available, performance
    deltas, either as absolute differences ("absolute" in formats) or as
    colored relative percentages.

    :param wtype: workload type key into self.results
    :param tid: test identifier
    :param base_idx: key of the baseline configuration
    :param test_idx: key of the configuration under comparison
    :param formats: iterable of output formats; "absolute" selects raw
                    deltas instead of colored relative percentages
    """
    _results = self.results[wtype]

    logging.debug("Test %s: compare %s with %s", tid, base_idx, test_idx)
    res_comp = "{0:s} vs {1:s}".format(test_idx, base_idx)
    res_line = "{0:8s}: {1:22s} | ".format(tid, res_comp)

    # Dump all energy metrics
    for cpus in ["LITTLE", "big", "Total"]:
        # If either base or test has a 0 MAX energy, this means that
        # energy has not been collected
        base_max = _results[tid][base_idx]["energy"][cpus]["max"]
        test_max = _results[tid][test_idx]["energy"][cpus]["max"]
        if base_max == 0 or test_max == 0:
            res_line += " {0:10s}".format("NA")
            continue
        # Otherwise, report energy values
        res_base = _results[tid][base_idx]["energy"][cpus]["avg"]
        res_test = _results[tid][test_idx]["energy"][cpus]["avg"]
        speedup_cnt = res_test - res_base
        if "absolute" in formats:
            res_line += " {0:10.2f}".format(speedup_cnt)
        else:
            # Guard against a zero base average (consistent with the
            # rt-app comparison path); a positive energy delta is NOT good
            speedup_pct = 0
            if res_base != 0:
                speedup_pct = 100.0 * speedup_cnt / res_base
            res_line += " {0:s}".format(
                TestColors.rate(speedup_pct, positive_is_good=False))
    res_line += " |"

    # If available, dump also performance results
    if "performance" not in _results[tid][base_idx].keys():
        print(res_line)
        return

    for pidx in ["perf_avg", "ctime_avg", "edp1", "edp2", "edp3"]:
        res_base = _results[tid][base_idx]["performance"][pidx]["avg"]
        res_test = _results[tid][test_idx]["performance"][pidx]["avg"]
        logging.debug("idx: %s, base: %s, test: %s",
                      pidx, res_base, res_test)

        # Compute difference base-vs-test: for performance higher is
        # better, for completion time and EDP lower is better
        speedup_cnt = 0
        if res_base != 0:
            if pidx in ["perf_avg"]:
                speedup_cnt = res_test - res_base
            else:
                speedup_cnt = res_base - res_test

        if "absolute" in formats:
            # EDP values use scientific notation, the rest fixed point
            if "edp" in pidx:
                res_line += " {0:10.2e}".format(speedup_cnt)
            else:
                res_line += " {0:10.2f}".format(speedup_cnt)
        else:
            # Compute the relative figure to rate
            speedup_pct = 0
            if res_base != 0:
                if pidx in ["perf_avg"]:
                    # perf_avg deltas are reported as-is, not normalized
                    speedup_pct = speedup_cnt
                else:
                    speedup_pct = 100.0 * speedup_cnt / res_base
            res_line += " {0:s}".format(TestColors.rate(speedup_pct))
    res_line += " |"
    print(res_line)