# One folder for each algorithm: aggregate per-job pipeline counts and the
# throughput constraint T_o from every log file produced for this `algo`.
log_list = os.listdir(os.path.join(args.folder, algo))
successful_job_count = 0
total_jobs = 0
total_time_ms = 0.0
# `np.int` was removed in NumPy 1.24 — plain `int` is the supported spelling.
pipeline_counts = np.zeros(len(cfg.PIPELINES), dtype=int)
metrics[algo + '.makespans'] = []
dev_counts = np.zeros(len(devices))
for filename in log_list:
    filepath = os.path.join(args.folder, algo, filename)
    reader = LogParser(filepath)
    results = reader.extract()
    df = pd.DataFrame(data=results['job_pipelines'],
                      columns=['id', 'pipeline'])
    # Update count for each pipeline: value_counts() yields
    # (pipeline-id -> occurrence count); pipeline ids index pipeline_counts.
    counts = df.pipeline.value_counts()
    for pipeline_id, count in counts.items():
        pipeline_counts[pipeline_id] += count
    # Throughput constraint: take T_o from the first log that defines it.
    # assumes results['T_o'] is a list of (key, value) pairs — TODO confirm
    if T_o is None:
        T_o = results['T_o'][0][1]
    # NOTE(review): SOURCE is truncated here — the original continues with an
    # `else:` branch whose body is not visible in this chunk; restore it when
    # merging this fix.