def test_sustained_levels(self):
    # Experiment configured to keep the scheduler overloaded at 1.5x
    # capacity for one hour after a four-hour preload.
    definition = ExperimentDefinition(
        seed="AAAAA",
        machine="edison",
        trace_type="single",
        manifest_list=[],
        workflow_policy="no",
        workflow_period_s=0,
        workflow_handling="single",
        preload_time_s=3600 * 4,
        workload_duration_s=3600 * 1,
        overload_target=1.5)
    ExperimentRunner.configure("tmp/trace_folder", "tmp", True,
                               "myhost", "myUser")
    trace_generator = MyTraceGen()
    machine = definition.get_machine()

    er = ExperimentRunner(definition)
    er._generate_trace_files(definition, trace_generator=trace_generator)

    # check_pressure returns the accumulated core-seconds submitted and
    # the period over which they were measured.
    acc_cores, period = trace_generator.check_pressure(
        machine.get_total_cores(), 3600, 1.5, self, 1.0)
    total_pressure = float(acc_cores) / float(
        period * machine.get_total_cores())
    print(total_pressure)
    self.assertAlmostEqual(total_pressure, 1.5, delta=0.01)
    self.assertLess(total_pressure, 1.8)
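# A minimal standalone sketch of the pressure metric the test asserts on:
# accumulated requested core-seconds divided by the machine's total
# capacity over the measured period. The helper name is hypothetical; the
# real accounting lives inside MyTraceGen.check_pressure.
def _pressure(acc_core_seconds, period_s, total_cores):
    """Fraction of machine capacity requested during period_s."""
    return float(acc_core_seconds) / float(period_s * total_cores)

# e.g. a 1.5x overload of a 100-core machine over one hour:
assert abs(_pressure(1.5 * 100 * 3600, 3600, 100) - 1.5) < 1e-9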
def do_full_analysis(self, db_obj):
    result_trace = self.load_trace(db_obj)
    first = True
    last = False
    # Accumulate job and workflow values across every subtrace of the
    # grouped experiment before computing the aggregate results.
    for trace_id in self._definition._subtraces:
        last = trace_id == self._definition._subtraces[-1]
        result_trace.load_trace(db_obj, trace_id)
        result_trace.do_workflow_pre_processing(append=not first)
        one_definition = ExperimentDefinition()
        one_definition.load(db_obj, trace_id)
        result_trace.fill_job_values(
            start=one_definition.get_start_epoch(),
            stop=one_definition.get_end_epoch(),
            append=not first)
        result_trace.fill_workflow_values(
            start=one_definition.get_start_epoch(),
            stop=one_definition.get_end_epoch(),
            append=not first)
        result_trace.calculate_job_results_grouped_core_seconds(
            one_definition.get_machine().get_core_seconds_edges(),
            last, db_obj, self._definition._trace_id,
            start=one_definition.get_start_epoch(),
            stop=one_definition.get_end_epoch(),
            append=not first)
        first = False
    # Store the aggregated job, workflow, and utilization results under
    # the grouped experiment's own trace id.
    result_trace.calculate_and_store_job_results(
        store=True, db_obj=db_obj, trace_id=self._definition._trace_id)
    result_trace._wf_extractor.calculate_and_store_overall_results(
        store=True, db_obj=db_obj, trace_id=self._definition._trace_id)
    result_trace._wf_extractor.calculate_and_store_per_manifest_results(
        store=True, db_obj=db_obj, trace_id=self._definition._trace_id)
    result_trace.calculate_utilization_median_result(
        self._definition._subtraces, store=True, db_obj=db_obj,
        trace_id=self._definition._trace_id)
    result_trace.calculate_utilization_mean_result(
        self._definition._subtraces, store=True, db_obj=db_obj,
        trace_id=self._definition._trace_id)
    self._definition.mark_analysis_done(db_obj)
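# Hypothetical driver sketch showing how do_full_analysis would be
# invoked for a grouped experiment. AnalysisRunner is an assumed name
# for the class that owns do_full_analysis; get_central_db and trace id
# 170 are taken from the surrounding scripts.
db_obj = get_central_db()
definition = ExperimentDefinition()
definition.load(db_obj, 170)
runner = AnalysisRunner(definition)
runner.do_full_analysis(db_obj)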
db_obj = get_central_db()

base_trace_id_percent, lim = get_args(2459, True)
print("Base Exp", base_trace_id_percent)
print("Using analysis of limited workflows:", lim)

edge_keys = {0: "[0,48] core.h",
             48 * 3600: "(48, 960] core.h",
             960 * 3600: "(960, inf.) core.h"}

base_exp = 170
exp = ExperimentDefinition()
exp.load(db_obj, base_exp)
core_seconds_edges = exp.get_machine().get_core_seconds_edges()

trace_id_rows = gen_trace_ids_exps(base_trace_id_percent,
                                   inverse=False,
                                   group_jump=18, block_count=6,
                                   base_exp_group=None)

time_labels = ["", "10%", "", "", "25%", "", "", "50%", "", "",
               "75%", "", "", "100%", ""]
manifest_label = ["floodP", "longW", "wideL", "cybers", "sipht",
                  "montage"]
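# Sketch of how the edge_keys bins resolve: the keys are bin lower
# bounds in core-seconds (48 h and 960 h converted via *3600), so a
# value's label is the one under the largest key not exceeding it. The
# helper name is illustrative only.
def core_seconds_label(core_seconds, edges):
    lower = max(k for k in edges if k <= core_seconds)
    return edges[lower]

# A 100 core-hour job falls in the middle bin:
# core_seconds_label(100 * 3600, edge_keys) -> "(48, 960] core.h"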
db_obj = get_central_db()

target_dir = "utilization-20160616-udog"
if len(sys.argv) == 3:
    target_dir = sys.argv[1]
    trace_id = sys.argv[2]
else:
    raise ValueError("Missing trace id to analyze")

exp = ExperimentDefinition()
exp.load(db_obj, trace_id)
rt = ResultTrace()
rt.load_trace(db_obj, trace_id)

machine = exp.get_machine()
max_cores = machine.get_total_cores()
max_submit_time = rt._lists_submit["time_submit"][-1]


def adjust_ut_plot(ut_stamps, ut_values):
    """Duplicate time stamps so the utilization series plots as steps."""
    new_stamps = []
    new_values = []
    last_value = None
    for (st, vl) in zip(ut_stamps, ut_values):
        if last_value is not None:
            # Hold the previous value until the new stamp, producing a
            # step rather than a linear ramp between samples.
            new_stamps.append(st)
            new_values.append(last_value)
        new_stamps.append(st)
        new_values.append(vl)
        last_value = vl
    return new_stamps, new_values
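# Usage sketch: adjust_ut_plot doubles every stamp after the first so a
# utilization series renders as a step function instead of ramping
# between samples, e.g.:
#   adjust_ut_plot([0, 10, 20], [0.5, 0.9, 0.7])
#   -> ([0, 10, 10, 20, 20], [0.5, 0.5, 0.9, 0.9, 0.7])
# The resulting pairs can be passed straight to matplotlib's plot().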