def test_run():
    """Quick debugging sweep: run every workflow/parameter combination
    three times with a deliberately tiny GA configuration."""
    workflow_names = [
        'Montage_25',
        'CyberShake_30',
        'Inspiral_30',
        'Sipht_30',
        'Epigenomics_24',
    ]
    # Shrink the GA so the sweep finishes quickly.
    BASE_PARAMS["ga_params"]["population"] = 5
    BASE_PARAMS["ga_params"]["Kbest"] = 1
    BASE_PARAMS["ga_params"]["generations"] = 10
    param_sequence = make_linear_sequence(
        BASE_PARAMS,
        {
            "transfer_time": [0, 10, 50, 100, 300, 500, 2000],
            "ideal_flops": [1, 20, 50, 100, 500],
        })
    experiments = [partial(do_exp, name, **params)
                   for name in workflow_names
                   for params in param_sequence]
    # Three sequential repetitions of every experiment.
    results = []
    for experiment in experiments:
        for _ in range(3):
            results.append(experiment())
    saver = UniqueNameSaver(TEMP_PATH, EXPERIMENT_NAME)
    for result in results:
        saver(result)
def inherited_pop_run(exp, wf_tasksids_mapping, repeat_count, base_params, is_debug=False):
    """Launch *exp* once per (workflow, failing-task-id) pair.

    Each run gets a deep copy of *base_params* with
    ``executor_params.task_id_to_fail`` set, plus a shared saver rooted at
    ``TEMP_PATH/igaheft_series``. With ``is_debug`` the runs execute
    sequentially; otherwise they go through ``multi_repeat``.
    """
    out_path = os.path.join(TEMP_PATH, "igaheft_series")
    saver = UniqueNameSaver(out_path, base_params["experiment_name"])
    experiments = []
    for wf_name, task_ids in wf_tasksids_mapping.items():
        for task_id in task_ids:
            cfg = deepcopy(base_params)
            cfg["executor_params"]["task_id_to_fail"] = task_id
            experiments.append(partial(exp, saver=saver, wf_name=wf_name, **cfg))
    experiments = randomize_order(experiments)
    if is_debug:
        # Sequential: repeat_count back-to-back runs of each experiment.
        results = [run() for run in experiments for _ in range(repeat_count)]
    else:
        results = multi_repeat(repeat_count, experiments)
    # NOTE(review): results are not persisted here; saving is apparently
    # delegated to the experiment itself via the injected saver.
def do_exp():
    """Run one DCGA experiment and persist its final makespan and log.

    Returns the final makespan produced by ``run_dcga``.
    """
    makespan, logbook = run_dcga(_wf, estimator, rm, heft_mapping,
                                 heft_ordering, **params)
    saver = UniqueNameSaver("{0}/temp/dcga_exp".format(__root_path__))
    saver({
        "final_makespan": makespan,
        "iterations": logbook,
    })
    return makespan
def coeff_run():
    """Sweep the compute/data intensivity coefficient for two workflows.

    For every coefficient ``c`` the transfer time is derived from the
    longest task runtime so that ``c`` fixes the compute-to-data ratio.
    """
    all_coeffs = ([1 / 100, 1 / 50, 1 / 10, 1 / 5, 1 / 2.766, 1, 2.766]
                  + list(range(5, 101, 5)))
    workflow_coeffs = [
        ('CyberShake_30', all_coeffs),
        ('Epigenomics_24', all_coeffs),
    ]

    def transfer_time(max_runtime, c):
        # Scale transfer time off the longest task so c sets the ratio.
        return max_runtime * BASE_PARAMS["ideal_flops"] / c

    experiments = []
    for wf_name, coeffs in workflow_coeffs:
        _wf = wf(wf_name)
        max_runtime = max(task.runtime for task in _wf.get_all_unique_tasks())
        param_sets = [copy_and_set(BASE_PARAMS,
                                   transfer_time=transfer_time(max_runtime, c),
                                   data_intensive_coeff=c)
                      for c in coeffs]
        experiments.extend(partial(do_exp, wf_name, **ps) for ps in param_sets)

    results = multi_repeat(REPEAT_COUNT, experiments)
    saver = UniqueNameSaver(TEMP_PATH, EXPERIMENT_NAME)
    for result in results:
        saver(result)
def changing_reliability_run(exp, reliability, individuals_counts, repeat_count,
                             wf_names, base_params, is_debug=False):
    """Run *exp* over the cross product of reliability values, population
    sizes and workflow names.

    Each configuration is a deep copy of *base_params* with the estimator
    reliability and the population size (``alg_params.n``) set; the
    migration count is pinned to 10% of the population. Runs are shuffled
    and executed either sequentially (``is_debug``) or via ``multi_repeat``.
    """
    out_path = os.path.join(TEMP_PATH, "gaheft_series")
    saver = UniqueNameSaver(out_path, base_params["experiment_name"])
    configs = []
    for rel in reliability:
        for ind_count in individuals_counts:
            cfg = deepcopy(base_params)
            cfg["estimator_settings"]["reliability"] = rel
            cfg["alg_params"]["n"] = ind_count
            # Migration count is kept at 10% of the population size.
            cfg["alg_params"]["migrCount"] = int(0.1 * ind_count)
            configs.append(cfg)
    experiments = randomize_order(
        [partial(exp, saver=saver, wf_name=name, **cfg)
         for name in wf_names
         for cfg in configs])
    if is_debug:
        results = [run() for run in experiments for _ in range(repeat_count)]
    else:
        results = multi_repeat(repeat_count, experiments)
    # NOTE(review): results are not persisted here; saving is apparently
    # delegated to the experiment itself via the injected saver.
def real_run():
    """Full sweep: every workflow against every generated parameter set,
    repeated REPEAT_COUNT times, with results persisted via the saver."""
    workflow_names = [
        'Montage_25',
        'CyberShake_30',
        'Inspiral_30',
        'Sipht_30',
        'Epigenomics_24',
    ]
    param_sequence = make_linear_sequence(
        BASE_PARAMS,
        {
            "transfer_time": [0, 10, 50, 100, 300, 500, 2000],
            "ideal_flops": [1, 20, 50, 100, 500],
        })
    experiments = [partial(do_exp, name, **params)
                   for name in workflow_names
                   for params in param_sequence]
    results = multi_repeat(REPEAT_COUNT, experiments)
    saver = UniqueNameSaver(TEMP_PATH, EXPERIMENT_NAME)
    for result in results:
        saver(result)
def test_run(exp, base_params):
    """Debug run: execute *exp* once on Montage_25 for each reliability
    value (currently just 0.95) and persist the results."""
    reliability = [0.95]
    wf_name = "Montage_25"
    configs = []
    for rel in reliability:
        cfg = deepcopy(base_params)
        cfg["estimator_settings"]["reliability"] = rel
        configs.append(cfg)
    experiments = [partial(exp, wf_name=wf_name, **cfg) for cfg in configs]
    results = [run() for run in experiments]
    saver = UniqueNameSaver(os.path.join(TEMP_PATH, "gaheft_series"),
                            base_params["experiment_name"])
    for result in results:
        saver(result)
def do_exp_schedule(takeHeftSchedule=True):
    """Run GA-vs-HEFT once and persist the mapping/ordering of the chosen
    schedule.

    Returns ``(ga_makespan, heft_makespan, ga_schedule, heft_schedule, name)``
    where *name* is the file name the saver produced.
    """
    saver = UniqueNameSaver("../../temp/ga_vs_heft_exp_heft_schedule")
    ga_makespan, heft_makespan, ga_schedule, heft_schedule = run(wf_names[0])
    ## TODO: pure hack
    chosen = heft_schedule if takeHeftSchedule else ga_schedule
    # (job id, node flops) pairs, sorted by job id.
    mapping = sorted(
        ((item.job.id, node.flops)
         for node, items in chosen.mapping.items()
         for item in items),
        key=lambda pair: pair[0])
    # NOTE(review): the ordering is always derived from heft_schedule, even
    # when takeHeftSchedule is False — confirm this asymmetry is intended.
    starts = [(item.job.id, item.start_time)
              for node, items in heft_schedule.mapping.items()
              for item in items]
    ordering = [job_id for job_id, _start in sorted(starts, key=lambda pair: pair[1])]
    name = saver({"mapping": mapping, "ordering": ordering})
    return ga_makespan, heft_makespan, ga_schedule, heft_schedule, name
def do_exp(arg):
    """Run the experiment registered under *arg* in ``tasks``, saving its
    output into a directory named after the task."""
    config, task_name = tasks[arg]
    saver = UniqueNameSaver(os.path.join(base_path, task_name))
    return do_experiment(saver, config, _wf, rm, estimator)
"ideal_inds": { MAPPING_SPECIE: ms_str_repr, ORDERING_SPECIE: os_ideal_ind }, "wf_name": _wf.name }, "initial_pops": initial_pops, "final_solution": solution, "final_makespan": m, "iterations": logbook } saver(data) return m saver = UniqueNameSaver(os.path.join(__root_path__, "temp/cga_exp")) def do_exp(): ## TODO: remove time measure tstart = datetime.now() res = do_experiment(saver, config, _wf, rm, estimator) tend = datetime.now() tres = tend - tstart print("Time Result: " + str(tres.total_seconds())) return res if __name__ == "__main__": res = repeat(do_exp, 1)
"best_components_itself": best_components_itself(sols), "best": -1 * Utility.makespan( build_schedule(_wf, estimator, rm, max(sols, key=lambda x: x.fitness))) }, "operators": { # "choose": default_choose, "build_solutions": default_build_solutions, # "fitness": fitness_mapping_and_ordering, "fitness": overhead_fitness_mapping_and_ordering, # "assign_credits": default_assign_credits # "assign_credits": max_assign_credits "assign_credits": assign_from_transfer_overhead } } saver = UniqueNameSaver("../../temp/cga_heft_mixin") def do_exp(): return do_experiment(saver, config, _wf, rm, estimator) if __name__ == "__main__": res = repeat(do_exp, 1) print("RESULTS: ") print(res)
"ga_params": { "Kbest": 5, "population": 50, "crossover_probability": 0.3, #0.8 "replacing_mutation_probability": 0.1, #0.5 "sweep_mutation_probability": 0.3, #0.4 "generations": 100 }, "nodes_conf": [10, 15, 25, 30], "transfer_time": 100, "heft_initial": False } run = functools.partial(MixRunner(), **PARAMS) directory = "../../temp/ga_vs_heft_exp" saver = UniqueNameSaver("../../temp/ga_vs_heft_exp") # def do_exp(): # ga_makespan, heft_makespan, ga_schedule, heft_schedule = run(wf_names[0]) # saver(ga_makespan) # return ga_makespan def do_exp_schedule(takeHeftSchedule=True): saver = UniqueNameSaver("../../temp/ga_vs_heft_exp_heft_schedule") ga_makespan, heft_makespan, ga_schedule, heft_schedule = run(wf_names[0]) ## TODO: pure hack schedule = heft_schedule if takeHeftSchedule else ga_schedule
"assign_credits": max_assign_credits } } def do_experiment(saver, config, _wf, rm, estimator): islands = [CoevolutionGA(**config), CoevolutionGA(**config), CoevolutionGA(**config), CoevolutionGA(**config)] # islands = [CoevolutionGA(**config), CoevolutionGA(**config)] migration = partial(equal_social_migration_scheme, k=3, selection=best_selection) best, islands = run_island_ga(islands, migration, 100, 20) return best.fitness saver = UniqueNameSaver("{0}/temp/icga_exp".format(__root_path__)) def do_exp(): res = do_experiment(saver, config, _wf, rm, estimator) return res if __name__ == "__main__": res = repeat(do_exp, 1) print("RESULTS: ") print(res)
_wf = wf("Montage_100") rm = ExperimentResourceManager(rg.r([10, 15, 25, 30])) estimator = ExperimentEstimator(None, ideal_flops=20, transfer_time=100) selector = ArchivedSelector(5)(tourn) ms_ideal_ind = build_ms_ideal_ind(_wf, rm) os_ideal_ind = build_os_ideal_ind(_wf) os_representative = extract_ordering_from_ga_file( "{0}/temp/ga_schedule_full_439_tr100_m100.json".format(__root_path__)) heft_mapping = extract_mapping_from_ga_file( "{0}/temp/heft_etalon_full_tr100_m100.json".format(__root_path__), rm) saver = UniqueNameSaver("../../temp/cga_fixed_ordering") def do_exp(): config = { "interact_individuals_count": 100, "generations": 300, "env": Env(_wf, rm, estimator), "species": [ Specie( name=MAPPING_SPECIE, pop_size=50, cxb=0.9,
def do_exp():
    """Run a single experiment, saving results under temp/cga_init_pop_bad."""
    out_dir = "{0}/temp/cga_init_pop_bad".format(__root_path__)
    return do_experiment(UniqueNameSaver(out_dir), cfg, _wf, rm, estimator)
def __init__(self, dir_name, experiment_name):
    """Create a saver rooted at *dir_name*.

    *dir_name* may be absolute; otherwise it is resolved relative to
    ``TEMP_PATH``.
    """
    base_dir = dir_name if os.path.isabs(dir_name) \
        else os.path.join(TEMP_PATH, dir_name)
    self._saver = UniqueNameSaver(base_dir, experiment_name)
ctx, pop, heft_ordering, 3), stat=lambda pop: {"hamming_distances": hamming_distances(pop, os_ideal_ind)}) ], "solstat": lambda sols: { "best_components": hamming_for_best_components(sols, ms_ideal_ind, os_ideal_ind), "best_components_itself": best_components_itself(sols) }, "operators": { # "choose": default_choose, "build_solutions": default_build_solutions, # "fitness": fitness_mapping_and_ordering, "fitness": overhead_fitness_mapping_and_ordering, "assign_credits": default_assign_credits } } saver = UniqueNameSaver("../../temp/cga_fixed_mapping") def do_exp(): return do_experiment(saver, config, _wf, rm, estimator) if __name__ == "__main__": res = repeat(do_exp, 1) print("RESULTS: ") print(res)