import time

import pyLambdaFlows


def benchmark(loopcount, workers, matn, verbose=False):
    t1 = time.time()
    N = workers
    loopcounts = [(loopcount, N)] * N

    # Build the flow: one Source feeding a Map over the compute kernel (./compute.py).
    source = pyLambdaFlows.Source()
    compute_op = pyLambdaFlows.Map(source, "./compute.py")

    sess = pyLambdaFlows.Session(credentials_csv="./accessKeys.csv")
    compute_op.compile(sess=sess, purge=True)
    promess = compute_op.eval(feed_dict={source: loopcounts}, wait=False)
    print("invocation done, dur=", time.time() - t1)

    # Poll until all N workers have reported back, recording a progress timeline.
    local_jobs_done_timeline = []
    result_count = 0
    while result_count < N:
        result_count = promess.getStatus()
        local_jobs_done_timeline.append((time.time(), result_count))

        est_flop = 2 * result_count * loopcount * matn ** 3
        est_gflops = est_flop / 1e9 / (time.time() - t1)
        if verbose:
            print("jobs done: {:5d} runtime: {:5.1f}s {:8.1f} GFLOPS ".format(
                result_count, time.time() - t1, est_gflops))

        if result_count == N:
            break
        time.sleep(1)

    if verbose:
        print("getting results")
    results = promess.getResult()

    if verbose:
        print("getting status")
    # The per-future statuses below come from the original pywren benchmark; pyLambdaFlows
    # does not expose them, so they are kept only as placeholders.
    # results = [f.result(throw_except=False) for f in futures]
    # run_statuses = [f.run_status for f in futures]
    # invoke_statuses = [f.invoke_status for f in futures]
    run_statuses = None
    invoke_statuses = None

    all_done = time.time()
    total_time = all_done - t1
    print("total time", total_time)
    est_flop = result_count * 2 * loopcount * matn ** 3
    print(est_flop / 1e9 / total_time, "GFLOPS")

    res = {'total_time': total_time,
           'est_flop': est_flop,
           'run_statuses': run_statuses,
           'invoke_statuses': invoke_statuses,
           # 'callset_id': futures[0].callset_id,  # pywren-only field
           'local_jobs_done_timeline': local_jobs_done_timeline,
           'results': results}
    return res
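# Illustrative only: a minimal sketch of how benchmark() might be driven, assuming
# ./compute.py and ./accessKeys.csv are present. The worker count, loop count and
# matrix size below are arbitrary example values, not the ones used in the report.
if __name__ == "__main__":
    stats = benchmark(loopcount=10, workers=4, matn=1024, verbose=True)
    # Derive the aggregate throughput from the returned dictionary.
    print("achieved {:.1f} GFLOPS over {:.1f}s".format(
        stats['est_flop'] / 1e9 / stats['total_time'], stats['total_time']))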
from time import time

import pyLambdaFlows

# SPLIT_VAR, POPULATION_SIZE, MUTATION_RATE, CROSSOVER_RATE, TOURNAMENT_SELECTION_SIZE,
# NUMB_OF_ELITE_SCHEDULES, NUMB_OF_GENERATION and MapToMapOp are defined elsewhere in the project.


def run():
    start = time()

    # Create the pyLambdaFlows Source, i.e. the origin of the invocations; its input
    # data is supplied through the feed_dict below.
    param = pyLambdaFlows.Source()
    # The Source must be given every file required to execute the other Lambda functions.
    param.files.append("./population.py")
    param.files.append("./schedule.py")
    param.files.append("./domain.py")
    param.files.append("./utils.py")
    param.files.append("./data.py")
    param.files.append("./genetic_algorithm.py")

    # The kernel is called through a Map operator; the input list (param_list) holds
    # SPLIT_VAR elements, so the Create_And_Evaluate kernel is invoked once per element.
    # It must also be given the list of files required for its execution.
    b = pyLambdaFlows.op.Map(param, ["./Create_And_Evaluate.py", "./population.py", "./schedule.py",
                                     "./domain.py", "./utils.py", "./data.py", "./genetic_algorithm.py"])
    # dependencies with size=SPLIT_VAR: [[0], [1], [2], [3]]

    # The following kernels use an operator written specifically to perform a Map in which
    # every result is sent to every kernel of the next layer (see the diagram in the report).
    c = MapToMapOp([b, param], ["./Rate_And_Generate.py", "./population.py", "./schedule.py",
                                "./domain.py", "./utils.py", "./data.py", "./genetic_algorithm.py"])
    # dependencies with size=SPLIT_VAR: [[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]
    d = MapToMapOp([c, param], ["./Rate_And_Generate.py", "./population.py", "./schedule.py",
                                "./domain.py", "./utils.py", "./data.py", "./genetic_algorithm.py"])
    e = MapToMapOp([d, param], ["./Rate_And_Generate.py", "./population.py", "./schedule.py",
                                "./domain.py", "./utils.py", "./data.py", "./genetic_algorithm.py"])
    f = MapToMapOp([e, param], ["./Rate_And_Generate.py", "./population.py", "./schedule.py",
                                "./domain.py", "./utils.py", "./data.py", "./genetic_algorithm.py"])

    # Build the parameter list fed to the Source.
    param_list = []
    for _ in range(SPLIT_VAR):
        param_list.append([POPULATION_SIZE, MUTATION_RATE, CROSSOVER_RATE, TOURNAMENT_SELECTION_SIZE,
                           SPLIT_VAR, NUMB_OF_ELITE_SCHEDULES, NUMB_OF_GENERATION])

    # Open a new Lambda session with the credentials stored in 'accessKeys.csv' and
    # launch the kernel invocations.
    with pyLambdaFlows.Session(credentials_csv="./accessKeys.csv") as sess:
        f.compile(purge=False)
        result = f.eval(feed_dict={param: param_list})
    end = time()

    # Inspect the result: collect the elite schedules of every split and sort them by fitness.
    res_list = []
    for i in range(SPLIT_VAR):
        for s in range(NUMB_OF_ELITE_SCHEDULES):
            res_list.append(result[i][s])
    print("Execution time: {}".format(end - start))
    res = sorted(res_list, key=lambda schedule: schedule._fitness, reverse=True)
    print("Schedule: {} with fitness = {}".format(res[0], res[0]._fitness))
    final_res = (res[0]._fitness, (end - start))
    return final_res
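# Illustrative only: how the dependency lists quoted in the comments above could be
# generated for an arbitrary SPLIT_VAR. A plain Map links kernel i of one layer to
# kernel i of the next (one-to-one), whereas MapToMapOp links every kernel of a layer
# to every kernel of the next (all-to-all). This sketches the index pattern only,
# not the pyLambdaFlows dependency API itself.
def map_dependencies(size):
    # One-to-one: with size=4 this yields [[0], [1], [2], [3]].
    return [[i] for i in range(size)]


def map_to_map_dependencies(size):
    # All-to-all: with size=4 this yields [[0, 1, 2, 3]] repeated 4 times.
    return [list(range(size)) for _ in range(size)]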
from time import time

import pyLambdaFlows

# SPLIT_VAR, POPULATION_SIZE, MUTATION_RATE, CROSSOVER_RATE, TOURNAMENT_SELECTION_SIZE,
# NUMB_OF_ELITE_SCHEDULES and NUMB_OF_GENERATION are defined elsewhere in the project.


def run():
    start = time()

    # Create the pyLambdaFlows Source, i.e. the origin of the invocations; its input
    # data is supplied through the feed_dict below.
    param = pyLambdaFlows.Source()
    # The Source must be given every file required to execute the other Lambda functions.
    param.files.append("./population.py")
    param.files.append("./schedule.py")
    param.files.append("./domain.py")
    param.files.append("./utils.py")
    param.files.append("./data.py")
    param.files.append("./genetic_algorithm.py")

    # The kernel is called through a Map operator, but since the input list (param_list)
    # contains a single element, this mapper is invoked only once.
    # It must also be given the list of files required for its execution.
    b = pyLambdaFlows.op.Map(param, ["./Sequential_Kernel.py", "./population.py", "./schedule.py",
                                     "./domain.py", "./utils.py", "./data.py", "./genetic_algorithm.py"])
    # dependencies with size=SPLIT_VAR: [[0]]

    # Build the parameter list fed to the Source.
    param_list = []
    for _ in range(SPLIT_VAR):
        param_list.append([POPULATION_SIZE, MUTATION_RATE, CROSSOVER_RATE, TOURNAMENT_SELECTION_SIZE,
                           SPLIT_VAR, NUMB_OF_ELITE_SCHEDULES, NUMB_OF_GENERATION])

    # Open a new Lambda session with the credentials stored in 'accessKeys.csv' and
    # launch the kernel invocations.
    with pyLambdaFlows.Session(credentials_csv="./accessKeys.csv") as sess:
        b.compile(purge=False)
        result = b.eval(feed_dict={param: param_list})
    end = time()

    # Display the result.
    print(result)
    print("Execution time: {}".format(end - start))
    print("Schedule: {} with fitness = {}".format(result[0], result[0]._fitness))
    final_res = (result[0]._fitness, (end - start))
    return final_res
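# Illustrative only: a minimal driver comparing the distributed and sequential variants
# of run() above. The module names parallel_main and sequential_main are assumptions made
# for this sketch; the actual file names in the project may differ.
if __name__ == "__main__":
    import parallel_main
    import sequential_main

    par_fitness, par_time = parallel_main.run()
    seq_fitness, seq_time = sequential_main.run()
    print("speedup: {:.2f}x (fitness {} vs {})".format(
        seq_time / par_time, par_fitness, seq_fitness))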
import pyLambdaFlows

# ConvOp is the project's custom convolution operator, defined elsewhere.

NB_IT = 6
SIZE = 202
MAX = 1.
GATHERING = 2
# 100 - it 3 # 5 - 202 - 10

# Square grid initialised to zero, with the border cells held at MAX.
DATA = [[0 for _ in range(SIZE)] for _ in range(SIZE)]
for i in range(SIZE):
    DATA[i][0] = MAX
    DATA[i][-1] = MAX
    DATA[0][i] = MAX
    DATA[-1][i] = MAX

# Chain NB_IT convolution operators: each iteration consumes the output of the previous one.
source = pyLambdaFlows.Source()
operation = ConvOp(source, "./lambda_kernel.py", name="iteration_0")
for i in range(NB_IT - 1):
    operation = ConvOp(operation, "./lambda_kernel.py", name="iteration_{}".format(i + 1))

# Each kernel gathers GATHERING interior rows, so the interior must split evenly.
NB_KERNEL = (SIZE - 2) / GATHERING
if NB_KERNEL != int(NB_KERNEL):
    raise RuntimeError("Invalid parameters")
NB_KERNEL += 2
NB_KERNEL = int(NB_KERNEL)
print("Nb Kernel ", NB_KERNEL)
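# Illustrative only: a sketch of how the SIZE rows could be split across the kernels,
# assuming each interior kernel handles GATHERING rows and the two extra kernels hold
# the fixed border rows. It mirrors the NB_KERNEL computation above but is not the
# partitioning code actually used by ConvOp.
def split_rows(size, gathering):
    chunks = [[0]]                               # top border row
    for start in range(1, size - 1, gathering):  # interior rows, GATHERING at a time
        chunks.append(list(range(start, start + gathering)))
    chunks.append([size - 1])                    # bottom border row
    return chunks


chunks = split_rows(SIZE, GATHERING)
assert len(chunks) == NB_KERNEL                  # 102 kernels for SIZE=202, GATHERING=2
assert sum(len(c) for c in chunks) == SIZE       # every row is assigned exactly once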