def main():
    """Run a single-objective GPTune demo: tune parameter x for task values t.

    Parses CLI arguments, builds the task/parameter/output spaces, runs the
    selected tuner (GPTune / OpenTuner / HpBandSter), prints per-task optima,
    then plots the objective y(t, x) over x for a few fixed tasks t.

    Relies on module-level names defined elsewhere in this file:
    parse_args, GetMachineConfiguration, objectives, models, annot_min.
    """
    import matplotlib.pyplot as plt

    global nodes
    global cores

    # Parse command line arguments
    args = parse_args()
    ntask = args.ntask
    nrun = args.nrun
    TUNER_NAME = args.optimization
    perfmodel = args.perfmodel

    (machine, processor, nodes, cores) = GetMachineConfiguration()
    print("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME

    # Task t in [0, 10]; tuning parameter x in [0, 1]; unbounded objective y.
    input_space = Space([Real(0., 10., transform="normalize", name="t")])
    parameter_space = Space([Real(0., 1., transform="normalize", name="x")])
    # input_space = Space([Real(0., 0.0001, "uniform", "normalize", name="t")])
    # parameter_space = Space([Real(-1., 1., "uniform", "normalize", name="x")])
    output_space = Space([Real(float('-Inf'), float('Inf'), name="y")])
    constraints = {"cst1": "x >= 0. and x <= 1."}

    if (perfmodel == 1):
        problem = TuningProblem(input_space, parameter_space, output_space, objectives, constraints, models)  # with performance model
    else:
        problem = TuningProblem(input_space, parameter_space, output_space, objectives, constraints, None)  # no performance model
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    options = Options()
    options['model_restarts'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    options['objective_evaluation_parallelism'] = False
    options['objective_multisample_threads'] = 1
    options['objective_multisample_processes'] = 1
    options['objective_nprocmax'] = 1
    options['model_processes'] = 1
    # options['model_threads'] = 1
    # options['model_restart_processes'] = 1
    # options['search_multitask_processes'] = 1
    # options['search_multitask_threads'] = 1
    # options['search_threads'] = 16
    # options['mpi_comm'] = None
    # options['mpi_comm'] = mpi4py.MPI.COMM_WORLD
    options['model_class'] = 'Model_LCM'  # 'Model_GPy_LCM'
    options['verbose'] = False
    # options['sample_algo'] = 'MCS'
    # options['sample_class'] = 'SampleLHSMDU'
    options.validate(computer=computer)

    # giventask = [[6],[6.5]]
    giventask = [[i] for i in np.arange(0, ntask / 2, 0.5).tolist()]
    # BUG FIX: this override used to be the flat list [1.0, 1.0]. Tasks must be
    # a list of per-task input vectors (data.I[tid][0] below indexes into each,
    # and MLA's Igiven expects one list per task), so each value is wrapped.
    giventask = [[1.0], [1.0]]
    NI = len(giventask)
    NS = nrun

    TUNER_NAME = os.environ['TUNER_NAME']

    if (TUNER_NAME == 'GPTune'):
        data = Data(problem)
        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
        (data, modeler, stats) = gt.MLA(NS=NS, Igiven=giventask, NI=NI, NS1=int(NS / 2))
        # (data, modeler, stats) = gt.MLA(NS=NS, Igiven=giventask, NI=NI, NS1=NS-1)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" t:%f " % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'opentuner'):
        (data, stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" t:%f " % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'hpbandster'):
        (data, stats) = HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" t:%f " % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    # Plot the objective over x for a few fixed tasks t and save each figure.
    plot = 1
    if plot == 1:
        x = np.arange(0., 1., 0.00001)
        Nplot = 9.5  # upper bound for the (commented-out) linspace task sweep below
        # for t in np.linspace(0,Nplot,20):
        for t in [1, 2, 4, 6]:
            fig = plt.figure(figsize=[12.8, 9.6])
            I_orig = [t]
            kwargst = {input_space[k].name: I_orig[k] for k in range(len(input_space))}
            y = np.zeros([len(x), 1])
            for i in range(len(x)):
                P_orig = [x[i]]
                kwargs = {parameter_space[k].name: P_orig[k] for k in range(len(parameter_space))}
                kwargs.update(kwargst)
                y[i] = objectives(kwargs)
            fontsize = 30
            plt.rcParams.update({'font.size': 21})
            plt.plot(x, y, 'b')
            plt.xlabel('x', fontsize=fontsize + 2)
            plt.ylabel('y(t,x)', fontsize=fontsize + 2)
            plt.title('t=%d' % t, fontsize=fontsize + 2)
            print('t:', t, 'x:', x[np.argmin(y)], 'ymin:', y.min())
            annot_min(x, y)
            # plt.show()
            # plt.show(block=False)
            # fig.savefig('obj_t_%d.eps'%t)
            fig.savefig('obj_t_%d.pdf' % t)
def main():
    """Tune the operating frequency for electromagnetic cavity geometry models.

    Builds a single-task tuning problem (task = geometry model name, parameter
    = frequency) and runs the selected tuner, printing per-task optima.

    Relies on module-level names defined elsewhere in this file:
    parse_args, objectives, OpenTuner, HpBandSter, and the GPTune classes.
    """
    global ROOTDIR
    global nodes
    global cores
    global target
    global nprocmax
    global nprocmin

    # Parse command line arguments
    args = parse_args()

    # Extract arguments
    # mmax = args.mmax
    # nmax = args.nmax
    ntask = args.ntask
    nodes = args.nodes
    cores = args.cores
    nprocmin_pernode = args.nprocmin_pernode
    machine = args.machine
    optimization = args.optimization
    nruns = args.nruns
    truns = args.truns
    # JOBID = args.jobid
    TUNER_NAME = args.optimization
    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME

    nprocmax = nodes * cores - 1  # YL: there is one proc doing spawning, so nodes*cores should be at least 2
    nprocmin = min(nodes * nprocmin_pernode, nprocmax - 1)  # YL: ensure strictly nprocmin<nprocmax, required by the Integer space

    # Task parameters: the candidate geometry models.
    geomodels = ["cavity_5cell_30K_feko", "pillbox_4000", "pillbox_1000", "cavity_wakefield_4K_feko", "cavity_rec_5K_feko", "cavity_rec_17K_feko"]
    # geomodels = ["cavity_wakefield_4K_feko"]
    model = Categoricalnorm(geomodels, transform="onehot", name="model")

    # Input parameters
    # the frequency resolution is 100Khz
    # freq = Integer (22000, 23500, transform="normalize", name="freq")
    freq = Integer(6320, 6430, transform="normalize", name="freq")
    # freq = Integer (21000, 22800, transform="normalize", name="freq")
    # freq = Integer (11400, 12000, transform="normalize", name="freq")
    # freq = Integer (500, 900, transform="normalize", name="freq")
    result1 = Real(float("-Inf"), float("Inf"), name="r1")
    result2 = Real(float("-Inf"), float("Inf"), name="r2")

    IS = Space([model])
    PS = Space([freq])
    # OS = Space([result1,result2])
    OS = Space([result1])

    constraints = {}
    models = {}

    """ Print all input and parameter samples """
    print(IS, PS, OS, constraints, models)

    problem = TuningProblem(IS, PS, OS, objectives, constraints, None)
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    """ Set and validate options """
    options = Options()
    options['model_processes'] = 1
    # options['model_threads'] = 1
    options['model_restarts'] = 1
    # options['search_multitask_processes'] = 1
    # options['model_restart_processes'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    # BUG FIX: the key used to be 'model_class ' (trailing space), so this
    # assignment never reached the real 'model_class' option and the intended
    # Model_LCM surrogate was silently ignored.
    options['model_class'] = 'Model_LCM'  # 'Model_GPy_LCM'
    options['verbose'] = False
    # options['search_algo'] = 'nsga2' #'maco' #'moead' #'nsga2' #'nspso'
    # options['search_pop_size'] = 1000 # 1000
    # options['search_gen'] = 10
    options.validate(computer=computer)

    # """ Intialize the tuner with existing data stored as last check point"""
    # try:
    #     data = pickle.load(open('Data_nodes_%d_cores_%d_nprocmin_pernode_%d_tasks_%s_machine_%s.pkl' % (nodes, cores, nprocmin_pernode, geomodels, machine), 'rb'))
    #     giventask = data.I
    # except (OSError, IOError) as e:
    #     data = Data(problem)
    #     giventask = [[np.random.choice(geomodels,size=1)[0]] for i in range(ntask)]

    # """ Building MLA with the given list of tasks """
    # giventask = [["big.rua"]]
    # giventask = [["pillbox_4000"]]
    giventask = [["cavity_5cell_30K_feko"]]
    # giventask = [["cavity_rec_17K_feko"]]
    # giventask = [["cavity_wakefield_4K_feko"]]
    data = Data(problem)

    if (TUNER_NAME == 'GPTune'):
        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
        NI = len(giventask)
        NS = nruns
        (data, model, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=max(NS // 2, 1))
        print("stats: ", stats)

        # """ Dump the data to file as a new check point """
        # pickle.dump(data, open('Data_nodes_%d_cores_%d_nprocmin_pernode_%d_tasks_%s_machine_%s.pkl' % (nodes, cores, nprocmin_pernode, matrices, machine), 'wb'))
        # """ Dump the tuner to file for TLA use """
        # pickle.dump(gt, open('MLA_nodes_%d_cores_%d_nprocmin_pernode_%d_tasks_%s_machine_%s.pkl' % (nodes, cores, nprocmin_pernode, matrices, machine), 'wb'))

        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" matrix:%s" % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            OL = np.asarray([o[0] for o in data.O[tid]], dtype=np.float64)
            np.set_printoptions(suppress=False, precision=8)
            print(" Os ", OL)
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
            # ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(data.O[tid])
            # front = ndf[0]
            # # print('front id: ',front)
            # fopts = data.O[tid][front]
            # xopts = [data.P[tid][i] for i in front]
            # print('    Popts ', xopts)
            # print('    Oopts ', fopts)

    if (TUNER_NAME == 'opentuner'):
        NI = ntask
        NS = nruns
        (data, stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" matrix:%s" % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'hpbandster'):
        NI = ntask
        NS = nruns
        (data, stats) = HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" matrix:%s" % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
def main():
    """Tune GCN hyperparameters (lr, hidden size, weight decay, dropout) with
    multi-fidelity / bandit tuners.

    Supports five tuners selected by -optimization: GPTune, opentuner, TPE
    (single-fidelity HpBandSter), GPTuneBand (GPTune multi-armed bandit), and
    hpbandster (multi-fidelity). Each branch appends a summary to the same
    GCN_*.txt results file.

    Relies on module-level names defined elsewhere in this file: parse_args,
    GetMachineConfiguration, objectives, ot (openturns), OpenTuner, HpBandSter,
    HpBandSter_bandit, GPTune_MB, and the GPTune classes.
    """
    (machine, processor, nodes, cores) = GetMachineConfiguration()
    print("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))

    # Parse command line arguments
    args = parse_args()
    bmin = args.bmin
    device = args.device
    bmax = args.bmax
    eta = args.eta
    nrun = args.nrun
    npernode = args.npernode
    ntask = args.ntask
    Nloop = args.Nloop
    restart = args.restart
    TUNER_NAME = args.optimization
    # Seed OpenTURNS' RNG for reproducible sampling.
    ot.RandomGenerator.SetSeed(args.seed)
    TLA = False
    print(args)

    # NOTE(review): the machine configuration line is printed twice (also above).
    print("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME

    # Task = dataset name; parameters = GCN hyperparameters; output = validation loss.
    dataset = Categoricalnorm(['cora', 'citeseer'], transform="onehot", name="dataset")
    lr = Real(1e-5, 1e-2, name="lr")
    hidden = Integer(4, 64, transform="normalize", name="hidden")
    weight_decay = Real(1e-5, 1e-2, name="weight_decay")
    dropout = Real(0.1, 0.9, name="dropout")
    validation_loss = Real(0., 1., name="validation_loss")

    IS = Space([dataset])
    PS = Space([weight_decay, hidden, lr, dropout])
    OS = Space([validation_loss])

    constraints = {}
    # Constants forwarded to the objective function alongside each sample.
    constants = {"nodes": nodes, "cores": cores, "npernode": npernode, "bmin": bmin, "bmax": bmax, "eta": eta, "device": device}

    print(IS, PS, OS, constraints)

    problem = TuningProblem(IS, PS, OS, objectives, constraints, constants=constants)
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    options = Options()
    options['model_processes'] = 4  # parallel cholesky for each LCM kernel
    # options['model_threads'] = 1
    # options['model_restarts'] = args.Nrestarts
    # options['distributed_memory_parallelism'] = False  # parallel model restart
    options['model_restarts'] = restart
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    # options['mpi_comm'] = None
    options['model_class'] = 'Model_LCM'  # Model_GPy_LCM or Model_LCM
    options['verbose'] = False
    options['sample_class'] = 'SampleOpenTURNS'

    # Hyperband-style budget ladder: smax geometric levels between bmin and bmax.
    options['budget_min'] = bmin
    options['budget_max'] = bmax
    options['budget_base'] = eta
    smax = int(np.floor(np.log(options['budget_max'] / options['budget_min']) / np.log(options['budget_base'])))
    budgets = [options['budget_max'] / options['budget_base']**x for x in range(smax + 1)]
    NSs = [int((smax + 1) / (s + 1)) * options['budget_base']**s for s in range(smax + 1)]
    NSs_all = NSs.copy()
    budget_all = budgets.copy()
    # Expand each bracket with its successive-halving stages (fewer samples at
    # higher budgets).
    for s in range(smax + 1):
        for n in range(s):
            NSs_all.append(int(NSs[s] / options['budget_base']**(n + 1)))
            budget_all.append(int(budgets[s] * options['budget_base']**(n + 1)))
    Ntotal = int(sum(NSs_all) * Nloop)
    Btotal = int(np.dot(np.array(NSs_all), np.array(budget_all)) / options['budget_max'] * Nloop)  # total number of evaluations at highest budget -- used for single-fidelity tuners
    print(f"bmin = {bmin}, bmax = {bmax}, eta = {eta}, smax = {smax}")
    print("samples in one multi-armed bandit loop, NSs_all = ", NSs_all)
    print("total number of samples: ", Ntotal)
    print("total number of evaluations at highest budget: ", Btotal)
    print()

    options.validate(computer=computer)

    data = Data(problem)
    # giventask = [[0.2, 0.5]]
    # One task per '-'-separated dataset name, e.g. "cora-citeseer" -> 2 tasks.
    giventask = []
    dataset_list = args.dataset.split('-')
    for dataset in dataset_list:
        giventask.append([dataset])
    NI = len(giventask)
    assert NI == ntask  # make sure number of tasks match

    if (TUNER_NAME == 'GPTune'):
        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
        """ Building MLA with the given list of tasks """
        # Single-fidelity GPTune gets the bandit-equivalent evaluation budget
        # unless -nrun overrides it.
        NS = Btotal
        if args.nrun > 0:
            NS = args.nrun
        NS1 = max(NS // 2, 1)
        (data, model, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=NS1)
        print("Tuner: ", TUNER_NAME)
        print("stats: ", stats)
        results_file = open(f"GCN_{args.dataset}_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.txt", "a")
        results_file.write(f"Tuner: {TUNER_NAME}\n")
        results_file.write(f"stats: {stats}\n")
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(f" dataset = {data.I[tid][0]}")
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
            results_file.write(f"tid: {tid:d}\n")
            results_file.write(f" dataset = {data.I[tid][0]}\n")
            results_file.write(f" Os {data.O[tid].tolist()}\n")
            # results_file.write(f' Popt {data.P[tid][np.argmin(data.O[tid])]} Oopt {-min(data.O[tid])[0]} nth {np.argmin(data.O[tid])}\n')
        results_file.close()

    if (TUNER_NAME == 'opentuner'):
        NS = Btotal
        (data, stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("Tuner: ", TUNER_NAME)
        print("stats: ", stats)
        results_file = open(f"GCN_{args.dataset}_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.txt", "a")
        results_file.write(f"Tuner: {TUNER_NAME}\n")
        results_file.write(f"stats: {stats}\n")
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(f" dataset = {data.I[tid][0]}")
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            # Only the first NS samples are considered (OpenTuner may return more).
            print(' Popt ', data.P[tid][np.argmin(data.O[tid][:NS])], 'Oopt ', min(data.O[tid][:NS])[0], 'nth ', np.argmin(data.O[tid][:NS]))
            results_file.write(f"tid: {tid:d}\n")
            results_file.write(f" dataset = {data.I[tid][0]}\n")
            # results_file.write(f" Ps {data.P[tid][:NS]}\n")
            results_file.write(f" Os {data.O[tid][:NS].tolist()}\n")
            # results_file.write(f' Popt {data.P[tid][np.argmin(data.O[tid])]} Oopt {-min(data.O[tid])[0]} nth {np.argmin(data.O[tid])}\n')
        results_file.close()

    # single-fidelity version of hpbandster
    if (TUNER_NAME == 'TPE'):
        NS = Btotal
        (data, stats) = HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, options=options, run_id="HpBandSter", niter=1)
        print("Tuner: ", TUNER_NAME)
        print("stats: ", stats)
        results_file = open(f"GCN_{args.dataset}_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.txt", "a")
        results_file.write(f"Tuner: {TUNER_NAME}\n")
        results_file.write(f"stats: {stats}\n")
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(f" dataset = {data.I[tid][0]}")
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
            results_file.write(f"tid: {tid:d}\n")
            results_file.write(f" dataset = {data.I[tid][0]}\n")
            results_file.write(f" Os {data.O[tid].tolist()}\n")
            # results_file.write(f' Popt {data.P[tid][np.argmin(data.O[tid])]} Oopt {-min(data.O[tid])[0]} nth {np.argmin(data.O[tid])}\n')
        results_file.close()

    if (TUNER_NAME == 'GPTuneBand'):
        data = Data(problem)
        gt = GPTune_MB(problem, computer=computer, NS=Nloop, options=options)
        (data, stats, data_hist) = gt.MB_LCM(NS=Nloop, Igiven=giventask)
        print("Tuner: ", TUNER_NAME)
        print("stats: ", stats)
        results_file = open(f"GCN_{args.dataset}_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.txt", "a")
        results_file.write(f"Tuner: {TUNER_NAME}\n")
        results_file.write(f"stats: {stats}\n")
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(f" dataset = {data.I[tid][0]}")
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            nth = np.argmin(data.O[tid])
            Popt = data.P[tid][nth]
            # find which arm and which sample the optimal param is from
            # NOTE(review): arm_opt/idx are computed but never used afterwards.
            for arm in range(len(data_hist.P)):
                try:
                    idx = (data_hist.P[arm]).index(Popt)
                    arm_opt = arm
                except ValueError:
                    pass
            print(' Popt ', Popt, 'Oopt ', min(data.O[tid])[0], 'nth ', nth)
            results_file.write(f"tid: {tid:d}\n")
            results_file.write(f" dataset = {data.I[tid][0]}\n")
            # results_file.write(f" Ps {data.P[tid]}\n")
            results_file.write(f" Os {data.O[tid].tolist()}\n")
            # results_file.write(f' Popt {data.P[tid][np.argmin(data.O[tid])]} Oopt {-min(data.O[tid])[0]} nth {np.argmin(data.O[tid])}\n')
        results_file.close()

    # multi-fidelity version
    if (TUNER_NAME == 'hpbandster'):
        NS = Ntotal
        (data, stats) = HpBandSter_bandit(T=giventask, NS=NS, tp=problem, computer=computer, options=options, run_id="hpbandster_bandit", niter=1)
        print("Tuner: ", TUNER_NAME)
        print("stats: ", stats)
        results_file = open(f"GCN_{args.dataset}_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.txt", "a")
        results_file.write(f"Tuner: {TUNER_NAME}\n")
        results_file.write(f"stats: {stats}\n")
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(f" dataset = {data.I[tid][0]}")
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            # print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
            # Each entry of data.O[tid] holds (budget, loss) sub-results; pick the
            # best loss observed at the highest budget reached.
            max_budget = 0.
            Oopt = 99999
            Popt = None
            nth = None
            for idx, (config, out) in enumerate(zip(data.P[tid], data.O[tid].tolist())):
                for subout in out[0]:
                    budget_cur = subout[0]
                    if budget_cur > max_budget:
                        max_budget = budget_cur
                        Oopt = subout[1]
                        Popt = config
                        nth = idx
                    elif budget_cur == max_budget:
                        if subout[1] < Oopt:
                            Oopt = subout[1]
                            Popt = config
                            nth = idx
            print(' Popt ', Popt, 'Oopt ', Oopt, 'nth ', nth)
            results_file.write(f"tid: {tid:d}\n")
            results_file.write(f" dataset = {data.I[tid][0]}\n")
            # results_file.write(f" Ps {data.P[tid]}\n")
            results_file.write(f" Os {data.O[tid].tolist()}\n")
            # results_file.write(f' Popt {data.P[tid][np.argmin(data.O[tid])]} Oopt {-min(data.O[tid])[0]} nth {np.argmin(data.O[tid])}\n')
        results_file.close()
def main():
    """Tune STRUMPACK-style sparse-solver parameters for a given grid size.

    Task = gridsize; parameters = reordering method, compression scheme, and
    related size/tolerance knobs; objective = scalar result r. Runs the tuner
    selected by -optimization (GPTune / opentuner / hpbandster).

    Relies on module-level names defined elsewhere in this file: parse_args,
    GetMachineConfiguration, objectives, OpenTuner, HpBandSter, and the
    GPTune classes.
    """
    # Parse command line arguments
    args = parse_args()

    # Extract arguments
    ntask = args.ntask
    nprocmin_pernode = args.nprocmin_pernode
    optimization = args.optimization
    nrun = args.nrun

    TUNER_NAME = args.optimization
    (machine, processor, nodes, cores) = GetMachineConfiguration()
    print("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME

    # Task parameters
    gridsize = Integer(30, 100, transform="normalize", name="gridsize")

    # Input parameters
    sp_reordering_method = Categoricalnorm(['metis', 'parmetis', 'geometric'], transform="onehot", name="sp_reordering_method")
    # sp_reordering_method = Categoricalnorm (['metis','geometric'], transform="onehot", name="sp_reordering_method")
    # sp_compression = Categoricalnorm (['none','hss'], transform="onehot", name="sp_compression")
    # sp_compression = Categoricalnorm (['none','hss','hodlr','hodbf'], transform="onehot", name="sp_compression")
    sp_compression = Categoricalnorm(['none', 'hss', 'hodlr', 'hodbf', 'blr'], transform="onehot", name="sp_compression")
    # npernode is searched as a log2 exponent (bounds are log2 of the process counts).
    npernode = Integer(int(math.log2(nprocmin_pernode)), int(math.log2(cores)), transform="normalize", name="npernode")
    sp_nd_param = Integer(8, 32, transform="normalize", name="sp_nd_param")
    # NOTE(review): the small ranges below presumably encode exponents that the
    # objective expands (e.g. 2**v) — confirm against the objective function.
    sp_compression_min_sep_size = Integer(2, 5, transform="normalize", name="sp_compression_min_sep_size")
    sp_compression_min_front_size = Integer(4, 10, transform="normalize", name="sp_compression_min_front_size")
    sp_compression_leaf_size = Integer(5, 9, transform="normalize", name="sp_compression_leaf_size")
    sp_compression_rel_tol = Integer(-6, -1, transform="normalize", name="sp_compression_rel_tol")
    result = Real(float("-Inf"), float("Inf"), name="r")

    IS = Space([gridsize])
    PS = Space([sp_reordering_method, sp_compression, sp_nd_param, sp_compression_min_sep_size, sp_compression_min_front_size, sp_compression_leaf_size, sp_compression_rel_tol, npernode])
    OS = Space([result])
    constraints = {}
    models = {}
    constants = {"nodes": nodes, "cores": cores}

    """ Print all input and parameter samples """
    print(IS, PS, OS, constraints, models)

    problem = TuningProblem(IS, PS, OS, objectives, constraints, None, constants=constants)
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    """ Set and validate options """
    options = Options()
    options['model_processes'] = 1
    # options['model_threads'] = 1
    options['model_restarts'] = 1
    # options['search_multitask_processes'] = 1
    # options['model_restart_processes'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    options['model_class'] = 'Model_LCM'  # 'Model_GPy_LCM'
    options['verbose'] = False
    options.validate(computer=computer)

    # """ Building MLA with the given list of tasks """
    giventask = [[30]]
    data = Data(problem)

    if (TUNER_NAME == 'GPTune'):
        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
        NI = len(giventask)
        NS = nrun
        (data, model, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=max(NS // 2, 1))
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            # NOTE(review): the "matrix" label is a leftover — the task value
            # here is the gridsize.
            print(" matrix:%s" % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'opentuner'):
        NI = ntask
        NS = nrun
        (data, stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" matrix:%s" % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'hpbandster'):
        NI = ntask
        NS = nrun
        (data, stats) = HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" matrix:%s" % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
def main():
    """Tune a regression smoother (knot count k, penalty l) on one wine-quality
    attribute.

    Loads the winequality train/test files into module-level X/Y globals (read
    by the objective function elsewhere in this file), then runs the tuner
    selected by -optimization with the attribute name as the single task.

    Relies on module-level names defined elsewhere in this file: parse_args,
    GetMachineConfiguration, objectives, OpenTuner, HpBandSter, and the
    GPTune classes.
    """
    import matplotlib.pyplot as plt

    global nodes
    global cores

    # Parse command line arguments
    args = parse_args()

    global nrun
    nrun = args.nrun
    global attribute
    attribute = args.attribute

    TUNER_NAME = args.optimization
    (machine, processor, nodes, cores) = GetMachineConfiguration()
    print ("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME

    # Task = which wine attribute to regress on; parameters = (k, l).
    attributes = ["fixed_acidity", "volatile_acidity", "citric_acid", "residual_sugar", "chlorides", "free_sulfur_dioxide", "total_sulfur_dioxide", "density", "pH", "sulphates", "alcohol"]
    t = Categoricalnorm (attributes, transform="onehot", name="t")
    #t = Real(0., 10., transform="normalize", name="t")
    k = Integer(4, 13, transform="normalize", name="k")
    l = Real(0., 1., transform="normalize", name="l")
    o = Real(float('-Inf'), float('Inf'), name="o")

    IS = Space([t])
    PS = Space([k, l])
    OS = Space([o])
    constraints = {}
    problem = TuningProblem(IS, PS, OS, objectives, constraints, None)
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    options = Options()
    options['model_restarts'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    options['objective_evaluation_parallelism'] = False
    options['objective_multisample_threads'] = 1
    options['objective_multisample_processes'] = 1
    options['objective_nprocmax'] = 1
    options['model_processes'] = 1
    #options['model_class'] = 'Model_LCM'
    options['model_class'] = 'Model_GPy_LCM'
    options['verbose'] = False
    options.validate(computer=computer)

    giventask = [[attribute]]

    # Globals consumed by the objective function defined elsewhere in this file.
    global X_train
    global Y_train
    global X_test
    global Y_test
    X_train = []
    Y_train = []
    X_test = []
    Y_test = []

    # Create the log directory and write the CSV header for this run.
    if not os.path.exists("gptune-search-gpy.db"):
        os.system("mkdir -p gptune-search-gpy.db")
    with open("gptune-search-gpy.db/winequality."+str(nrun)+"."+attribute+".log", "w") as f_out:
        f_out.write("NKnots,Lambda,RegressionTime,InTestTime,InMSE,InR2,InAR2,OutTestTime,OutMSE,OutR2,OutAR2\n")

    # Load the selected attribute column (space-separated files, header skipped).
    idx = attributes.index(attribute)
    with open("winequality/wine_train.txt", "r") as f_in:
        f_in.readline()
        for dataline in f_in.readlines():
            data = dataline.split(" ")
            X_train.append(float(data[idx]))
    with open("winequality/score_train.txt", "r") as f_in:
        f_in.readline()
        for dataline in f_in.readlines():
            data = dataline.split(" ")
            Y_train.append(float(data[0]))
    with open("winequality/wine_test.txt", "r") as f_in:
        f_in.readline()
        for dataline in f_in.readlines():
            data = dataline.split(" ")
            X_test.append(float(data[idx]))
    with open("winequality/score_test.txt", "r") as f_in:
        f_in.readline()
        for dataline in f_in.readlines():
            data = dataline.split(" ")
            Y_test.append(float(data[0]))

    NI=len(giventask)
    NS=nrun

    TUNER_NAME = os.environ['TUNER_NAME']

    if(TUNER_NAME=='GPTune'):
        data = Data(problem)
        gt = GPTune(problem, computer=computer, data=data, options=options,driverabspath=os.path.abspath(__file__))
        (data, modeler, stats) = gt.MLA(NS=NS, Igiven=giventask, NI=NI, NS1=int(NS/2))
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" t:%s " % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if(TUNER_NAME=='opentuner'):
        (data,stats)=OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" t:%s " % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if(TUNER_NAME=='hpbandster'):
        (data,stats)=HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" t:%s " % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
def main():
    """Tune a regression smoother (knot count k, penalty l) on the household
    power-consumption time series.

    Loads the first 1,000,000 records as training data and the following
    100,000 as test data into module-level X/Y globals (read by the objective
    function elsewhere in this file), then runs the tuner selected by
    -optimization with the fixed task name 'Time'.

    Relies on module-level names defined elsewhere in this file: parse_args,
    GetMachineConfiguration, objectives, OpenTuner, HpBandSter, and the
    GPTune classes.
    """
    import matplotlib.pyplot as plt

    global nodes
    global cores

    # Parse command line arguments
    args = parse_args()

    global nrun
    nrun = args.nrun
    global attribute
    attribute = 'Time'

    TUNER_NAME = args.optimization
    (machine, processor, nodes, cores) = GetMachineConfiguration()
    print("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME

    # Single fixed task; parameters = (k, l); unbounded objective o.
    t = Categoricalnorm(["Time"], transform="onehot", name="t")
    #t = Real(0., 10., transform="normalize", name="t")
    k = Integer(5, 105, transform="normalize", name="k")
    l = Real(0., 1., transform="normalize", name="l")
    o = Real(float('-Inf'), float('Inf'), name="o")

    IS = Space([t])
    PS = Space([k, l])
    OS = Space([o])
    constraints = {}
    problem = TuningProblem(IS, PS, OS, objectives, constraints, None)
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    options = Options()
    options['model_restarts'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    options['objective_evaluation_parallelism'] = False
    options['objective_multisample_threads'] = 1
    options['objective_multisample_processes'] = 1
    options['objective_nprocmax'] = 1
    options['model_processes'] = 1
    options['model_class'] = 'Model_LCM'
    #options['model_class'] = 'Model_GPy_LCM'
    options['verbose'] = False
    options.validate(computer=computer)

    giventask = [[attribute]]

    # Globals consumed by the objective function defined elsewhere in this file.
    global X_train
    global Y_train
    global X_test
    global Y_test
    X_train = []
    Y_train = []
    X_test = []
    Y_test = []

    # Create the log directory and write the CSV header for this run.
    if not os.path.exists("gptune-search-lcm.db"):
        os.system("mkdir -p gptune-search-lcm.db")
    with open("gptune-search-lcm.db/household." + str(nrun) + "." + attribute + ".log", "w") as f_out:
        f_out.write("NKnots,Lambda,RegressionTime,InTestTime,InMSE,InR2,InAR2,OutTestTime,OutMSE,OutR2,OutAR2\n")

    from datetime import datetime

    # Dataset's first timestamp; sample times are minutes since this instant.
    start_dt = datetime(2006, 12, 16, 17, 24, 00)
    start_time_val = int(round(start_dt.timestamp()))

    def _parse_record(line):
        """Parse one ';'-separated record of the power-consumption file.

        Returns (minutes_since_start, sub_metering_3 + epsilon), or None for a
        malformed record (missing fields or '?' placeholders).
        """
        fields = line.split(";")
        try:
            date_split = fields[0].split("/")
            time_split = fields[1].split(":")
            dt = datetime(int(date_split[2]), int(date_split[1]), int(date_split[0]),
                          int(time_split[0]), int(time_split[1]), int(time_split[2]))
            time_val = (int(round(dt.timestamp())) - start_time_val) / 60
            # Small epsilon keeps the target strictly positive (original behavior).
            return (time_val, float(fields[-1]) + 0.00001)
        except (ValueError, IndexError):
            return None

    with open("household/household_power_consumption.txt", "r") as f_in:
        f_in.readline()  # skip the header line
        datalines = f_in.readlines()
        traindata_arr = datalines[0:1000000]
        testdata_arr = datalines[1000000:1100000]

        wrong_data_cnt = 0
        for traindata in traindata_arr:
            parsed = _parse_record(traindata)
            if parsed is None:
                wrong_data_cnt += 1
            else:
                X_train.append(parsed[0])
                Y_train.append(parsed[1])
        print("wrong_data_cnt (train): ", wrong_data_cnt)

        wrong_data_cnt = 0
        for testdata in testdata_arr:
            # BUG FIX: this loop previously parsed `traindata` (the last train
            # record) for every test record, so the whole test set was 100000
            # copies of one training sample. Parse the test record instead.
            parsed = _parse_record(testdata)
            if parsed is None:
                wrong_data_cnt += 1
            else:
                X_test.append(parsed[0])
                Y_test.append(parsed[1])
        print("wrong_data_cnt (test): ", wrong_data_cnt)

    NI = len(giventask)
    NS = nrun

    TUNER_NAME = os.environ['TUNER_NAME']

    if (TUNER_NAME == 'GPTune'):
        data = Data(problem)
        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
        (data, modeler, stats) = gt.MLA(NS=NS, Igiven=giventask, NI=NI, NS1=int(NS / 2))
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" t:%s " % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'opentuner'):
        (data, stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" t:%s " % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'hpbandster'):
        (data, stats) = HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" t:%s " % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
def main():
    """Multi-fidelity (multi-armed-bandit) tuning driver for a hypre-style AMG solver.

    Tasks are points (a_val, c_val); tuning parameters are the process grid
    (Px, Py, Nproc) plus AMG knobs.  Dispatches on --optimization to one of:
    GPTune, opentuner, TPE (single-fidelity HpBandSter), GPTuneBand,
    GPTuneBand_single, or hpbandster (multi-fidelity HpBandSter_bandit),
    then prints every sampled configuration and the optimum found.
    """
    (machine, processor, nodes, cores) = GetMachineConfiguration()
    print ("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))

    # Parse command line arguments
    args = parse_args()
    bmin = args.bmin                          # minimum fidelity budget
    bmax = args.bmax                          # maximum fidelity budget
    eta = args.eta                            # bandit halving base
    amin = args.amin
    amax = args.amax
    cmin = args.cmin
    cmax = args.cmax
    nprocmin_pernode = args.nprocmin_pernode
    ntask = args.ntask
    Nloop = args.Nloop                        # number of bandit loops
    restart = args.restart
    TUNER_NAME = args.optimization
    TLA = False                               # transfer-learning step disabled
    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME
    # os.system("mkdir -p scalapack-driver/bin/%s; cp ../build/pdqrdriver scalapack-driver/bin/%s/.;" %(machine, machine))

    nprocmax = nodes*cores
    nprocmin = nodes*nprocmin_pernode

    # Task (input) space.
    a_val = Real(amin, amax, transform="normalize", name="a_val")
    c_val = Real(cmin, cmax, transform="normalize", name="c_val")

    # Tuning-parameter space: process grid + AMG solver knobs.
    Px = Integer(1, nprocmax, transform="normalize", name="Px")
    Py = Integer(1, nprocmax, transform="normalize", name="Py")
    Nproc = Integer(nprocmin, nprocmax, transform="normalize", name="Nproc")
    strong_threshold = Real(0, 1, transform="normalize", name="strong_threshold")
    trunc_factor = Real(0, 0.999, transform="normalize", name="trunc_factor")
    P_max_elmts = Integer(1, 12, transform="normalize", name="P_max_elmts")
    # coarsen_type = Categoricalnorm (['0', '1', '2', '3', '4', '6', '8', '10'], transform="onehot", name="coarsen_type")
    coarsen_type = Categoricalnorm (['0', '1', '2', '3', '4', '8', '10'], transform="onehot", name="coarsen_type")
    relax_type = Categoricalnorm (['-1', '0', '6', '8', '16', '18'], transform="onehot", name="relax_type")
    smooth_type = Categoricalnorm (['5', '6', '8', '9'], transform="onehot", name="smooth_type")
    smooth_num_levels = Integer(0, 5, transform="normalize", name="smooth_num_levels")
    interp_type = Categoricalnorm (['0', '3', '4', '5', '6', '8', '12'], transform="onehot", name="interp_type")
    agg_num_levels = Integer(0, 5, transform="normalize", name="agg_num_levels")
    # Output space: unbounded scalar objective.
    r = Real(float("-Inf"), float("Inf"), name="r")

    IS = Space([a_val, c_val])
    PS = Space([Px, Py, Nproc, strong_threshold, trunc_factor, P_max_elmts, coarsen_type, relax_type, smooth_type, smooth_num_levels, interp_type, agg_num_levels])
    OS = Space([r])

    # cst1: the 2-D process grid must fit in the allocated processes.
    # cst2: excludes one specific known-bad parameter combination.
    cst1 = f"Px * Py <= Nproc"
    cst2 = f"not(P_max_elmts==10 and coarsen_type=='6' and relax_type=='18' and smooth_type=='6' and smooth_num_levels==3 and interp_type=='8' and agg_num_levels==1)"
    constraints = {"cst1": cst1,"cst2": cst2}
    constants={"nodes":nodes,"cores":cores,"bmin":bmin,"bmax":bmax,"eta":eta}
    print(IS, PS, OS, constraints)

    problem = TuningProblem(IS, PS, OS, objectives, constraints, constants=constants)
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    options = Options()
    options['model_processes'] = 4 # parallel cholesky for each LCM kernel
    # options['model_threads'] = 1
    # options['model_restarts'] = args.Nrestarts
    # options['distributed_memory_parallelism'] = False # parallel model restart
    options['model_restarts'] = restart
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    # options['mpi_comm'] = None
    options['model_class'] = 'Model_LCM' # Model_GPy_LCM or Model_LCM
    options['verbose'] = False
    # choose sampler
    # options['sample_class'] = 'SampleOpenTURNS'
    if args.lhs == 1:
        options['sample_class'] = 'SampleLHSMDU'
        options['sample_algo'] = 'LHS-MDU'
    options.validate(computer=computer)

    # Bandit budget schedule: smax+1 brackets, geometric budgets, plus the
    # successive-halving sub-rounds of each bracket appended to NSs_all.
    options['budget_min'] = bmin
    options['budget_max'] = bmax
    options['budget_base'] = eta
    smax = int(np.floor(np.log(options['budget_max']/options['budget_min'])/np.log(options['budget_base'])))
    budgets = [options['budget_max'] /options['budget_base']**x for x in range(smax+1)]
    NSs = [int((smax+1)/(s+1))*options['budget_base']**s for s in range(smax+1)]
    NSs_all = NSs.copy()
    budget_all = budgets.copy()
    for s in range(smax+1):
        for n in range(s):
            NSs_all.append(int(NSs[s]/options['budget_base']**(n+1)))
            budget_all.append(int(budgets[s]*options['budget_base']**(n+1)))
    Ntotal = int(sum(NSs_all) * Nloop)
    Btotal = int(np.dot(np.array(NSs_all), np.array(budget_all))/options['budget_max'] * Nloop) # total number of evaluations at highest budget -- used for single-fidelity tuners
    print(f"bmin = {bmin}, bmax = {bmax}, eta = {eta}, smax = {smax}")
    print("samples in one multi-armed bandit loop, NSs_all = ", NSs_all)
    print("total number of samples: ", Ntotal)
    print("total number of evaluations at highest budget: ", Btotal)
    print()

    data = Data(problem)
    # Random tasks for ntask > 1; each random() call below consumes RNG state,
    # so statement order here determines the generated task list.
    giventask = [[(amax-amin)*random()+amin,(cmax-cmin)*random()+cmin] for i in range(ntask)]
    # giventask = [[0.2, 0.5]]
    if ntask == 1:
        giventask = [[args.a, args.c]]
    NI=len(giventask)
    assert NI == ntask # make sure number of tasks match

    # # the following will use only task lists stored in the pickle file
    # data = Data(problem)

    if(TUNER_NAME=='GPTune'):
        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
        """ Building MLA with the given list of tasks """
        NS = Btotal
        if args.nrun > 0:
            NS = args.nrun
        NS1 = max(NS//2, 1)
        (data, model, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=NS1)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(f" [a_val, c_val] = [{data.I[tid][0]:.3f}, {data.I[tid][1]:.3f}]")
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
        if TLA is True:
            """ Call TLA for 2 new tasks using the constructed LCM model"""
            newtask = [[0.5, 0.3], [0.2, 1.0]]
            (aprxopts, objval, stats) = gt.TLA1(newtask, NS=None)
            print("stats: ", stats)
            """ Print the optimal parameters and function evaluations"""
            for tid in range(len(newtask)):
                print("new task: %s" % (newtask[tid]))
                print(' predicted Popt: ', aprxopts[tid], ' objval: ', objval[tid])

    if(TUNER_NAME=='opentuner'):
        NS = Btotal
        (data,stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(f" [a_val, c_val] = [{data.I[tid][0]:.3f}, {data.I[tid][1]:.3f}]")
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            # OpenTuner may record more than NS evaluations; only the first NS count.
            print(' Popt ', data.P[tid][np.argmin(data.O[tid][:NS])], 'Oopt ', min(data.O[tid][:NS])[0], 'nth ', np.argmin(data.O[tid][:NS]))

    # single-fidelity version of hpbandster
    if(TUNER_NAME=='TPE'):
        NS = Btotal
        (data,stats)=HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(f" [a_val, c_val] = [{data.I[tid][0]:.3f}, {data.I[tid][1]:.3f}]")
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if(TUNER_NAME=='GPTuneBand'):
        data = Data(problem)
        gt = GPTune_MB(problem, computer=computer, NS=Nloop, options=options)
        (data, stats, data_hist)=gt.MB_LCM(NS = Nloop, Igiven = giventask)
        print("Tuner: ", TUNER_NAME)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(f" [a_val, c_val] = [{data.I[tid][0]:.3f}, {data.I[tid][1]:.3f}]")
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            nth = np.argmin(data.O[tid])
            Popt = data.P[tid][nth]
            # find which arm and which sample the optimal param is from
            # NOTE(review): if Popt is found in no arm, arm_opt/idx are never
            # assigned and the print below raises NameError -- confirm Popt is
            # always present in data_hist.P.
            for arm in range(len(data_hist.P)):
                try:
                    idx = (data_hist.P[arm]).index(Popt)
                    arm_opt = arm
                except ValueError:
                    pass
            print(' Popt ', Popt, 'Oopt ', min(data.O[tid])[0], 'nth ', nth, 'nth-bandit (s, nth) = ', (arm_opt, idx))

    if(TUNER_NAME=='GPTuneBand_single'):
        # Tune each task independently with its own GPTune_MB instance.
        def merge_dict(mydict, newdict):
            # NOTE(review): iterates mydict's keys, so merging into the empty
            # stats_all below is a no-op and stats never accumulate -- confirm
            # whether stats_all should be seeded from the first stats dict.
            for key in mydict.keys():
                mydict[key] += newdict[key]
        data_all = []
        stats_all = {}
        for singletask in giventask:
            NI = 1
            cur_task = [singletask]
            data = Data(problem)
            gt = GPTune_MB(problem, computer=computer, NS=Nloop, options=options)
            # NOTE(review): unpacked as a 2-tuple here but as a 3-tuple in the
            # GPTuneBand branch above -- verify the MB_LCM return signature.
            (data, stats)=gt.MB_LCM(NS = Nloop, Igiven = cur_task)
            data_all.append(data)
            merge_dict(stats_all, stats)
            print("Finish one single task tuning")
            print("Tuner: ", TUNER_NAME)
            print("stats: ", stats)
            tid = 0
            print(f" [a_val, c_val] = [{data.I[tid][0]:.3f}, {data.I[tid][1]:.3f}]")
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
        print("Finish tuning...")
        print("Tuner: ", TUNER_NAME)
        print("stats_all: ", stats_all)
        # Summarize all per-task results.
        for i in range(len(data_all)):
            data = data_all[i]
            for tid in range(NI):
                print("tid: %d" % (i))
                print(f" [a_val, c_val] = [{data.I[tid][0]:.3f}, {data.I[tid][1]:.3f}]")
                print(" Ps ", data.P[tid])
                print(" Os ", data.O[tid].tolist())
                print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    # multi-fidelity version
    if(TUNER_NAME=='hpbandster'):
        NS = Ntotal
        (data,stats)=HpBandSter_bandit(T=giventask, NS=NS, tp=problem, computer=computer, options=options, run_id="hpbandster_bandit", niter=1)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(f" [a_val, c_val] = [{data.I[tid][0]:.3f}, {data.I[tid][1]:.3f}]")
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            # print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
            # Pick the best observation at the highest budget evaluated:
            # higher budget always wins; ties on budget are broken by objective.
            max_budget = 0.
            Oopt = 99999
            Popt = None
            nth = None
            for idx, (config, out) in enumerate(zip(data.P[tid], data.O[tid].tolist())):
                for subout in out[0]:
                    budget_cur = subout[0]
                    if budget_cur > max_budget:
                        max_budget = budget_cur
                        Oopt = subout[1]
                        Popt = config
                        nth = idx
                    elif budget_cur == max_budget:
                        if subout[1] < Oopt:
                            Oopt = subout[1]
                            Popt = config
                            nth = idx
            print(' Popt ', Popt, 'Oopt ', Oopt, 'nth ', nth)
def main():
    """Single-task KRR tuning driver (susy dataset).

    Builds a one-task tuning problem over the KRR hyperparameters (h, Lambda)
    and dispatches to the tuner selected by --optimization (GPTune, opentuner
    or hpbandster), then prints every sampled configuration and the best one.
    The objective is maximized via negation: printed Os/Oopt values are
    negated back to the original sign.
    """
    # Parse command line arguments
    args = parse_args()
    npernode = args.npernode
    nrun = args.nrun            # number of samples per task
    TUNER_NAME = args.optimization
    # NOTE: args.ntask is intentionally not used -- the task list below is
    # hard-coded to a single dataset, so NI is derived from giventask.

    (machine, processor, nodes, cores) = GetMachineConfiguration()
    print("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME

    datafiles = ["data/susy_10Kn"]
    # datafiles = ["data/branin_10k"]

    # Task input parameters
    datafile = Categoricalnorm(datafiles, transform="onehot", name="datafile")

    # Tuning parameters -- presumably log-scale kernel width and
    # regularization; verify against the objective function.
    h = Real(-10, 10, transform="normalize", name="h")
    Lambda = Real(-10, 10, transform="normalize", name="Lambda")
    # npernode = Integer(int(math.log2(nprocmin_pernode)), int(math.log2(cores)), transform="normalize", name="npernode")

    result = Real(0, float("Inf"), name="r")

    IS = Space([datafile])
    # PS = Space([h,Lambda,npernode])
    PS = Space([h, Lambda])
    OS = Space([result])

    constraints = {}
    models = {}
    constants = {"nodes": nodes, "cores": cores, "npernode": npernode}

    """ Print all input and parameter samples """
    print(IS, PS, OS, constraints, models)

    problem = TuningProblem(IS, PS, OS, objectives, constraints, constants=constants)
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    """ Set and validate options """
    options = Options()
    options['model_processes'] = 1
    # options['model_threads'] = 1
    options['model_restarts'] = 1
    # options['search_multitask_processes'] = 1
    # options['model_restart_processes'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    options['model_class'] = 'Model_LCM'  # 'Model_GPy_LCM'
    options['verbose'] = False
    options.validate(computer=computer)

    # """ Building MLA with the given list of tasks """
    giventask = [["data/susy_10Kn"]]
    data = Data(problem)

    # BUGFIX: the opentuner/hpbandster branches previously set NI = args.ntask,
    # which made the result-printing loop index past the single hard-coded task
    # (IndexError) whenever ntask != 1.  NI is now derived from giventask in
    # every branch, matching the GPTune branch.
    NI = len(giventask)
    NS = nrun

    def _print_samples(data):
        # Shared result report: objective values are negated back to the
        # original (maximized) sign.
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" matrix:%s" % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", -data.O[tid])
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', -min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'GPTune'):
        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
        (data, model, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=max(NS // 2, 1))
        print("stats: ", stats)
        """ Print all input and parameter samples """
        _print_samples(data)

    if (TUNER_NAME == 'opentuner'):
        (data, stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        _print_samples(data)

    if (TUNER_NAME == 'hpbandster'):
        (data, stats) = HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        _print_samples(data)
def main():
    """Single-task tuning driver for a 2-D scattering solver (butterfly/HSS).

    Task space is (model2d, nunk, wavelength); tuning parameters are the
    low-rank level, point ordering, leaf size and MPI processes per node.
    Dispatches to GPTune / OpenTuner / HpBandSter per --optimization and
    prints every sampled configuration plus the optimum.
    """
    # Parse command line arguments
    args = parse_args()
    # Extract arguments
    nprocmin_pernode = args.nprocmin_pernode
    nrun = args.nrun
    TUNER_NAME = args.optimization
    # NOTE: args.ntask is intentionally not used -- the task list below is
    # hard-coded to a single task, so NI is derived from giventask.

    (machine, processor, nodes, cores) = GetMachineConfiguration()
    print("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME

    nprocmax = nodes * cores

    # Task parameters
    model2d = Integer(1, 13, transform="normalize", name="model2d")
    nunk = Integer(2000, 10000000, transform="normalize", name="nunk")
    wavelength = Real(0.00001, 0.02, name="wavelength")

    # Input (tuning) parameters; npernode is searched on a log2 scale.
    lrlevel = Categoricalnorm(['0', '100'], transform="onehot", name="lrlevel")
    xyzsort = Categoricalnorm(['0', '1', '2'], transform="onehot", name="xyzsort")
    nmin_leaf = Integer(5, 9, transform="normalize", name="nmin_leaf")
    npernode = Integer(int(math.log2(nprocmin_pernode)), int(math.log2(cores)), transform="normalize", name="npernode")

    result1 = Real(float("-Inf"), float("Inf"), name="r1")

    IS = Space([model2d, nunk, wavelength])
    PS = Space([lrlevel, xyzsort, nmin_leaf, npernode])
    OS = Space([result1])

    constraints = {}
    models = {}
    constants = {"nodes": nodes, "cores": cores}

    """ Print all input and parameter samples """
    print(IS, PS, OS, constraints, models)

    problem = TuningProblem(IS, PS, OS, objectives, constraints, None, constants=constants)
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    """ Set and validate options """
    options = Options()
    options['model_processes'] = 1
    # options['model_threads'] = 1
    options['model_restarts'] = 1
    # options['search_multitask_processes'] = 1
    # options['model_restart_processes'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    options['model_class'] = 'Model_LCM'  # 'Model_GPy_LCM'
    options['verbose'] = False
    # options['search_algo'] = 'nsga2' #'maco' #'moead' #'nsga2' #'nspso'
    # options['search_pop_size'] = 1000 # 1000
    # options['search_gen'] = 10
    options.validate(computer=computer)

    # """ Building MLA with the given list of tasks """
    # giventask = [[7,100000,0.001]]
    giventask = [[7, 5000, 0.02]]
    # giventask = [[7,2000,0.05]]
    data = Data(problem)

    # BUGFIX: the opentuner/hpbandster branches previously set NI = args.ntask,
    # which made the result-printing loop index past the single hard-coded task
    # (IndexError) whenever ntask != 1.  NI is now derived from giventask in
    # every branch, matching the GPTune branch.
    NI = len(giventask)
    NS = nrun

    def _print_task(data, tid):
        # BUGFIX: label used to read " matrix:%s" (copy-paste from another
        # driver); tasks here are (model2d, nunk, wavelength).
        print(" model2d:%d nunk:%d wavelength:%1.6e" % (data.I[tid][0], data.I[tid][1], data.I[tid][2]))

    if (TUNER_NAME == 'GPTune'):
        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
        (data, model, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=max(NS // 2, 1))
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            _print_task(data, tid)
            print(" Ps ", data.P[tid])
            OL = np.asarray([o[0] for o in data.O[tid]], dtype=np.float64)
            np.set_printoptions(suppress=False, precision=8)
            print(" Os ", OL)
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
            # ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(data.O[tid])
            # front = ndf[0]
            # # print('front id: ',front)
            # fopts = data.O[tid][front]
            # xopts = [data.P[tid][i] for i in front]
            # print('  Popts ', xopts)
            # print('  Oopts ', fopts)

    if (TUNER_NAME == 'opentuner'):
        (data, stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            _print_task(data, tid)
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'hpbandster'):
        (data, stats) = HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            _print_task(data, tid)
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
def main():
    """CK-GPTune driver for tuning hypre AMG on a 3-D grid problem.

    Configuration is read from environment variables (set by CK-GPTune);
    defaults apply when a variable is absent.  Builds the (nx, ny, nz) task
    space and the AMG parameter space, then dispatches to GPTune / OpenTuner
    / HpBandSter and prints every sampled configuration and the optimum.
    """
    global nodes
    global cores
    global JOBID
    global nprocmax
    global nprocmin

    # Get arguments from CK-GPTune; if not given by CK-GPTune the defaults are:
    # -nxmax 100 -nymax 100 -nzmax 100 -nodes 1 -cores 32 -nprocmin_pernode 1 -ntask 2 -nrun 5 -machine cori -jobid 0
    nxmax = int(os.environ.get('nxmax','100'))
    nymax = int(os.environ.get('nymax','100'))
    nzmax = int(os.environ.get('nzmax','100'))
    nodes = int(os.environ.get('nodes','1'))
    cores = int(os.environ.get('cores','4'))
    nprocmin_pernode = int(os.environ.get('nprocmin_pernode','1'))
    machine = str(os.environ.get('machine','mymachine'))
    ntask = int(os.environ.get('ntask','2'))
    nruns = int(os.environ.get('nruns','5'))
    JOBID = int(os.environ.get('jobid','0'))
    TUNER_NAME = str(os.environ.get('optimization','GPTune'))
    TLA = False

    print('run_autotuner arguments\n \
nxmax: %d \
nymax: %d \
nzmax: %d \
nodes: %d \
cores: %d \
nprocmin_pernode: %d \
machine: %s \
ntask: %d \
nruns: %d \
jobid: %d \
tuner: %s' % (nxmax, nymax, nzmax, nodes, cores, nprocmin_pernode, machine, ntask, nruns, JOBID, TUNER_NAME))

    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME
    # os.system("mkdir -p scalapack-driver/bin/%s; cp ../build/pdqrdriver scalapack-driver/bin/%s/.;" %(machine, machine))

    nprocmax = nodes*cores-1  # YL: there is one proc doing spawning, so nodes*cores should be at least 2
    nprocmin = min(nodes*nprocmin_pernode,nprocmax-1)  # YL: ensure strictly nprocmin<nprocmax, required by the Integer space

    # Task (input) space: grid dimensions.
    nxmin = 20
    nymin = 20
    nzmin = 20
    nx = Integer(nxmin, nxmax, transform="normalize", name="nx")
    ny = Integer(nymin, nymax, transform="normalize", name="ny")
    nz = Integer(nzmin, nzmax, transform="normalize", name="nz")

    # Tuning-parameter space: process grid + AMG solver knobs.
    Px = Integer(1, nprocmax, transform="normalize", name="Px")
    Py = Integer(1, nprocmax, transform="normalize", name="Py")
    Nproc = Integer(nprocmin, nprocmax, transform="normalize", name="Nproc")
    strong_threshold = Real(0, 1, transform="normalize", name="strong_threshold")
    trunc_factor = Real(0, 1, transform="normalize", name="trunc_factor")
    P_max_elmts = Integer(1, 12, transform="normalize", name="P_max_elmts")
    coarsen_type = Categoricalnorm (['0', '1', '2', '3', '4', '6', '8', '10'], transform="onehot", name="coarsen_type")
    relax_type = Categoricalnorm (['-1', '0', '6', '8', '16', '18'], transform="onehot", name="relax_type")
    smooth_type = Categoricalnorm (['5', '6', '7', '8', '9'], transform="onehot", name="smooth_type")
    smooth_num_levels = Integer(0, 5, transform="normalize", name="smooth_num_levels")
    interp_type = Categoricalnorm (['0', '3', '4', '5', '6', '8', '12'], transform="onehot", name="interp_type")
    agg_num_levels = Integer(0, 5, transform="normalize", name="agg_num_levels")
    r = Real(float("-Inf"), float("Inf"), name="r")

    IS = Space([nx, ny, nz])
    PS = Space([Px, Py, Nproc, strong_threshold, trunc_factor, P_max_elmts, coarsen_type, relax_type, smooth_type, smooth_num_levels, interp_type, agg_num_levels])
    OS = Space([r])

    # cst1: the 2-D process grid must fit in the allocated processes.
    # cst2: excludes one specific known-bad parameter combination.
    cst1 = f"Px * Py <= Nproc"
    cst2 = f"not(coarsen_type=='0' and P_max_elmts==10 and relax_type=='18' and smooth_type=='6' and smooth_num_levels==3 and interp_type=='8' and agg_num_levels==1)"
    constraints = {"cst1": cst1,"cst2": cst2}
    print(IS, PS, OS, constraints)

    problem = TuningProblem(IS, PS, OS, objectives, constraints, None)  # no performance model
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    options = Options()
    options['model_processes'] = 1
    # options['model_threads'] = 1
    options['model_restarts'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    # options['mpi_comm'] = None
    # BUGFIX: the key was written as 'model_class ' (trailing space), so the
    # real 'model_class' option was never set and the tuner silently fell back
    # to its default surrogate model.
    options['model_class'] = 'Model_LCM'
    options['verbose'] = False
    options.validate(computer=computer)

    data = Data(problem)
    # Random task list; each randint call consumes RNG state in order.
    giventask = [[randint(nxmin,nxmax),randint(nymin,nymax),randint(nzmin,nzmax)] for i in range(ntask)]

    if(TUNER_NAME=='GPTune'):
        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
        """ Building MLA with the given list of tasks """
        NI = len(giventask)
        NS = nruns
        (data, model, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=max(NS//2, 1))
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" nx:%d ny:%d nz:%d" % (data.I[tid][0], data.I[tid][1], data.I[tid][2]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
        if TLA is True:
            """ Call TLA for 2 new tasks using the constructed LCM model"""
            newtask = [[50, 50, 60], [80, 60, 70]]
            (aprxopts, objval, stats) = gt.TLA1(newtask, NS=None)
            print("stats: ", stats)
            """ Print the optimal parameters and function evaluations"""
            for tid in range(len(newtask)):
                print("new task: %s" % (newtask[tid]))
                print(' predicted Popt: ', aprxopts[tid], ' objval: ', objval[tid])

    if(TUNER_NAME=='opentuner'):
        NI = ntask
        NS = nruns
        (data,stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" nx:%d ny:%d nz:%d" % (data.I[tid][0], data.I[tid][1], data.I[tid][2]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if(TUNER_NAME=='hpbandster'):
        NI = ntask
        NS = nruns
        (data,stats)=HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" nx:%d ny:%d nz:%d" % (data.I[tid][0], data.I[tid][1], data.I[tid][2]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
def main():
    """Argparse-based driver for tuning hypre AMG on 3-D grid problems.

    Task space is (nx, ny, nz); tuning parameters are the process grid
    (Px, Py, Nproc) plus AMG knobs.  Dispatches to GPTune / OpenTuner /
    HpBandSter per --optimization and prints every sampled configuration
    and the optimum found.  The RNG is seeded (seed(1)) so the random task
    list is reproducible; the order of the randint calls below therefore
    determines the generated tasks.
    """
    (machine, processor, nodes, cores) = GetMachineConfiguration()
    print("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))

    # Parse command line arguments
    args = parse_args()
    nxmax = args.nxmax
    nymax = args.nymax
    nzmax = args.nzmax
    nprocmin_pernode = args.nprocmin_pernode
    ntask = args.ntask
    nrun = args.nrun
    TUNER_NAME = args.optimization
    TLA = False                      # transfer-learning step disabled
    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME
    # os.system("mkdir -p scalapack-driver/bin/%s; cp ../build/pdqrdriver scalapack-driver/bin/%s/.;" %(machine, machine))

    nprocmax = nodes * cores
    nprocmin = nodes * nprocmin_pernode

    # Task (input) space: grid dimensions.
    nxmin = 20
    nymin = 20
    nzmin = 20
    nx = Integer(nxmin, nxmax, transform="normalize", name="nx")
    ny = Integer(nymin, nymax, transform="normalize", name="ny")
    nz = Integer(nzmin, nzmax, transform="normalize", name="nz")

    # Tuning-parameter space: process grid + AMG solver knobs.
    Px = Integer(1, nprocmax, transform="normalize", name="Px")
    Py = Integer(1, nprocmax, transform="normalize", name="Py")
    Nproc = Integer(nprocmin, nprocmax, transform="normalize", name="Nproc")
    strong_threshold = Real(0, 1, transform="normalize", name="strong_threshold")
    trunc_factor = Real(0, 1, transform="normalize", name="trunc_factor")
    P_max_elmts = Integer(1, 12, transform="normalize", name="P_max_elmts")
    coarsen_type = Categoricalnorm(['0', '1', '2', '3', '4', '6', '8', '10'], transform="onehot", name="coarsen_type")
    relax_type = Categoricalnorm(['-1', '0', '6', '8', '16', '18'], transform="onehot", name="relax_type")
    smooth_type = Categoricalnorm(['5', '6', '7', '8', '9'], transform="onehot", name="smooth_type")
    smooth_num_levels = Integer(0, 5, transform="normalize", name="smooth_num_levels")
    interp_type = Categoricalnorm(['0', '3', '4', '5', '6', '8', '12'], transform="onehot", name="interp_type")
    agg_num_levels = Integer(0, 5, transform="normalize", name="agg_num_levels")
    # Output space: unbounded scalar objective.
    r = Real(float("-Inf"), float("Inf"), name="r")

    IS = Space([nx, ny, nz])
    PS = Space([ Px, Py, Nproc, strong_threshold, trunc_factor, P_max_elmts, coarsen_type, relax_type, smooth_type, smooth_num_levels, interp_type, agg_num_levels ])
    OS = Space([r])

    # Question: how to set constraints
    # cst1: the 2-D process grid must fit in the allocated processes.
    # cst2: excludes one specific known-bad parameter combination.
    cst1 = f"Px * Py <= Nproc"
    cst2 = f"not(coarsen_type=='0' and P_max_elmts==10 and relax_type=='18' and smooth_type=='6' and smooth_num_levels==3 and interp_type=='8' and agg_num_levels==1)"
    constraints = {"cst1": cst1, "cst2": cst2}
    constants = {"nodes": nodes, "cores": cores}
    print(IS, PS, OS, constraints)

    problem = TuningProblem(IS, PS, OS, objectives, constraints, None, constants=constants)
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    options = Options()
    options['model_processes'] = 1
    # options['model_threads'] = 1
    options['model_restarts'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    # options['mpi_comm'] = None
    options['model_class'] = 'Model_LCM'
    options['verbose'] = False
    options.validate(computer=computer)

    # Fixed seed for a reproducible random task list.
    seed(1)
    if (ntask == 1):
        giventask = [[nxmax, nymax, nzmax]]
    else:
        giventask = [[ randint(nxmin, nxmax), randint(nymin, nymax), randint(nzmin, nzmax) ] for i in range(ntask)]
    # giventask = [[50, 60, 80], [60, 80, 100]]
    # # the following will use only task lists stored in the pickle file
    data = Data(problem)

    if (TUNER_NAME == 'GPTune'):
        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
        """ Building MLA with the given list of tasks """
        NI = len(giventask)
        NS = nrun
        (data, model, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=max(NS // 2, 1))
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" nx:%d ny:%d nz:%d" % (data.I[tid][0], data.I[tid][1], data.I[tid][2]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
        if TLA is True:
            """ Call TLA for 2 new tasks using the constructed LCM model"""
            newtask = [[50, 50, 60], [80, 60, 70]]
            (aprxopts, objval, stats) = gt.TLA1(newtask, NS=None)
            print("stats: ", stats)
            """ Print the optimal parameters and function evaluations"""
            for tid in range(len(newtask)):
                print("new task: %s" % (newtask[tid]))
                print(' predicted Popt: ', aprxopts[tid], ' objval: ', objval[tid])

    if (TUNER_NAME == 'opentuner'):
        NI = len(giventask)
        NS = nrun
        (data, stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" nx:%d ny:%d nz:%d" % (data.I[tid][0], data.I[tid][1], data.I[tid][2]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'hpbandster'):
        NI = len(giventask)
        NS = nrun
        (data, stats) = HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" nx:%d ny:%d nz:%d" % (data.I[tid][0], data.I[tid][1], data.I[tid][2]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
def main():
    """KRR multi-fidelity tuning driver.

    Builds the KRR tuning problem (one categorical task parameter
    ``datafile``; two real tuning parameters ``h`` and ``Lambda``),
    computes the Hyperband-style bandit budget schedule, then dispatches
    to one of the tuners: GPTune, GPTuneBand, opentuner, TPE or
    hpbandster.  All tuners append their results to the same
    per-experiment text file.
    """
    # Parse command line arguments
    args = parse_args()

    # Extract arguments
    ntask = args.ntask
    npernode = args.npernode
    optimization = args.optimization
    nrun = args.nrun
    bmin = args.bmin
    bmax = args.bmax
    eta = args.eta
    Nloop = args.Nloop
    restart = args.restart
    expid = args.expid
    TUNER_NAME = args.optimization

    (machine, processor, nodes, cores) = GetMachineConfiguration()
    ot.RandomGenerator.SetSeed(args.seed)
    print(args)
    print("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME

    # One task per dataset named in the '-'-separated args.dataset string.
    datasets = args.dataset.split('-')
    datafiles = []
    giventask = []
    assert len(datasets) == ntask
    for i in range(len(datasets)):
        datafiles.append(f"data/{datasets[i]}")
        giventask.append([f"data/{datasets[i]}"])
    # datafiles = ["data/branin"]

    # Task input parameters
    datafile = Categoricalnorm(datafiles, transform="onehot", name="datafile")
    # Tuning parameters
    h = Real(-10, 10, transform="normalize", name="h")
    Lambda = Real(-10, 10, transform="normalize", name="Lambda")
    # npernode = Integer(int(math.log2(nprocmin_pernode)), int(math.log2(cores)), transform="normalize", name="npernode")
    result = Real(0, float("Inf"), name="r")

    IS = Space([datafile])
    # PS = Space([h,Lambda,npernode])
    PS = Space([h, Lambda])
    OS = Space([result])
    constraints = {}
    models = {}
    constants = {"nodes": nodes, "cores": cores, "npernode": npernode, "bmin": bmin, "bmax": bmax, "eta": eta}

    """ Print all input and parameter samples """
    print(IS, PS, OS, constraints, models)

    problem = TuningProblem(IS, PS, OS, objectives, constraints, constants=constants)
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    """ Set and validate options """
    options = Options()
    options['model_processes'] = 1
    # options['model_threads'] = 1
    options['model_restarts'] = 1
    # options['search_multitask_processes'] = 1
    # options['model_restart_processes'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    # BUG FIX: the key used to be 'model_class ' (note the trailing space),
    # so the option was silently ignored and the default model class ran.
    options['model_class'] = 'Model_LCM'  # 'Model_GPy_LCM'
    options['verbose'] = False
    options['sample_class'] = 'SampleOpenTURNS'
    options['budget_min'] = bmin
    options['budget_max'] = bmax
    options['budget_base'] = eta

    # Hyperband-style bracket schedule: smax+1 brackets with geometrically
    # decreasing budgets and sample counts, plus the successive-halving
    # continuations inside each bracket.
    smax = int(np.floor(np.log(options['budget_max'] / options['budget_min']) / np.log(options['budget_base'])))
    budgets = [options['budget_max'] / options['budget_base']**x for x in range(smax + 1)]
    NSs = [int((smax + 1) / (s + 1)) * options['budget_base']**s for s in range(smax + 1)]
    NSs_all = NSs.copy()
    budget_all = budgets.copy()
    for s in range(smax + 1):
        for n in range(s):
            NSs_all.append(int(NSs[s] / options['budget_base']**(n + 1)))
            budget_all.append(int(budgets[s] * options['budget_base']**(n + 1)))
    Ntotal = int(sum(NSs_all) * Nloop)
    # total number of evaluations at highest budget -- used for single-fidelity tuners
    Btotal = int(np.dot(np.array(NSs_all), np.array(budget_all)) / options['budget_max'] * Nloop)
    print(f"bmin = {bmin}, bmax = {bmax}, eta = {eta}, smax = {smax}")
    print("samples in one multi-armed bandit loop, NSs_all = ", NSs_all)
    print("total number of samples: ", Ntotal)
    print("total number of evaluations at highest budget: ", Btotal)
    print()

    options.validate(computer=computer)

    # """ Building MLA with the given list of tasks """
    data = Data(problem)
    NI = ntask
    # All tuner branches append to the same per-experiment results file.
    results_filename = f"KRR_{args.dataset}_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.txt"

    if (TUNER_NAME == 'GPTune'):
        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
        NS = Btotal
        if args.nrun > 0:
            NS = args.nrun
        NS1 = max(NS // 2, 1)
        (data, model, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=NS1)
        print("Tuner: ", TUNER_NAME)
        print("stats: ", stats)
        results_file = open(results_filename, "a")
        results_file.write(f"Tuner: {TUNER_NAME}\n")
        results_file.write(f"stats: {stats}\n")
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print("    matrix:%s" % (data.I[tid][0]))
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid])
            # objective is stored negated, hence the minus sign on Oopt
            print('    Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', -min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
            results_file.write(f"tid: {tid:d}\n")
            results_file.write(f"    matrix:{data.I[tid][0]:s}\n")
            # results_file.write(f"    Ps {data.P[tid]}\n")
            results_file.write(f"    Os {data.O[tid].tolist()}\n")
            # results_file.write(f'    Popt {data.P[tid][np.argmin(data.O[tid])]} Oopt {-min(data.O[tid])[0]} nth {np.argmin(data.O[tid])}\n')
        results_file.close()

    if (TUNER_NAME == 'opentuner'):
        NS = Btotal
        if args.nrun > 0:
            NS = args.nrun
        (data, stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("Tuner: ", TUNER_NAME)
        print("stats: ", stats)
        results_file = open(results_filename, "a")
        results_file.write(f"Tuner: {TUNER_NAME}\n")
        results_file.write(f"stats: {stats}\n")
        """ Print all input and parameter samples """
        # OpenTuner may record more than NS samples; report only the first NS.
        for tid in range(NI):
            print("tid: %d" % (tid))
            print("    matrix:%s" % (data.I[tid][0]))
            print("    Ps ", data.P[tid][:NS])
            print("    Os ", data.O[tid][:NS])
            print('    Popt ', data.P[tid][np.argmin(data.O[tid][:NS])], 'Oopt ', -min(data.O[tid][:NS])[0], 'nth ', np.argmin(data.O[tid][:NS]))
            results_file.write(f"tid: {tid:d}\n")
            results_file.write(f"    matrix:{data.I[tid][0]:s}\n")
            # results_file.write(f"    Ps {data.P[tid][:NS]}\n")
            results_file.write(f"    Os {data.O[tid][:NS].tolist()}\n")
            # results_file.write(f'    Popt {data.P[tid][np.argmin(data.O[tid])]} Oopt {-min(data.O[tid])[0]} nth {np.argmin(data.O[tid])}\n')
        results_file.close()

    # single-fidelity TPE via HpBandSter
    if (TUNER_NAME == 'TPE'):
        NS = Btotal
        if args.nrun > 0:
            NS = args.nrun
        (data, stats) = HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, options=options, run_id="HpBandSter", niter=1)
        print("Tuner: ", TUNER_NAME)
        print("stats: ", stats)
        results_file = open(results_filename, "a")
        results_file.write(f"Tuner: {TUNER_NAME}\n")
        results_file.write(f"stats: {stats}\n")
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print("    matrix:%s" % (data.I[tid][0]))
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid])
            print('    Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', -min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
            results_file.write(f"tid: {tid:d}\n")
            results_file.write(f"    matrix:{data.I[tid][0]:s}\n")
            # results_file.write(f"    Ps {data.P[tid]}\n")
            results_file.write(f"    Os {data.O[tid].tolist()}\n")
            # results_file.write(f'    Popt {data.P[tid][np.argmin(data.O[tid])]} Oopt {-min(data.O[tid])[0]} nth {np.argmin(data.O[tid])}\n')
        results_file.close()

    if (TUNER_NAME == 'GPTuneBand'):
        data = Data(problem)
        gt = GPTune_MB(problem, computer=computer, NS=Nloop, options=options)
        (data, stats, data_hist) = gt.MB_LCM(NS=Nloop, Igiven=giventask)
        print("Tuner: ", TUNER_NAME)
        print("stats: ", stats)
        results_file = open(results_filename, "a")
        results_file.write(f"Tuner: {TUNER_NAME}\n")
        results_file.write(f"stats: {stats}\n")
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print("    matrix:%s" % (data.I[tid][0]))
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid].tolist())
            nth = np.argmin(data.O[tid])
            Popt = data.P[tid][nth]
            # find which arm and which sample the optimal param is from
            for arm in range(len(data_hist.P)):
                try:
                    idx = (data_hist.P[arm]).index(Popt)
                    arm_opt = arm
                except ValueError:
                    pass
            print('    Popt ', Popt, 'Oopt ', -min(data.O[tid])[0], 'nth ', nth, 'nth-bandit (s, nth) = ', (arm_opt, idx))
            results_file.write(f"tid: {tid:d}\n")
            results_file.write(f"    matrix:{data.I[tid][0]:s}\n")
            # results_file.write(f"    Ps {data.P[tid]}\n")
            results_file.write(f"    Os {data.O[tid].tolist()}\n")
            # results_file.write(f'    Popt {data.P[tid][np.argmin(data.O[tid])]} Oopt {-min(data.O[tid])[0]} nth {np.argmin(data.O[tid])}\n')
        results_file.close()

    # multi-fidelity version
    if (TUNER_NAME == 'hpbandster'):
        NS = Ntotal
        (data, stats) = HpBandSter_bandit(T=giventask, NS=NS, tp=problem, computer=computer, options=options, run_id="hpbandster_bandit", niter=1)
        print("Tuner: ", TUNER_NAME)
        print("stats: ", stats)
        results_file = open(results_filename, "a")
        results_file.write(f"Tuner: {TUNER_NAME}\n")
        results_file.write(f"stats: {stats}\n")
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print("    matrix:%s" % (data.I[tid][0]))
            # print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid].tolist())
            # Each output entry holds (budget, objective) sub-results; pick
            # the best objective among those evaluated at the highest budget.
            max_budget = 0.
            Oopt = 99999
            Popt = None
            nth = None
            for idx, (config, out) in enumerate(zip(data.P[tid], data.O[tid].tolist())):
                for subout in out[0]:
                    budget_cur = subout[0]
                    if budget_cur > max_budget:
                        max_budget = budget_cur
                        Oopt = subout[1]
                        Popt = config
                        nth = idx
                    elif budget_cur == max_budget:
                        if subout[1] < Oopt:
                            Oopt = subout[1]
                            Popt = config
                            nth = idx
            print('    Popt ', Popt, 'Oopt ', -Oopt, 'nth ', nth)
            results_file.write(f"tid: {tid:d}\n")
            results_file.write(f"    matrix:{data.I[tid][0]:s}\n")
            # results_file.write(f"    Ps {data.P[tid]}\n")
            results_file.write(f"    Os {data.O[tid].tolist()}\n")
            # results_file.write(f'    Popt {data.P[tid][np.argmin(data.O[tid])]} Oopt {-min(data.O[tid])[0]} nth {np.argmin(data.O[tid])}\n')
        results_file.close()
niter=1, technique=None) print("stats: ", stats) """ Print all input and parameter samples """ for tid in range(NI): print("tid: %d" % (tid)) print(" t:%d " % (data.I[tid][0])) print(" Ps ", data.P[tid]) print(" Os ", data.O[tid].tolist()) print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid])) if (TUNER_NAME == 'hpbandster'): (data, stats) = HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1) print("stats: ", stats) """ Print all input and parameter samples """ for tid in range(NI): print("tid: %d" % (tid)) print(" t:%d " % (data.I[tid][0])) print(" Ps ", data.P[tid]) print(" Os ", data.O[tid].tolist()) print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid])) plot = 0 if plot == 1: x = np.arange(0., 1., 0.00001)
def main():
    """GPTune demo driver for the 1D regression search (LCM model).

    Loads train/test splits produced by ``datagen``, builds a tuning
    problem with one task parameter ``t`` and two tuning parameters
    (``k`` knots, ``l``), then runs the tuner selected by
    ``--optimization`` (GPTune, opentuner or hpbandster) and prints the
    per-task optima.
    """
    import matplotlib.pyplot as plt
    global nodes
    global cores

    # Parse command line arguments
    args = parse_args()
    global nrun
    nrun = args.nrun
    global task
    task = args.task
    global var
    var = args.var
    global size
    size = args.size
    TUNER_NAME = args.optimization
    (machine, processor, nodes, cores) = GetMachineConfiguration()
    print("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME

    # Task parameter t; tuning parameters: k (number of knots, bounded by a
    # fraction of the dataset size) and l; unconstrained objective o.
    t = Real(0., 10., transform="normalize", name="t")
    k = Integer(int(float(size) * 0.1), int(float(size) * 0.9), transform="normalize", name="k")
    l = Real(0., 1., transform="normalize", name="l")
    o = Real(float('-Inf'), float('Inf'), name="o")

    IS = Space([t])
    PS = Space([k, l])
    OS = Space([o])
    constraints = {}
    problem = TuningProblem(IS, PS, OS, objectives, constraints, None)
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    options = Options()
    options['model_restarts'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    options['objective_evaluation_parallelism'] = False
    options['objective_multisample_threads'] = 1
    options['objective_multisample_processes'] = 1
    options['objective_nprocmax'] = 1
    options['model_processes'] = 1
    options['model_class'] = 'Model_LCM'
    # options['model_class'] = 'Model_GPy_LCM'
    options['verbose'] = False
    options.validate(computer=computer)

    giventask = [[float(task)]]

    global X_train
    global Y_train
    global X_test
    global Y_test
    X_train = []
    Y_train = []
    X_test = []
    Y_test = []

    # FIX: create the log directory atomically instead of the racy
    # exists-check followed by an `os.system("mkdir -p ...")` shell-out.
    os.makedirs("gptune-search-lcm.db", exist_ok=True)
    with open(
            "gptune-search-lcm.db/gptune-search." + str(nrun) + "." + task + "." + size + "."
            + var + ".log", "w") as f_out:
        # CSV header for the per-sample log written by the objective function.
        f_out.write(
            "NKnots,Lambda,RegressionTime,InTestTime,InMSE,InR2,InAR2,OutTestTime,OutMSE,OutR2,OutAR2\n"
        )

    # Load the train/test splits generated by datagen (CSV lines: x,y).
    dataset = "gptune-demo-" + task + "-" + size + "-" + var
    trainset = dataset + "-train"
    with open("datagen/" + trainset, "r") as f_in:
        for dataline in f_in:
            data = dataline.split(",")
            X_train.append(float(data[0]))
            Y_train.append(float(data[1]))
    testset = dataset + "-test"
    with open("datagen/" + testset, "r") as f_in:
        for dataline in f_in:
            data = dataline.split(",")
            X_test.append(float(data[0]))
            Y_test.append(float(data[1]))

    NI = len(giventask)
    NS = nrun

    TUNER_NAME = os.environ['TUNER_NAME']

    if (TUNER_NAME == 'GPTune'):
        data = Data(problem)
        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
        # half of the samples go to the initial pilot design
        (data, modeler, stats) = gt.MLA(NS=NS, Igiven=giventask, NI=NI, NS1=NS // 2)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print("    t:%f " % (data.I[tid][0]))
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid].tolist())
            print('    Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'opentuner'):
        (data, stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print("    t:%f " % (data.I[tid][0]))
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid].tolist())
            print('    Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'hpbandster'):
        (data, stats) = HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print("    t:%f " % (data.I[tid][0]))
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid].tolist())
            print('    Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
def main():
    """CNN hyperparameter tuning driver (multi-fidelity bandit).

    Tunes optimizer settings and network architecture for a CNN over one
    task defined by (ntrain, nvalid), dispatching to GPTune, GPTuneBand,
    opentuner, TPE or hpbandster according to ``--optimization``.
    """
    # Parse command line arguments
    args = parse_args()
    bmin = args.bmin
    device = args.device
    bmax = args.bmax
    eta = args.eta
    nrun = args.nrun
    npernode = args.npernode
    ntask = args.ntask
    Nloop = args.Nloop
    restart = args.restart
    TUNER_NAME = args.optimization
    TLA = False

    # FIX: GetMachineConfiguration() used to be called (and the banner
    # printed) twice; once is enough.
    (machine, processor, nodes, cores) = GetMachineConfiguration()
    print(args)
    print("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME
    # os.system("mkdir -p scalapack-driver/bin/%s; cp ../build/pdqrdriver scalapack-driver/bin/%s/.;" %(machine, machine))

    # Task parameters: training/validation set sizes.
    ntrain = Integer(1000, 10000, transform="normalize", name="ntrain")
    nvalid = Integer(256, 2048, transform="normalize", name="nvalid")
    # Tuning parameters: optimizer settings and network architecture.
    lr = Real(1e-6, 1e-2, name="lr")
    optimizer = Categoricalnorm(['Adam', 'SGD'], transform="onehot", name="optimizer")
    sgd_momentum = Real(0, 0.99, name="sgd_momentum")
    num_conv_layers = Integer(1, 3, transform="normalize", name="num_conv_layers")
    num_filters_1 = Integer(4, 64, transform="normalize", name="num_filters_1")
    num_filters_2 = Integer(4, 64, transform="normalize", name="num_filters_2")
    num_filters_3 = Integer(4, 64, transform="normalize", name="num_filters_3")
    dropout_rate = Real(0, 0.9, name="dropout_rate")
    num_fc_units = Integer(8, 256, transform="normalize", name="num_fc_units")
    validation_loss = Real(float("-Inf"), float("Inf"), name="validation_loss")

    IS = Space([ntrain, nvalid])
    PS = Space([lr, optimizer, sgd_momentum, num_conv_layers, num_filters_1, num_filters_2, num_filters_3, dropout_rate, num_fc_units])
    OS = Space([validation_loss])
    constraints = {}
    constants = {"nodes": nodes, "cores": cores, "npernode": npernode, "bmin": bmin, "bmax": bmax, "eta": eta, "device": device}
    print(IS, PS, OS, constraints)

    problem = TuningProblem(IS, PS, OS, objectives, constraints, constants=constants)
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    options = Options()
    options['model_processes'] = 4  # parallel cholesky for each LCM kernel
    # options['model_threads'] = 1
    # options['model_restarts'] = args.Nrestarts
    # options['distributed_memory_parallelism'] = False  # parallel model restart
    options['model_restarts'] = restart
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    # options['mpi_comm'] = None
    options['model_class'] = 'Model_LCM'  # Model_GPy_LCM or Model_LCM
    options['verbose'] = False
    options['budget_min'] = bmin
    options['budget_max'] = bmax
    options['budget_base'] = eta

    # Hyperband-style bracket schedule (see the bandit tuners below).
    smax = int(np.floor(np.log(options['budget_max'] / options['budget_min']) / np.log(options['budget_base'])))
    budgets = [options['budget_max'] / options['budget_base']**x for x in range(smax + 1)]
    NSs = [int((smax + 1) / (s + 1)) * options['budget_base']**s for s in range(smax + 1)]
    NSs_all = NSs.copy()
    budget_all = budgets.copy()
    for s in range(smax + 1):
        for n in range(s):
            NSs_all.append(int(NSs[s] / options['budget_base']**(n + 1)))
            budget_all.append(int(budgets[s] * options['budget_base']**(n + 1)))
    Ntotal = int(sum(NSs_all) * Nloop)
    # total number of evaluations at highest budget -- used for single-fidelity tuners
    Btotal = int(np.dot(np.array(NSs_all), np.array(budget_all)) / options['budget_max'] * Nloop)
    print(f"bmin = {bmin}, bmax = {bmax}, eta = {eta}, smax = {smax}")
    print("samples in one multi-armed bandit loop, NSs_all = ", NSs_all)
    print("total number of samples: ", Ntotal)
    print("total number of evaluations at highest budget: ", Btotal)
    print()

    options.validate(computer=computer)

    data = Data(problem)
    # FIX: giventask used to be assigned only when ntask == 1, so any other
    # value crashed later with a confusing NameError; fail fast instead.
    if ntask == 1:
        giventask = [[args.ntrain, args.nvalid]]
        # giventask = [[3000, 1000]]
    else:
        raise ValueError(f"this driver supports only ntask == 1, got {ntask}")
    NI = len(giventask)
    assert NI == ntask  # make sure number of tasks match

    if (TUNER_NAME == 'GPTune'):
        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
        """ Building MLA with the given list of tasks """
        NS = Btotal
        if args.nrun > 0:
            NS = args.nrun
        NS1 = max(NS // 2, 1)
        (data, model, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=NS1)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(f"    [ntrain, nvalid] = [{data.I[tid][0]}, {data.I[tid][1]}]")
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid])
            print('    Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'opentuner'):
        NS = Btotal
        (data, stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        # OpenTuner may record more than NS samples; optimum taken over the first NS.
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(f"    [ntrain, nvalid] = [{data.I[tid][0]}, {data.I[tid][1]}]")
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid])
            print('    Popt ', data.P[tid][np.argmin(data.O[tid][:NS])], 'Oopt ', min(data.O[tid][:NS])[0], 'nth ', np.argmin(data.O[tid][:NS]))

    # single-fidelity version of hpbandster
    if (TUNER_NAME == 'TPE'):
        NS = Btotal
        (data, stats) = HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(f"    [ntrain, nvalid] = [{data.I[tid][0]}, {data.I[tid][1]}]")
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid].tolist())
            print('    Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'GPTuneBand'):
        data = Data(problem)
        gt = GPTune_MB(problem, computer=computer, NS=Nloop, options=options)
        (data, stats, data_hist) = gt.MB_LCM(NS=Nloop, Igiven=giventask)
        print("Tuner: ", TUNER_NAME)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(f"    [ntrain, nvalid] = [{data.I[tid][0]}, {data.I[tid][1]}]")
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid].tolist())
            nth = np.argmin(data.O[tid])
            Popt = data.P[tid][nth]
            # find which arm and which sample the optimal param is from
            for arm in range(len(data_hist.P)):
                try:
                    idx = (data_hist.P[arm]).index(Popt)
                    arm_opt = arm
                except ValueError:
                    pass
            print('    Popt ', Popt, 'Oopt ', min(data.O[tid])[0], 'nth ', nth, 'nth-bandit (s, nth) = ', (arm_opt, idx))

    # multi-fidelity version
    if (TUNER_NAME == 'hpbandster'):
        NS = Ntotal
        (data, stats) = HpBandSter_bandit(T=giventask, NS=NS, tp=problem, computer=computer, options=options, run_id="hpbandster_bandit", niter=1)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(f"    [ntrain, nvalid] = [{data.I[tid][0]}, {data.I[tid][1]}]")
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid].tolist())
            # print('    Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
            # Each output entry holds (budget, objective) sub-results; pick
            # the best objective among those evaluated at the highest budget.
            max_budget = 0.
            Oopt = 99999
            Popt = None
            nth = None
            for idx, (config, out) in enumerate(zip(data.P[tid], data.O[tid].tolist())):
                for subout in out[0]:
                    budget_cur = subout[0]
                    if budget_cur > max_budget:
                        max_budget = budget_cur
                        Oopt = subout[1]
                        Popt = config
                        nth = idx
                    elif budget_cur == max_budget:
                        if subout[1] < Oopt:
                            Oopt = subout[1]
                            Popt = config
                            nth = idx
            print('    Popt ', Popt, 'Oopt ', Oopt, 'nth ', nth)
def main():
    """IMPACT-Z quadrupole-matching tuning driver.

    Tunes a single quadrupole strength perturbation ``quad`` to minimize
    the beam mismatch reported by the ImpactZ executable, for the task
    defined by (inputfile, controlfile).  With GPTune, also plots the
    surrogate prediction over the 1D parameter range.
    """
    # Parse command line arguments
    args = parse_args()

    # Extract arguments
    ntask = args.ntask
    optimization = args.optimization
    nrun = args.nrun
    TUNER_NAME = args.optimization
    (machine, processor, nodes, cores) = GetMachineConfiguration()
    print("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME
    # the objective function spawns this MPI executable from the CWD
    os.system("cp ./IMPACT-Z/build/ImpactZexe-mpi .")
    nprocmax = nodes * cores

    inputfiles = ["ImpactZ.in_test1", "ImpactZ.in_test2"]
    controlfiles = ["matchquad.in_test1", "matchquad.in_test2"]

    # Task parameters
    inputfile = Categoricalnorm(inputfiles, transform="onehot", name="inputfile")
    controlfile = Categoricalnorm(controlfiles, transform="onehot", name="controlfile")

    # Input parameters
    # we know that XX = x00*(1+quad) has range [-50,50], so adjust range of quad accordingly
    # FIX: read the control file via a context manager (the handle used to be
    # left open for the lifetime of the process).
    with open('matchquad.in_test1', 'r') as file1:
        Lines = file1.readlines()
    npara = int(Lines[0].split()[0])
    res = [i for i in Lines[-1].split()]
    b1 = [-50.0 / float(res[i]) - 1.0 for i in range(npara)]
    b2 = [50.0 / float(res[i]) - 1.0 for i in range(npara)]
    lb = [min(b1[i], b2[i]) for i in range(npara)]
    ub = [max(b1[i], b2[i]) for i in range(npara)]
    # quad1 = Real (lb[0], ub[0], transform="normalize", name="quad1")
    # ... quad2..quad5 analogous, for the 5-parameter variant below
    quad = Real(-1, 1, transform="normalize", name="quad")

    # Output parameters
    mismatch = Real(float("-Inf"), float("Inf"), name="mismatch")

    IS = Space([inputfile, controlfile])
    # PS = Space([quad1, quad2, quad3, quad4, quad5])
    PS = Space([quad])
    OS = Space([mismatch])
    constraints = {}
    models = {}
    constants = {"nodes": nodes, "cores": cores}

    """ Print all input and parameter samples """
    print(IS, PS, OS, constraints, models)

    problem = TuningProblem(IS, PS, OS, objectives, constraints, None, constants=constants)
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    """ Set and validate options """
    options = Options()
    options['model_processes'] = 1
    # options['model_threads'] = 1
    options['model_restarts'] = 1
    # options['search_multitask_processes'] = 1
    # options['model_restart_processes'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    options['model_class'] = 'Model_LCM'  # 'Model_GPy_LCM'
    options['verbose'] = False
    options['search_pop_size'] = 10000
    options['sample_class'] = 'SampleOpenTURNS'
    options.validate(computer=computer)

    # """ Building MLA with the given list of tasks """
    # giventask = [[np.random.choice(matrices,size=1)[0]] for i in range(ntask)]
    giventask = [["ImpactZ.in_test1", "matchquad.in_test1"]]
    # giventask = [["big.rua"]]
    data = Data(problem)
    # Pdefault = [0,0,0,0,0]
    # data.P = [[Pdefault]] * ntask

    if (TUNER_NAME == 'GPTune'):
        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
        NI = len(giventask)
        NS = nrun
        (data, model, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=NS // 2)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print("    inputfile:%s controlfile:%s" % (data.I[tid][0], data.I[tid][1]))
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid].tolist())
            print('    Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

        # Plot the surrogate mean +/- one std over the 1D parameter range,
        # together with the evaluated samples.
        x = np.arange(-1, 1., 0.0001)
        for tid in range(len(data.I)):
            fig = plt.figure(figsize=[12.8, 9.6])
            p = data.I[tid]
            t = p[0]
            I_orig = p
            kwargst = {IS[k].name: I_orig[k] for k in range(len(IS))}
            y_mean = np.zeros([len(x)])
            y_std = np.zeros([len(x)])
            for i in range(len(x)):
                P_orig = [x[i]]
                kwargs = {PS[k].name: P_orig[k] for k in range(len(PS))}
                kwargs.update(kwargst)
                kwargs.update(constants)
                if (TUNER_NAME == 'GPTune'):
                    (y_mean[i], var) = predict_aug(model, gt, kwargs, tid)
                    y_std[i] = np.sqrt(var)
            fontsize = 40
            plt.rcParams.update({'font.size': 40})
            plt.plot(x, y_mean, 'k', lw=3, zorder=9, label='prediction')
            plt.fill_between(x, y_mean - y_std, y_mean + y_std, alpha=0.2, color='k')
            plt.scatter(data.P[tid], data.O[tid], c='r', s=50, zorder=10, edgecolors=(0, 0, 0), label='sample')
            plt.xlabel('x', fontsize=fontsize + 2)
            plt.ylabel('y', fontsize=fontsize + 2)
            legend = plt.legend(loc='upper right', shadow=False, fontsize=fontsize)
            plt.show(block=False)
            plt.pause(0.5)
            # input("Press [enter] to continue.")
            fig.savefig('surrogate1D.pdf')

    if (TUNER_NAME == 'opentuner'):
        NI = len(giventask)
        NS = nrun
        (data, stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print("    inputfile:%s controlfile:%s" % (data.I[tid][0], data.I[tid][1]))
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid].tolist())
            print('    Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'hpbandster'):
        NI = len(giventask)
        NS = nrun
        (data, stats) = HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print("    inputfile:%s controlfile:%s" % (data.I[tid][0], data.I[tid][1]))
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid].tolist())
            print('    Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
def main():
    """Autotune AMG-style solver parameters for tasks defined by grid sizes
    (nx, ny, nz), using GPTune, OpenTuner or HpBandSter, with pickle-based
    checkpointing of tuning data and (for GPTune) the tuner itself.
    """
    global nodes
    global cores
    global JOBID
    global nprocmax
    global nprocmin

    # Parse command line arguments
    args = parse_args()
    nxmax = args.nxmax
    nymax = args.nymax
    nzmax = args.nzmax
    nodes = args.nodes
    cores = args.cores
    nprocmin_pernode = args.nprocmin_pernode
    machine = args.machine
    ntask = args.ntask
    nruns = args.nruns
    JOBID = args.jobid
    TUNER_NAME = args.optimization
    TLA = False

    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME
    # os.system("mkdir -p scalapack-driver/bin/%s; cp ../build/pdqrdriver scalapack-driver/bin/%s/.;" % (machine, machine))

    nprocmax = nodes*cores-1  # YL: there is one proc doing spawning, so nodes*cores should be at least 2
    nprocmin = min(nodes*nprocmin_pernode, nprocmax-1)  # YL: ensure strictly nprocmin<nprocmax, required by the Integer space

    # Task (input) space: grid dimensions
    nxmin = 20
    nymin = 20
    nzmin = 20
    nx = Integer(nxmin, nxmax, transform="normalize", name="nx")
    ny = Integer(nymin, nymax, transform="normalize", name="ny")
    nz = Integer(nzmin, nzmax, transform="normalize", name="nz")

    # Tuning-parameter space
    Px = Integer(1, nprocmax, transform="normalize", name="Px")
    Py = Integer(1, nprocmax, transform="normalize", name="Py")
    Nproc = Integer(nprocmin, nprocmax, transform="normalize", name="Nproc")
    strong_threshold = Real(0, 1, transform="normalize", name="strong_threshold")
    trunc_factor = Real(0, 1, transform="normalize", name="trunc_factor")
    P_max_elmts = Integer(1, 12, transform="normalize", name="P_max_elmts")
    coarsen_type = Categoricalnorm(['0', '1', '2', '3', '4', '6', '8', '10'], transform="onehot", name="coarsen_type")
    relax_type = Categoricalnorm(['-1', '0', '6', '8', '16', '18'], transform="onehot", name="relax_type")
    smooth_type = Categoricalnorm(['5', '6', '7', '8', '9'], transform="onehot", name="smooth_type")
    smooth_num_levels = Integer(0, 5, transform="normalize", name="smooth_num_levels")
    interp_type = Categoricalnorm(['0', '3', '4', '5', '6', '8', '12'], transform="onehot", name="interp_type")
    agg_num_levels = Integer(0, 5, transform="normalize", name="agg_num_levels")

    # Output space: the (scalar) objective value
    r = Real(float("-Inf"), float("Inf"), name="r")

    IS = Space([nx, ny, nz])
    PS = Space([Px, Py, Nproc, strong_threshold, trunc_factor, P_max_elmts,
                coarsen_type, relax_type, smooth_type, smooth_num_levels,
                interp_type, agg_num_levels])
    OS = Space([r])

    # Constraints: processor grid must fit in Nproc; one known-bad
    # parameter combination is excluded.
    cst1 = f"Px * Py <= Nproc"
    cst2 = f"not(coarsen_type=='0' and P_max_elmts==10 and relax_type=='18' and smooth_type=='6' and smooth_num_levels==3 and interp_type=='8' and agg_num_levels==1)"
    constraints = {"cst1": cst1, "cst2": cst2}
    print(IS, PS, OS, constraints)

    problem = TuningProblem(IS, PS, OS, objectives, constraints, None)  # no performance model
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    options = Options()
    options['model_processes'] = 1
    # options['model_threads'] = 1
    options['model_restarts'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    # options['mpi_comm'] = None
    # BUGFIX: the key was previously written as 'model_class ' (trailing
    # space), so the setting was stored under a junk key and silently
    # ignored, leaving the default model class in use.
    options['model_class'] = 'Model_LCM'
    options['verbose'] = False
    options.validate(computer=computer)

    """ Intialize the tuner with existing data stored as last check point"""
    ckpt_name = ('Data_nodes_%d_cores_%d_nxmax_%d_nymax_%d_nzmax_%d_machine_%s_jobid_%d.pkl'
                 % (nodes, cores, nxmax, nymax, nzmax, machine, JOBID))
    try:
        # 'with' ensures the checkpoint file handle is closed deterministically
        with open(ckpt_name, 'rb') as f:
            data = pickle.load(f)
        giventask = data.I
    except (OSError, IOError):
        data = Data(problem)
        giventask = [[randint(nxmin, nxmax), randint(nymin, nymax), randint(nzmin, nzmax)] for i in range(ntask)]
    # giventask = [[50, 60, 80], [60, 80, 100]]
    # # the following will use only task lists stored in the pickle file
    # data = Data(problem)

    if(TUNER_NAME == 'GPTune'):
        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
        """ Building MLA with the given list of tasks """
        NI = len(giventask)
        NS = nruns
        # NS1=max(NS//2,1): seed with half the samples, at least one
        (data, model, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=max(NS//2, 1))
        print("stats: ", stats)

        """ Dump the data to file as a new check point """
        with open(ckpt_name, 'wb') as f:
            pickle.dump(data, f)
        """ Dump the tuner to file for TLA use """
        with open('MLA_nodes_%d_cores_%d_nxmax_%d_nymax_%d_nzmax_%d_machine_%s_jobid_%d.pkl'
                  % (nodes, cores, nxmax, nymax, nzmax, machine, JOBID), 'wb') as f:
            pickle.dump(gt, f)

        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print("    nx:%d ny:%d nz:%d" % (data.I[tid][0], data.I[tid][1], data.I[tid][2]))
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid])
            print('    Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

        if TLA is True:
            """ Call TLA for 2 new tasks using the constructed LCM model"""
            newtask = [[50, 50, 60], [80, 60, 70]]
            (aprxopts, objval, stats) = gt.TLA1(newtask, NS=None)
            print("stats: ", stats)

            """ Print the optimal parameters and function evaluations"""
            for tid in range(len(newtask)):
                print("new task: %s" % (newtask[tid]))
                print('    predicted Popt: ', aprxopts[tid], ' objval: ', objval[tid])

    if(TUNER_NAME == 'opentuner'):
        NI = ntask
        NS = nruns
        (data, stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)

        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print("    nx:%d ny:%d nz:%d" % (data.I[tid][0], data.I[tid][1], data.I[tid][2]))
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid])
            print('    Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if(TUNER_NAME == 'hpbandster'):
        NI = ntask
        NS = nruns
        (data, stats) = HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
        print("stats: ", stats)

        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print("    nx:%d ny:%d nz:%d" % (data.I[tid][0], data.I[tid][1], data.I[tid][2]))
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid].tolist())
            print('    Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
def main():
    """Tune IMPACT-Z quadrupole strengths (quad1..quad5) to minimize the beam
    mismatch objective, using GPTune, OpenTuner or HpBandSter.
    """
    # Parse command line arguments
    args = parse_args()
    # Extract arguments
    ntask = args.ntask
    optimization = args.optimization
    nrun = args.nrun
    TUNER_NAME = args.optimization

    (machine, processor, nodes, cores) = GetMachineConfiguration()
    print("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME
    # stage the MPI executable next to the driver
    os.system("cp ./IMPACT-Z/build/ImpactZexe-mpi .")

    nprocmax = nodes * cores

    inputfiles = ["ImpactZ.in_test1", "ImpactZ.in_test2"]
    controlfiles = ["matchquad.in_test1", "matchquad.in_test2"]

    # Task parameters
    inputfile = Categoricalnorm(inputfiles, transform="onehot", name="inputfile")
    controlfile = Categoricalnorm(controlfiles, transform="onehot", name="controlfile")

    # Input parameters
    # we know that XX = x00*(1+quad) has range [-50,50], so adjust range of quad accordingly
    # BUGFIX: open the control file with 'with' so the handle is always closed
    # (previously the file was opened and never closed).
    with open('matchquad.in_test1', 'r') as file1:
        Lines = file1.readlines()
    npara = int(Lines[0].split()[0])
    res = [i for i in Lines[-1].split()]
    b1 = [-50.0 / float(res[i]) - 1.0 for i in range(npara)]
    b2 = [50.0 / float(res[i]) - 1.0 for i in range(npara)]
    lb = [min(b1[i], b2[i]) for i in range(npara)]
    ub = [max(b1[i], b2[i]) for i in range(npara)]
    # quad1 = Real (lb[0], ub[0], transform="normalize", name="quad1")
    # quad2 = Real (lb[1], ub[1], transform="normalize", name="quad2")
    # quad3 = Real (lb[2], ub[2], transform="normalize", name="quad3")
    # quad4 = Real (lb[3], ub[3], transform="normalize", name="quad4")
    # quad5 = Real (lb[4], ub[4], transform="normalize", name="quad5")
    # NOTE: the derived bounds lb/ub above are currently unused; a fixed
    # narrow range is used for all five quads instead.
    quad1 = Real(-0.05, 0.05, transform="normalize", name="quad1")
    quad2 = Real(-0.05, 0.05, transform="normalize", name="quad2")
    quad3 = Real(-0.05, 0.05, transform="normalize", name="quad3")
    quad4 = Real(-0.05, 0.05, transform="normalize", name="quad4")
    quad5 = Real(-0.05, 0.05, transform="normalize", name="quad5")

    # Output parameters
    mismatch = Real(float("-Inf"), float("Inf"), name="mismatch")

    IS = Space([inputfile, controlfile])
    PS = Space([quad1, quad2, quad3, quad4, quad5])
    OS = Space([mismatch])

    constraints = {}
    models = {}
    constants = {"nodes": nodes, "cores": cores}

    """ Print all input and parameter samples """
    print(IS, PS, OS, constraints, models)

    problem = TuningProblem(IS, PS, OS, objectives, constraints, None, constants=constants)
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    """ Set and validate options """
    options = Options()
    options['model_processes'] = 1
    # options['model_threads'] = 1
    options['model_restarts'] = 1
    # options['search_multitask_processes'] = 1
    # options['model_restart_processes'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    options['model_class'] = 'Model_GPy_LCM'  # 'Model_GPy_LCM'
    options['verbose'] = False
    # options['search_pop_size'] = 10000
    options['sample_class'] = 'SampleOpenTURNS'
    options.validate(computer=computer)

    # """ Building MLA with the given list of tasks """
    # giventask = [[np.random.choice(matrices,size=1)[0]] for i in range(ntask)]
    giventask = [["ImpactZ.in_test1", "matchquad.in_test1"]]
    # giventask = [["big.rua"]]
    data = Data(problem)

    # Seed each task with the default parameter configuration.
    # BUGFIX: `[[Pdefault]] * ntask` shared ONE inner list object across all
    # tasks; build independent copies so mutating one task's history cannot
    # leak into the others.
    Pdefault = [0, 0, 0, 0, 0]
    data.P = [[Pdefault[:]] for _ in range(ntask)]

    if (TUNER_NAME == 'GPTune'):
        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
        NI = len(giventask)
        NS = nrun
        (data, model, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=int(NS / 2))
        print("stats: ", stats)

        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print("    inputfile:%s controlfile:%s" % (data.I[tid][0], data.I[tid][1]))
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid].tolist())
            print('    Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'opentuner'):
        NI = len(giventask)
        NS = nrun
        (data, stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)

        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print("    inputfile:%s controlfile:%s" % (data.I[tid][0], data.I[tid][1]))
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid].tolist())
            print('    Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'hpbandster'):
        NI = len(giventask)
        NS = nrun
        (data, stats) = HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
        print("stats: ", stats)

        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print("    inputfile:%s controlfile:%s" % (data.I[tid][0], data.I[tid][1]))
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid].tolist())
            print('    Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
def main():
    """Single-objective 1-D demo tuner: tune x in [0,1] for tasks t, with
    GPTune, OpenTuner or HpBandSter, optionally plotting the learned
    surrogate against the true objective for every task.
    """
    import matplotlib.pyplot as plt
    global nodes
    global cores

    # Parse command line arguments
    args = parse_args()
    ntask = args.ntask
    nrun = args.nrun
    TUNER_NAME = args.optimization
    perfmodel = args.perfmodel
    plot = args.plot

    (machine, processor, nodes, cores) = GetMachineConfiguration()
    print("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME

    input_space = Space([Real(0., 10., transform="normalize", name="t")])
    parameter_space = Space([Real(0., 1., transform="normalize", name="x")])
    # input_space = Space([Real(0., 0.0001, "uniform", "normalize", name="t")])
    # parameter_space = Space([Real(-1., 1., "uniform", "normalize", name="x")])

    # BUGFIX: the output space previously used transform="normalize" on an
    # unbounded (-Inf, Inf) range, which is ill-defined; the sibling drivers
    # in this file declare the unbounded output without a transform.
    output_space = Space([Real(float('-Inf'), float('Inf'), name="y")])
    constraints = {"cst1": "x >= 0. and x <= 1."}

    if (perfmodel == 1):
        problem = TuningProblem(input_space, parameter_space, output_space, objectives, constraints, models)  # with performance model
    else:
        problem = TuningProblem(input_space, parameter_space, output_space, objectives, constraints, None)  # no performance model

    computer = Computer(nodes=nodes, cores=cores, hosts=None)
    options = Options()
    options['model_restarts'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    options['objective_evaluation_parallelism'] = False
    options['objective_multisample_threads'] = 1
    options['objective_multisample_processes'] = 1
    options['objective_nprocmax'] = 1
    # options['model_processes'] = 1
    # options['model_threads'] = 1
    # options['model_restart_processes'] = 1
    # options['search_multitask_processes'] = 1
    # options['search_multitask_threads'] = 1
    # options['search_threads'] = 16
    # options['mpi_comm'] = None
    # options['mpi_comm'] = mpi4py.MPI.COMM_WORLD
    options['model_class'] = 'Model_LCM'  # 'Model_GPy_LCM'
    options['verbose'] = False
    # options['sample_algo'] = 'MCS'
    # options['sample_class'] = 'SampleLHSMDU'
    options.validate(computer=computer)

    # giventask = [[6]]
    giventask = [[i] for i in np.arange(0, ntask / 2, 0.5).tolist()]
    NI = len(giventask)
    NS = nrun

    TUNER_NAME = os.environ['TUNER_NAME']

    if (TUNER_NAME == 'GPTune'):
        data = Data(problem)
        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
        # BUGFIX: NS1=int(NS/2) is 0 when nrun==1; clamp to at least one
        # initial sample, matching the other drivers in this file.
        (data, modeler, stats) = gt.MLA(NS=NS, Igiven=giventask, NI=NI, NS1=max(NS // 2, 1))
        # (data, modeler, stats) = gt.MLA(NS=NS, Igiven=giventask, NI=NI, NS1=NS-1)
        print("stats: ", stats)

        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print("    t:%f " % (data.I[tid][0]))
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid].tolist())
            print('    Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'opentuner'):
        (data, stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)

        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print("    t:%f " % (data.I[tid][0]))
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid].tolist())
            print('    Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'hpbandster'):
        (data, stats) = HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
        print("stats: ", stats)

        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print("    t:%f " % (data.I[tid][0]))
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid].tolist())
            print('    Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if plot == 1:
        # For each tuned task, sweep x over [0,1), evaluate the true
        # objective, overlay the GPTune surrogate prediction (if available),
        # and save one figure per task.
        # fig = plt.figure(figsize=[12.8, 9.6])
        x = np.arange(0., 1., 0.0001)
        for tid in range(len(data.I)):
            fig = plt.figure(figsize=[12.8, 9.6])
            p = data.I[tid]
            t = p[0]
            I_orig = p
            kwargst = {input_space[k].name: I_orig[k] for k in range(len(input_space))}
            y = np.zeros([len(x), 1])
            y_mean = np.zeros([len(x)])
            y_std = np.zeros([len(x)])
            for i in range(len(x)):
                P_orig = [x[i]]
                kwargs = {parameter_space[k].name: P_orig[k] for k in range(len(parameter_space))}
                kwargs.update(kwargst)
                y[i] = objectives(kwargs)
                if (TUNER_NAME == 'GPTune'):
                    (y_mean[i], var) = predict_aug(modeler, gt, kwargs, tid)
                    y_std[i] = np.sqrt(var)
                    # print(y_mean[i],y_std[i],y[i])
            fontsize = 40
            plt.rcParams.update({'font.size': 40})
            plt.plot(x, y, 'b', lw=2, label='true')
            plt.plot(x, y_mean, 'k', lw=3, zorder=9, label='prediction')
            plt.fill_between(x, y_mean - y_std, y_mean + y_std, alpha=0.2, color='k')
            # print(data.P[tid])
            plt.scatter(data.P[tid], data.O[tid], c='r', s=50, zorder=10, edgecolors=(0, 0, 0), label='sample')
            plt.xlabel('x', fontsize=fontsize + 2)
            plt.ylabel('y(t,x)', fontsize=fontsize + 2)
            plt.title('t=%f' % t, fontsize=fontsize + 2)
            print('t:', t, 'x:', x[np.argmin(y)], 'ymin:', y.min())
            # legend = plt.legend(loc='upper center', shadow=True, fontsize='x-large')
            # legend = plt.legend(loc='upper right', shadow=False, fontsize=fontsize)
            annot_min(x, y)
            # plt.show()
            plt.show(block=False)
            plt.pause(0.5)
            input("Press [enter] to continue.")
            fig.savefig('obj_t_%f.pdf' % t)
def main():
    """STRUMPACK sparse-solver tuning driver: tune the reordering method,
    nested-dissection parameter and GPU stream count for a given matrix,
    using GPTune, OpenTuner or HpBandSter.
    """
    # Command-line arguments
    args = parse_args()
    ntask = args.ntask
    npernode = args.npernode
    optimization = args.optimization
    nrun = args.nrun
    TUNER_NAME = args.optimization

    (machine, processor, nodes, cores) = GetMachineConfiguration()
    print("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME

    # nprocmax = nodes*cores-1  # YL: there is one proc doing spawning, so nodes*cores should be at least 2
    # nprocmin = min(nodes*nprocmin_pernode,nprocmax-1)  # YL: ensure strictly nprocmin<nprocmax, required by the Integer space

    # Task space: candidate matrices (other matrix collections are kept in
    # the git history of this file).
    task_matrices = [
        "matrix_ACTIVSg10k_AC_00.mtx",
        "matrix_ACTIVSg70k_AC_00.mtx",
        "temp_75k.mtx",
    ]
    matrix = Categoricalnorm(task_matrices, transform="onehot", name="matrix")

    # Tuning-parameter space
    sp_reordering_method = Categoricalnorm(['metis', 'parmetis', 'scotch'],
                                           transform="onehot",
                                           name="sp_reordering_method")
    # sp_reordering_method = Categoricalnorm (['metis','geometric'], transform="onehot", name="sp_reordering_method")
    # sp_compression = Categoricalnorm (['none','hss','hodlr','hodbf','blr'], transform="onehot", name="sp_compression")
    # npernode = Integer (0, 5, transform="normalize", name="npernode")
    sp_nd_param = Integer(2, 32, transform="normalize", name="sp_nd_param")
    sp_gpu_streams = Integer(1, 8, transform="normalize", name="sp_gpu_streams")
    # sp_compression_min_sep_size = Integer (2, 5, transform="normalize", name="sp_compression_min_sep_size")
    # sp_compression_min_front_size = Integer (4, 10, transform="normalize", name="sp_compression_min_front_size")
    # sp_compression_leaf_size = Integer (5, 9, transform="normalize", name="sp_compression_leaf_size")
    # sp_compression_rel_tol = Integer(-6, -1, transform="normalize", name="sp_compression_rel_tol")

    # Output space: scalar objective (unbounded)
    result = Real(float("-Inf"), float("Inf"), name="r")

    IS = Space([matrix])
    PS = Space([sp_reordering_method, sp_nd_param, sp_gpu_streams])
    OS = Space([result])

    constraints = {}
    models = {}
    constants = {"nodes": nodes, "cores": cores, "npernode": npernode}

    """ Print all input and parameter samples """
    print(IS, PS, OS, constraints, models)

    problem = TuningProblem(IS, PS, OS, objectives, constraints, None, constants=constants)
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    """ Set and validate options """
    options = Options()
    options['model_processes'] = 1
    # options['model_threads'] = 1
    options['model_restarts'] = 1
    # options['search_multitask_processes'] = 1
    # options['model_restart_processes'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    options['model_class'] = 'Model_GPy_LCM'  # 'Model_GPy_LCM'
    options['verbose'] = False
    options.validate(computer=computer)

    # """ Building MLA with the given list of tasks """
    giventask = [["temp_75k.mtx"]]
    data = Data(problem)
    # the following makes sure the first sample is using default parameters
    data.I = giventask
    data.P = [[['metis', 8, 4]]]

    def _report(tuned, count):
        """Print every sample and the per-task optimum for `count` tasks."""
        for tid in range(count):
            print("tid: %d" % (tid))
            print("    matrix:%s" % (tuned.I[tid][0]))
            print("    Ps ", tuned.P[tid])
            print("    Os ", tuned.O[tid].tolist())
            print('    Popt ', tuned.P[tid][np.argmin(tuned.O[tid])],
                  'Oopt ', min(tuned.O[tid])[0],
                  'nth ', np.argmin(tuned.O[tid]))

    if (TUNER_NAME == 'GPTune'):
        gt = GPTune(problem, computer=computer, data=data, options=options,
                    driverabspath=os.path.abspath(__file__))
        NI = len(giventask)
        NS = nrun
        (data, model, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=max(NS // 2, 1))
        print("stats: ", stats)
        """ Print all input and parameter samples """
        _report(data, NI)

    if (TUNER_NAME == 'opentuner'):
        NI = len(giventask)
        NS = nrun
        (data, stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer,
                                  run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        _report(data, NI)

    if (TUNER_NAME == 'hpbandster'):
        NI = len(giventask)
        NS = nrun
        (data, stats) = HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer,
                                   run_id="HpBandSter", niter=1)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        _report(data, NI)
def main():
    """Multi-fidelity (multi-armed-bandit) 1-D demo tuner. Compares
    GPTuneBand against single-fidelity GPTune/OpenTuner/TPE and the
    multi-fidelity HpBandSter, with matched total evaluation budgets,
    optionally plotting surrogates and cross-task correlation matrices.
    """
    import matplotlib.pyplot as plt

    args = parse_args()
    ntask = args.ntask
    nruns = args.nruns
    TUNER_NAME = args.optimization
    Nloop = args.Nloop
    plot = args.plot
    expid = args.expid
    restart = args.restart

    (machine, processor, nodes, cores) = GetMachineConfiguration()
    print("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME

    input_space = Space([Real(0., 10., transform="normalize", name="t")])
    parameter_space = Space([Real(0., 1., transform="normalize", name="x")])
    # input_space = Space([Real(0., 0.0001, "uniform", "normalize", name="t")])
    # parameter_space = Space([Real(-1., 1., "uniform", "normalize", name="x")])
    output_space = Space([Real(float('-Inf'), float('Inf'), name="y")])
    constraints = {"cst1": "x >= 0. and x <= 1."}

    # problem = TuningProblem(input_space, parameter_space,output_space, objectives, constraints, models)  # with performance model
    problem = TuningProblem(input_space, parameter_space, output_space, objectives, constraints, None)  # no performance model

    computer = Computer(nodes=nodes, cores=cores, hosts=None)
    options = Options()
    options['model_restarts'] = restart
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    options['objective_evaluation_parallelism'] = False
    options['objective_multisample_threads'] = 1
    options['objective_multisample_processes'] = 1
    options['objective_nprocmax'] = 1
    options['model_processes'] = 1
    # options['model_threads'] = 1
    # options['model_restart_processes'] = 1
    # options['search_multitask_processes'] = 1
    # options['search_multitask_threads'] = 1
    # options['search_threads'] = 16
    # options['mpi_comm'] = None
    # options['mpi_comm'] = mpi4py.MPI.COMM_WORLD
    options['model_class'] = 'Model_GPy_LCM'  # 'Model_LCM'
    options['verbose'] = False
    # options['sample_algo'] = 'MCS'
    # options['sample_class'] = 'SampleLHSMDU'
    options.validate(computer=computer)

    # Multi-fidelity budget schedule (Hyperband-style geometric ladder).
    # NOTE(review): bmin, bmax and eta are free names here — they appear to
    # be module-level settings defined outside this function; confirm they
    # are set before main() runs.
    options['budget_min'] = bmin
    options['budget_max'] = bmax
    options['budget_base'] = eta
    smax = int(
        np.floor(
            np.log(options['budget_max'] / options['budget_min'])
            / np.log(options['budget_base'])))
    budgets = [
        options['budget_max'] / options['budget_base']**x
        for x in range(smax + 1)
    ]
    NSs = [
        int((smax + 1) / (s + 1)) * options['budget_base']**s
        for s in range(smax + 1)
    ]
    NSs_all = NSs.copy()
    budget_all = budgets.copy()
    for s in range(smax + 1):
        for n in range(s):
            NSs_all.append(int(NSs[s] / options['budget_base']**(n + 1)))
            budget_all.append(int(budgets[s] * options['budget_base']**(n + 1)))
    Ntotal = int(sum(NSs_all) * Nloop)
    Btotal = int(
        np.dot(np.array(NSs_all), np.array(budget_all)) / options['budget_max']
    )  # total number of evaluations at highest budget -- used for single-fidelity tuners
    print("samples in one multi-armed bandit loop, NSs_all = ", NSs_all)
    print("total number of samples: ", Ntotal)
    print("total number of evaluations at highest budget: ", Btotal)
    print(f"Sampler: {options['sample_class']}, {options['sample_algo']}")
    print()

    data = Data(problem)
    # giventask = [[1.0], [5.0], [10.0]]
    # giventask = [[1.0], [1.2], [1.3]]
    # giventask = [[1.0]]
    # t_end = args.t_end
    giventask = [[i] for i in np.arange(1, ntask / 2 + 1, 0.5).tolist()]
    # giventask = [[i] for i in np.arange(1, 1.5, 0.05).tolist()]
    # giventask = [[1.0], [1.05], [1.1]]
    NI = len(giventask)
    # ROBUSTNESS: explicit check instead of `assert` (asserts are stripped
    # under `python -O`).
    if NI != ntask:
        raise ValueError("number of generated tasks (%d) does not match ntask (%d)" % (NI, ntask))

    np.set_printoptions(suppress=False, precision=3)

    if (TUNER_NAME == 'GPTuneBand'):
        NS = Nloop
        data = Data(problem)
        gt = GPTune_MB(problem, computer=computer, NS=Nloop, options=options)
        (data, stats, data_hist) = gt.MB_LCM(NS=Nloop, Igiven=giventask)
        print("Tuner: ", TUNER_NAME)
        print("Sampler class: ", options['sample_class'])
        print("Model class: ", options['model_class'])
        print("stats: ", stats)

        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(f"    t = {data.I[tid][0]:.2f}")
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid].tolist())
            nth = np.argmin(data.O[tid])
            Popt = data.P[tid][nth]
            # find which arm and which sample the optimal param is from
            # BUGFIX: initialize arm_opt/idx so the print below cannot raise
            # NameError when Popt is not found in any arm's history.
            arm_opt = None
            idx = None
            for arm in range(len(data_hist.P)):
                try:
                    idx = (data_hist.P[arm]).index(Popt)
                    arm_opt = arm
                except ValueError:
                    pass
            print('    Popt ', Popt, 'Oopt ', min(data.O[tid])[0], 'nth ', nth,
                  'nth-bandit (s, nth) = ', (arm_opt, idx))

    if (TUNER_NAME == 'GPTune'):
        NS = Btotal
        if args.nruns > 0:
            NS = args.nruns
            print("In GPTune, using the given number of nruns ", NS)
        NS1 = max(NS // 2, 1)
        gt = GPTune(problem, computer=computer, data=data, options=options,
                    driverabspath=os.path.abspath(__file__))
        """ Building MLA with the given list of tasks """
        (data, modeler, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=NS1)
        print("stats: ", stats)
        print("model class: ", options['model_class'])
        print("Model restart: ", restart)

        """ Print all input and parameter samples """
        sum_Oopt = 0.
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(f"    t: {data.I[tid][0]:.2f} ")
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid])
            print('    Popt ', data.P[tid][np.argmin(data.O[tid])],
                  f'Oopt {min(data.O[tid])[0]:.3f}', 'nth ', np.argmin(data.O[tid]))
            sum_Oopt += min(data.O[tid])[0]
        # print("sum of all optimal objectives", sum_Oopt)

    if (TUNER_NAME == 'opentuner'):
        NS = Btotal
        (data, stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer,
                                  run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)

        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(f"    t: {data.I[tid][0]:.2f} ")
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid])
            # only the first NS evaluations count toward the matched budget
            print('    Popt ', data.P[tid][np.argmin(data.O[tid][:NS])],
                  'Oopt ', min(data.O[tid][:NS])[0], 'nth ', np.argmin(data.O[tid][:NS]))

    # single fidelity version of hpbandster
    if (TUNER_NAME == 'TPE'):
        NS = Btotal
        (data, stats) = HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer,
                                   run_id="HpBandSter", niter=1)
        print("stats: ", stats)

        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(f"    t: {data.I[tid][0]:.2f} ")
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid])
            print('    Popt ', data.P[tid][np.argmin(data.O[tid])],
                  'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    # multi-fidelity version
    if (TUNER_NAME == 'hpbandster'):
        NS = Ntotal
        (data, stats) = HpBandSter_bandit(T=giventask, NS=NS, tp=problem, computer=computer,
                                          options=options, run_id="hpbandster_bandit", niter=1)
        print("Tuner: ", TUNER_NAME)
        print("stats: ", stats)

        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(f"    t: {data.I[tid][0]:.2f} ")
            print("    Ps ", data.P[tid])
            print("    Os ", data.O[tid].tolist())
            # print('    Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
            # Select the optimum among results at the HIGHEST budget seen;
            # ties on budget are broken by the smaller objective value.
            max_budget = 0.
            Oopt = 99999
            Popt = None
            nth = None
            for idx, (config, out) in enumerate(zip(data.P[tid], data.O[tid].tolist())):
                for subout in out[0]:
                    budget_cur = subout[0]
                    if budget_cur > max_budget:
                        max_budget = budget_cur
                        Oopt = subout[1]
                        Popt = config
                        nth = idx
                    elif budget_cur == max_budget:
                        if subout[1] < Oopt:
                            Oopt = subout[1]
                            Popt = config
                            nth = idx
            print('    Popt ', Popt, 'Oopt ', Oopt, 'nth ', nth)

    if plot == 1:
        # Sweep x, evaluate the true objective per task, overlay the GPTune
        # surrogate where available, then compare Pearson correlations of
        # the learned surrogates against those of the true functions.
        x = np.arange(0., 1., 0.0001)
        ymean_set = []  # stores predicted function values
        ytrue_set = []
        for tid in range(len(data.I)):
            p = data.I[tid]
            t = p[0]
            fig = plt.figure(figsize=[12.8, 9.6])
            I_orig = p
            kwargst = {input_space[k].name: I_orig[k] for k in range(len(input_space))}
            y = np.zeros([len(x), 1])
            y_mean = np.zeros([len(x)])
            y_std = np.zeros([len(x)])
            for i in range(len(x)):
                P_orig = [x[i]]
                kwargs = {parameter_space[k].name: P_orig[k] for k in range(len(parameter_space))}
                kwargs.update(kwargst)
                y[i] = objectives(kwargs)
                if (TUNER_NAME == 'GPTune'):
                    (y_mean[i], var) = predict_aug(modeler, gt, kwargs, tid)
                    y_std[i] = np.sqrt(var)
                    # print(y_mean[i],y_std[i],y[i])
            fontsize = 40
            plt.rcParams.update({'font.size': 40})
            plt.plot(x, y, 'b', lw=2, label='true')
            plt.plot(x, y_mean, 'k', lw=3, zorder=9, label='prediction')
            plt.fill_between(x, y_mean - y_std, y_mean + y_std, alpha=0.2, color='k')
            plt.ylim(0, 2)
            # print(data.P[tid])
            plt.scatter(data.P[tid], data.O[tid], c='r', s=50, zorder=10,
                        edgecolors=(0, 0, 0), label='sample')
            plt.xlabel('x', fontsize=fontsize + 2)
            plt.ylabel('y(t,x)', fontsize=fontsize + 2)
            plt.title('t=%f' % t, fontsize=fontsize + 2)
            print('t:', t, 'x:', x[np.argmin(y)], 'ymin:', y.min())
            # legend = plt.legend(loc='upper center', shadow=True, fontsize='x-large')
            legend = plt.legend(loc='upper right', shadow=False, fontsize=fontsize)
            annot_min(x, y)
            # plt.show()
            plt.show(block=False)
            plt.pause(0.5)
            # input("Press [enter] to continue.")
            # fig.savefig('obj_t_%f.eps'%t)
            fig.savefig(f'obj_ntask{NI}_{expid}_tid_{tid}_t_{t:.1f}.pdf')
            ymean_set.append(y_mean)
            ytrue_set.append(y)

        # show the distance among surrogate functions
        R = np.zeros((NI, NI))       # Pearson sample correlation matrix of learned surrogates
        R_true = np.zeros((NI, NI))  # Pearson sample correlation of true functions
        for i in range(NI):
            for ip in range(i, NI):
                ymean_i = ymean_set[i]
                ymean_ip = ymean_set[ip]
                ytrue_i = np.array((ytrue_set[i]).reshape((1, -1)))[0]
                ytrue_ip = np.array((ytrue_set[ip]).reshape((1, -1)))[0]
                # find the Pearson sample correlation coefficient
                R[i, ip], _ = scipy.stats.pearsonr(ymean_i, ymean_ip)
                R_true[i, ip], _ = scipy.stats.pearsonr(ytrue_i, ytrue_ip)
        print("The correlation matrix among surrogate functions is: \n", R)
        print("The correlation matrix among true functions is: \n", R_true)
        new_Rtrue = R_true[np.triu_indices(R_true.shape[0], 1)]
        new_R = R[np.triu_indices(R.shape[0], 1)]
        print("The mean absolute error is: \n", np.mean(abs(new_Rtrue - new_R)))
        print("The mean relative error is: \n",
              np.mean(abs(new_Rtrue - new_R) / abs(new_R)))
def main():
    """Autotune SuperLU_DIST (memory target) with GPTune, OpenTuner or HpBandSter.

    Reads the run configuration from the command line, defines the task /
    tuning / output spaces, restores prior samples from a pickle check point
    when available, runs the selected tuner, and prints the best parameter
    configuration found for each task.
    """
    global ROOTDIR
    global nodes
    global cores
    global target
    global nprocmax
    global nprocmin

    # Parse command line arguments
    args = parse_args()

    # Extract arguments
    ntask = args.ntask
    nodes = args.nodes
    cores = args.cores
    nprocmin_pernode = args.nprocmin_pernode
    machine = args.machine
    optimization = args.optimization
    nruns = args.nruns
    truns = args.truns
    TUNER_NAME = args.optimization
    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME

    nprocmax = nodes*cores-1  # YL: there is one proc doing spawning, so nodes*cores should be at least 2
    nprocmin = min(nodes*nprocmin_pernode, nprocmax-1)  # YL: ensure strictly nprocmin<nprocmax, required by the Integer space

    matrices = ["Si2.bin", "SiH4.bin", "SiNa.bin", "Na5.bin", "benzene.bin", "Si10H16.bin", "Si5H12.bin", "SiO.bin", "Ga3As3H12.bin", "GaAsH6.bin", "H2O.bin"]

    # Task parameters
    matrix = Categoricalnorm(matrices, transform="onehot", name="matrix")

    # Input parameters
    COLPERM = Categoricalnorm(['2', '4'], transform="onehot", name="COLPERM")
    LOOKAHEAD = Integer(5, 20, transform="normalize", name="LOOKAHEAD")
    nprows = Integer(1, nprocmax, transform="normalize", name="nprows")
    nproc = Integer(nprocmin, nprocmax, transform="normalize", name="nproc")
    NSUP = Integer(30, 300, transform="normalize", name="NSUP")
    NREL = Integer(10, 40, transform="normalize", name="NREL")
    result = Real(float("-Inf"), float("Inf"), name="r")

    IS = Space([matrix])
    PS = Space([COLPERM, LOOKAHEAD, nproc, nprows, NSUP, NREL])
    OS = Space([result])

    cst1 = "NSUP >= NREL"
    cst2 = "nproc >= nprows"  # intrinsically implies "p <= nproc"
    constraints = {"cst1": cst1, "cst2": cst2}
    models = {}

    """ Print all input and parameter samples """
    print(IS, PS, OS, constraints, models)

    target = 'memory'
    # target='time'

    problem = TuningProblem(IS, PS, OS, objectives, constraints, None)
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    """ Set and validate options """
    options = Options()
    options['model_processes'] = 1
    # options['model_threads'] = 1
    options['model_restarts'] = 1
    # options['search_multitask_processes'] = 1
    # options['model_restart_processes'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    # FIX: the key used to be 'model_class ' (trailing space), so the setting
    # was silently ignored and GPTune fell back to its default model class.
    options['model_class'] = 'Model_LCM'  # 'Model_GPy_LCM'
    options['verbose'] = False
    options.validate(computer=computer)

    # Checkpoint file name is shared by the load below and the dump after MLA.
    checkpoint = 'Data_nodes_%d_cores_%d_nprocmin_pernode_%d_tasks_%s_machine_%s.pkl' % (nodes, cores, nprocmin_pernode, matrices, machine)

    """ Intialize the tuner with existing data stored as last check point"""
    try:
        # 'with' guarantees the file is closed even if unpickling fails
        with open(checkpoint, 'rb') as f:
            data = pickle.load(f)
        giventask = data.I
    except (OSError, IOError):
        data = Data(problem)
        giventask = [[np.random.choice(matrices, size=1)[0]] for i in range(ntask)]

    # """ Building MLA with the given list of tasks """
    # giventask = [["big.rua"]]
    # NOTE(review): the two lines below unconditionally override the check
    # point restored above, pinning the run to one fixed task -- confirm this
    # is intentional before relying on the checkpointing logic.
    giventask = [["Si2.bin"]]
    data = Data(problem)

    if(TUNER_NAME=='GPTune'):
        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
        NI = len(giventask)
        NS = nruns
        (data, model, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=max(NS//2, 1))
        print("stats: ", stats)

        """ Dump the data to file as a new check point """
        with open(checkpoint, 'wb') as f:
            pickle.dump(data, f)
        """ Dump the tuner to file for TLA use """
        with open('MLA_nodes_%d_cores_%d_nprocmin_pernode_%d_tasks_%s_machine_%s.pkl' % (nodes, cores, nprocmin_pernode, matrices, machine), 'wb') as f:
            pickle.dump(gt, f)

        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d"%(tid))
            print(" matrix:%s"%(data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if(TUNER_NAME=='opentuner'):
        # FIX: loop bound must match the tasks actually handed to the tuner;
        # args.ntask can exceed len(giventask) and would index past the data.
        NI = len(giventask)
        NS = nruns
        (data,stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d"%(tid))
            print(" matrix:%s"%(data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if(TUNER_NAME=='hpbandster'):
        NI = len(giventask)  # FIX: was args.ntask; see note above
        NS = nruns
        (data,stats)=HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d"%(tid))
            print(" matrix:%s"%(data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
def main():
    """Multi-fidelity autotuning of the NIMROD code.

    Supports GPTuneBand (multi-armed-bandit GPTune), plain GPTune, OpenTuner,
    HpBandSter and TPE. A Hyperband-style budget schedule is derived from
    (bmin, bmax, eta); the single-fidelity tuners receive an equivalent total
    budget (Btotal) so comparisons are fair.
    """
    # Parse command line arguments
    args = parse_args()
    ntask = args.ntask
    Nloop = args.Nloop
    bmin = args.bmin
    bmax = args.bmax
    eta = args.eta
    TUNER_NAME = args.optimization
    (machine, processor, nodes, cores) = GetMachineConfiguration()
    print("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
    nstepmax = args.nstepmax
    nstepmin = args.nstepmin
    os.environ['TUNER_NAME'] = TUNER_NAME

    # Tuning parameters
    NSUP = Integer(30, 300, transform="normalize", name="NSUP")
    NREL = Integer(10, 40, transform="normalize", name="NREL")
    nbx = Integer(1, 3, transform="normalize", name="nbx")
    nby = Integer(1, 3, transform="normalize", name="nby")
    # Output (objective)
    time = Real(float("-Inf"), float("Inf"), transform="normalize", name="time")
    # Task parameters
    lphi = Integer(2, 3, transform="normalize", name="lphi")
    mx = Integer(5, 6, transform="normalize", name="mx")
    my = Integer(7, 8, transform="normalize", name="my")

    IS = Space([mx, my, lphi])
    PS = Space([NSUP, NREL, nbx, nby])
    OS = Space([time])
    cst1 = "NSUP >= NREL"
    constraints = {"cst1": cst1}
    models = {}
    constants = {"nodes": nodes, "cores": cores, "nstepmin": nstepmin, "nstepmax": nstepmax, "bmin": bmin, "bmax": bmax, "eta": eta}

    """ Print all input and parameter samples """
    print(IS, PS, OS, constraints, models)

    # Stage the NIMROD binary and input decks into the working directory
    BINDIR = os.path.abspath("/project/projectdirs/m2957/liuyangz/my_research/nimrod/nimdevel_spawn/build_haswell_gnu_openmpi/bin")
    RUNDIR = os.path.abspath("/project/projectdirs/m2957/liuyangz/my_research/nimrod/nimrod_input")
    os.system("cp %s/nimrod.in ./nimrod_template.in"%(RUNDIR))
    os.system("cp %s/fluxgrid.in ."%(RUNDIR))
    os.system("cp %s/g163518.03130 ."%(RUNDIR))
    os.system("cp %s/p163518.03130 ."%(RUNDIR))
    os.system("cp %s/nimset ."%(RUNDIR))
    os.system("cp %s/nimrod ./nimrod_spawn"%(BINDIR))

    problem = TuningProblem(IS, PS, OS, objectives, constraints, None, constants=constants)
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    """ Set and validate options """
    options = Options()
    options['model_restarts'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    options['objective_evaluation_parallelism'] = False
    options['objective_multisample_threads'] = 1
    options['objective_multisample_processes'] = 1
    options['objective_nprocmax'] = 1
    options['model_processes'] = 1
    options['model_class'] = 'Model_LCM' if args.LCMmodel == 'LCM' else 'Model_GPy_LCM'  # Model_GPy_LCM or Model_LCM
    options['verbose'] = True
    options['sample_class'] = 'SampleLHSMDU'
    options['sample_algo'] = 'LHS-MDU'
    options.validate(computer=computer)

    # NOTE(review): the budget options are assigned after validate(); confirm
    # Options.validate does not need to check them.
    options['budget_min'] = bmin
    options['budget_max'] = bmax
    options['budget_base'] = eta
    # Hyperband-style schedule: smax+1 brackets with geometrically decreasing
    # budgets and correspondingly larger sample counts.
    smax = int(np.floor(np.log(options['budget_max']/options['budget_min'])/np.log(options['budget_base'])))
    budgets = [options['budget_max'] /options['budget_base']**x for x in range(smax+1)]
    NSs = [int((smax+1)/(s+1))*options['budget_base']**s for s in range(smax+1)]
    NSs_all = NSs.copy()
    budget_all = budgets.copy()
    for s in range(smax+1):
        for n in range(s):
            NSs_all.append(int(NSs[s]/options['budget_base']**(n+1)))
            budget_all.append(int(budgets[s]*options['budget_base']**(n+1)))
    Ntotal = int(sum(NSs_all) * Nloop)
    # total number of evaluations at highest budget -- used for single-fidelity tuners
    Btotal = int(np.dot(np.array(NSs_all), np.array(budget_all))/options['budget_max']*Nloop)
    print(f"bmin = {bmin}, bmax = {bmax}, eta = {eta}, smax = {smax}")
    print("samples in one multi-armed bandit loop, NSs_all = ", NSs_all)
    print("total number of samples: ", Ntotal)
    print("total number of evaluations at highest budget: ", Btotal)
    print(f"Sampler: {options['sample_class']}, {options['sample_algo']}")
    print()

    data = Data(problem)
    giventask = [[6,8,2]]
    Pdefault = [128,20,2,2]
    NI=len(giventask)
    assert NI == ntask # make sure number of tasks match

    np.set_printoptions(suppress=False, precision=4)
    if(TUNER_NAME=='GPTuneBand'):
        NS = Nloop
        data = Data(problem)
        gt = GPTune_MB(problem, computer=computer, NS=Nloop, options=options)
        (data, stats, data_hist)=gt.MB_LCM(NS = Nloop, Igiven = giventask, Pdefault=Pdefault)
        print("Tuner: ", TUNER_NAME)
        print("Sampler class: ", options['sample_class'])
        print("Model class: ", options['model_class'])
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" mx:%s my:%s lphi:%s"%(data.I[tid][0],data.I[tid][1],data.I[tid][2]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            nth = np.argmin(data.O[tid])
            Popt = data.P[tid][nth]
            # find which arm and which sample the optimal param is from
            # FIX: initialize so a failed search cannot raise NameError below
            arm_opt = None
            idx = None
            for arm in range(len(data_hist.P)):
                try:
                    idx = (data_hist.P[arm]).index(Popt)
                    arm_opt = arm
                except ValueError:
                    pass
            print(' Popt ', Popt, 'Oopt ', min(data.O[tid])[0], 'nth ', nth, 'nth-bandit (s, nth) = ', (arm_opt, idx))

    if(TUNER_NAME=='GPTune'):
        NS = Btotal
        if args.nrun > 0:
            NS = args.nrun
        NS1 = max(NS//2, 1)
        data.I = giventask
        # FIX: build a fresh inner list per task; [[Pdefault]] * NI aliased
        # one list object across all tasks, so mutating one mutated all.
        data.P = [[Pdefault] for _ in range(NI)]
        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
        """ Building MLA with the given list of tasks """
        (data, model, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=NS1)
        print("stats: ", stats)
        print("Sampler class: ", options['sample_class'], "Sample algo:", options['sample_algo'])
        print("Model class: ", options['model_class'])
        if options['model_class'] == 'Model_LCM' and NI > 1:
            print("Get correlation metric ... ")
            C = model[0].M.kern.get_correlation_metric()
            print("The correlation matrix C is \n", C)
        elif options['model_class'] == 'Model_GPy_LCM' and NI > 1:
            print("Get correlation metric ... ")
            C = model[0].get_correlation_metric(NI)
            print("The correlation matrix C is \n", C)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" mx:%s my:%s lphi:%s"%(data.I[tid][0],data.I[tid][1],data.I[tid][2]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], f'Oopt {min(data.O[tid])[0]:.3f}', 'nth ', np.argmin(data.O[tid]))

    if(TUNER_NAME=='opentuner'):
        NS = Btotal
        (data,stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" mx:%s my:%s lphi:%s"%(data.I[tid][0],data.I[tid][1],data.I[tid][2]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            # OpenTuner may record more than NS evaluations; only count the first NS
            print(' Popt ', data.P[tid][np.argmin(data.O[tid][:NS])], 'Oopt ', min(data.O[tid][:NS])[0], 'nth ', np.argmin(data.O[tid][:NS]))

    if(TUNER_NAME=='hpbandster'):
        NS = Btotal
        (data,stats)=HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" mx:%s my:%s lphi:%s"%(data.I[tid][0],data.I[tid][1],data.I[tid][2]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if(TUNER_NAME=='TPE'):
        NS = Ntotal
        (data,stats)=callhpbandster_bandit.HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, options=options, run_id="hpbandster_bandit", niter=1)
        print("Tuner: ", TUNER_NAME)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" mx:%s my:%s lphi:%s"%(data.I[tid][0],data.I[tid][1],data.I[tid][2]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            # Multi-fidelity output: each entry holds (budget, value) pairs.
            # The best sample is the lowest value observed at the highest budget.
            max_budget = 0.
            Oopt = 99999
            Popt = None
            nth = None
            for idx, (config, out) in enumerate(zip(data.P[tid], data.O[tid].tolist())):
                for subout in out[0]:
                    budget_cur = subout[0]
                    if budget_cur > max_budget:
                        max_budget = budget_cur
                        Oopt = subout[1]
                        Popt = config
                        nth = idx
                    elif budget_cur == max_budget:
                        if subout[1] < Oopt:
                            Oopt = subout[1]
                            Popt = config
                            nth = idx
            print(' Popt ', Popt, 'Oopt ', Oopt, 'nth ', nth)
def main():
    """Autotune SuperLU_DIST for time or memory, with optional transfer learning.

    Runs MLA on two fixed tasks with the selected tuner; when --tla 1 and the
    GPTune tuner is used, the learned LCM model is reused via TLA1 to predict
    an optimum for a new task without further function evaluations.
    """
    # Parse command line arguments
    args = parse_args()

    # Extract arguments
    tla = args.tla
    ntask = args.ntask
    nprocmin_pernode = args.nprocmin_pernode
    optimization = args.optimization
    nrun = args.nrun
    obj = args.obj
    target = obj  # objective to tune: 'time' or 'memory'
    TUNER_NAME = args.optimization
    (machine, processor, nodes, cores) = GetMachineConfiguration()
    print("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME
    nprocmax = nodes * cores

    matrices = [
        "big.rua", "g20.rua", "Si2.bin", "SiH4.bin", "SiNa.bin", "Na5.bin",
        "benzene.bin", "Si10H16.bin", "Si5H12.bin", "SiO.bin", "Ga3As3H12.bin",
        "GaAsH6.bin", "H2O.bin"
    ]

    # Task parameters
    matrix = Categoricalnorm(matrices, transform="onehot", name="matrix")

    # Input parameters
    COLPERM = Categoricalnorm(['2', '4'], transform="onehot", name="COLPERM")
    LOOKAHEAD = Integer(5, 20, transform="normalize", name="LOOKAHEAD")
    nprows = Integer(1, nprocmax, transform="normalize", name="nprows")
    # npernode is tuned on a log2 scale
    npernode = Integer(int(math.log2(nprocmin_pernode)), int(math.log2(cores)), transform="normalize", name="npernode")
    NSUP = Integer(30, 300, transform="normalize", name="NSUP")
    NREL = Integer(10, 40, transform="normalize", name="NREL")
    # NOTE(review): if args.obj is neither 'time' nor 'memory', `result`
    # stays unbound and Space([result]) raises NameError -- confirm parse_args
    # restricts the choices.
    if (target == 'time'):
        result = Real(float("-Inf"), float("Inf"), name="time")
    if (target == 'memory'):
        result = Real(float("-Inf"), float("Inf"), name="memory")

    IS = Space([matrix])
    PS = Space([COLPERM, LOOKAHEAD, npernode, nprows, NSUP, NREL])
    OS = Space([result])
    # cst1/cst2 are presumably module-level constraint strings -- not defined
    # in this function; verify they exist at module scope.
    constraints = {"cst1": cst1, "cst2": cst2}
    models = {}
    constants = {"nodes": nodes, "cores": cores, "target": target}

    """ Print all input and parameter samples """
    print(IS, PS, OS, constraints, models)

    problem = TuningProblem(IS, PS, OS, objectives, constraints, None, constants=constants)
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    """ Set and validate options """
    options = Options()
    options['model_processes'] = 1
    # options['model_threads'] = 1
    options['model_restarts'] = 1
    # options['search_multitask_processes'] = 1
    # options['model_restart_processes'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    # FIX: key was 'model_class ' (trailing space), so this option was
    # silently ignored and the default model class was used instead.
    options['model_class'] = 'Model_LCM'  # 'Model_GPy_LCM'
    options['verbose'] = False
    options.validate(computer=computer)

    # """ Building MLA with the given list of tasks """
    # giventask = [[np.random.choice(matrices,size=1)[0]] for i in range(ntask)]
    giventask = [["big.rua"], ["g20.rua"]]
    # giventask = [["big.rua"]]
    data = Data(problem)

    if (TUNER_NAME == 'GPTune'):
        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
        NI = len(giventask)
        NS = nrun
        (data, model, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=max(NS // 2, 1))
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" matrix:%s" % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
        if (tla == 1):
            """ Call TLA for a new task using the constructed LCM model"""
            newtask = [["big.rua"]]
            # newtask = [["H2O.rb"]]
            (aprxopts, objval, stats) = gt.TLA1(newtask, NS=None)
            print("stats: ", stats)
            """ Print the optimal parameters and function evaluations"""
            for tid in range(len(newtask)):
                print("new task: %s" % (newtask[tid]))
                print(' predicted Popt: ', aprxopts[tid], ' objval: ', objval[tid])

    if (TUNER_NAME == 'opentuner'):
        NI = len(giventask)
        NS = nrun
        (data, stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" matrix:%s" % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'hpbandster'):
        NI = len(giventask)
        NS = nrun
        (data, stats) = HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" matrix:%s" % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
def main():
    """Autotune the ScaLAPACK pdqrdriver (QR factorization) benchmark.

    Copies the driver binary into place, defines the (m, n) task space and the
    (b, nproc, p) tuning space with performance models, restores any prior
    samples from a pickle check point, and runs the selected tuner.
    """
    global ROOTDIR
    global nodes
    global cores
    global JOBID
    global nprocmax
    global nprocmin

    # Parse command line arguments
    args = parse_args()
    mmax = args.mmax
    nmax = args.nmax
    ntask = args.ntask
    nodes = args.nodes
    cores = args.cores
    nprocmin_pernode = args.nprocmin_pernode
    machine = args.machine
    nruns = args.nruns
    truns = args.truns
    JOBID = args.jobid
    TUNER_NAME = args.optimization
    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME
    os.system("mkdir -p scalapack-driver/bin/%s; cp ../build/pdqrdriver scalapack-driver/bin/%s/.;" % (machine, machine))

    nprocmax = nodes * cores - 1  # YL: there is one proc doing spawning, so nodes*cores should be at least 2
    nprocmin = min(nodes * nprocmin_pernode, nprocmax - 1)  # YL: ensure strictly nprocmin<nprocmax, required by the Integer space

    mmin = 128
    nmin = 128

    # Task parameters: matrix dimensions
    m = Integer(mmin, mmax, transform="normalize", name="m")
    n = Integer(nmin, nmax, transform="normalize", name="n")
    # Tuning parameters: block size, process count, process-grid rows
    b = Integer(4, 16, transform="normalize", name="b")
    nproc = Integer(nprocmin, nprocmax, transform="normalize", name="nproc")
    p = Integer(0, nprocmax, transform="normalize", name="p")
    r = Real(float("-Inf"), float("Inf"), name="r")

    IS = Space([m, n])
    PS = Space([b, nproc, p])
    OS = Space([r])
    cst1 = "b*8 * p <= m"
    cst2 = "b*8 * nproc <= n * p"
    cst3 = "nproc >= p"
    constraints = {"cst1": cst1, "cst2": cst2, "cst3": cst3}
    print(IS, PS, OS, constraints)

    problem = TuningProblem(IS, PS, OS, objectives, constraints, models)  # use performance models
    # problem = TuningProblem(IS, PS, OS, objectives, constraints, None)
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    """ Set and validate options """
    options = Options()
    options['model_processes'] = 1
    # options['model_threads'] = 1
    options['model_restarts'] = 1
    # options['search_multitask_processes'] = 1
    # options['model_restart_processes'] = 1
    # options['model_restart_threads'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    # options['mpi_comm'] = None
    # FIX: key was 'model_class ' (trailing space), so the setting was
    # silently ignored and the default model class was used instead.
    options['model_class'] = 'Model_LCM'
    options['verbose'] = False
    options.validate(computer=computer)

    """ Intialize the tuner with existing data stored as last check point"""
    try:
        # 'with' guarantees the file handle is closed even on unpickling errors
        with open('Data_nodes_%d_cores_%d_mmax_%d_nmax_%d_machine_%s_jobid_%d.pkl' % (nodes, cores, mmax, nmax, machine, JOBID), 'rb') as f:
            data = pickle.load(f)
        giventask = data.I
    except (OSError, IOError):
        data = Data(problem)
        giventask = [[randint(mmin, mmax), randint(nmin, nmax)] for i in range(ntask)]
        # giventask = [[5000, 5000]]
        # # giventask = [[177, 1303],[367, 381],[1990, 1850],[1123, 1046],[200, 143],[788, 1133],[286, 1673],[1430, 512],[1419, 1320],[622, 263] ]

    if (TUNER_NAME == 'GPTune'):
        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
        """ Building MLA with NI random tasks """
        NI = ntask
        NS = nruns
        (data, model, stats) = gt.MLA(NS=NS, Igiven=giventask, NI=NI, NS1=max(NS // 2, 1))
        print("stats: ", stats)

        # """ Dump the data to file as a new check point """
        # pickle.dump(data, open('Data_nodes_%d_cores_%d_mmax_%d_nmax_%d_machine_%s_jobid_%d.pkl' % (nodes, cores, mmax, nmax, machine, JOBID), 'wb'))
        # """ Dump the tuner to file for TLA use """
        # pickle.dump(gt, open('MLA_nodes_%d_cores_%d_mmax_%d_nmax_%d_machine_%s_jobid_%d.pkl' % (nodes, cores, mmax, nmax, machine, JOBID), 'wb'))

        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" m:%d n:%d" % (data.I[tid][0], data.I[tid][1]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'opentuner'):
        NI = ntask
        NS = nruns
        (data, stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" m:%d n:%d" % (data.I[tid][0], data.I[tid][1]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'hpbandster'):
        NI = ntask
        NS = nruns
        (data, stats) = HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" m:%d n:%d" % (data.I[tid][0], data.I[tid][1]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
def main():
    """Multi-objective (error, training_time) autotuning of a kernel-regression
    code on one dataset, using GPTune's NSGA-II search, OpenTuner, HpBandSter,
    or pure random sampling; Pareto fronts are reported via pygmo.
    """
    # Parse command line arguments
    args = parse_args()

    # Extract arguments
    ntask = args.ntask
    npernode = args.npernode
    optimization = args.optimization
    nrun = args.nrun
    print("NPERNODE: ", npernode)
    dataset = args.dataset
    TUNER_NAME = args.optimization
    (machine, processor, nodes, cores) = GetMachineConfiguration()
    print("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME

    #datafiles = ["data/susy_10Kn"]
    datafiles = [dataset]

    # Task input parameters
    datafile = Categoricalnorm(datafiles, transform="onehot", name="datafile")

    # Tuning parameters (model related)
    h = Real(-10, 10, transform="normalize", name="h")
    Lambda = Real(-10, 10, transform="normalize", name="Lambda")
    # Tuning parameters (randomized algorithm related)
    p = Integer(1, 50, transform="normalize", name="p")  # oversampling parameter
    vann = Integer(1, 256, transform="normalize", name="vann")  # number of approximate nearest neighbors
    epow = Integer(-7, -1, transform="normalize", name="epow")  # relative compression tolerance
    #e = Categoricalnorm(["1e-7","1e-6","1e-5","1e-4","1e-3","1e-2","1e-1"], transform="onehot", name="e")  # relative compression tolerance

    # Two objectives: both minimized
    error = Real(0, float("Inf"), name="error")
    training_time = Real(0, float("Inf"), name="training_time")

    IS = Space([datafile])
    PS = Space([h, Lambda, p, vann, epow])
    OS = Space([error, training_time])
    constraints = {}
    models = {}
    constants = {"nodes": nodes, "cores": cores, "npernode": npernode}

    """ Print all input and parameter samples """
    print(IS, PS, OS, constraints, models)

    problem = TuningProblem(IS, PS, OS, objectives, constraints, constants=constants)
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    """ Set and validate options """
    options = Options()
    options['model_processes'] = 1
    # options['model_threads'] = 1
    options['model_restarts'] = 1
    # options['search_multitask_processes'] = 1
    # options['model_restart_processes'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    options['model_class'] = 'Model_LCM'  # 'Model_GPy_LCM'
    options['verbose'] = False
    # MO (multi-objective search settings)
    options['search_algo'] = 'nsga2'  #'maco' #'moead' #'nsga2' #'nspso'
    options['search_pop_size'] = 1000
    options['search_gen'] = 10
    options['search_more_samples'] = 4
    options.validate(computer=computer)

    # """ Building MLA with the given list of tasks """
    giventask = [[dataset]]
    data = Data(problem)

    if (TUNER_NAME == 'GPTune'):
        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
        NI = len(giventask)
        NS = nrun
        (data, model, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=max(NS // 2, 1))
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" matrix:%s" % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            #print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', -min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

        """ Print all input and parameter samples """
        import pygmo as pg
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" matrix:%s" % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            # Report the first non-dominated front (the Pareto set)
            ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(data.O[tid])
            front = ndf[0]
            # print('front id: ',front)
            fopts = data.O[tid][front]
            xopts = [data.P[tid][i] for i in front]
            print(' Popts ', xopts)
            print(' Oopts ', fopts.tolist())

    if (TUNER_NAME == 'opentuner'):
        # FIX: loop bound must match the tasks actually given to the tuner
        # (len(giventask) == 1); args.ntask could be larger and the loop
        # below would then index past the returned data.
        NI = len(giventask)
        NS = nrun
        (data, stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" matrix:%s" % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', -min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'hpbandster'):
        NI = len(giventask)  # FIX: was args.ntask; see note above
        NS = nrun
        (data, stats) = HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" matrix:%s" % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', -min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'Random'):
        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
        NI = len(giventask)
        NS = nrun
        # NS1=NS => every sample is random (no Bayesian-optimization phase)
        (data, model, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=NS)
        print("stats: ", stats)
        """ Print all input and parameter samples """
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" matrix:%s" % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            #print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', -min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

        """ Print all input and parameter samples """
        import pygmo as pg
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" matrix:%s" % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(data.O[tid])
            front = ndf[0]
            # print('front id: ',front)
            fopts = data.O[tid][front]
            xopts = [data.P[tid][i] for i in front]
            print(' Popts ', xopts)
            print(' Oopts ', fopts.tolist())
def main():
    """Autotune ScaLAPACK PDGEQRF (pdqrdriver) over (mb, nb, npernode, p).

    Reads matrix-size bounds and tuner settings from the command line, locates
    the pdqrdriver binary, builds the GPTune tuning problem, and runs the
    selected tuner (GPTune MLA, OpenTuner, or HpBandSter). With --tla 1 it also
    runs transfer learning (TLA1) on two new tasks using the fitted LCM model.
    """
    global JOBID

    # Parse command line arguments
    args = parse_args()
    mmax = args.mmax
    nmax = args.nmax
    ntask = args.ntask
    nprocmin_pernode = args.nprocmin_pernode
    nrun = args.nrun
    truns = args.truns
    tla = args.tla
    JOBID = args.jobid
    TUNER_NAME = args.optimization

    (machine, processor, nodes, cores) = GetMachineConfiguration()
    print("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME
    os.system("mkdir -p scalapack-driver/bin/%s;" % (machine))

    # Locate the pdqrdriver binary: try the build tree first, then
    # $GPTUNE_INSTALL_PATH, then any "gptune" entry on sys.path.
    DRIVERFOUND = False
    INSTALLDIR = os.getenv('GPTUNE_INSTALL_PATH')
    DRIVER = os.path.abspath(__file__ + "/../../../build/pdqrdriver")
    if (os.path.exists(DRIVER)):
        DRIVERFOUND = True
    elif (INSTALLDIR is not None):
        DRIVER = INSTALLDIR + "/gptune/pdqrdriver"
        if (os.path.exists(DRIVER)):
            DRIVERFOUND = True
    else:
        for p in sys.path:
            if ("gptune" in p):
                DRIVER = p + "/pdqrdriver"
                if (os.path.exists(DRIVER)):
                    DRIVERFOUND = True
                    break
    if (DRIVERFOUND == True):
        os.system("cp %s scalapack-driver/bin/%s/.;" % (DRIVER, machine))
    else:
        raise Exception(f"pdqrdriver cannot be located. Try to set env variable GPTUNE_INSTALL_PATH correctly.")

    nprocmax = nodes * cores
    bunit = 8  # the block size is multiple of bunit
    mmin = 128
    nmin = 128

    # Task (input) and tuning (parameter) space definitions.
    m = Integer(mmin, mmax, transform="normalize", name="m")
    n = Integer(nmin, nmax, transform="normalize", name="n")
    mb = Integer(1, 16, transform="normalize", name="mb")
    nb = Integer(1, 16, transform="normalize", name="nb")
    npernode = Integer(int(math.log2(nprocmin_pernode)), int(math.log2(cores)), transform="normalize", name="npernode")
    p = Integer(1, nprocmax, transform="normalize", name="p")
    r = Real(float("-Inf"), float("Inf"), name="r")

    IS = Space([m, n])
    PS = Space([mb, nb, npernode, p])
    OS = Space([r])
    constraints = {"cst1": cst1, "cst2": cst2, "cst3": cst3}
    constants = {"nodes": nodes, "cores": cores, "bunit": bunit}
    print(IS, PS, OS, constraints)

    problem = TuningProblem(IS, PS, OS, objectives, constraints, None, constants=constants)
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    # Set and validate options.
    options = Options()
    options['model_processes'] = 1
    # options['model_threads'] = 1
    options['model_restarts'] = 1
    # options['search_multitask_processes'] = 1
    # options['model_restart_processes'] = 1
    # options['model_restart_threads'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    # options['mpi_comm'] = None
    options['model_class'] = 'Model_LCM'
    options['verbose'] = False
    options.validate(computer=computer)

    seed(1)  # deterministic random task list for reproducibility
    if ntask == 1:
        giventask = [[mmax, nmax]]
    elif ntask == 2:
        giventask = [[mmax, nmax], [int(mmax / 2), int(nmax / 2)]]
    else:
        giventask = [[randint(mmin, mmax), randint(nmin, nmax)] for i in range(ntask)]
    # giventask = [[2000, 2000]]
    # giventask = [[177, 1303],[367, 381],[1990, 1850],[1123, 1046],[200, 143],[788, 1133],[286, 1673],[1430, 512],[1419, 1320],[622, 263]]
    # giventask = [[177, 1303],[367, 381]]
    ntask = len(giventask)
    data = Data(problem)

    if (TUNER_NAME == 'GPTune'):
        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
        # Building MLA with the given list of tasks.
        NI = len(giventask)
        NS = nrun
        (data, model, stats) = gt.MLA(NS=NS, Igiven=giventask, NI=NI, NS1=max(NS // 2, 1))
        # (data, model, stats) = gt.MLA_LoadModel(NS=10, Igiven=giventask)
        print("stats: ", stats)
        # Print all input and parameter samples.
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" m:%d n:%d" % (data.I[tid][0], data.I[tid][1]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
        if (tla == 1):
            # Call TLA for 2 new tasks using the constructed LCM model.
            newtask = [[400, 500], [800, 600]]
            (aprxopts, objval, stats) = gt.TLA1(newtask, NS=None)
            print("stats: ", stats)
            # Print the optimal parameters and function evaluations.
            for tid in range(len(newtask)):
                print("new task: %s" % (newtask[tid]))
                print(' predicted Popt: ', aprxopts[tid], ' objval: ', objval[tid])

    if (TUNER_NAME == 'opentuner'):
        NI = len(giventask)
        NS = nrun
        (data, stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)
        # Print all input and parameter samples.
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" m:%d n:%d" % (data.I[tid][0], data.I[tid][1]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'hpbandster'):
        NI = len(giventask)
        NS = nrun
        (data, stats) = HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
        print("stats: ", stats)
        # Print all input and parameter samples.
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" m:%d n:%d" % (data.I[tid][0], data.I[tid][1]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
def main():
    """Autotune the frequency parameter of an RF-cavity geometry model.

    Builds a single-task GPTune problem whose task is a categorical geometry
    model and whose tuning parameter is an integer frequency, then runs the
    tuner selected by --optimization (GPTune MLA, OpenTuner, or HpBandSter).
    """
    # Parse command line arguments
    args = parse_args()

    # Extract arguments
    ntask = args.ntask
    nthreads = args.nthreads
    optimization = args.optimization
    nrun = args.nrun
    TUNER_NAME = args.optimization

    (machine, processor, nodes, cores) = GetMachineConfiguration()
    print("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME

    # Task parameters: which geometry model to tune.
    geomodels = ["cavity_5cell_30K_feko", "pillbox_4000", "pillbox_1000", "cavity_wakefield_4K_feko", "cavity_rec_5K_feko", "cavity_rec_17K_feko"]
    # geomodels = ["cavity_wakefield_4K_feko"]
    model = Categoricalnorm(geomodels, transform="onehot", name="model")

    # Input parameters
    # the frequency resolution is 100Khz
    # freq = Integer(22000, 23500, transform="normalize", name="freq")
    # freq = Integer(6320, 6430, transform="normalize", name="freq")
    # freq = Integer(21000, 22800, transform="normalize", name="freq")
    freq = Integer(11400, 12000, transform="normalize", name="freq")
    # freq = Integer(500, 900, transform="normalize", name="freq")
    result1 = Real(float("-Inf"), float("Inf"), name="r1")

    IS = Space([model])
    PS = Space([freq])
    OS = Space([result1])
    constraints = {}
    models = {}
    constants = {"nodes": nodes, "cores": cores, "nthreads": nthreads}

    # Print all input and parameter samples.
    print(IS, PS, OS, constraints, models)
    problem = TuningProblem(IS, PS, OS, objectives, constraints, None, constants=constants)
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    # Set and validate options.
    options = Options()
    options['model_processes'] = 1
    # options['model_threads'] = 1
    options['model_restarts'] = 1
    # options['search_multitask_processes'] = 1
    # options['model_restart_processes'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    # BUG FIX: the key used to be 'model_class ' (note the trailing space), so
    # this setting was silently never applied and the default model was used.
    options['model_class'] = 'Model_LCM'  # 'Model_GPy_LCM'
    options['verbose'] = False
    # options['search_algo'] = 'nsga2'  # 'maco' #'moead' #'nsga2' #'nspso'
    # options['search_pop_size'] = 1000  # 1000
    # options['search_gen'] = 10
    options.validate(computer=computer)

    # Building MLA with the given list of tasks.
    # giventask = [["pillbox_4000"]]
    giventask = [["pillbox_1000"]]
    # giventask = [["cavity_5cell_30K_feko"]]
    # giventask = [["cavity_rec_17K_feko"]]
    # giventask = [["cavity_wakefield_4K_feko"]]
    data = Data(problem)

    if (TUNER_NAME == 'GPTune'):
        gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
        NI = len(giventask)
        NS = nrun
        (data, model, stats) = gt.MLA(NS=NS, NI=NI, Igiven=giventask, NS1=max(NS // 2, 1))
        print("stats: ", stats)
        # Print all input and parameter samples.
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" model:%s" % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            OL = np.asarray([o[0] for o in data.O[tid]], dtype=np.float64)
            np.set_printoptions(suppress=False, precision=8)
            print(" Os ", OL)
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
            # ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(data.O[tid])
            # front = ndf[0]
            # # print('front id: ',front)
            # fopts = data.O[tid][front]
            # xopts = [data.P[tid][i] for i in front]
            # print(' Popts ', xopts)
            # print(' Oopts ', fopts)

    if (TUNER_NAME == 'opentuner'):
        # BUG FIX: was NI = ntask; the report loop must match the number of
        # tasks actually handed to the tuner, or it indexes past data.I when
        # --ntask > len(giventask).
        NI = len(giventask)
        NS = nrun
        (data, stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)
        # Print all input and parameter samples.
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" matrix:%s" % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'hpbandster'):
        # BUG FIX: was NI = ntask (see the opentuner branch above).
        NI = len(giventask)
        NS = nrun
        (data, stats) = HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
        print("stats: ", stats)
        # Print all input and parameter samples.
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" matrix:%s" % (data.I[tid][0]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid])
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
def main():
    """Autotune ScaLAPACK PDGEQRF (pdqrdriver) over (b, npernode, p).

    Like the mb/nb variant but with a single block-size parameter b, and with
    optional analytic performance models (--perfmodel 1) whose coefficients
    c0..c4 are carried per task in Data.D and refreshed via models_update.
    """
    global nodes
    global cores
    global bunit
    global JOBID

    # Parse command line arguments
    args = parse_args()
    mmax = args.mmax
    nmax = args.nmax
    ntask = args.ntask
    nprocmin_pernode = args.nprocmin_pernode
    nrun = args.nrun
    nrun1 = args.nrun1
    if (nrun1 is None):
        nrun1 = max(nrun // 2, 1)  # default: half the budget for the pilot phase
    truns = args.truns
    JOBID = args.jobid
    TUNER_NAME = args.optimization
    perfmodel = args.perfmodel

    (machine, processor, nodes, cores) = GetMachineConfiguration()
    print("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
    os.environ['MACHINE_NAME'] = machine
    os.environ['TUNER_NAME'] = TUNER_NAME
    os.system("mkdir -p scalapack-driver/bin/%s;" % (machine))

    # Locate the pdqrdriver binary: build tree, then $GPTUNE_INSTALL_PATH,
    # then any "gptune" entry on sys.path.
    DRIVERFOUND = False
    INSTALLDIR = os.getenv('GPTUNE_INSTALL_PATH')
    DRIVER = os.path.abspath(__file__ + "/../../../build/pdqrdriver")
    if (os.path.exists(DRIVER)):
        DRIVERFOUND = True
    elif (INSTALLDIR is not None):
        DRIVER = INSTALLDIR + "/gptune/pdqrdriver"
        if (os.path.exists(DRIVER)):
            DRIVERFOUND = True
    else:
        for p in sys.path:
            if ("gptune" in p):
                DRIVER = p + "/pdqrdriver"
                if (os.path.exists(DRIVER)):
                    DRIVERFOUND = True
                    break
    if (DRIVERFOUND == True):
        os.system("cp %s scalapack-driver/bin/%s/.;" % (DRIVER, machine))
    else:
        raise Exception(f"pdqrdriver cannot be located. Try to set env variable GPTUNE_INSTALL_PATH correctly.")

    nprocmax = nodes * cores
    bunit = 8  # the block size is multiple of bunit
    mmin = 1280
    nmin = 1280

    # Task (input) and tuning (parameter) space definitions.
    m = Integer(mmin, mmax, transform="normalize", name="m")
    n = Integer(nmin, nmax, transform="normalize", name="n")
    b = Integer(4, 16, transform="normalize", name="b")
    npernode = Integer(int(math.log2(nprocmin_pernode)), int(math.log2(cores)), transform="normalize", name="npernode")
    p = Integer(1, nprocmax, transform="normalize", name="p")
    r = Real(float("-Inf"), float("Inf"), name="r")

    IS = Space([m, n])
    PS = Space([b, npernode, p])
    OS = Space([r])
    constraints = {"cst1": cst1, "cst2": cst2, "cst3": cst3}
    constants = {"nodes": nodes, "cores": cores, "bunit": bunit, "perfmodel": perfmodel}
    print(IS, PS, OS, constraints)

    if (perfmodel == 1):
        problem = TuningProblem(IS, PS, OS, objectives, constraints, models, constants=constants)  # use performance models
    else:
        problem = TuningProblem(IS, PS, OS, objectives, constraints, None, constants=constants)
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    # Set and validate options.
    options = Options()
    options['model_processes'] = 1
    # options['model_threads'] = 1
    options['model_restarts'] = 1
    # options['search_multitask_processes'] = 1
    # options['model_restart_processes'] = 1
    # options['model_restart_threads'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    # options['mpi_comm'] = None
    options['model_class'] = 'Model_LCM'
    options['verbose'] = False
    options.validate(computer=computer)

    seed(1)  # deterministic random task list for reproducibility
    if ntask == 1:
        giventask = [[mmax, nmax]]
    else:
        giventask = [[randint(mmin, mmax), randint(nmin, nmax)] for i in range(ntask)]
    ntask = len(giventask)

    # BUG FIX: was [{...}] * ntask, which aliases ONE shared dict across all
    # tasks; any per-task update of the model coefficients c0..c4 would then
    # overwrite every task's state. Build an independent dict per task.
    data = Data(problem, D=[{'bunit': bunit, 'nodes': nodes, 'c0': 0, 'c1': 0, 'c2': 0, 'c3': 0, 'c4': 0} for _ in range(ntask)])
    # giventask = [[177, 1303],[367, 381],[1990, 1850],[1123, 1046],[200, 143],[788, 1133],[286, 1673],[1430, 512],[1419, 1320],[622, 263]]
    # the following will use only task lists stored in the pickle file
    # data = Data(problem, D=[{'c0': 0, 'c1': 0,'c2': 0,'c3': 0,'c4': 0}]*len(giventask))

    if (TUNER_NAME == 'GPTune'):
        if (perfmodel == 1):
            gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__), models_update=models_update)
        else:
            gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__), models_update=None)
        # Building MLA with NI random tasks.
        NI = ntask
        NS = nrun
        NS1 = nrun1
        (data, model, stats) = gt.MLA(NS=NS, Igiven=giventask, NI=NI, NS1=NS1)
        print("stats: ", stats)
        # Print all input and parameter samples.
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" m:%d n:%d" % (data.I[tid][0], data.I[tid][1]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'opentuner'):
        NI = ntask
        NS = nrun
        (data, stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
        print("stats: ", stats)
        # Print all input and parameter samples.
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" m:%d n:%d" % (data.I[tid][0], data.I[tid][1]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))

    if (TUNER_NAME == 'hpbandster'):
        NI = ntask
        NS = nrun
        (data, stats) = HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
        print("stats: ", stats)
        # Print all input and parameter samples.
        for tid in range(NI):
            print("tid: %d" % (tid))
            print(" m:%d n:%d" % (data.I[tid][0], data.I[tid][1]))
            print(" Ps ", data.P[tid])
            print(" Os ", data.O[tid].tolist())
            print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
def main():
    """Benchmark GPTune, hpbandster, and opentuner on a 1D synthetic problem.

    For each tuner and each sample budget in NSS, repeats the run NREP times,
    records the best objective found, and (with --plot 1) draws min/avg/max
    error bars for the three tuners against a known-optimum reference line.
    """
    global nodes
    global cores

    # Parse command line arguments
    args = parse_args()
    ntask = args.ntask
    perfmodel = args.perfmodel
    plot = args.plot
    nrep = args.nrep

    (machine, processor, nodes, cores) = GetMachineConfiguration()
    print("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
    os.environ['MACHINE_NAME'] = machine

    input_space = Space([Real(0., 10., transform="normalize", name="t")])
    parameter_space = Space([Real(0., 1., transform="normalize", name="x")])
    # input_space = Space([Real(0., 0.0001, "uniform", "normalize", name="t")])
    # parameter_space = Space([Real(-1., 1., "uniform", "normalize", name="x")])
    output_space = Space([Real(float('-Inf'), float('Inf'), name="y")])
    constraints = {"cst1": "x >= 0. and x <= 1."}

    if (perfmodel == 1):
        problem = TuningProblem(input_space, parameter_space, output_space, objectives, constraints, models)  # with performance model
    else:
        problem = TuningProblem(input_space, parameter_space, output_space, objectives, constraints, None)  # no performance model
    computer = Computer(nodes=nodes, cores=cores, hosts=None)

    options = Options()
    options['model_restarts'] = 1
    options['distributed_memory_parallelism'] = False
    options['shared_memory_parallelism'] = False
    options['objective_evaluation_parallelism'] = False
    options['objective_multisample_threads'] = 1
    options['objective_multisample_processes'] = 1
    options['objective_nprocmax'] = 1
    options['model_processes'] = 1
    # options['model_threads'] = 1
    # options['model_restart_processes'] = 1
    # options['search_multitask_processes'] = 1
    # options['search_multitask_threads'] = 1
    # options['search_threads'] = 16
    # options['mpi_comm'] = None
    # options['mpi_comm'] = mpi4py.MPI.COMM_WORLD
    options['model_class'] = 'Model_LCM'  # 'Model_GPy_LCM'
    options['verbose'] = False
    # options['sample_algo'] = 'MCS'
    # options['sample_class'] = 'SampleLHSMDU'
    options.validate(computer=computer)

    # Per-tuner aggregates: one array per tuner, one entry per budget in NSS.
    allavrs = []
    allmaxs = []
    allmins = []
    times = []
    # os.environ['TUNER_NAME'] = 'hpbandster'
    giventask = [[6]]
    # giventask = [[i] for i in np.arange(0, 10, 0.5).tolist()]
    NI = len(giventask)
    # NS=80
    NREP = nrep
    NSS = [10, 20, 40]  # sample budgets to compare

    for TUNER_NAME in ['GPTune', 'hpbandster', 'opentuner']:
        # NOTE(review): os.environ['TUNER_NAME'] is never set inside this loop,
        # although sibling drivers in this file export it before tuning —
        # confirm objectives() does not read it here.
        t1 = time.time_ns()
        mins = np.zeros(len(NSS))
        maxs = np.zeros(len(NSS))
        avr = np.zeros(len(NSS))
        for ss in range(len(NSS)):
            NS = NSS[ss]
            opts = np.zeros(NREP)
            for ii in range(NREP):
                if (TUNER_NAME == 'GPTune'):
                    os.system("rm -rf ./gptune.db/*.json")  # YL: do not reuse database if NREP>1
                    data = Data(problem)
                    gt = GPTune(problem, computer=computer, data=data, options=options, driverabspath=os.path.abspath(__file__))
                    (data, modeler, stats) = gt.MLA(NS=NS, Igiven=giventask, NI=NI, NS1=int(NS / 2))
                    # (data, modeler, stats) = gt.MLA(NS=NS, Igiven=giventask, NI=NI, NS1=NS-1)
                    print("stats: ", stats)
                    # Print all input and parameter samples.
                    for tid in range(NI):
                        print("tid: %d" % (tid))
                        print(" t:%f " % (data.I[tid][0]))
                        print(" Ps ", data.P[tid])
                        print(" Os ", data.O[tid].tolist())
                        print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
                if (TUNER_NAME == 'opentuner'):
                    (data, stats) = OpenTuner(T=giventask, NS=NS, tp=problem, computer=computer, run_id="OpenTuner", niter=1, technique=None)
                    print("stats: ", stats)
                    # Print all input and parameter samples.
                    for tid in range(NI):
                        print("tid: %d" % (tid))
                        print(" t:%f " % (data.I[tid][0]))
                        print(" Ps ", data.P[tid])
                        print(" Os ", data.O[tid].tolist())
                        print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
                if (TUNER_NAME == 'hpbandster'):
                    (data, stats) = HpBandSter(T=giventask, NS=NS, tp=problem, computer=computer, run_id="HpBandSter", niter=1)
                    print("stats: ", stats)
                    # Print all input and parameter samples.
                    for tid in range(NI):
                        print("tid: %d" % (tid))
                        print(" t:%f " % (data.I[tid][0]))
                        print(" Ps ", data.P[tid])
                        print(" Os ", data.O[tid].tolist())
                        print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
                opts[ii] = min(data.O[0])[0]  # best objective found in this repetition
            avr[ss] = np.average(opts)
            maxs[ss] = np.max(opts)
            mins[ss] = np.min(opts)
        allavrs.append(avr)
        allmaxs.append(maxs)
        allmins.append(mins)
        t2 = time.time_ns()
        times.append((t2 - t1) / 1e9)  # wall-clock seconds per tuner

    print(allavrs)
    print(allmaxs)
    print(allmins)
    print(times)
    # Previously recorded allavrs/allmaxs/allmins snapshots for t=1/3/4/6
    # (used to produce the reference plots) were kept here as comments; the
    # corresponding known-optimum f_min values survive in the plt.plot lines
    # below (0.735 for t=1, 0.61012 for t=3, 0.59020 for t=4, 0.510885 for t=6).

    if (plot == 1):
        fontsize = 24
        fig = plt.figure(figsize=[12.8, 9.6])
        plt.rcParams.update({'font.size': fontsize})
        # Offset the x positions slightly so the three error bars don't overlap.
        plt.errorbar(np.array(NSS) - 0.3, allavrs[0], yerr=[allavrs[0] - allmins[0], allmaxs[0] - allavrs[0]], capsize=10, elinewidth=2, markeredgewidth=5, fmt='o', label='GPTune')
        plt.errorbar(np.array(NSS), allavrs[1], yerr=[allavrs[1] - allmins[1], allmaxs[1] - allavrs[1]], capsize=10, elinewidth=2, markeredgewidth=5, fmt='o', label='hpbandster')
        plt.errorbar(np.array(NSS) + 0.3, allavrs[2], yerr=[allavrs[2] - allmins[2], allmaxs[2] - allavrs[2]], capsize=10, elinewidth=2, markeredgewidth=5, fmt='o', label='opentuner')
        plt.plot([NSS[0] - 5, NSS[-1] + 5], [0.510885, 0.510885], c='black', linestyle=':')  # t=6
        # plt.plot([NSS[0]-5,NSS[-1]+5], [0.735, 0.735], c='black', linestyle=':')  # t=1
        # plt.plot([NSS[0]-5,NSS[-1]+5], [0.61012, 0.61012], c='black', linestyle=':')  # t=3
        # plt.plot([NSS[0]-5,NSS[-1]+5], [0.59020, 0.59020], c='black', linestyle=':')  # t=4
        plt.xlabel('NS', fontsize=fontsize + 2)
        plt.ylabel('f_min', fontsize=fontsize + 2)
        plt.legend(loc='upper right')
        plt.show(block=False)
        plt.pause(0.5)
        input("Press [enter] to continue.")
        fig.savefig('fmins_t%d.eps' % int(giventask[0][0]))