# common imports assumed by the snippets in this section
import subprocess
import numpy as np
from sobol_lib import i4_sobol  # quasi-random (Sobol) sequence generator

def dispatch(ranges, YoverZ, N, logs, directory, light=0, remove=0, skip=0,
             parallel=r"$OMP_NUM_THREADS", nice=0, memory=0, cpu=0):
    shift = ranges[:, 0]
    scale = np.array([(b - a) for a, b in ranges])
    init_conds = []
    for i in range(skip, N + skip):
        # map the i-th Sobol point from [0,1)^d onto the parameter ranges
        vals = shift + np.array(i4_sobol(len(ranges), i)[0]) * scale
        for j, val in enumerate(vals):
            if logs[j]:
                vals[j] = 10**val
        #init_conds += [[tmp for tmp in vals]]
        #if vals[0] >= diffusion_cutoff: vals[-1] = 0 # if M>1.5 then D=0
        M = vals[0]
        Z = vals[1]
        Y = 0.2463 + YoverZ * vals[1]  # enrichment law: Y = Y_p + (dY/dZ) * Z
        print(np.log10(Z / (1 - Y - Z) / 0.02293))  # [Fe/H] for solar Z/X = 0.02293
        vals = [M, Y, Z]
        # "-p %s" rather than the original "-p %d": the default parallel value
        # is the string "$OMP_NUM_THREADS", which %d would reject
        bash_cmd = "maybe_sub.sh %s%s%s-p %s ./dispatch.sh -d %s "\
            "-M %.6f -Y %.6f -Z %.6f %s%s" % \
            tuple(["-n " if nice else ""] +
                  ["-m %d " % memory if memory > 0 else ""] +
                  ["-c %s " % cpu if cpu != 0 else ""] +
                  [parallel, directory] +
                  [val for val in vals] +
                  ["-L " if light else ""] +
                  ["-r " if remove else ""])
        print(bash_cmd)
        #exit()
        process = subprocess.Popen(bash_cmd.split(), shell=False)
        process.wait()
def dispatch(ranges, N, logs, threshold, directory, light=0, remove=0, skip=0,
             parallel=r"$OMP_NUM_THREADS", nice=0, memory=0):
    shift = ranges[:, 0]
    scale = np.array([(b - a) for a, b in ranges])
    init_conds = []
    for i in range(skip, N + skip):
        vals = shift + np.array(i4_sobol(len(ranges), i)[0]) * scale
        for j, val in enumerate(vals):
            if logs[j]:
                vals[j] = 10**val
        init_conds += [[tmp for tmp in vals]]
        # zero out any value at or below its threshold (or NaN)
        for j, val in enumerate(vals):
            if vals[j] <= threshold[j] or np.isnan(vals[j]):
                vals[j] = 0
        bash_cmd = "maybe_sub.sh %s%s-p %s ./dispatch.sh -d %s "\
            "-M %.6f -Y %.6f -Z %.6f -a %.6f -o %.6f -D %.6f %s%s" % \
            tuple(["-n " if nice else ""] +
                  ["-m %d " % memory if memory > 0 else ""] +
                  [parallel, directory] +
                  [val for val in vals] +
                  ["-L " if light else ""] +
                  ["-r " if remove else ""])
        print(bash_cmd)
        #exit()
        subprocess.Popen(bash_cmd.split(), shell=False)
        sleep(0.01)  # stagger submissions; requires: from time import sleep
    np.savetxt('initial_conditions.dat', np.array(init_conds))
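# A minimal sketch (not part of the original code) of the rescaling the
# dispatch functions above perform: i4_sobol yields points in [0,1)^d, which
# are mapped onto the parameter box via shift + point*scale, then any
# log-flagged axis is exponentiated. The ranges below are illustrative only.
import numpy as np
from sobol_lib import i4_sobol  # assumed available, as in the snippets above

ranges = np.array([[0.7, 1.6],     # e.g. mass
                   [-3.0, -1.0]])  # e.g. log10(Z)
logs = [False, True]
shift, scale = ranges[:, 0], ranges[:, 1] - ranges[:, 0]

for i in range(4):
    point = np.array(i4_sobol(len(ranges), i)[0])
    vals = shift + point * scale
    vals = [10**v if logs[j] else v for j, v in enumerate(vals)]
    print(vals)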
def coefs_QMCSobol_wrapper(nb_samples, a_nu):
    # note that a lot of calculations will be repeated by doing this.
    # We need to be smarter!
    int_val = 0
    seed = 0
    dim = 6
    for one_sample in xrange(0, nb_samples):
        [sample, new_seed] = i4_sobol(dim, seed)
        int_val = int_val + ctaa(sample, 1.0, 1.0/6, 5.) * mvc(sample, a_nu)
        seed = new_seed
    return int_val / nb_samples
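# Hedged sketch of the quasi-Monte Carlo estimator used above: the integral of
# f over [0,1]^d is approximated by the mean of f at nb_samples Sobol points.
# ctaa/mvc in the snippet are problem-specific; a toy integrand stands in here.
import numpy as np
from sobol_lib import i4_sobol

def qmc_mean(f, dim, nb_samples):
    total, seed = 0.0, 0
    for _ in range(nb_samples):
        sample, seed = i4_sobol(dim, seed)
        total += f(np.asarray(sample))
    return total / nb_samples

# the exact integral of x*y over [0,1]^2 is 0.25
print(qmc_mean(lambda x: x[0] * x[1], 2, 1024))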
def dispatch(ranges, tracks, points, logs, threshold, directory, light=0,
             remove=0, skip=0, parallel=r"$OMP_NUM_THREADS", nice=0, image=0,
             mainseq=0, subgiant=0, taper=0, chem_ev=0, rotk=0, couple=0):
    shift = ranges[:, 0]
    scale = np.array([(b - a) for a, b in ranges])
    init_conds = []
    for i in range(skip, tracks + skip):
        vals = shift + np.array(i4_sobol(len(ranges), i)[0]) * scale
        for j, val in enumerate(vals):
            if logs[j]:
                vals[j] = 10**val if not np.isnan(val) else 0
        init_conds += [[tmp for tmp in vals]]
        for j, val in enumerate(vals):
            if vals[j] <= threshold[j] or np.isnan(vals[j]):
                vals[j] = 0
        if chem_ev:
            vals[1] = vals[1] * vals[2] + 0.2463  # Y from dY/dZ and Z
        if couple:
            vals[9] = vals[8]
        bash_cmd = "maybe_sub.sh -e %s%s-p %s ./dispatch.sh -d %s "\
            "-n %d -N %d "\
            "-M %.6f -Y %.6f -Z %.6f -a %.6f "\
            "-o %.6f -oe %.6f -u %.6f -ue %.6f "\
            "-D %.6f -g %.6f -e %.6f "\
            "%s%s%s%s%s%s" % \
            tuple(["-n " if nice else ""] +
                  ["-i %d " % image if image > 0 else ""] +
                  [parallel, directory] +
                  [i] + [points] +
                  [val for val in vals] +
                  ["-L " if light else ""] +
                  ["-r " if remove else ""] +
                  ["-MS " if mainseq else ""] +
                  ["-S " if subgiant else ""] +
                  ["-t " if taper else ""] +
                  ["-rotk " if rotk else ""])
        print(bash_cmd)
        #exit()
        process = subprocess.Popen(bash_cmd.split(), shell=False)
        process.wait()
def sobol(parsin, nruns, seed=1):
    """Sobol sampled parameter values

    Return a defined set (nruns values) of Sobol sampled parameter values
    from the parameter distribution. As with Latin Hypercube, the sampling
    is limited to uniform distributions. Sobol is always performed on the
    entire set of parameters used in the analysis!

    Parameters
    ----------
    parsin : list of ModPar instances
        List with all the parameters to sample from
    nruns : int
        number of samples
    seed : int
        seed to start from; change this when performing multiple samples,
        or use the last returned seed to continue the sequence

    Returns
    -------
    pars : ndarray
        2D array with the runs in the rows and the parameters in the columns
    """
    ndim = len(parsin)
    pars = np.zeros((nruns, ndim))

    for i in xrange(1, nruns + 1):
        [r, seed_out] = i4_sobol(ndim, seed)
        pars[i - 1, :] = r
        seed = seed_out

    for i in range(ndim):
        pars[:, i] = rescale(pars[:, i], parsin[i].min, parsin[i].max)

    print 'The seed to continue this sampling procedure is', seed, '.'
    print 'If you do not update the seed for extra samples, the samples ' \
          'will be the same!'
    return pars
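# Usage sketch for sobol() above. ModPar and rescale come from the surrounding
# package; a hypothetical stand-in with .min/.max attributes shows the call
# pattern and how the printed seed chains consecutive samples.
from collections import namedtuple
Par = namedtuple('Par', ['min', 'max'])  # stand-in for ModPar

parsin = [Par(0.0, 10.0), Par(-1.0, 1.0)]
pars = sobol(parsin, nruns=100, seed=1)    # 100 x 2 array; prints seed 101
more = sobol(parsin, nruns=100, seed=101)  # continues the sequence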
def faster_QMC_computations(nb_samples, nus):
    # note that a lot of calculations will be repeated by doing this. We need
    # to be smarter! First find the highest degree considered in the list of
    # nu's, then go through the samples one at a time; every time we see a new
    # coordinate value, add it to the dictionary together with its T_j(value).
    max_degree = np.max(nus)
    cheb_evals = {}
    weights_eval = {}
    int_val = 0  # as for integral value
    seed = 0     # required for the sobol index
    dim = 6      # that hurts!
    for one_sample in xrange(0, nb_samples):
        [sample, new_seed] = i4_sobol(dim, seed)
        # set the anchor at (-1,-1,...) instead of the usual (0,0,...) for QMC
        sample = sample*2 - 1
        # coordinates whose Chebyshev evaluations we have not cached before
        # (the original iterated over the loop index; 'sample' is intended)
        not_computed = [a_param for a_param in sample
                        if a_param not in cheb_evals]
        for to_compute in not_computed:
            # cache T_0..T_max at this coordinate, plus the Chebyshev weight
            to_add_to_dict = [1]
            for one_deg in xrange(1, max_degree + 1):
                to_add_to_dict.append(np.polynomial.chebyshev.chebval(
                    to_compute, np.hstack((np.zeros(one_deg), 1))))
            cheb_evals[to_compute] = to_add_to_dict
            weights_eval[to_compute] = \
                np.polynomial.chebyshev.chebweight(to_compute)
        # 'a_nu' was undefined in the original; 'nus' is presumably intended
        int_val = int_val + ctaa(sample - 1, 1.0, 1.0/6, 5.) * mvc(sample, nus)
        seed = new_seed
    return int_val / nb_samples
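# Side check of the T_j evaluation trick cached above: chebval(x, e_j), with
# e_j a unit coefficient vector, evaluates the j-th Chebyshev polynomial at x.
import numpy as np
x = 0.3
e2 = np.hstack((np.zeros(2), 1))            # coefficient vector of T_2
t2 = np.polynomial.chebyshev.chebval(x, e2)
assert np.isclose(t2, 2 * x**2 - 1)         # T_2(x) = 2x^2 - 1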
def dispatch(ranges, tracks, logs, directory, remove=0, skip=0,
             parallel=r"$OMP_NUM_THREADS", nice=0, image=0):
    shift = ranges[:, 0]
    scale = np.array([(b - a) for a, b in ranges])
    init_conds = []
    for i in range(skip, tracks + skip):
        vals = shift + np.array(i4_sobol(len(ranges), i)[0]) * scale
        for j, val in enumerate(vals):
            if logs[j]:
                vals[j] = 10**val if not np.isnan(val) else 0
        init_conds += [[tmp for tmp in vals]]
        for j, val in enumerate(vals):
            if np.isnan(vals[j]):
                vals[j] = 0
        # "-t %.6fd9": age in Fortran d-notation (value x 10^9)
        bash_cmd = "maybe_sub.sh -e %s%s-p %s ./dispatch.sh -d %s "\
            "-n %d "\
            "-M %.6f -Y %.6f -Z %.6f -a %.6f -t %.6fd9 -b %.6f "\
            "%s" % \
            tuple(["-n " if nice else ""] +
                  ["-i %d " % image if image > 0 else ""] +
                  [parallel, directory] +
                  [i] +
                  [val for val in vals] +
                  ["-r " if remove else ""])
        print(bash_cmd)
        #exit()
        process = subprocess.Popen(bash_cmd.split(), shell=False)
        process.wait()
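# Note on the dispatchers above: they build a shell string and .split() it,
# which breaks if e.g. the directory contains spaces. A list-based argv
# (a sketch with illustrative values; maybe_sub.sh is the site-specific
# submission script used throughout) avoids the quoting problem:
import subprocess
parallel, directory, M = 4, "grids/run 1", 1.0  # illustrative values
argv = ["maybe_sub.sh", "-p", str(parallel), "./dispatch.sh",
        "-d", directory, "-M", "%.6f" % M]
subprocess.Popen(argv, shell=False).wait()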
# (assumes matplotlib.pyplot as plt, matplotlib.cm as cm, os, numpy as np,
#  and i4_sobol; 'points' holds the panel sizes and 'output_dir' the target)
### Random grid (tail of the per-panel loop; the loop head is truncated in
### the source, and its scatter call ended with: linewidth=0.25, s=10/(3-ii))
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    plt.xlim([0, 1])
    plt.ylim([0, 1])
fig.tight_layout()
fig.savefig(os.path.join(output_dir, 'grid-random.png'),
            bbox_inches='tight', dpi=400)

### Quasi-random grid
fig = plt.figure(figsize=(2.35037, 4.17309), dpi=400)
for ii, n_points in enumerate(points):
    ax = fig.add_subplot(3, 1, ii + 1)
    grid = np.transpose([np.array(i4_sobol(3, i)[0])
                         for i in range(20000, 20000 + n_points)])
    plt.scatter(grid[0], grid[1], c=grid[2], cmap=cm.gist_heat,
                #linewidth=0.5/(1+ii),
                linewidth=0.25, s=10 / (1 + ii))
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    plt.xlim([0, 1])
    plt.ylim([0, 1])
fig.tight_layout()
fig.savefig(os.path.join(output_dir, 'grid-quasirandom.png'),
            bbox_inches='tight', dpi=400)
# (assumes module-level: import math, numpy, sobol_lib, vtk2Box; constant N_P)
def opImplement(self):
    import Histogram, drand
    # seed for the quasi-random generator
    seed = 0
    # array for storing the generated (point, normal) pairs
    point_set = numpy.zeros((N_P, 2, 3), dtype=numpy.double)
    number_of_triangle = self.data.GetNumberOfCells()
    index = 0
    # compute the total surface area first
    area_sum, predict_np = self._compute_model_area_sum()
    # count how many points to generate:
    #   np * area/area_sum >= 0.5  =>  np = 0.5 / (area/area_sum)
    if self.number_of_point_to_generate < 0:
        self.number_of_point_to_generate = predict_np
    sum_np = 0
    index = 0
    for t in xrange(0, number_of_triangle):
        triangle = self.data.GetCell(t).GetPoints()
        v1 = triangle.GetPoint(0)
        v2 = triangle.GetPoint(1)
        v3 = triangle.GetPoint(2)
        # PRNS random; according to the paper it should use r3,
        # so execute two drand_48 calls first
        drand.drand_48(seed)
        drand.drand_48()
        r3 = drand.drand_48()
        area = vtk2Box.compute_triangle_area(v1, v2, v3)
        # current triangle's area as a proportion of the total area
        h_i = self.number_of_point_to_generate * area / area_sum
        # decide how many points should be generated in this triangle
        n_i = int(round(math.ceil(h_i) + (h_i - math.ceil(h_i)) * r3))
        #n_i = int(math.ceil(h_i) + (h_i - math.ceil(h_i))*r3)
        # generate the points within this triangle
        if n_i > 0:
            # add to the total generated-point counter
            sum_np += n_i
            # form two edge vectors of the triangle
            v_1 = [v1[0]-v2[0], v1[1]-v2[1], v1[2]-v2[2]]
            v_2 = [v3[0]-v2[0], v3[1]-v2[1], v3[2]-v2[2]]
            # compute the magnitudes of the vectors
            v_1_len = math.sqrt(v_1[0]**2 + v_1[1]**2 + v_1[2]**2)
            v_2_len = math.sqrt(v_2[0]**2 + v_2[1]**2 + v_2[2]**2)
            # convert the vectors to unit vectors first
            if v_1_len > 0:
                v_1 = [a/v_1_len for a in v_1]
            if v_2_len > 0:
                v_2 = [a/v_2_len for a in v_2]
            # cross the two vectors to get the triangle normal;
            # v1 x v2 versus v2 x v1 is not important in this case
            crossed_vector = [v_2[1]*v_1[2] - v_2[2]*v_1[1],
                              v_2[2]*v_1[0] - v_2[0]*v_1[2],
                              v_2[0]*v_1[1] - v_2[1]*v_1[0]]
            #crossed_vector = [-a for a in crossed_vector]  # simulate v1 x v2
            # normalize the crossed vector
            crossed_vector_len = math.sqrt(crossed_vector[0]**2 +
                                           crossed_vector[1]**2 +
                                           crossed_vector[2]**2)
            if crossed_vector_len > 0:
                normal = [a/crossed_vector_len for a in crossed_vector]
            else:
                normal = crossed_vector
            # generate the random points inside the triangle
            for i in xrange(n_i):
                # mD2 point-generation algorithm, using QRNS rather than PRNS:
                # r1, r2 are two random values in [0,1); seed increments by 1
                ((r1, r2), seed) = sobol_lib.i4_sobol(2, seed)
                r_t1 = [(1 - math.sqrt(r1)) * a for a in v1]
                r_t2 = [(math.sqrt(r1) * (1 - r2)) * a for a in v2]
                r_t3 = [math.sqrt(r1) * (r2 * a) for a in v3]
                # resulting generated point
                random_p = [r_t1[0]+r_t2[0]+r_t3[0],
                            r_t1[1]+r_t2[1]+r_t3[1],
                            r_t1[2]+r_t2[2]+r_t3[2]]
                # append the random point and its normal to the point set
                if index > (N_P - 1):
                    point_set = numpy.append(point_set, [[random_p, normal]],
                                             axis=0)
                else:
                    point_set[index] = [random_p, normal]
                index += 1
    histo = Histogram.AADHistogram()
    # pass the numpy array straight to the C function
    histo.computeFeature(point_set, sum_np)
    return histo
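# The formula used above draws points uniformly in a triangle (cf. Osada et
# al., "Shape Distributions"): P = (1-sqrt(r1))*V1 + sqrt(r1)*(1-r2)*V2
# + sqrt(r1)*r2*V3. A compact numpy sketch with i4_sobol supplying (r1, r2):
import numpy as np
from sobol_lib import i4_sobol

def sample_triangle(v1, v2, v3, n, seed=0):
    v1, v2, v3 = map(np.asarray, (v1, v2, v3))
    pts = np.empty((n, 3))
    for i in range(n):
        (r1, r2), seed = i4_sobol(2, seed)
        s = np.sqrt(r1)
        pts[i] = (1 - s) * v1 + s * (1 - r2) * v2 + s * r2 * v3
    return pts, seed

pts, seed = sample_triangle([0, 0, 0], [1, 0, 0], [0, 1, 0], 100)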
print('Parsing isochrones')
iso_dir = 'MIST_v1.2_feh_p0.00_afe_p0.0_vvcrit0.0_EEPS'
track_files = glob.glob(os.path.join(iso_dir, '*.eep'))
isochrones = {}
for track_file in tqdm(track_files):
    with open(track_file) as f:
        head = [next(f) for x in range(12)]
    header = re.split('\\s+', head[-1].rstrip().strip("#"))[1:]
    DF = pd.read_table(track_file, names=header, sep='\\s+', comment='#')
    mass = int(os.path.basename(track_file).split('M')[0]) / 100
    isochrones[mass] = DF

# call lightcurve with quasi-randomly generated inputs
for id in range(args.skip, args.systems + args.skip):
    vals = shift + np.array(i4_sobol(len(ranges), id)[0]) * scale
    flag_dict = {flag: 10**val if flag in logs else val
                 for flag, val in zip(flags, vals)}

    # figure out which stellar model to use
    A_mass = flag_dict['A_mass']
    M_idx = np.argmin([abs(A_mass - M) for M in isochrones.keys()])
    A_mass = list(isochrones.keys())[M_idx]
    track = isochrones[A_mass]
    t_idx = np.argmin(abs(flag_dict['age'] * 10**9 - track['star_age']))
    flag_dict['age'] = track['star_age'][t_idx] / 10**9

    # pick A_radius, A_Teff from the model
    flag_dict['A_mass'] = A_mass
    flag_dict['A_radius'] = 10**track['log_R'][t_idx]
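# An equivalent, arguably clearer nearest-key lookup for the argmin step
# above (a sketch using only the isochrones dict and a sampled target mass):
target = 1.23  # illustrative sampled mass
A_mass = min(isochrones.keys(), key=lambda M: abs(M - target))
track = isochrones[A_mass]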
# (assumes module-level: from numpy import zeros; from scipy import optimize;
#  import sobol_lib; plus convert_aru, FROM_ARU, write_params from the package)
def nloptimize(obj_func, ampmask, phasemask, params, config_file):
    '''optimize obj_func gate using ampmask and phasemask'''
    print "optimizing {0} gate fidelity using {1} and {2}".format(
        params['run_params'].gate,
        params['run_params'].run_ampmask,
        params['run_params'].run_phasemask)

    NOPTS = params['mask_params'].NOPTS  # number of parameters to optimize
    NITER = params['run_params'].NITER   # number of initial vectors
    x_start = zeros(NOPTS)               # initial values for the parameters
    x_bounds = params['mask_params'].x_bounds  # lower/upper parameter bounds

    # variables to store the optimal fidelity and parameter values
    optimal_fidelity = 0.0
    optimal_x = zeros(NOPTS)

    # optimize for NITER different initial values
    seed = params['run_params'].sobol_seed  # initialize the Sobol sequence seed
    run_optimize = True
    for i in range(NITER):
        # choose initial parameter values from the allowed phase space:
        # use the Sobol sequence to generate NOPTS quasi-random variables
        [ran_var, seed_out] = sobol_lib.i4_sobol(NOPTS, seed)
        seed = seed_out  # update the seed for the Sobol sequence
        for j in range(NOPTS):
            x_start[j] = (x_bounds[j][1] - x_bounds[j][0]) * ran_var[j] \
                         + x_bounds[j][0]

        # tuple of extra arguments passed through to obj_func
        args = (params, ampmask, phasemask, run_optimize)

        # optimize for a single instance of x_start;
        # use options={'disp': True} to display more info
        output = optimize.minimize(obj_func, x_start, args=args,
                                   method='SLSQP', bounds=x_bounds, tol=1e-4)
        x = output.x
        fidelity = 1 - output.fun
        if fidelity > optimal_fidelity:
            optimal_x = x
            optimal_fidelity = fidelity
        print "Iteration {0}, Current Result: {1}, Best Result: {2}".format(
            i, fidelity, optimal_fidelity)

    # convert the optimal result to normal units and write to the config file
    converted_optimal_x = convert_aru(optimal_x, config_file, FROM_ARU)
    # update the config file with the optimal parameters
    write_params(converted_optimal_x, config_file)

    # print the results to screen
    print "Optimal results: "
    print "Fidelity =", optimal_fidelity
    for i in range(NOPTS):
        print "{0} = {1} {2}".format(params['mask_params'].param_list[i],
                                     optimal_x[i],
                                     params['mask_params'].x_units[i])
    print "\n"
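# Sketch of the Sobol multi-start pattern above on a toy objective: draw
# starts from the Sobol sequence, run a local SLSQP solve from each, and keep
# the best. Self-contained; the Rosenbrock function stands in for obj_func.
import numpy as np
from scipy import optimize
import sobol_lib

bounds = [(-2.0, 2.0), (-2.0, 2.0)]
rosen = lambda x: (1 - x[0])**2 + 100 * (x[1] - x[0]**2)**2

best, seed = None, 1
for _ in range(8):
    [u, seed] = sobol_lib.i4_sobol(len(bounds), seed)
    x0 = [lo + ui * (hi - lo) for ui, (lo, hi) in zip(u, bounds)]
    res = optimize.minimize(rosen, x0, method='SLSQP',
                            bounds=bounds, tol=1e-6)
    if best is None or res.fun < best.fun:
        best = res
print("x* = %s, f* = %.3g" % (best.x, best.fun))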