def parallel_bulkfit(path, num_splits=20, ncores=8, start_pt=0):
    '''
    Run bulk fitting in parallel.

    Results are outputted in chunks ("spectral_fitting_<n>.csv") to make
    restarting easier.

    Parameters
    ----------
    path : str
        Directory containing the '.fits' spectra to fit.
    num_splits : int, optional
        Number of chunks the spectrum list is divided into.
    ncores : int, optional
        Worker processes used per chunk.
    start_pt : int, optional
        Index of the first chunk to run, for restarting a partial run.
        Chunks before ``start_pt`` are assumed already fitted and are
        skipped (their CSVs are not rewritten).
    '''
    # endswith('.fits') (was f[-4:] == 'fits') so the match is always
    # consistent with the 5-character extension strip ([:-5]) below.
    spectra = [f for f in os.listdir(path) if f.endswith('.fits')]

    # Floor division keeps split points integral on Python 3 as well
    # (identical to the old '/' under Python 2 ints).
    split_at = len(spectra) // num_splits
    splits = [split_at * i for i in range(1, num_splits)]
    splits.append(len(spectra))
    splits = splits[start_pt:]

    # BUG FIX: previously prev_split stayed 0 on restart, so the first
    # chunk after a restart re-fitted every already-completed spectrum.
    prev_split = split_at * start_pt

    for i, split in enumerate(splits):
        print("On split " + str(i + 1) + " of " + str(len(splits)))
        print(str(datetime.now()))

        split_spectra = spectra[prev_split:split]
        prev_split = split
        # Guard: an empty chunk would make output[0] below raise IndexError.
        if not split_spectra:
            continue

        pool = Pool(processes=ncores)
        output = pool.map(do_specfit, split_spectra)
        pool.close()
        pool.join()

        # BUG FIX: the first column previously kept its '.fits' extension
        # (columns=split_spectra[:1]) while every later column was stripped;
        # strip the extension uniformly.
        df = DataFrame(output[0], columns=[split_spectra[0][:-5]])
        for out, spec in zip(output[1:], split_spectra[1:]):
            df[spec[:-5]] = out

        # BUG FIX: number output files by absolute chunk index so a restart
        # does not overwrite the CSVs of chunks finished earlier
        # (identical to the old numbering when start_pt == 0).
        df.to_csv("spectral_fitting_" + str(start_pt + i + 1) + ".csv")
def parallel_bulkfit(path, num_splits=20, ncores=8, start_pt=0):
    '''
    Run bulk fitting in parallel, writing results chunk-by-chunk to
    "spectral_fitting_<n>.csv" so a partial run can be restarted.

    NOTE(review): this is a byte-for-byte duplicate of an identical
    definition earlier in this file; consider deleting one copy.

    Parameters
    ----------
    path : str
        Directory holding the '.fits' spectra.
    num_splits : int, optional
        How many chunks to divide the work into.
    ncores : int, optional
        Pool size per chunk.
    start_pt : int, optional
        First chunk to run when restarting; earlier chunks are skipped.
    '''
    # Match the full '.fits' suffix (was f[-4:] == 'fits'), keeping the
    # filter consistent with the [:-5] extension strip used below.
    spectra = [name for name in os.listdir(path) if name.endswith('.fits')]

    # '//' so chunk boundaries stay integers under Python 3 too.
    chunk_size = len(spectra) // num_splits
    boundaries = [chunk_size * k for k in range(1, num_splits)]
    boundaries.append(len(spectra))
    boundaries = boundaries[start_pt:]

    # BUG FIX: start from the end of the last completed chunk instead of 0,
    # otherwise a restart re-fits everything already done.
    lower = chunk_size * start_pt

    for i, upper in enumerate(boundaries):
        print("On split " + str(i + 1) + " of " + str(len(boundaries)))
        print(str(datetime.now()))

        chunk = spectra[lower:upper]
        lower = upper
        if not chunk:
            # Nothing to fit in this chunk; output[0] would IndexError.
            continue

        pool = Pool(processes=ncores)
        output = pool.map(do_specfit, chunk)
        pool.close()
        pool.join()

        # BUG FIX: strip the '.fits' extension from the first column name
        # as well (it was previously the only column keeping it).
        df = DataFrame(output[0], columns=[chunk[0][:-5]])
        for result, name in zip(output[1:], chunk[1:]):
            df[name[:-5]] = result

        # BUG FIX: absolute chunk numbering so restarts do not clobber the
        # CSVs written before the interruption (unchanged for start_pt=0).
        df.to_csv("spectral_fitting_" + str(start_pt + i + 1) + ".csv")
# NOTE(review): whitespace-mangled chunk left byte-identical -- the original
# line breaks and indentation (loop/function nesting) are not recoverable
# from this view. The leading statements (`e = (ei-ef)` ... `return [es]`)
# are the tail of a `simulation(...)` function defined before this chunk:
# they accumulate a per-step energy difference (ei/ef presumably energies
# before/after a step -- confirm against the off-view function body) and
# return the series as a list. Everything from `Nsteps = 10000` onward is
# top-level driver code: run "wh" and "whfast-nocor" integrators through an
# InterruptiblePool, reshape the pooled energy-error series to
# (len(integrators), Nsteps), then configure a single matplotlib axes
# (PDF backend) for relative-energy-error vs. time.
e = (ei-ef) es.append(e) ei = ef es = np.array(es) print integrator + " done." return [es] Nsteps = 10000 dt = 0.00102839712987319 integrators = ["wh","whfast-nocor"] parameters = [(i) for i in integrators] pool = InterruptiblePool() res = np.array(pool.map(simulation,parameters)).reshape(len(integrators),Nsteps) import matplotlib; matplotlib.use("pdf") import matplotlib.pyplot as plt from matplotlib import ticker from matplotlib.colors import LogNorm f,axarr = plt.subplots(1,1,figsize=(10,7)) x = np.linspace(0,Nsteps*dt,Nsteps) extent=[x.min(),x.max(), res.min(), res.max()] axarr.set_xlim(extent[0], extent[1]) axarr.set_ylim(extent[2], extent[3]) axarr.set_xlabel(r"time [year ~ 4300]") axarr.set_ylabel(r"rel energy error")
# NOTE(review): whitespace-mangled chunk left byte-identical -- the original
# line breaks/indentation are not recoverable from this view. The leading
# `rebound.integrate(50000.*np.pi)` / `return [...]` is the tail of a
# `simulation(...)` function defined before this chunk, returning
# [MEGNO, integration time] after integrating to t = 50000*pi. The rest is
# top-level driver code: build an N x N (N=80) parameter grid over
# S-values (scaled by Scrit=0.25) and timesteps (dts, scaled by 2*pi) at
# fixed e0=0.9 (the comment cites Rauch Fig. 4 -- semantics of S/Scrit not
# shown here, confirm against the off-view simulation()), map it through an
# InterruptiblePool, clip MEGNO to [1.8, 4] and |Lyapunov time| to
# [1, 1e5]/(2*pi) (units of orbital periods), then set up a two-panel
# matplotlib figure (PDF backend) over the (dt, S) extent.
rebound.integrate(50000.*np.pi) return [rebound.get_megno(), rebound.get_t()] #I always set the (osculating) semimajor axis to 1, you can pass different initial e values e0 = 0.9 # Rauch uses 0.9 for Fig 4 Scrit = 0.25 # always true if you use G=M=a=1 N = 80 dts = np.linspace(0.1,2.,N) Ss = np.linspace(0,0.5,N) parameters = [(Ss[i]*Scrit,dts[j]*2.*np.pi,e0) for i in range(N) for j in range(N)] pool = InterruptiblePool() res = pool.map(simulation,parameters) res = np.nan_to_num(res) megno = np.clip(res[:,0].reshape((N,N)),1.8,4.) lyaptime = np.clip(np.absolute(res[:,1].reshape((N,N))),1.,1.e5)/2./np.pi # divide by 2pi to get in units of orbital period import matplotlib; matplotlib.use("pdf") import matplotlib.pyplot as plt from matplotlib.colors import LogNorm f,axarr = plt.subplots(2) extent=[dts.min(), dts.max(), Ss.min(), Ss.max()] for ax in axarr: ax.set_xlim(extent[0], extent[1]) ax.set_ylim(extent[2], extent[3]) ax.set_xlabel(r"$\Delta t / t_{orb}$")
# Per-region offsets, one per entry in the FITS-file lists (order must match
# fits250/fits350 defined before this chunk). NOTE(review): units and
# derivation are not shown here -- confirm against wrapper(); the -879.063
# outlier (6th entry) looks suspicious and is worth double-checking.
offsets = [31.697, 14.437, 85.452, 26.216, 9.330, -879.063, 23.698,
           21.273, 20.728, 32.616, 35.219, 9.005, 10.124, 14.678]

# One beamwidth per map. NOTE(review): presumably arcsec FWHM for the
# 250um/350um bands (18.2"/24.9") -- TODO confirm against wrapper().
beamwidth_250 = [18.2] * len(fits250)
beamwidth_350 = [24.9] * len(fits350)

# Inputs (adjust to desired wavelength); uncomment the "+" tails to process
# both bands in a single run.
beamwidths = beamwidth_350  # + beamwidth_350
distances = distances  # + distances (no-op self-assignment as written)
fits_files = fits350  # + fits350

print "Started at " + str(datetime.now())

if not MULTICORE:
    # Serial path: one wrapper() call per map, matched by index.
    for i, filename in enumerate(fits_files):
        wrapper(filename, distances[i], beamwidths[i], offsets[i],
                verbose=False)
else:
    # Parallel path: single_input presumably unpacks each
    # (file, distance, beamwidth, offset) tuple -- izip is Python 2
    # itertools, so the argument lists are consumed lazily.
    pool = Pool(processes=NCORES)
    pool.map(single_input, izip(fits_files, distances, beamwidths, offsets))
    pool.close()
    # pool.join()
# Tail of the fits250 filename list (the assignment opens before this view).
"california_west-250_normed.fits"]

# 350um maps, one per region; order must match distances/offsets below.
fits350 = ["pipeCenterB59-350.fits", "lupusI-350.fits", "aquilaM2-350.fits",
           "orionB-350.fits", "polaris-350.fits", "chamaeleonI-350.fits",
           "perseus04-350.fits", "taurusN3-350.fits", "ic5146-350.fits",
           "orionA-C-350.fits", "orionA-S-350.fits",
           "california_cntr-350.fits", "california_east-350.fits",
           "california_west-350.fits"]

# Distance to each region in parsecs (same order as the FITS lists).
distances = [145., 150., 260., 400., 150., 170., 235., 140., 460., 400.,
             400., 450., 450., 450.]  # pc

# Per-region offsets (same order). NOTE(review): units/derivation not shown
# here -- confirm against wrapper(); the -879.063 entry looks suspicious.
offsets = [31.697, 14.437, 85.452, 26.216, 9.330, -879.063, 23.698,
           21.273, 20.728, 32.616, 35.219, 9.005, 10.124, 14.678]

# One beamwidth per map -- presumably arcsec FWHM for 250um/350um
# (18.2"/24.9"); TODO confirm against wrapper().
beamwidth_250 = [18.2] * len(fits250)
beamwidth_350 = [24.9] * len(fits350)

# Inputs (adjust to desired wavelength); uncomment the "+" tails to process
# both bands in a single run.
beamwidths = beamwidth_350  # + beamwidth_350
distances = distances  # + distances (no-op self-assignment as written)
fits_files = fits350  # + fits350

print "Started at " + str(datetime.now())

if not MULTICORE:
    # Serial path: one wrapper() call per map, matched by index.
    for i, filename in enumerate(fits_files):
        wrapper(filename, distances[i], beamwidths[i], offsets[i],
                verbose=False)
else:
    # Parallel path: single_input presumably unpacks each
    # (file, distance, beamwidth, offset) tuple -- izip is Python 2
    # itertools, so the argument lists are consumed lazily.
    pool = Pool(processes=NCORES)
    pool.map(single_input, izip(fits_files, distances, beamwidths, offsets))
    pool.close()
    # pool.join()