def main(): """ Python, mpi4py parallel hello world. """ from mpi4py import MPI import sys import numpy as np num_proc = MPI.COMM_WORLD.Get_size() my_rank = MPI.COMM_WORLD.Get_rank() node_name = MPI.Get_processor_name() comm = MPI.COMM_WORLD if (my_rank == 0): sys.stdout.write(" %d MPI Processes are now active.\n" %(num_proc)) sys.stdout.flush() comm.Barrier() # Create some data -- simple sine wave two_pi = 2.0*np.pi nx = 1024 dx = two_pi/nx x = np.zeros(nx,dtype='float64') y = np.zeros(nx,dtype='float64') kx = two_pi/(1.0+my_rank) # each process has a different wavenumber amp = (1.0+my_rank) # and a different amplitude for i in range(nx): x[i] = i*dx y[i] = amp*np.sin(x[i]*kx) #Evaluate the sum, min, and max locally local_sum = np.sum(y) local_max = np.max(y) local_min = np.min(y) #Print the local values comm.Barrier() for i in range(num_proc): if (i == my_rank): if (i == 0): sys.stdout.write("\n\n") sys.stdout.write(" Local Results\n") sys.stdout.write( " Rank %d reports (MIN, MAX, SUM): %f, %f, %f.\n" % (my_rank, local_min, local_max, local_sum)) sys.stdout.flush() comm.Barrier() #Create variables to hold the global sum,min, and max global_sum = np.ndarray(1,dtype='float64') global_min = np.ndarray(1,dtype='float64') global_max = np.ndarray(1,dtype='float64') #Peform the reductions comm.Allreduce([local_sum, MPI.DOUBLE], [global_sum,MPI.DOUBLE], op=MPI.SUM) comm.Allreduce([local_min, MPI.DOUBLE], [global_min,MPI.DOUBLE], op=MPI.MIN) comm.Allreduce([local_max, MPI.DOUBLE], [global_max,MPI.DOUBLE], op=MPI.MAX) comm.Barrier() for i in range(num_proc): if (i == my_rank): if (i == 0): sys.stdout.write("\n\n") sys.stdout.write(" Global Results\n") sys.stdout.write( " Rank %d reports (MIN, MAX, SUM): %f, %f, %f.\n" % (my_rank, global_min, global_max, global_sum)) sys.stdout.flush() comm.Barrier() MPI.Finalize()
def testInterpKernelDEC_2D(self):
    size = MPI.COMM_WORLD.size
    rank = MPI.COMM_WORLD.rank
    if size != 5:
        raise RuntimeError("Expect MPI_COMM_WORLD size == 5")
    print(rank)

    nproc_source = 3
    procs_source = list(range(nproc_source))
    procs_target = list(range(size - nproc_source + 1, size))

    interface = CommInterface()
    target_group = MPIProcessorGroup(interface, procs_target)
    source_group = MPIProcessorGroup(interface, procs_source)
    dec = InterpKernelDEC(source_group, target_group)

    mesh = 0
    support = 0
    paramesh = 0
    parafield = 0
    icocofield = 0

    data_dir = os.environ['MEDCOUPLING_ROOT_DIR']
    # os.environ['TMP'] would raise KeyError when TMP is unset; use get()
    tmp_dir = os.environ.get('TMP', '')
    if not tmp_dir:
        tmp_dir = "/tmp"

    filename_xml1 = os.path.join(data_dir, "share/resources/med/square1_split")
    filename_xml2 = os.path.join(data_dir, "share/resources/med/square2_split")

    MPI.COMM_WORLD.Barrier()
    if source_group.containsMyRank():
        filename = filename_xml1 + str(rank + 1) + ".med"
        meshname = "Mesh_2_" + str(rank + 1)
        mesh = ReadUMeshFromFile(filename, meshname, 0)
        paramesh = ParaMESH(mesh, source_group, "source mesh")
        comptopo = ComponentTopology()
        parafield = ParaFIELD(ON_CELLS, NO_TIME, paramesh, comptopo)
        parafield.getField().setNature(IntensiveMaximum)
        nb_local = mesh.getNumberOfCells()
        value = [1.0] * nb_local
        parafield.getField().setValues(value)
        icocofield = ICoCoMEDField(parafield.getField())
        dec.attachLocalField(icocofield)
    else:
        filename = filename_xml2 + str(rank - nproc_source + 1) + ".med"
        meshname = "Mesh_3_" + str(rank - nproc_source + 1)
        mesh = ReadUMeshFromFile(filename, meshname, 0)
        paramesh = ParaMESH(mesh, target_group, "target mesh")
        comptopo = ComponentTopology()
        parafield = ParaFIELD(ON_CELLS, NO_TIME, paramesh, comptopo)
        parafield.getField().setNature(IntensiveMaximum)
        nb_local = mesh.getNumberOfCells()
        value = [0.0] * nb_local
        parafield.getField().setValues(value)
        icocofield = ICoCoMEDField(parafield.getField())
        dec.attachLocalField(icocofield)

    if source_group.containsMyRank():
        field_before_int = parafield.getVolumeIntegral(0, True)
        dec.synchronize()
        dec.setForcedRenormalization(False)
        dec.sendData()
        dec.recvData()
        field_after_int = parafield.getVolumeIntegral(0, True)
        self.assertTrue(math.fabs(field_after_int - field_before_int) < 1e-8)
    else:
        dec.synchronize()
        dec.setForcedRenormalization(False)
        dec.recvData()
        dec.sendData()

    # Release the ParaMEDMEM objects before finalizing
    interface = 0
    target_group = 0
    source_group = 0
    dec = 0
    mesh = 0
    support = 0
    paramesh = 0
    parafield = 0
    icocofield = 0

    MPI.COMM_WORLD.Barrier()
    MPI.Finalize()
def main(config, template_path, output_path, forest_path, populations,
         distance_bin_size, io_size, chunk_size, value_chunk_size, cache_size,
         verbose):
    """
    :param config:
    :param template_path:
    :param forest_path:
    :param populations:
    :param io_size:
    :param chunk_size:
    :param value_chunk_size:
    :param cache_size:
    """
    utils.config_logging(verbose)
    logger = utils.get_script_logger(script_name)

    comm = MPI.COMM_WORLD
    rank = comm.rank

    env = Env(comm=MPI.COMM_WORLD, config_file=config,
              template_paths=template_path)
    configure_hoc_env(env)

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    if output_path is None:
        output_path = forest_path

    if rank == 0:
        if not os.path.isfile(output_path):
            input_file = h5py.File(forest_path, 'r')
            output_file = h5py.File(output_path, 'w')
            input_file.copy('/H5Types', output_file)
            input_file.close()
            output_file.close()
    comm.barrier()

    layers = env.layers
    layer_idx_dict = {layers[layer_name]: layer_name
                      for layer_name in ['GCL', 'IML', 'MML', 'OML', 'Hilus']}

    (pop_ranges, _) = read_population_ranges(forest_path, comm=comm)
    start_time = time.time()
    for population in populations:
        logger.info('Rank %i population: %s' % (rank, population))
        count = 0
        (population_start, _) = pop_ranges[population]
        template_class = load_cell_template(env, population,
                                            bcast_template=True)
        measures_dict = {}
        for gid, morph_dict in NeuroH5TreeGen(forest_path, population,
                                              io_size=io_size, comm=comm,
                                              topology=True):
            if gid is not None:
                logger.info('Rank %i gid: %i' % (rank, gid))
                cell = cells.make_neurotree_cell(template_class,
                                                 neurotree_dict=morph_dict,
                                                 gid=gid)
                secnodes_dict = morph_dict['section_topology']['nodes']

                apicalidx = set(cell.apicalidx)
                basalidx = set(cell.basalidx)

                dendrite_area_dict = {k: 0.0 for k in layer_idx_dict}
                dendrite_length_dict = {k: 0.0 for k in layer_idx_dict}
                dendrite_distances = []
                dendrite_diams = []
                for (i, sec) in enumerate(cell.sections):
                    if (i in apicalidx) or (i in basalidx):
                        secnodes = secnodes_dict[i]
                        for seg in sec.allseg():
                            L = seg.sec.L
                            nseg = seg.sec.nseg
                            seg_l = L / nseg
                            seg_area = h.area(seg.x)
                            seg_diam = seg.diam
                            seg_distance = get_distance_to_node(
                                cell, list(cell.soma)[0], seg.sec, seg.x)
                            dendrite_diams.append(seg_diam)
                            dendrite_distances.append(seg_distance)
                            layer = synapses.get_node_attribute(
                                'layer', morph_dict, seg.sec, secnodes, seg.x)
                            dendrite_length_dict[layer] += seg_l
                            dendrite_area_dict[layer] += seg_area

                dendrite_distance_array = np.asarray(dendrite_distances)
                dendrite_diam_array = np.asarray(dendrite_diams)
                dendrite_distance_bin_range = int(
                    (np.max(dendrite_distance_array) -
                     np.min(dendrite_distance_array)) / distance_bin_size) + 1
                dendrite_distance_counts, dendrite_distance_edges = \
                    np.histogram(dendrite_distance_array,
                                 bins=dendrite_distance_bin_range,
                                 density=False)
                dendrite_diam_sums, _ = \
                    np.histogram(dendrite_distance_array,
                                 weights=dendrite_diam_array,
                                 bins=dendrite_distance_bin_range,
                                 density=False)
                dendrite_mean_diam_hist = np.zeros_like(dendrite_diam_sums)
                np.divide(dendrite_diam_sums, dendrite_distance_counts,
                          where=dendrite_distance_counts > 0,
                          out=dendrite_mean_diam_hist)

                dendrite_area_per_layer = np.asarray(
                    [dendrite_area_dict[k]
                     for k in sorted(dendrite_area_dict.keys())],
                    dtype=np.float32)
                dendrite_length_per_layer = np.asarray(
                    [dendrite_length_dict[k]
                     for k in sorted(dendrite_length_dict.keys())],
                    dtype=np.float32)

                measures_dict[gid] = {
                    'dendrite_distance_hist_edges':
                        np.asarray(dendrite_distance_edges, dtype=np.float32),
                    'dendrite_distance_counts':
                        np.asarray(dendrite_distance_counts, dtype=np.int32),
                    'dendrite_mean_diam_hist':
                        np.asarray(dendrite_mean_diam_hist, dtype=np.float32),
                    'dendrite_area_per_layer': dendrite_area_per_layer,
                    'dendrite_length_per_layer': dendrite_length_per_layer,
                }
                del cell
                count += 1
            else:
                logger.info('Rank %i gid is None' % rank)
        append_cell_attributes(output_path, population, measures_dict,
                               namespace='Tree Measurements', comm=comm,
                               io_size=io_size, chunk_size=chunk_size,
                               value_chunk_size=value_chunk_size,
                               cache_size=cache_size)
    MPI.Finalize()
def ABC_static_test(pp=None, sp=None):
    """
    Arguments:
    ----------
    pp: (optional) program parameters, parsed by the argument parser
        provided by this file
    sp: (optional) solver parameters, parsed by spectralLES.parser
    """
    if comm.rank == 0:
        print("\n----------------------------------------------------------")
        print("MPI-parallel Python spectralLES simulation of problem \n"
              "`Homogeneous Isotropic Turbulence' started with "
              "{} tasks at {}.".format(comm.size, timeofday()))
        print("----------------------------------------------------------")

    # ------------------------------------------------------------------
    # Get the problem and solver parameters and assert compliance
    if pp is None:
        pp = hit_parser.parse_known_args()[0]
    if sp is None:
        sp = spectralLES.parser.parse_known_args()[0]

    if comm.rank == 0:
        print('\nProblem Parameters:\n-------------------')
        for k, v in vars(pp).items():
            print(k, v)
        print('\nSpectralLES Parameters:\n-----------------------')
        for k, v in vars(sp).items():
            print(k, v)
        print("\n----------------------------------------------------------\n")

    assert len(set(pp.N)) == 1, ('Error, this beta-release HIT program '
                                 'requires equal mesh dimensions')
    N = pp.N[0]
    assert len(set(pp.L)) == 1, ('Error, this beta-release HIT program '
                                 'requires equal domain dimensions')
    L = pp.L[0]

    if N % comm.size > 0:
        if comm.rank == 0:
            print('Error: job started with improper number of MPI tasks'
                  ' for the size of the data specified!')
        MPI.Finalize()
        sys.exit(1)

    # ------------------------------------------------------------------
    # Configure the LES solver
    solver = staticGeneralizedEddyViscosityLES(
        Smagorinsky=True, comm=comm, **vars(sp))
    solver.computeAD = solver.computeAD_vorticity_form

    Sources = [solver.computeSource_linear_forcing,
               solver.computeSource_Smagorinsky_SGS,
               # solver.computeSource_4termGEV_SGS,
               ]

    # C1 = np.array([-6.39e-02])
    C3 = np.array([-3.75e-02, 6.2487e-02, 6.9867e-03, 0.0])
    C4 = np.array([-3.15e-02, -5.25e-02, 2.7e-02, 2.7e-02])

    kwargs = dict(C1=-6.39e-02, C=C3*solver.D_les**2, dvScale=None)

    U_hat = solver.U_hat
    U = solver.U
    Kmod = np.floor(np.sqrt(solver.Ksq)).astype(int)

    # ------------------------------------------------------------------
    # Form HIT initial conditions from either user-defined values or
    # physics-based relationships
    Urms = 1.083*(pp.epsilon*L)**(1./3.)            # empirical coefficient
    Einit = getattr(pp, 'Einit', None) or Urms**2   # == 2*KE_equilibrium
    kexp = getattr(pp, 'kexp', None) or -1./3.      # -> E(k) ~ k^(-2./3.)
    kpeak = getattr(pp, 'kpeak', None) or N//4      # ~ kmax/2

    # currently using a fixed random seed for testing
    solver.initialize_HIT_random_spectrum(Einit, kexp, kpeak, rseed=comm.rank)

    # ------------------------------------------------------------------
    # Configure a spatial field writer
    writer = mpiWriter(comm, odir=pp.odir, N=N)
    Ek_fmt = "\widehat{{{0}}}^*\widehat{{{0}}}".format

    # -------------------------------------------------------------------------
    # Set up the various time and IO counters
    tauK = sqrt(pp.nu/pp.epsilon)   # Kolmogorov time-scale
    taul = 0.11*sqrt(3)*L/Urms      # 0.11 is an empirical coefficient

    if pp.tlimit == np.inf:         # np.Inf was removed in NumPy 2.0
        pp.tlimit = 200*taul

    dt_rst = getattr(pp, 'dt_rst', None) or taul
    dt_spec = getattr(pp, 'dt_spec', None) or 0.2*taul
    dt_drv = getattr(pp, 'dt_drv', None) or 0.25*tauK

    t_sim = t_rst = t_spec = t_drv = 0.0
    tstep = irst = ispec = 0
    tseries = []

    if comm.rank == 0:
        print('\ntau_ell = %.6e\ntau_K = %.6e\n' % (taul, tauK))

    # -------------------------------------------------------------------------
    # Run the simulation
    if comm.rank == 0:
        t1 = time.time()

    while t_sim < pp.tlimit+1.e-8:

        # -- Update the dynamic dt based on the CFL constraint
        dt = solver.new_dt_constant_nu(pp.cfl)
        t_test = t_sim + 0.5*dt

        # -- output/store a log every step if needed/wanted
        KE = 0.5*comm.allreduce(psum(np.square(U)))/solver.Nx
        tseries.append([tstep, t_sim, KE])

        # -- output KE and enstrophy spectra
        if t_test >= t_spec:

            # -- output message log to screen on spectrum output only
            if comm.rank == 0:
                print("cycle = %7d time = %15.8e dt = %15.8e KE = %15.8e"
                      % (tstep, t_sim, dt, KE))

            # -- output kinetic energy spectrum to file
            spect3d = np.sum(np.real(U_hat*np.conj(U_hat)), axis=0)
            spect3d[..., 0] *= 0.5
            spect1d = shell_average(comm, spect3d, Kmod)

            if comm.rank == 0:
                fname = '%s/%s-%3.3d_KE.spectra' % (pp.adir, pp.pid, ispec)
                fh = open(fname, 'w')
                metadata = Ek_fmt('u_i')
                fh.write('%s\n' % metadata)
                spect1d.tofile(fh, sep='\n', format='% .8e')
                fh.close()

            t_spec += dt_spec
            ispec += 1

        # -- output physical-space solution fields for restarting and analysis
        if t_test >= t_rst:
            writer.write_scalar('%s-Velocity1_%3.3d.rst' % (pp.pid, irst),
                                U[0], np.float64)
            writer.write_scalar('%s-Velocity2_%3.3d.rst' % (pp.pid, irst),
                                U[1], np.float64)
            writer.write_scalar('%s-Velocity3_%3.3d.rst' % (pp.pid, irst),
                                U[2], np.float64)
            t_rst += dt_rst
            irst += 1

        # -- Update the forcing mean scaling
        if t_test >= t_drv:
            # call solver.computeSource_linear_forcing to compute dvScale only
            kwargs['dvScale'] = Sources[0](computeRHS=False)
            t_drv += dt_drv

        # -- integrate the solution forward in time
        solver.RK4_integrate(dt, *Sources, **kwargs)

        t_sim += dt
        tstep += 1

        sys.stdout.flush()  # forces Python 3 to flush print statements

    # -------------------------------------------------------------------------
    # Finalize the simulation
    if comm.rank == 0:
        t2 = time.time()
        print('Program took %12.7f s' % ((t2-t1)))

    KE = 0.5*comm.allreduce(psum(np.square(U)))/solver.Nx
    tseries.append([tstep, t_sim, KE])

    if comm.rank == 0:
        fname = '%s/%s-%3.3d_KE_tseries.txt' % (pp.adir, pp.pid, ispec)
        header = 'Kinetic Energy Timeseries,\n# columns: tstep, time, KE'
        np.savetxt(fname, tseries, fmt='%10.5e', header=header)

        print("cycle = %7d time = %15.8e dt = %15.8e KE = %15.8e"
              % (tstep, t_sim, dt, KE))
        print("\n----------------------------------------------------------")
        print("MPI-parallel Python spectralLES simulation finished at {}."
              .format(timeofday()))
        print("----------------------------------------------------------")

    # -- output kinetic energy spectrum to file
    spect3d = np.sum(np.real(U_hat*np.conj(U_hat)), axis=0)
    spect3d[..., 0] *= 0.5
    spect1d = shell_average(comm, spect3d, Kmod)

    if comm.rank == 0:
        fh = open('%s/%s-%3.3d_KE.spectra' % (pp.adir, pp.pid, ispec), 'w')
        metadata = Ek_fmt('u_i')
        fh.write('%s\n' % metadata)
        spect1d.tofile(fh, sep='\n', format='% .8e')
        fh.close()

    # -- output physical-space solution fields for restarting and analysis
    writer.write_scalar('%s-Velocity1_%3.3d.rst' % (pp.pid, irst),
                        U[0], np.float64)
    writer.write_scalar('%s-Velocity2_%3.3d.rst' % (pp.pid, irst),
                        U[1], np.float64)
    writer.write_scalar('%s-Velocity3_%3.3d.rst' % (pp.pid, irst),
                        U[2], np.float64)

    return
def myquit(mes):
    # Print and flush before finalizing, so the message is not lost if the
    # runtime tears down I/O during MPI_Finalize.
    print(mes)
    sys.stdout.flush()
    MPI.Finalize()
    sys.exit()
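
# Hedged alternative sketch: MPI.Finalize() is a collective call, so if one
# rank reaches myquit() while the others are still inside collective
# operations, the job can hang. mpi4py exposes MPI_Abort as Comm.Abort(),
# which tears down every rank; the helper name `myabort` is illustrative and
# not part of the original code.
def myabort(mes, code=1):
    print(mes)
    sys.stdout.flush()
    MPI.COMM_WORLD.Abort(code)  # terminates all ranks in the job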
def main():
    if myrank == 0:
        time0 = time.time()
        print(' ')
        print('nprocs = ', nprocs)

    assert not os.path.exists(savedir + 'Sigma') and \
           not os.path.exists(savedir + 'Gloc'), \
           'Cannot overwrite existing data'

    volume = Nkx * Nky

    k2p, k2i, i2k = init_k2p_k2i_i2k(Nkx, Nky, nprocs, myrank)
    kpp = np.count_nonzero(k2p == myrank)

    integrator = integration.integrator(6, nt, beta, ntau)

    def H(kx, ky, t):
        return -2.0 * np.cos(kx) * np.ones([norb, norb])

    constants = (myrank, Nkx, Nky, ARPES, kpp, k2p, k2i, tmax, nt, beta,
                 ntau, norb, pump)

    UksR, UksI, eks, fks, Rs, Ht = init_Uks(H, dt_fine, *constants,
                                            version='higher order')
    print('Done initializing Us')

    SigmaM = matsubara(0, 0, 0, 0)
    SigmaM.load(savedir, 'SigmaM')

    # Solve the real-axis part
    # ---------------------------------------------------------
    D = compute_D0R(norb, omega, nt, tmax, ntau, beta, +1)
    print('Done initializing D')

    Sigma0 = langreth(norb, nt, tmax, ntau, beta, -1)
    Sigma = langreth(norb, nt, tmax, ntau, beta, -1)

    iter_selfconsistency = 4
    change = 0.0
    for i in range(iter_selfconsistency):
        print('iteration : %d' % i)

        Sigma0.copy(Sigma)

        Gloc = langreth(norb, nt, tmax, ntau, beta, -1)
        for ik in range(kpp):
            ik1, ik2 = i2k[ik]
            G0M = compute_G0M(ik1, ik2, UksI, eks, fks, Rs, *constants)
            G0 = compute_G0R(ik1, ik2, UksR, UksI, eks, fks, Rs, *constants)
            if i == 0:
                # first iteration: Sigma is zero, so G is just G0
                G = G0
            else:
                G = langreth(norb, nt, tmax, ntau, beta, -1)
                integrator.dyson_langreth(G0M, SigmaM, G0, Sigma, G)
            Gloc.add(G)

        Sigma = langreth(norb, nt, tmax, ntau, beta, -1)
        if nprocs == 1:
            Sigma.copy(Gloc)
        else:
            comm.Allreduce(Gloc.L, Sigma.L, op=MPI.SUM)
            comm.Allreduce(Gloc.R, Sigma.R, op=MPI.SUM)
            comm.Allreduce(Gloc.RI, Sigma.RI, op=MPI.SUM)
            comm.Allreduce(Gloc.deltaR, Sigma.deltaR, op=MPI.SUM)
        Sigma.multiply(D)
        Sigma.scale(1j * g2 / volume)
        print('Done computing Sigma')
        print('sigma size')
        print(np.mean(np.abs(Sigma.R)))
        print(np.mean(np.abs(Sigma.RI)))
        print(np.mean(np.abs(Sigma.L)))

        change = max([np.mean(abs(Sigma0.R - Sigma.R)),
                      np.mean(abs(Sigma0.L - Sigma.L)),
                      np.mean(abs(Sigma0.RI - Sigma.RI)),
                      np.mean(abs(Sigma0.deltaR - Sigma.deltaR))])
        print('change = %1.3e' % change)

    Sigma.save(savedir, 'Sigma')
    Gloc.save(savedir, 'Gloc')
    saveparams(savedir)

    if 'MPI' in sys.modules:
        MPI.Finalize()
def main():
    if myrank == 0:
        time0 = time.time()
        print(' ')
        print('nprocs = ', nprocs)

    Nkx = 1
    Nky = 1
    k2p, k2i, i2k = init_k2p_k2i_i2k(Nkx, Nky, nprocs, myrank)
    kpp = np.count_nonzero(k2p == myrank)

    beta = 10.0
    ARPES = False
    pump = 0
    g2 = None
    omega = None
    tmax = 1.0
    dt_fine = 0.001
    order = 6
    ntau = 800

    # nts = [10, 50, 100, 500]
    nts = [100]

    diffs = {}
    diffs['nts'] = nts
    diffs['U'] = []
    diffs['M'] = []
    diffs['IR'] = []
    diffs['R'] = []
    diffs['L'] = []

    delta = 0.3
    omega = 0.2
    V = 0.5
    norb = 2

    def H(kx, ky, t):
        # Equivalent rotating-field form:
        # Bx = 2.0*V*np.cos(2.0*omega*t)
        # By = 2.0*V*np.sin(2.0*omega*t)
        # Bz = 2.0*delta
        # return 0.5*np.array([[Bz, Bx-1j*By], [Bx+1j*By, -Bz]], dtype=complex)
        return np.array([[delta, V*np.exp(-2.0*1j*omega*t)],
                         [V*np.exp(+2.0*1j*omega*t), -delta]],
                        dtype=complex)

    def compute_time_dependent_G0(H, myrank, Nkx, Nky, ARPES, kpp, k2p, k2i,
                                  tmax, nt, beta, ntau, norb, pump):
        # check how much slower this is than computing G0 using U(t,t')
        norb = np.shape(H(0, 0, 0))[0]

        def H0(kx, ky, t):
            return np.zeros([norb, norb])

        constants = (myrank, Nkx, Nky, ARPES, kpp, k2p, k2i, tmax, nt, beta,
                     ntau, norb, pump)

        UksR, UksI, eks, fks, Rs, Ht = init_Uks(H0, dt_fine, *constants,
                                                version='higher order')
        G0M_ref = compute_G0M(0, 0, UksI, eks, fks, Rs, *constants)
        G0_ref = compute_G0R(0, 0, UksR, UksI, eks, fks, Rs, *constants)

        dt = 1.0*tmax/(nt-1)

        G0M = compute_G00M(0, 0, *constants)
        G0 = compute_G00R(0, 0, *constants)

        print('test G00')
        differences(G0_ref, G0)

        '''
        G00M = compute_G00M(0, 0, *constants)
        G00 = compute_G00R(0, 0, G0M, *constants)
        print('shape G0M', np.shape(G0M.M))
        print('shape G00M', np.shape(G00M.M))
        #for (i,j) in product(range(norb), repeat=2):
        #    plt(np.linspace(0,beta,ntau), [G0M.M[:,i,j].imag, G00M.M[:,i,j].imag], 'G00M %d %d'%(i,j))
        #    plt(np.linspace(0,beta,ntau), [G0M.M[:,i,j].real, G00M.M[:,i,j].real], 'G00M %d %d'%(i,j))
        print('diffs')
        print(dist(G0M.M, G00M.M))
        print(dist(G0.R, G00.R))
        print(dist(G0.L, G00.L))
        print(dist(G0.IR, G00.IR))
        exit()
        '''

        GM = matsubara(beta, ntau, norb, -1)
        SigmaM = matsubara(beta, ntau, norb, -1)
        SigmaM.deltaM = H(0, 0, 0)
        integrator.dyson_matsubara(G0M, SigmaM, GM)
        # check if SigmaM is the same as before

        G = langreth(norb, nt, tmax, ntau, beta, -1)
        Sigma = langreth(norb, nt, tmax, ntau, beta, -1)
        for it in range(nt):
            Sigma.deltaR[it] = H(0, 0, it*dt)
        integrator.dyson_langreth(G0M, SigmaM, G0, Sigma, G)

        return GM, G

    for nt in nts:
        constants = (myrank, Nkx, Nky, ARPES, kpp, k2p, k2i, tmax, nt, beta,
                     ntau, norb, pump)

        integrator = integration.integrator(6, nt, beta, ntau)

        # ---------------------------------------------------------
        # compute U(t,t') exactly and the corresponding G0
        # Uexact = np.array([expm(-1j*H(0,0,0)*t) for t in ts])
        ts = np.linspace(0, tmax, nt)

        _, UksI, eks, fks, Rs, _ = init_Uks(H, dt_fine, *constants)

        Omega = sqrt((delta-omega)**2 + V**2)
        Uexact = np.zeros([1, nt, norb, norb], dtype=np.complex128)
        Uexact[0, :, 0, 0] = np.exp(-1j*omega*ts) * (np.cos(Omega*ts)
                             - 1j*(delta-omega)/Omega*np.sin(Omega*ts))
        Uexact[0, :, 0, 1] = -1j*V/Omega*np.exp(-1j*omega*ts)*np.sin(Omega*ts)
        Uexact[0, :, 1, 0] = -1j*V/Omega*np.exp(1j*omega*ts)*np.sin(Omega*ts)
        Uexact[0, :, 1, 1] = np.exp(1j*omega*ts) * (np.cos(Omega*ts)
                             + 1j*(delta-omega)/Omega*np.sin(Omega*ts))

        print('check unitary ')
        p = np.einsum('tba,tbc->tac', np.conj(Uexact[0, :]), Uexact[0, :])
        print(dist(p, np.einsum('t,ab->tab', np.ones(nt),
                                np.diag(np.ones(norb)))))

        GMexact = compute_G0M(0, 0, UksI, eks, fks, Rs, *constants)
        Gexact = compute_G0R(0, 0, Uexact, UksI, eks, fks, Rs, *constants)

        # ---------------------------------------------------------
        # compute G0 with U(t,t') obtained via integration
        UksR, UksI, eks, fks, Rs, _ = init_Uks(H, dt_fine, *constants,
                                               version='higher order')
        G0M = compute_G0M(0, 0, UksI, eks, fks, Rs, *constants)
        G0 = compute_G0R(0, 0, UksR, UksI, eks, fks, Rs, *constants)

        # test for U(t,t')
        d = dist(Uexact, UksR)
        print('diff Uexact UksR', d)
        print("done computing G0 using U(t,t')")
        diffs['U'].append(d)

        # ---------------------------------------------------------
        # compute the non-interacting G for the norb x norb problem;
        # we compute this by solving Dyson's equation with the
        # time-dependent Hamiltonian as the self-energy
        GdysonM, Gdyson = compute_time_dependent_G0(H, *constants)
        print('done computing G0 via Dyson equation')

        '''
        ts = np.linspace(0,tmax,nt)
        Uexact = np.array([expm(-1j*H(0,0,0)*t) for t in ts])
        print('diff Uexact UksR', dist(Uexact, UksR[0]))
        for (i,j) in product(range(2), repeat=2):
            plt(ts, [UksR[0,:,i,j].real, Uexact[:,i,j].real], 'real part %d %d'%(i,j))
            plt(ts, [UksR[0,:,i,j].imag, Uexact[:,i,j].imag], 'imag part %d %d'%(i,j))
        exit()
        '''

        # for (i,j) in product(range(norb), repeat=2):
        #     plt(np.linspace(0, beta, ntau), [G0.M[:,i,j].imag, GdysonM.M[:,i,j].imag], 'G0M')

        '''
        for (i,j) in product(range(norb), repeat=2):
            im([Gexact.L[:,i,:,j].real, G0.L[:,i,:,j].real, Gdyson.L[:,i,:,j].real], [0,tmax,0,tmax], 'G0 real L %d %d'%(i,j))
            im([Gexact.L[:,i,:,j].imag, G0.L[:,i,:,j].imag, Gdyson.L[:,i,:,j].imag], [0,tmax,0,tmax], 'G0 imag L %d %d'%(i,j))
        '''

        print('differences between G0 and Gexact')
        differences(G0, Gexact)

        print('differences between Gdyson and Gexact')
        differences(Gdyson, Gexact)

        '''
        for (i,j) in product(range(norb), repeat=2):
            print('i j %d %d'%(i,j))
            im([G0.R[:,i,:,j].imag, Gexact.R[:,i,:,j].imag], [0,tmax,0,tmax], 'R imag')
            im([G0.R[:,i,:,j].real, Gexact.R[:,i,:,j].real], [0,tmax,0,tmax], 'R real')
        '''

    plt_diffs(diffs)

    if 'MPI' in sys.modules:
        MPI.Finalize()
def master_run(args):
    flush = args['--flush']
    file_lst = args['<file-lst>']
    with open(file_lst) as f:
        _files = f.readlines()
    # remove trailing '\n'
    files = []
    for f in _files:
        files.append(f.rstrip('\n'))

    # load the hit-finding configuration file
    with open(args['<conf-file>']) as f:
        conf = yaml.safe_load(f)  # yaml.load without a Loader is deprecated

    # collect jobs
    dataset = conf['dataset']
    batch_size = int(args['--batch-size'])
    max_frames = int(args['--max-frames'])
    buffer_size = int(args['--buffer-size'])
    jobs, nb_frames = util.collect_jobs(files, dataset, batch_size,
                                        max_frames=max_frames)
    nb_jobs = len(jobs)
    print('%d frames, %d jobs to be processed' % (nb_frames, nb_jobs),
          flush=flush)

    # dispatch jobs
    job_id = 0
    reqs = {}
    peaks = []
    workers = set(range(1, size))
    finished_workers = set()
    for worker in workers:
        if job_id < nb_jobs:
            job = jobs[job_id]
        else:
            job = []  # dummy job
        comm.isend(job, dest=worker)
        reqs[worker] = comm.irecv(buf=buffer_size, source=worker)
        print('job %d/%d --> %d' % (job_id, nb_jobs, worker), flush=flush)
        job_id += 1

    while job_id < nb_jobs:
        stop = False
        time.sleep(0.1)  # take a break
        workers -= finished_workers
        for worker in workers:
            finished, result = reqs[worker].test()
            if finished:
                peaks += result
                if job_id < nb_jobs:
                    print('job %d/%d --> %d' % (job_id, nb_jobs, worker),
                          flush=flush)
                    comm.isend(stop, dest=worker)
                    comm.isend(jobs[job_id], dest=worker)
                    reqs[worker] = comm.irecv(buf=buffer_size, source=worker)
                    job_id += 1
                else:
                    stop = True
                    comm.isend(stop, dest=worker)
                    print('stop signal --> %d' % worker, flush=flush)
                    finished_workers.add(worker)

    all_done = False
    while not all_done:
        all_done = True
        workers -= finished_workers
        for worker in workers:
            finished, result = reqs[worker].test()
            if finished:
                peaks += result
                stop = True
                print('stop signal --> %d' % worker, flush=flush)
                comm.isend(stop, dest=worker)
                finished_workers.add(worker)
            else:
                all_done = False

    # build and save the peak powder pattern
    filepath = jobs[0][0]['filepath']
    frame = jobs[0][0]['frame']
    h5_obj = h5py.File(filepath, 'r')
    image = util.read_image(filepath, frame=frame, h5_obj=h5_obj,
                            dataset=dataset)['image']
    powder = np.zeros(image.shape)
    peaks = np.round(np.array(peaks)).astype(int)  # np.int is deprecated
    powder[peaks[:, 0], peaks[:, 1]] = 1
    powder_file = args['-o']
    dir_ = os.path.dirname(powder_file)
    if not os.path.isdir(dir_):
        os.mkdir(dir_)
    np.savez(powder_file, powder_pattern=powder, powder_peaks=peaks)
    print('All Done!', flush=flush)

    MPI.Finalize()
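
# For context, a minimal worker-side counterpart to the master loop above
# (a sketch under assumptions: `process_job` is a hypothetical per-job
# function, and the master alternates a stop flag and a job payload exactly as
# master_run does; this mirrors that handshake with blocking calls).
def worker_run():
    job = comm.recv(source=0)       # the first job is pushed unconditionally
    while True:
        result = process_job(job)   # hypothetical per-job work
        comm.send(result, dest=0)   # matched by the master's irecv
        stop = comm.recv(source=0)  # the master sends the stop flag first
        if stop:
            break
        job = comm.recv(source=0)   # then the next job payload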
def run(argv):
    comm.Barrier()
    start_time = MPI.Wtime()

    # broadcast parameters
    if rank == 0:
        iparams, txt_out_input = process_input(argv)
        iparams.flag_volume_correction = False
        iparams.flag_hush = True
        print(txt_out_input)
        frame_files = read_pickles(iparams.data)
    else:
        iparams = None
        frame_files = None
    comm.Barrier()

    # assign scaling task
    if rank == 0:
        master(frame_files, iparams, "scale")
        result = []
    else:
        result = client()
    result = comm.gather(result, root=0)
    comm.Barrier()

    # pre-merge task
    if rank == 0:
        results = sum(result, [])
        print("Scaling is done on %d cores for %d frames"
              % (size, len(results)))
        master(results, iparams, "pre_merge")
        result = []
    else:
        result = client()
    result = comm.gather(result, root=0)
    comm.Barrier()

    # merge task
    if rank == 0:
        print("Pre-merge is done on %d cores" % (len(result)))
        master(result, iparams, "merge")
        result = []
    else:
        result = client()

    # finalize merge
    result = comm.gather(result, root=0)
    comm.Barrier()
    if rank == 0:
        print("Merge completed on %d cores" % (len(result)))
        results = sum(result, [])
        mdh = merge_data_handler()
        txt_out_rejection = ""
        for _mdh, _txt_out_rejection in results:
            mdh.extend(_mdh)
            txt_out_rejection += _txt_out_rejection
        # select only indices with non-Inf, non-NaN stats
        selections = flex.bool([
            False if (math.isnan(r0) or math.isinf(r0)
                      or math.isnan(r1) or math.isinf(r1)) else True
            for r0, r1 in zip(mdh.r_meas_div, mdh.r_meas_divisor)
        ])
        mdh.reduce_by_selection(selections)
        its = intensities_scaler()
        mdh, txt_merge_mean_table = its.write_output(mdh, iparams, "test",
                                                     "average")
        print(txt_merge_mean_table)

    # collect the time profile
    comm.Barrier()
    end_time = MPI.Wtime()
    txt_time = "Elapsed Time (s):%10.2f\n" % (end_time - start_time)

    # write log output
    if rank == 0:
        print(txt_time)
        with open(os.path.join(iparams.run_no, "log.txt"), "w") as f:
            f.write(txt_out_input + txt_merge_mean_table + txt_time)
        with open(os.path.join(iparams.run_no, "rejections.txt"), "w") as f:
            f.write(txt_out_rejection)
    MPI.Finalize()
def master_run(args):
    flush = args['--flush']
    # make the hit directory if it does not exist
    hit_dir = args['<hit-dir>']
    if not os.path.isdir(hit_dir):
        os.mkdir(hit_dir)
    peak_file = args['<peak-file>']
    peak_info = np.load(peak_file)
    batch_size = int(args['--batch-size'])
    nb_jobs = int(np.ceil(len(peak_info) / batch_size))

    # collect jobs
    ids = np.array_split(np.arange(len(peak_info)), nb_jobs)
    jobs = []
    for i in range(len(ids)):
        jobs.append(peak_info[ids[i]])
    print('%d jobs, %d frames to be processed' % (nb_jobs, len(peak_info)),
          flush=flush)

    # other parameters
    buffer_size = int(args['--buffer-size'])
    update_freq = int(args['--update-freq'])

    # dispatch jobs
    job_id = 0
    reqs = {}
    workers = set(range(1, size))
    finished_workers = set()
    time_start = time.time()
    for worker in workers:
        if job_id < nb_jobs:
            job = jobs[job_id]
        else:
            job = []  # dummy job
        comm.isend(job, dest=worker)
        reqs[worker] = comm.irecv(buf=buffer_size, source=worker)
        print('job %d/%d --> slave %d' % (job_id, nb_jobs, worker),
              flush=flush)
        job_id += 1

    while job_id < nb_jobs:
        stop = False
        workers -= finished_workers
        time.sleep(0.1)  # take a break
        for worker in workers:
            finished, result = reqs[worker].test()
            if finished:
                if job_id < nb_jobs:
                    print('job %d/%d --> slave %d'
                          % (job_id, nb_jobs, worker), flush=flush)
                    comm.isend(stop, dest=worker)
                    comm.isend(jobs[job_id], dest=worker)
                    reqs[worker] = comm.irecv(buf=buffer_size, source=worker)
                    job_id += 1
                else:
                    stop = True
                    comm.isend(stop, dest=worker)
                    print('stop signal --> %d' % worker, flush=flush)
                    finished_workers.add(worker)
        if job_id % update_freq == 0:
            # update progress stats
            progress = float(job_id) / nb_jobs * 100
            stat_dict = {
                'progress': '%.2f%%' % progress,
                'duration/sec': 'not finished',
                'total jobs': nb_jobs,
            }
            stat_file = os.path.join(hit_dir, 'peak2cxi.yml')
            with open(stat_file, 'w') as f:
                yaml.dump(stat_dict, f, default_flow_style=False)

    all_done = False
    while not all_done:
        time.sleep(0.1)
        workers -= finished_workers
        all_done = True
        for worker in workers:
            finished, result = reqs[worker].test()
            if finished:
                stop = True
                comm.isend(stop, dest=worker)
                finished_workers.add(worker)
            else:
                all_done = False

    time_end = time.time()
    duration = time_end - time_start
    stat_dict = {
        'progress': 'done',
        'duration/sec': duration,
        'total jobs': nb_jobs,
    }
    stat_file = os.path.join(hit_dir, 'peak2cxi.yml')
    with open(stat_file, 'w') as f:
        yaml.dump(stat_dict, f, default_flow_style=False)
    print('All Done!', flush=flush)

    MPI.Finalize()
def exit(code):
    MPI.Finalize()
    sys.exit(code)
def receiveMessage(self, timeout=-1.0):
    """
    Receives a *message* (a Python object) and returns a tuple
    (*senderTaskId*, *message*).

    The sender of the *message* can be identified through the
    *senderTaskId* object of class :class:`MPITaskID`.

    If *timeout* (seconds) is negative the function waits (blocks) until
    some message arrives. If *timeout*>0 seconds pass without receiving a
    message, an empty tuple is returned. A zero *timeout* performs a
    nonblocking receive, which returns an empty tuple if no message is
    received. Note that timeout>0 is implemented somewhat kludgily, with a
    poll loop.

    In case of an error or a discarded message, returns ``None``.

    Handles transparently all

    * task exit notification messages
    * spawned function return value messages

    Discards all other low-level MPI messages that were not sent with the
    :meth:`sendMessage` method.
    """
    # receive any message from any source
    received = receiveMPIMessage(timeout)

    # None means error. No message.
    if received is None:
        return None

    # Empty tuple means timeout. No message.
    if len(received) == 0:
        return ()

    # Unpack tuple
    (source, msgTag, msg) = received

    # Is it a message?
    if msgTag == MPIMSGTAG_EXIT:
        # Request to exit the spawn loop; ignore it if we are the spawner
        if vmStatus is None:
            mpi.Finalize()
            sys.exit()
    elif msgTag == MPIMSGTAG_MESSAGE:
        # Unpack task numbers and message
        (fromTaskNumber, toTaskNumber, message) = msg
        # Discard if toTaskNumber does not match our task number
        if toTaskNumber != MPI.taskNumber:
            return None
        else:
            return (MPITaskID(source, fromTaskNumber), message)
    elif msgTag == MPIMSGTAG_TASK_RETVAL:
        # Unpack task number, success flag, and response
        (taskNumber, success, response) = msg
        return (MPITaskID(source, taskNumber),
                MsgTaskResult(success, response))
    elif msgTag == MPIMSGTAG_TASK_EXIT:
        # Unpack the task number
        taskNumber = msg
        if self.debug:
            DbgMsgOut("MPI", "Task " + str(MPITaskID(source, taskNumber))
                      + " exit detected.")
        # Update vmStatus if the message matches the taskNumber in vmStatus
        if MPI.vmStatus['slots'][source]['taskNumber'] == taskNumber:
            MPI.vmStatus['slots'][source]['taskNumber'] = -1
            host = MPI.vmStatus['slots'][source]['host']
            MPI.vmStatus['hosts'][host]['freeSlots'].add(source)
            MPI.vmStatus['usedSlots'].discard(source)
        taskID = MPITaskID(source, taskNumber)
        return (taskID, MsgTaskExit(taskID))
    else:
        # Unknown message: discard and return None.
        return None
                raise

            # Clear the working environment
            mpi_vm.cleanupEnvironment(taskStorage)

            # Default response
            if sendBack is not True:
                response = None

            # Flush stdout so the parent will receive it
            sys.stdout.flush()

            # Send back the response if requested
            if sendBack:
                comm.send((taskNumber, success, response),
                          dest=0, tag=MPIMSGTAG_TASK_RETVAL)

            # Send back the task exit message
            comm.send(taskNumber, dest=0, tag=MPIMSGTAG_TASK_EXIT)

            # At this point all MPIMSGTAG_MESSAGE messages are dropped.
            # They are processed only in spawned functions.

            # The worker exits here
            mpi.Finalize()
            sys.exit(0)
        else:
            # Spawner
            updateSpawnerInfo()
def input(path):
    """
    Simulate some random current input.

    :param path: the file for the configuration of the connection
    :return:
    """
    # Start the communication channels
    path_to_files = path
    # For NEST
    # Init connection
    print("Waiting for port details")
    info = MPI.INFO_NULL
    root = 0
    port = MPI.Open_port(info)
    fport = open(path_to_files, "w+")
    fport.write(port)
    fport.close()
    print('wait connection ' + port)
    sys.stdout.flush()
    comm = MPI.COMM_WORLD.Accept(port, info, root)
    print('connect to ' + port)

    # test one rate
    status_ = MPI.Status()
    check = np.empty(1, dtype='b')
    starting = 1
    while True:
        comm.Recv([check, 1, MPI.CXX_BOOL], source=0, tag=MPI.ANY_TAG,
                  status=status_)
        print(" start to send")
        sys.stdout.flush()
        print(" status a tag ", status_.Get_tag())
        sys.stdout.flush()
        if status_.Get_tag() == 0:
            # receive the list of ids
            size_list = np.empty(1, dtype='i')
            comm.Recv([size_list, 1, MPI.INT], source=0, tag=0,
                      status=status_)
            print("size list id", size_list)
            sys.stdout.flush()
            list_id = np.empty(size_list, dtype='i')
            comm.Recv([list_id, size_list, MPI.INT], source=0, tag=0,
                      status=status_)
            print(" id ", list_id)
            sys.stdout.flush()
            shape = np.random.randint(0, 50, 1, dtype='i') * 2
            data = starting + np.random.rand(shape[0]) * 200
            data = np.around(np.sort(np.array(data, dtype='d')), decimals=1)
            send_shape = np.array(np.concatenate([shape, shape]), dtype='i')
            comm.Send([send_shape, MPI.INT], dest=status_.Get_source(),
                      tag=list_id[0])
            print(" shape data ", shape)
            sys.stdout.flush()
            comm.Send([data, MPI.DOUBLE], dest=status_.Get_source(),
                      tag=list_id[0])
            print(" send data", data)
            sys.stdout.flush()
            comm.Recv([check, 1, MPI.CXX_BOOL], source=status_.Get_source(),
                      tag=MPI.ANY_TAG, status=status_)
            print("end run")
            sys.stdout.flush()
            starting += 200
        elif status_.Get_tag() == 2:
            print("end simulation")
            sys.stdout.flush()
            print("ending time : ", starting)
            sys.stdout.flush()
            break
        else:
            print(status_.Get_tag())
            break
    comm.Disconnect()
    MPI.Close_port(port)
    os.remove(path_to_files)
    print('exit')
    MPI.Finalize()
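
# A minimal client-side sketch for the Accept() above (assumptions: `port` is
# the string that input() wrote to its port file, and the peer follows the
# project-specific tag/message sequence, which is omitted here; only the
# connection handshake is shown). The helper name is illustrative.
def connect_to_input(port):
    info = MPI.INFO_NULL
    comm = MPI.COMM_WORLD.Connect(port, info, 0)  # peer of Accept() in input()
    # ... exchange messages following the protocol implemented by input() ...
    comm.Disconnect()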
def cleanup():
    # MPI_Initialized stays true after finalization, so also check
    # Is_finalized to avoid calling MPI.Finalize() twice.
    if MPI.Is_initialized() and not MPI.Is_finalized():
        MPI.Finalize()
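
# A minimal usage sketch for the guard above (assumption: this module is
# imported at startup): registering cleanup() with atexit finalizes MPI even
# when the interpreter exits through an unhandled exception. Note that mpi4py
# installs a similar exit hook itself unless automatic finalization is
# disabled via mpi4py.rc.
import atexit
atexit.register(cleanup)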
def main(config_file, config_prefix, input_path, population, template_paths,
         dataset_prefix, results_path, results_file_id, results_namespace_id,
         v_init, io_size, chunk_size, value_chunk_size, write_size, verbose):

    utils.config_logging(verbose)
    logger = utils.get_script_logger(os.path.basename(__file__))

    comm = MPI.COMM_WORLD
    rank = comm.rank

    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    if io_size == -1:
        io_size = comm.size

    if results_file_id is None:
        if rank == 0:
            # was `result_file_id = uuid.uuid4()`, a typo that left the
            # broadcast value None on every rank
            results_file_id = uuid.uuid4()
        results_file_id = comm.bcast(results_file_id, root=0)
    if results_namespace_id is None:
        results_namespace_id = 'Cell Clamp Results'
    comm = MPI.COMM_WORLD
    np.seterr(all='raise')
    verbose = True
    params = dict(locals())
    env = Env(**params)
    configure_hoc_env(env)
    if rank == 0:
        io_utils.mkout(env, env.results_file_path)
    env.comm.barrier()
    env.cell_selection = {}
    template_class = load_cell_template(env, population)

    if input_path is not None:
        env.data_file_path = input_path
        env.load_celltypes()

    synapse_config = env.celltypes[population]['synapses']

    weights_namespaces = []
    if 'weights' in synapse_config:
        has_weights = synapse_config['weights']
        if has_weights:
            if 'weights namespace' in synapse_config:
                weights_namespaces.append(synapse_config['weights namespace'])
            elif 'weights namespaces' in synapse_config:
                weights_namespaces.extend(synapse_config['weights namespaces'])
            else:
                weights_namespaces.append('Weights')
    else:
        has_weights = False

    start_time = time.time()
    count = 0
    gid_count = 0
    attr_dict = {}
    if input_path is None:
        cell_path = env.data_file_path
        connectivity_path = env.connectivity_file_path
    else:
        cell_path = input_path
        connectivity_path = input_path

    for gid, morph_dict in NeuroH5TreeGen(cell_path, population,
                                          io_size=io_size, comm=env.comm,
                                          topology=True):
        local_time = time.time()
        if gid is not None:
            color = 0
            comm0 = comm.Split(color, 0)

            logger.info('Rank %i gid: %i' % (rank, gid))
            cell_dict = {'morph': morph_dict}
            synapses_iter = read_cell_attribute_selection(
                cell_path, population, [gid], 'Synapse Attributes',
                comm=comm0)
            _, synapse_dict = next(synapses_iter)
            cell_dict['synapse'] = synapse_dict

            if has_weights:
                cell_weights_iters = [
                    read_cell_attribute_selection(cell_path, population,
                                                  [gid], weights_namespace,
                                                  comm=comm0)
                    for weights_namespace in weights_namespaces
                ]
                weight_dict = dict(zip_longest(weights_namespaces,
                                               cell_weights_iters))
                cell_dict['weight'] = weight_dict

            (graph, a) = read_graph_selection(
                file_name=connectivity_path, selection=[gid],
                namespaces=['Synapses', 'Connections'], comm=comm0)
            cell_dict['connectivity'] = (graph, a)

            gid_count += 1

            attr_dict[gid] = {}
            attr_dict[gid].update(cell_clamp.measure_passive(
                gid, population, v_init, env, cell_dict=cell_dict))
            attr_dict[gid].update(cell_clamp.measure_ap(
                gid, population, v_init, env, cell_dict=cell_dict))
            attr_dict[gid].update(cell_clamp.measure_ap_rate(
                gid, population, v_init, env, cell_dict=cell_dict))
            attr_dict[gid].update(cell_clamp.measure_fi(
                gid, population, v_init, env, cell_dict=cell_dict))
        else:
            color = 1
            comm0 = comm.Split(color, 0)
            logger.info('Rank %i gid is None' % (rank))
        comm0.Free()

        count += 1
        if (results_path is not None) and (count % write_size == 0):
            append_cell_attributes(env.results_file_path, population,
                                   attr_dict,
                                   namespace=env.results_namespace_id,
                                   comm=env.comm, io_size=env.io_size,
                                   chunk_size=chunk_size,
                                   value_chunk_size=value_chunk_size)
            attr_dict = {}

    env.comm.barrier()
    if results_path is not None:
        append_cell_attributes(env.results_file_path, population, attr_dict,
                               namespace=env.results_namespace_id,
                               comm=env.comm, io_size=env.io_size,
                               chunk_size=chunk_size,
                               value_chunk_size=value_chunk_size)
    global_count = env.comm.gather(gid_count, root=0)

    MPI.Finalize()
def finalize():
    MPI.Finalize()
def main():
    if myrank == 0:
        time0 = time.time()
        print(' ')
        print('nprocs = ', nprocs)

    Nkx = 1
    Nky = 1
    k2p, k2i, i2k = init_k2p_k2i_i2k(Nkx, Nky, nprocs, myrank)
    kpp = np.count_nonzero(k2p == myrank)

    beta = 2.0
    ARPES = False
    pump = 0
    g2 = None
    omega = None
    tmax = 5.0
    dim_embedding = 2
    order = 6
    ntau = 200

    # nts = [400, 800, 1000]
    # nts = [50, 100, 500]
    nts = [10, 50, 100, 500]

    diffs = {}
    diffs['nts'] = nts
    diffs['M'] = []
    diffs['RI'] = []
    diffs['R'] = []
    diffs['L'] = []

    # random H
    np.random.seed(1)
    norb = 4
    Hmat = 0.1 * np.random.randn(norb, norb) \
        + 0.1 * 1j * np.random.randn(norb, norb)
    Hmat += np.conj(Hmat).T
    Tmat = 0.1 * np.random.randn(norb, norb) \
        + 0.1 * 1j * np.random.randn(norb, norb)
    Tmat += np.conj(Tmat).T

    # example H
    '''
    norb = 3
    e0 = -0.2
    e1 = -0.1
    e2 = 0.2
    lamb1 = 1.0
    lamb2 = 1.2
    Hmat = np.array([[e0, lamb1, lamb2],
                     [np.conj(lamb1), e1, 0],
                     [np.conj(lamb2), 0, e2]],
                    dtype=complex)
    '''

    print('\nH : ')
    print(Hmat)
    print('')

    # def f(t): return np.cos(0.01*t)
    def f(t):
        return 1.0

    for nt in nts:
        dt_fine = 0.1 * tmax / (nt - 1)

        # ---------------------------------------------------------
        # compute the non-interacting G for the norb x norb problem
        norb = np.shape(Hmat)[0]

        def H(kx, ky, t):
            return Hmat + Tmat * np.cos(0.2 * t)

        constants = (myrank, Nkx, Nky, ARPES, kpp, k2p, k2i, tmax, nt, beta,
                     ntau, norb, pump)

        UksR, UksI, eks, fks, Rs, Ht = init_Uks(H, dt_fine, *constants,
                                                version='higher order')
        GexactM = compute_G0M(0, 0, UksI, eks, fks, Rs, *constants)
        Gexact = compute_G0R(0, 0, UksR, UksI, eks, fks, Rs, *constants)

        # ------------------------------------------------------
        # compute Sigma_embedding
        # Sigma = sum_{i,j} H0i(t) Gij(t,t') Hj0(t')
        norb = np.shape(Hmat)[0] - dim_embedding
        SigmaM = matsubara(beta, ntau, norb, -1)

        def H(kx, ky, t):
            return Hmat[dim_embedding:, dim_embedding:] \
                + Tmat[dim_embedding:, dim_embedding:] * np.cos(0.2 * t)

        constants = (myrank, Nkx, Nky, ARPES, kpp, k2p, k2i, tmax, nt, beta,
                     ntau, norb, pump)

        UksR, UksI, eks, fks, Rs, _ = init_Uks(H, dt_fine, *constants,
                                               version='higher order')
        SM = compute_G0M(0, 0, UksI, eks, fks, Rs, *constants)
        SM.M = np.einsum('hi,mij,jk->mhk',
                         Ht[0, 0, :dim_embedding, dim_embedding:],
                         SM.M,
                         Ht[0, 0, dim_embedding:, :dim_embedding])

        # taus = np.linspace(0, beta, ntau)
        # plt(taus, [SM.M[:,0,0].real, SM.M[:,0,0].imag], 'SM')
        # exit()

        S = compute_G0R(0, 0, UksR, UksI, eks, fks, Rs, *constants)
        S.R = np.einsum('mhi,minj,njk->mhnk',
                        Ht[0, :, :dim_embedding, dim_embedding:],
                        S.R,
                        Ht[0, :, dim_embedding:, :dim_embedding])
        S.L = np.einsum('mhi,minj,njk->mhnk',
                        Ht[0, :, :dim_embedding, dim_embedding:],
                        S.L,
                        Ht[0, :, dim_embedding:, :dim_embedding])
        S.RI = np.einsum('mhi,minj,jk->mhnk',
                         Ht[0, :, :dim_embedding, dim_embedding:],
                         S.RI,
                         Ht[0, 0, dim_embedding:, :dim_embedding])

        # dt = 1.0*tmax/(nt-1)
        # ts = np.linspace(0, tmax, nt)

        SigmaM = matsubara(beta, ntau, dim_embedding, -1)
        SigmaM.M = SM.M

        Sigma = langreth(norb, nt, tmax, ntau, beta, -1)
        Sigma.L = S.L
        Sigma.R = S.R
        Sigma.RI = S.RI

        # ------------------------------------------------------
        # solve the embedding problem
        norb = dim_embedding

        def H(kx, ky, t):
            return Hmat[:dim_embedding, :dim_embedding] \
                + Tmat[:dim_embedding, :dim_embedding] * np.cos(0.2 * t)

        constants = (myrank, Nkx, Nky, ARPES, kpp, k2p, k2i, tmax, nt, beta,
                     ntau, norb, pump)

        # Ht = init_Ht(H, *constants)
        UksR, UksI, eks, fks, Rs, _ = init_Uks(H, dt_fine, *constants,
                                               version='higher order')
        G0M = compute_G0M(0, 0, UksI, eks, fks, Rs, *constants)
        G0 = compute_G0R(0, 0, UksR, UksI, eks, fks, Rs, *constants)

        integrator = integration.integrator(6, nt, beta, ntau)

        GM = matsubara(beta, ntau, norb, -1)
        integrator.dyson_matsubara(G0M, SigmaM, GM)
        print('differences Matsubara')
        diff = np.mean(abs(GM.M
                           - GexactM.M[:, :dim_embedding, :dim_embedding]))
        print('diff = %1.3e' % diff)

        G = langreth(norb, nt, tmax, ntau, beta, -1)
        integrator.dyson_langreth(G0M, SigmaM, G0, Sigma, G)

        # ------------------------------------------------------
        # compute differences
        diff = np.mean(abs(GM.M
                           - GexactM.M[:, :dim_embedding, :dim_embedding]))
        print('diff = %1.3e' % diff)
        diffs['M'].append(diff)

        diff = np.mean(abs(G.R
                           - Gexact.R[:, :dim_embedding, :, :dim_embedding]))
        print('diff langreth R = %1.3e' % diff)
        diffs['R'].append(diff)

        diff = np.mean(abs(G.RI
                           - Gexact.RI[:, :dim_embedding, :, :dim_embedding]))
        print('diff langreth RI = %1.3e' % diff)
        diffs['RI'].append(diff)

        diff = np.mean(abs(G.L
                           - Gexact.L[:, :dim_embedding, :, :dim_embedding]))
        print('diff langreth L = %1.3e' % diff)
        diffs['L'].append(diff)

    # ------------------------------------------------------
    plt_diffs(diffs)

    if 'MPI' in sys.modules:
        MPI.Finalize()
def createmovie(Files, step=1000, extreme=0.0008, parallel=False,
                animation=False, videoname='animation.mp4', framerate=3):
    # Import under a different name so it does not shadow the `animation`
    # flag (the original `import matplotlib.animation as animation` made the
    # flag always truthy).
    import matplotlib.animation as mpl_animation  # noqa: F401

    if parallel:
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        size = comm.Get_size()
        noFiles = len(Files)
        noFilesToConvert = noFiles // size  # integer division (Python 2 `/`)
        remainderFiles = noFiles % size
        startFile = rank * noFilesToConvert
        endFile = startFile + noFilesToConvert
        try:
            if rank < noFiles:
                for i in range(startFile, endFile):
                    print("index: " + repr(i))
                    createimg(Files[i], step=step, extreme=extreme)
            if rank < remainderFiles:
                print("remainder index: "
                      + repr(size * noFilesToConvert + rank))
                createimg(Files[size * noFilesToConvert + rank],
                          step=step, extreme=extreme)
        except IndexError:
            print("Index Error: Array out of bounds.")
        # comm.Barrier()
        MPI.Finalize()
        if animation:
            # if rank == 0:
            inputPNGFiles = '/'.join(map(str, Files[0].split("/")[:-1])) \
                + '/pltmoviedata%*.png'
            cmd = ('"ffmpeg" "-framerate" "' + str(framerate) + '" "-i" "'
                   + inputPNGFiles + '" "-s:v" "1280x720" "-c:v" "libx264" '
                   '"-profile:v" "high" "-crf" "23" "-pix_fmt" "yuv420p" '
                   '"-r" "30" ' + videoname)
            print(videoname)
            print(cmd)
            os.system(cmd)
        # MPI.Finalize()
    else:
        print('serial')
        for f in Files:
            print(f)
            try:
                createimg(f, step=step, extreme=extreme)
            except Exception:
                continue
        if animation:
            inputPNGFiles = '/'.join(map(str, Files[0].split("/")[:-1])) \
                + '/pltmoviedata%*.png'
            cmd = ('"ffmpeg" "-framerate" "' + str(framerate) + '" "-i" "'
                   + inputPNGFiles + '" "-s:v" "1280x720" "-c:v" "libx264" '
                   '"-profile:v" "high" "-crf" "23" "-pix_fmt" "yuv420p" '
                   '"-r" "30" ' + videoname)
            print(videoname)
            print(cmd)
            os.system(cmd)
        ax.set_ylabel(r"b", fontsize=16)
        ax.set_ylim(bottom=None, top=1.5, auto=False)
        fig.savefig("b_mixing_t%s" % str(i))

        fig1 = plt.figure()
        ax1 = fig1.add_subplot(111)
        ax1.plot(rp1.x.value / r_tar, rad_fld1[:, i], "b-")
        ax1.plot(rp1.x.value / r_tar, rad_fld2[:, i], "r-")
        ax1.set_xlabel(r"$r^{*}$", fontsize=16)
        ax1.set_ylabel("Mass Fraction", fontsize=16)
        ax1.legend(labels=(r"SF$_{6}$", "Air"), loc="center right")
        fig1.savefig("mfrac_t%s" % str(i))
        plt.close("all")

    comm.barrier()                     # Wait for all ranks
    comm.Gather(p_time, time, root=0)  # Gather times into one array
    comm.Gather(p_b, b, root=0)        # Gather enstrophy into one array
    MPI.Finalize()                     # Finalize communication

    # Start plotting enstrophy in time
    if Pid == 0:
        radius = np.array(rp0.x.value)
        # Write the header and data through the same handle; the original
        # passed the filename to np.savetxt, which reopened the file and
        # overwrote the header line.
        with open('b_mixing.dat', 'w') as f:
            f.write('#Radius (cm)\t\t\t\t\t b Parameter at each output file\n')
            np.savetxt(f, np.concatenate((np.column_stack(radius).T, b_mix),
                                         axis=1))

        plt.plot(time * 1.E06, b, 'b-')
        plt.xlabel(r'Time ($\mu s$)', fontsize=16)
        plt.ylabel(r'b', fontsize=16)
        # plt.yscale('log')
        plt.grid(True)
        plt.savefig('b_mixing.png')
def serve(self):
    """This method handles control messages until an error happens or all
    child worker processes exit.

    Handles controller finalization by cleaning up, logging, and returning
    whether the controller was successful or not.
    """
    try:
        # spin spin spin
        self._success = 2
        while self._workers:  # spin while we still have children to watch
            try:
                pid, status = os.waitpid(-1, os.WNOHANG)
            except OSError as exc:
                raise PlatoonError("while waiting for a child", exc)
            if pid != 0:  # a status change has happened at a child
                if os.WIFEXITED(status):
                    self._workers.discard(pid)
                    self._success = os.WEXITSTATUS(status)
                    if self._success == 0:
                        # A worker has terminated normally. Other workers
                        # are expected to terminate normally too, so
                        # continue.
                        continue
                    else:
                        # A worker has not terminated normally due to an
                        # error or an irrecoverable fault
                        raise PlatoonError(
                            "A worker has exited with non-success code: "
                            "{}".format(self._success))
                else:  # other status changes are not desirable
                    raise PlatoonError("A worker has changed to a status "
                                       "other than exit.")
            try:
                query = self.csocket.recv_json(flags=zmq.NOBLOCK)
            except zmq.Again:  # if a query has not arrived, try again
                continue
            except zmq.ZMQError as exc:
                raise PlatoonError("while receiving using ZMQ socket", exc)

            # try the default interface; it may raise PlatoonError
            response = self._handle_base_control(query['req'],
                                                 query['worker_id'],
                                                 query['req_info'])
            if response is None:
                response = self.handle_control(query['req'],
                                               query['worker_id'],
                                               query['req_info'])

            try:
                self.csocket.send_json(response)
            except zmq.ZMQError as exc:
                raise PlatoonError("while sending using ZMQ socket", exc)
    except PlatoonError as exc:  # if Platoon fails, kill all child workers
        print(exc, file=sys.stderr)
        self._clean()
    except Exception as exc:
        print(PlatoonError("Unexpected exception", exc), file=sys.stderr)
        self._clean()
    else:
        if self._multinode and MPI:
            MPI.Finalize()
    finally:
        # Close sockets and unlink shared memory
        self._close()
    return self._success
def run_mpi(path):
    '''
    Run the co-simulation between the desired times.

    :param path: the folder of the simulation
    '''
    # take the parameters of the simulation from the saved file
    with open(path + '/parameter.json') as f:
        parameters = json.load(f)
    param_co_simulation = parameters['param_co_simulation']
    param_tvb_connection = parameters['param_tvb_connection']
    param_tvb_coupling = parameters['param_tvb_coupling']
    param_tvb_integrator = parameters['param_tvb_integrator']
    param_tvb_model = parameters['param_tvb_model']
    param_tvb_monitor = parameters['param_tvb_monitor']
    result_path = parameters['result_path']
    end = parameters['end']

    # configuration of the logger
    logger = logging.getLogger('tvb')
    fh = logging.FileHandler(path + '/log/tvb.log')
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    if param_co_simulation['level_log'] == 0:
        fh.setLevel(logging.DEBUG)
        logger.setLevel(logging.DEBUG)
    elif param_co_simulation['level_log'] == 1:
        fh.setLevel(logging.INFO)
        logger.setLevel(logging.INFO)
    elif param_co_simulation['level_log'] == 2:
        fh.setLevel(logging.WARNING)
        logger.setLevel(logging.WARNING)
    elif param_co_simulation['level_log'] == 3:
        fh.setLevel(logging.ERROR)
        logger.setLevel(logging.ERROR)
    elif param_co_simulation['level_log'] == 4:
        fh.setLevel(logging.CRITICAL)
        logger.setLevel(logging.CRITICAL)

    # initialise TVB
    param_tvb_monitor['path_result'] = result_path + '/tvb/'
    id_proxy = param_co_simulation['id_region_nest']
    time_synch = param_co_simulation['synchronization']
    path_send = result_path + "translation/send_to_tvb/"
    path_receive = result_path + "translation/receive_from_tvb/"
    simulator = init(param_tvb_connection, param_tvb_coupling,
                     param_tvb_integrator, param_tvb_model,
                     param_tvb_monitor,
                     {'id_proxy': np.array(id_proxy),
                      'time_synchronize': time_synch,
                      'path_send': path_send,
                      'path_receive': path_receive,
                      })

    # configure the saving of TVB results:
    # count how many monitors are used
    nb_monitor = param_tvb_monitor['Raw'] \
        + param_tvb_monitor['TemporalAverage'] \
        + param_tvb_monitor['Bold'] \
        + param_tvb_monitor['SEEG']
    # initialise the variable holding the results
    save_result = []
    for i in range(nb_monitor):  # one list per monitor
        save_result.append([])

    # init MPI
    data = None  # data for the proxy node (not initialised in the parameters)
    comm_receive = []
    for i in id_proxy:
        comm_receive.append(init_mpi(path_send + str(i) + ".txt", logger))
    comm_send = []
    for i in id_proxy:
        comm_send.append(init_mpi(path_receive + str(i) + ".txt", logger))

    # the simulation loop
    count = 0
    count_save = 0
    logger.info(f' TVB pid:{os.getpid()}')
    time.sleep(10)
    while count * time_synch < end:  # FAT END POINT
        logger.info(" TVB receive data")
        # receive MPI data
        data_value = []
        for comm in comm_receive:
            receive = receive_mpi(comm, logger)
            time_data = receive[0]
            data_value.append(receive[1])
        data = np.empty((2,), dtype=object)
        nb_step = np.rint((time_data[1] - time_data[0])
                          / param_tvb_integrator['sim_resolution'])
        # start at the first time step, not at 0.0
        nb_step_0 = np.rint(time_data[0]
                            / param_tvb_integrator['sim_resolution']) + 1
        time_data = np.arange(nb_step_0, nb_step_0 + nb_step, 1) \
            * param_tvb_integrator['sim_resolution']
        data_value = np.swapaxes(np.array(data_value), 0, 1)[:, :]
        if data_value.shape[0] != time_data.shape[0]:
            raise Exception('Bad shape of data')
        data[:] = [time_data, data_value]

        logger.info(" TVB start simulation " + str(count * time_synch))
        nest_data = []
        for result in simulator(simulation_length=time_synch,
                                proxy_data=data):
            for i in range(nb_monitor):
                if result[i] is not None:
                    save_result[i].append(result[i])
            nest_data.append([result[-1][0], result[-1][1]])

            # save the results to file when the saving time step is reached
            if result[-1][0] >= param_tvb_monitor['save_time'] * (count_save
                                                                  + 1):
                np.save(param_tvb_monitor['path_result'] + '/step_'
                        + str(count_save) + '.npy', save_result)
                save_result = []
                for i in range(nb_monitor):
                    save_result.append([])
                count_save += 1
        logger.info(" TVB end simulation")

        # prepare to send data with MPI
        nest_data = np.array(nest_data)
        time1 = [nest_data[0, 0], nest_data[-1, 0]]
        rate = np.concatenate(nest_data[:, 1])
        for index, comm in enumerate(comm_send):
            send_mpi(comm, time1, rate[:, index] * 1e3)

        # increment the loop counter
        count += 1

    # save the last part
    logger.info(" TVB finish")
    np.save(param_tvb_monitor['path_result'] + '/step_' + str(count_save)
            + '.npy', save_result)
    for index, comm in enumerate(comm_send):
        end_mpi(comm, result_path + "/translation/receive_from_tvb/"
                + str(id_proxy[index]) + ".txt", True, logger)
    for index, comm in enumerate(comm_receive):
        end_mpi(comm, result_path + "/translation/send_to_tvb/"
                + str(id_proxy[index]) + ".txt", False, logger)
    MPI.Finalize()  # ending with MPI
    logger.info(" TVB exit")
    return
def run_qufit(dataFile, modelNum, outDir="", polyOrd=3, nBits=32,
              noStokesI=False, showPlots=False, debug=False, verbose=False):
    """Function controlling the fitting procedure."""

    # Get the processing environment
    if mpiSwitch:
        mpiComm = MPI.COMM_WORLD
        mpiSize = mpiComm.Get_size()
        mpiRank = mpiComm.Get_rank()
    else:
        mpiSize = 1
        mpiRank = 0

    # Default data types
    dtFloat = "float" + str(nBits)
    dtComplex = "complex" + str(2 * nBits)

    # Output prefix is derived from the input file name
    prefixOut, ext = os.path.splitext(dataFile)
    nestOut = prefixOut + "_nest/"
    if mpiRank == 0:
        if os.path.exists(nestOut):
            shutil.rmtree(nestOut, True)
        os.mkdir(nestOut)
    if mpiSwitch:
        mpiComm.Barrier()

    # Read the data file in the root process
    if mpiRank == 0:
        dataArr = np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
    else:
        dataArr = None
    if mpiSwitch:
        dataArr = mpiComm.bcast(dataArr, root=0)

    # Parse the data array
    # freq_Hz, I, Q, U, dI, dQ, dU
    try:
        (freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = dataArr
        if mpiRank == 0:
            print("\nFormat [freq_Hz, I, Q, U, dI, dQ, dU]")
    except Exception:
        # freq_Hz, Q, U, dQ, dU
        try:
            (freqArr_Hz, QArr, UArr, dQArr, dUArr) = dataArr
            if mpiRank == 0:
                print("\nFormat [freq_Hz, Q, U, dQ, dU]")
            noStokesI = True
        except Exception:
            print("\nError: Failed to parse data file!")
            if debug:
                print(traceback.format_exc())
            if mpiSwitch:
                MPI.Finalize()
            return

    # If no Stokes I present, create a dummy spectrum = unity
    if noStokesI:
        if mpiRank == 0:
            print("Note: no Stokes I data - assuming fractional polarisation.")
        IArr = np.ones_like(QArr)
        dIArr = np.zeros_like(QArr)

    # Convert to GHz for convenience
    freqArr_GHz = freqArr_Hz / 1e9
    lamSqArr_m2 = np.power(C / freqArr_Hz, 2.0)

    # Fit the Stokes I spectrum and create the fractional spectra
    if mpiRank == 0:
        dataArr = create_frac_spectra(freqArr=freqArr_GHz,
                                      IArr=IArr, QArr=QArr, UArr=UArr,
                                      dIArr=dIArr, dQArr=dQArr, dUArr=dUArr,
                                      polyOrd=polyOrd, verbose=True)
    else:
        dataArr = None
    if mpiSwitch:
        dataArr = mpiComm.bcast(dataArr, root=0)
    (IModArr, qArr, uArr, dqArr, duArr, IfitDict) = dataArr

    # Plot the data and the Stokes I model fit
    if mpiRank == 0:
        print("Plotting the input data and spectral index fit.")
        freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000)
        IModHirArr = poly5(IfitDict["p"])(freqHirArr_Hz / 1e9)
        specFig = plt.figure(figsize=(10, 6))
        plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz,
                              IArr=IArr, qArr=qArr, uArr=uArr,
                              dIArr=dIArr, dqArr=dqArr, duArr=duArr,
                              freqHirArr_Hz=freqHirArr_Hz,
                              IModArr=IModHirArr, fig=specFig)

        # Use the custom navigation toolbar
        try:
            specFig.canvas.toolbar.pack_forget()
            CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window)
        except Exception:
            pass

        # Display the figure
        if showPlots:
            specFig.canvas.draw()
            specFig.show()

    # ----------------------------------------------------------------------#

    # Load the model and parameters from the relevant file
    if mpiSwitch:
        mpiComm.Barrier()
    if mpiRank == 0:
        print("\nLoading the model from 'models_ns/m%d.py' ..." % modelNum)
    mod = imp.load_source("m%d" % modelNum, "models_ns/m%d.py" % modelNum)
    global model
    model = mod.model

    # Unpack the inParms structure
    parNames = [x["parname"] for x in mod.inParms]
    labels = [x["label"] for x in mod.inParms]
    values = [x["value"] for x in mod.inParms]
    bounds = [x["bounds"] for x in mod.inParms]
    priorTypes = [x["priortype"] for x in mod.inParms]
    wraps = [x["wrap"] for x in mod.inParms]
    nDim = len(priorTypes)
    fixedMsk = [0 if x == "fixed" else 1 for x in priorTypes]
    nFree = sum(fixedMsk)

    # Set the prior function given the bounds of each parameter
    prior = prior_call(priorTypes, bounds, values)

    # Set the likelihood function given the data
    lnlike = lnlike_call(parNames, lamSqArr_m2, qArr, dqArr, uArr, duArr)

    # Let's time the sampler
    if mpiRank == 0:
        startTime = time.time()

    # Run nested sampling using PyMultiNest
    nestArgsDict = merge_two_dicts(init_mnest(), mod.nestArgsDict)
    nestArgsDict["n_params"] = nDim
    nestArgsDict["n_dims"] = nDim
    nestArgsDict["outputfiles_basename"] = nestOut
    nestArgsDict["LogLikelihood"] = lnlike
    nestArgsDict["Prior"] = prior
    pmn.run(**nestArgsDict)

    # Do the post-processing on one processor
    if mpiSwitch:
        mpiComm.Barrier()
    if mpiRank == 0:

        # Query the analyzer object for results
        aObj = pmn.Analyzer(n_params=nDim, outputfiles_basename=nestOut)
        statDict = aObj.get_stats()
        fitDict = aObj.get_best_fit()
        endTime = time.time()

        # NOTE: The Analyzer methods do not work well for parameters with
        # posteriors that overlap the wrap value. Use np.percentile instead.
        pMed = [None] * nDim
        for i in range(nDim):
            pMed[i] = statDict["marginals"][i]['median']
        lnLike = fitDict["log_likelihood"]
        lnEvidence = statDict["nested sampling global log-evidence"]
        dLnEvidence = statDict["nested sampling global log-evidence error"]

        # Get the best-fitting values & uncertainties directly from chains
        chains = aObj.get_equal_weighted_posterior()
        chains = wrap_chains(chains, wraps, bounds, pMed)
        p = [None] * nDim
        errPlus = [None] * nDim
        errMinus = [None] * nDim
        g = lambda v: (v[1], v[2] - v[1], v[1] - v[0])
        for i in range(nDim):
            p[i], errPlus[i], errMinus[i] = \
                g(np.percentile(chains[:, i], [15.72, 50, 84.27]))

        # Calculate goodness-of-fit parameters
        nData = 2.0 * len(lamSqArr_m2)
        dof = nData - nFree - 1
        chiSq = chisq_model(parNames, p, lamSqArr_m2, qArr, dqArr, uArr, duArr)
        chiSqRed = chiSq / dof
        AIC = 2.0 * nFree - 2.0 * lnLike
        AICc = 2.0 * nFree * (nFree + 1) / (nData - nFree - 1) - 2.0 * lnLike
        BIC = nFree * np.log(nData) - 2.0 * lnLike

        # Summary of run
        print("")
        print("-" * 80)
        print("SUMMARY OF SAMPLING RUN:")
        print("#-PROCESSORS  = %d" % mpiSize)
        print("RUN-TIME      = %.2f" % (endTime - startTime))
        print("DOF           = %d" % dof)
        print("CHISQ:        = %.3g" % chiSq)
        print("CHISQ RED     = %.3g" % chiSqRed)
        print("AIC:          = %.3g" % AIC)
        print("AICc          = %.3g" % AICc)
        print("BIC           = %.3g" % BIC)
        print("ln(EVIDENCE)  = %.3g" % lnEvidence)
        print("dLn(EVIDENCE) = %.3g" % dLnEvidence)
        print("")
        print("-" * 80)
        print("RESULTS:\n")
        for i in range(len(p)):
            print("%s = %.4g (+%3g, -%3g)" %
                  (parNames[i], p[i], errPlus[i], errMinus[i]))
        print("-" * 80)
        print("")

        # Create a save dictionary and store final p in values
        outFile = prefixOut + "_m%d_nest.json" % modelNum
        IfitDict["p"] = toscalar(IfitDict["p"].tolist())
        saveDict = {"parNames": toscalar(parNames),
                    "labels": toscalar(labels),
                    "values": toscalar(p),
                    "errPlus": toscalar(errPlus),
                    "errMinus": toscalar(errMinus),
                    "bounds": toscalar(bounds),
                    "priorTypes": toscalar(priorTypes),
                    "wraps": toscalar(wraps),
                    "dof": toscalar(dof),
                    "chiSq": toscalar(chiSq),
                    "chiSqRed": toscalar(chiSqRed),
                    "AIC": toscalar(AIC),
                    "AICc": toscalar(AICc),
                    "BIC": toscalar(BIC),
                    "IfitDict": IfitDict}
        json.dump(saveDict, open(outFile, "w"))
        print("Results saved in JSON format to:\n '%s'\n" % outFile)

        # Plot the data and best-fitting model
        lamSqHirArr_m2 = np.linspace(lamSqArr_m2[0], lamSqArr_m2[-1], 10000)
        freqHirArr_Hz = C / np.sqrt(lamSqHirArr_m2)
        IModArr = poly5(IfitDict["p"])(freqHirArr_Hz / 1e9)
        pDict = {k: v for k, v in zip(parNames, p)}
        quModArr = model(pDict, lamSqHirArr_m2)
        specFig.clf()
        plot_Ipqu_spectra_fig(freqArr_Hz=freqArr_Hz,
                              IArr=IArr, qArr=qArr, uArr=uArr,
                              dIArr=dIArr, dqArr=dqArr, duArr=duArr,
                              freqHirArr_Hz=freqHirArr_Hz,
                              IModArr=IModArr,
                              qModArr=quModArr.real,
                              uModArr=quModArr.imag,
                              fig=specFig)
        specFig.canvas.draw()

        # Plot the posterior samples in a corner plot
        chains = aObj.get_equal_weighted_posterior()
        chains = wrap_chains(chains, wraps, bounds, p)[:, :nDim]
        iFixed = [i for i, e in enumerate(fixedMsk) if e == 0]
        chains = np.delete(chains, iFixed, 1)
        for i in sorted(iFixed, reverse=True):
            del labels[i]
            del p[i]
        cornerFig = corner.corner(xs=chains,
                                  labels=labels,
                                  range=[0.99999] * nFree,
                                  truths=p,
                                  quantiles=[0.1572, 0.8427],
                                  bins=30)

        # Save the figures
        outFile = nestOut + "fig_m%d_specfit.pdf" % modelNum
        specFig.savefig(outFile)
        print("Plot of best-fitting model saved to:\n '%s'\n" % outFile)
        outFile = nestOut + "fig_m%d_corner.pdf" % modelNum
        cornerFig.savefig(outFile)
        print("Plot of posterior samples saved to:\n '%s'\n" % outFile)

        # Display the figures
        if showPlots:
            specFig.show()
            cornerFig.show()
            print("> Press <RETURN> to exit ...", end="")
            sys.stdout.flush()
            input()

        # Clean up
        plt.close(specFig)
        plt.close(cornerFig)

    # Clean up MPI environment
    if mpiSwitch:
        MPI.Finalize()
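# A hedged usage sketch (not part of the original source): run_qufit is
# written to be launched identically on every rank, with rank 0 handling
# all file I/O, plotting, and post-processing. The entry point below is a
# minimal, hypothetical driver -- the data-file name "spectrum.dat" and the
# argument values are illustrative assumptions only; `mpiSwitch` is the
# module-level flag the function already relies on.
#
#   mpiexec -n 4 python qufit_driver.py
#
if __name__ == "__main__":
    # every rank must enter run_qufit, or the Barrier/bcast calls deadlock
    run_qufit(dataFile="spectrum.dat", modelNum=1, polyOrd=2, showPlots=False)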
def main():

    # Get the processing environment
    if mpiSwitch:
        mpiComm = MPI.COMM_WORLD
        mpiSize = mpiComm.Get_size()
        mpiRank = mpiComm.Get_rank()
    else:
        mpiSize = 1   # mpiSize is used in the run summary below
        mpiRank = 0

    # Let's time the sampler
    if mpiRank == 0:
        startTime = time.time()

    # Create the output directory
    if mpiRank == 0:
        if os.path.exists(outDir):
            shutil.rmtree(outDir, True)
        os.mkdir(outDir)
    if mpiSwitch:
        mpiComm.Barrier()

    # Read in the spectrum
    if mpiRank == 0:
        specArr = np.loadtxt(specDat, dtype="float64", unpack=True)
    else:
        specArr = None
    if mpiSwitch:
        specArr = mpiComm.bcast(specArr, root=0)
    xArr = specArr[0] / 1e9   # Hz -> GHz for this dataset
    yArr = specArr[1]
    dyArr = specArr[4]

    # Set the prior function given the bounds of each parameter
    prior = prior_call(priorLst)
    nDim = len(priorLst)

    # Set the likelihood function
    lnlike = lnlike_call(xArr, yArr, dyArr)

    # Run nested sampling
    argsDict = init_mnest()
    argsDict["n_params"] = nDim
    argsDict["n_dims"] = nDim
    argsDict["outputfiles_basename"] = outDir + "/"
    argsDict["n_live_points"] = nPoints
    argsDict["verbose"] = verbose
    argsDict["LogLikelihood"] = lnlike
    argsDict["Prior"] = prior
    pmn.run(**argsDict)

    # Do the post-processing on one processor
    if mpiSwitch:
        mpiComm.Barrier()
    if mpiRank == 0:

        # Query the analyzer object for results
        aObj = pmn.Analyzer(n_params=nDim, outputfiles_basename=outDir + "/")
        statDict = aObj.get_stats()
        fitDict = aObj.get_best_fit()
        endTime = time.time()

        # DEBUG
        if debug:
            print("\n", "-" * 80)
            print("GET_STATS() OUTPUT")
            for k, v in statDict.items():
                print("\n", k, "\n", v)
            print("\n", "-" * 80)
            print("GET_BEST_FIT() OUTPUT")
            for k, v in fitDict.items():
                print("\n", k, "\n", v)

        # Get the best fitting values and uncertainties
        p = fitDict["parameters"]
        lnLike = fitDict["log_likelihood"]
        lnEvidence = statDict["nested sampling global log-evidence"]
        dLnEvidence = statDict["nested sampling global log-evidence error"]
        med = [None] * nDim
        dp = [[None, None]] * nDim
        for i in range(nDim):
            dp[i] = statDict["marginals"][i]['1sigma']
            med[i] = statDict["marginals"][i]['median']

        # Calculate goodness-of-fit parameters
        nSamp = len(xArr)
        dof = nSamp - nDim - 1
        chiSq = calc_chisq(p, xArr, yArr, dyArr)
        chiSqRed = chiSq / dof
        AIC = 2.0 * nDim - 2.0 * lnLike
        AICc = 2.0 * nDim * (nDim + 1) / (nSamp - nDim - 1) - 2.0 * lnLike
        BIC = nDim * np.log(nSamp) - 2.0 * lnLike

        # Summary of run
        print("")
        print("-" * 80)
        print("SUMMARY OF SAMPLING RUN:")
        print("NUM-PROCESSORS: %d" % mpiSize)
        print("RUN-TIME: %.2f" % (endTime - startTime))
        print("DOF:", dof)
        print("CHISQ:", chiSq)
        print("CHISQ RED:", chiSqRed)
        print("AIC:", AIC)
        print("AICc:", AICc)
        print("BIC:", BIC)
        print("ln(EVIDENCE):", lnEvidence)
        print("dLn(EVIDENCE):", dLnEvidence)
        print("")
        print('-' * 80)
        print("BEST FIT PARAMETERS & MARGINALS:")
        for i in range(len(p)):
            print("p%d = %.4f [%.4f, %.4f]" %
                  (i, p[i], dp[i][0], dp[i][1]))

        # Plot the data and best fit
        dataFig = plot_model(p, xArr, yArr, dyArr)
        dataFig.savefig(outDir + "/fig_best_fit.pdf")

        # Plot the triangle plot
        chains = aObj.get_equal_weighted_posterior()
        cornerFig = corner.corner(xs=chains[:, :nDim],
                                  labels=["p" + str(i) for i in range(nDim)],
                                  range=[0.99999] * nDim,
                                  truths=p,
                                  bins=30)
        cornerFig.savefig(outDir + "/fig_corner.pdf")

        # Show the figures
        if showPlots:
            dataFig.show()
            cornerFig.show()
            print("Press <return> to continue ...", end="")
            input()

        # Clean up
        plt.close(dataFig)
        plt.close(cornerFig)

    # Clean up MPI environment
    if mpiSwitch:
        MPI.Finalize()
def main(comm=MPI.COMM_WORLD):
    numprocs = comm.size
    rank = comm.Get_rank()

    conf_file = open('Configuration.txt', "r")
    lineList = conf_file.readlines()
    conf_file.close()
    base_dir = lineList[-1].strip()   # guard against a trailing newline
    input_folder = base_dir + "/input_files/"

    with open(base_dir + "/params.json") as json_file:
        data = json.load(json_file)
    if data["seed"] == -1:
        # original assigned the bare name `time`; use the wall clock instead
        data["seed"] = int(time.time())
    seed(data["seed"])

    output_folder = os.path.dirname(os.path.dirname(input_folder))
    output_folder += "/SimulatedGraph/"
    try:
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)
    except OSError:
        pass

    temp_folder = os.path.dirname(os.path.dirname(input_folder))
    temp_folder += "/temp/"

    if rank == 0:
        print(" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -")
        print("Number of Processors: ", numprocs)
        print(" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -")
        org_graphList = list(find_all_filenames(input_folder))
        graphList = []
        seedlist = []
        for n in range(numprocs):
            seedlist.append(randint(1, 10000))
            graphList.append(choice(org_graphList))
        startIndex = []
        upperlevelNodes = []
        count = 0
        startIndex.append(count)
        for e in graphList:
            count += get_size(e, temp_folder)
            startIndex.append(count)
            upperlevelNodes.extend(
                sample(range(startIndex[-2], startIndex[-1]), 3))
    else:
        startIndex = None
        graphList = None
        seedlist = None

    startIndex = comm.bcast(startIndex, root=0)
    graphList = comm.bcast(graphList, root=0)
    seedlist = comm.bcast(seedlist, root=0)

    create_graph(temp_folder, graphList[rank], seed=seedlist[rank],
                 startpoint=startIndex[rank])

    if rank == 0:
        # Build the upper-level graph from a power-law degree sequence
        degree = nx.utils.powerlaw_sequence(len(upperlevelNodes), 2)
        Outdegree = [int(round(e)) for e in degree]
        Indegree = [int(round(e)) for e in degree]
        shuffle(Indegree)
        edgesList = []
        outfile = open("SimulatedGraph/upperlevelGraph.csv", 'wb')
        while sum(Outdegree) >= 1:
            outIndex = Outdegree.index(max(Outdegree))
            temp = Indegree[outIndex]
            Indegree[outIndex] = 0   # avoid self-loops while pairing
            for i in range(Outdegree[outIndex]):
                maxDegreeIndex = Indegree.index(max(Indegree))
                outfile.write((to_edge(temp_folder,
                                       upperlevelNodes[outIndex],
                                       upperlevelNodes[maxDegreeIndex])
                               + '\n').encode("utf-8"))
                Indegree[maxDegreeIndex] -= 1
            Outdegree[outIndex] = 0
            Indegree[outIndex] = temp
        outfile.close()

    MPI.Finalize()
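# A hedged, serial illustration of the greedy degree-matching loop inside
# main() above (an assumption-labelled sketch, not the original code): draw
# a power-law degree sequence, round it to integers, and repeatedly connect
# the node with the largest remaining out-degree to the nodes with the
# largest remaining in-degrees. Node ids here are plain integers.
import networkx as nx
from random import shuffle

def greedy_powerlaw_edges(n, exponent=2):
    """Return a list of (src, dst) edges matching a power-law sequence."""
    out_deg = [int(round(d)) for d in nx.utils.powerlaw_sequence(n, exponent)]
    in_deg = out_deg[:]          # same marginal distribution for in-degrees
    shuffle(in_deg)
    edges = []
    while sum(out_deg) >= 1:
        src = out_deg.index(max(out_deg))
        saved = in_deg[src]
        in_deg[src] = 0          # temporarily exclude src to avoid self-loops
        for _ in range(out_deg[src]):
            dst = in_deg.index(max(in_deg))
            edges.append((src, dst))
            in_deg[dst] -= 1
        out_deg[src] = 0
        in_deg[src] = saved
    return edges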
def cleanup():
    global init_by_devito
    if init_by_devito and MPI.Is_initialized() and not MPI.Is_finalized():
        MPI.Finalize()
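# A minimal sketch of how a hook like cleanup() is commonly registered
# (an illustrative assumption, not the original wiring): with mpi4py's
# auto-initialization disabled, the module initializes MPI itself, records
# that it did so in `init_by_devito`, and lets atexit invoke cleanup() at
# interpreter shutdown so MPI.Finalize() runs exactly once.
import atexit
import mpi4py
mpi4py.rc.initialize = False   # must be set before the first mpi4py.MPI import
from mpi4py import MPI

init_by_devito = False
if not MPI.Is_initialized():
    MPI.Init()
    init_by_devito = True      # this module owns MPI teardown

atexit.register(cleanup)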
def ales244_static_les_test(pp=None, sp=None):
    """
    Arguments:
    ----------
    pp: (optional) program parameters, parsed by argument parser
        provided by this file
    sp: (optional) solver parameters, parsed by spectralLES.parser
    """

    if comm.rank == 0:
        print("\n----------------------------------------------------------")
        print("MPI-parallel Python spectralLES simulation of problem \n"
              "`Homogeneous Isotropic Turbulence' started with "
              "{} tasks at {}.".format(comm.size, timeofday()))
        print("----------------------------------------------------------")

    # if function called without passing in parsed arguments, then parse
    # the arguments from the command line
    if pp is None:
        pp = hit_parser.parse_known_args()[0]
    if sp is None:
        sp = spectralLES.parser.parse_known_args()[0]

    if comm.rank == 0:
        print('\nProblem Parameters:\n-------------------')
        for k, v in vars(pp).items():
            print(k, v)
        print('\nSpectralLES Parameters:\n-----------------------')
        for k, v in vars(sp).items():
            print(k, v)
        print("\n----------------------------------------------------------\n")

    assert len(set(pp.N)) == 1, ('Error, this beta-release HIT program '
                                 'requires equal mesh dimensions')
    N = pp.N[0]
    assert len(set(pp.L)) == 1, ('Error, this beta-release HIT program '
                                 'requires equal domain dimensions')
    L = pp.L[0]

    if N % comm.size > 0:
        if comm.rank == 0:
            print('Error: job started with improper number of MPI tasks for '
                  'the size of the data specified!')
        MPI.Finalize()
        sys.exit(1)

    # -------------------------------------------------------------------
    # Configure the solver, writer, and analyzer

    # -- construct solver instance from sp's attribute dictionary
    solver = ales244_solver(comm, **vars(sp))
    U_hat = solver.U_hat
    U = solver.U
    omega = solver.omega
    K = solver.K

    # -- configure solver instance to solve the NSE with the vorticity
    #    formulation of the advective term, linear forcing, and
    #    the ales244 SGS model
    solver.computeAD = solver.computeAD_vorticity_form
    Sources = [solver.computeSource_linear_forcing,
               solver.computeSource_ales244_SGS]

    H_244 = np.loadtxt('h_ij.dat', usecols=(1, 2, 3, 4, 5, 6), unpack=True)
    kwargs = {'H_244': H_244, 'dvScale': None}

    # -- form HIT initial conditions from either user-defined values or
    #    physics-based relationships using epsilon and L
    Urms = 1.2 * (pp.epsilon * L)**(1. / 3.)       # empirical coefficient
    Einit = getattr(pp, 'Einit', None) or Urms**2  # == 2*KE_equilibrium
    kexp = getattr(pp, 'kexp', None) or -1. / 3.   # -> E(k) ~ k^(-2./3.)
    kpeak = getattr(pp, 'kpeak', None) or N // 4   # ~ kmax/2

    # -- currently using a fixed random seed of comm.rank for testing
    solver.initialize_HIT_random_spectrum(Einit, kexp, kpeak, rseed=comm.rank)

    # -- configure the writer and analyzer from both pp and sp attributes
    writer = mpiWriter(comm, odir=pp.odir, N=N)
    analyzer = mpiAnalyzer(comm, odir=pp.adir, pid=pp.pid, L=L, N=N,
                           config='hit', method='spectral')

    Ek_fmt = r"\widehat{{{0}}}^*\widehat{{{0}}}".format  # raw string: avoids
                                                         # invalid \w escape

    # -------------------------------------------------------------------
    # Setup the various time and IO counters
    tauK = sqrt(pp.nu / pp.epsilon)   # Kolmogorov time-scale
    taul = 0.2 * L * sqrt(3) / Urms   # 0.2 is empirical coefficient
    c = pp.cfl * sqrt(2 * Einit) / Urms
    dt = solver.new_dt_constant_nu(c)  # use as estimate

    if pp.tlimit == np.Inf:   # put a very large but finite limit on the run
        pp.tlimit = 262 * taul  # i.e. (256+6)*tau, for spinup and 128 samples

    dt_rst = getattr(pp, 'dt_rst', None) or 2 * taul
    dt_spec = getattr(pp, 'dt_spec', None) or max(0.1 * taul, tauK, 10 * dt)
    dt_drv = getattr(pp, 'dt_drv', None) or max(tauK, 10 * dt)

    t_sim = t_rst = t_spec = t_drv = 0.0
    tstep = irst = ispec = 0

    # -------------------------------------------------------------------
    # Run the simulation
    while t_sim < pp.tlimit + 1.e-8:

        # -- Update the dynamic dt based on CFL constraint
        dt = solver.new_dt_constant_nu(pp.cfl)
        t_test = t_sim + 0.5 * dt

        # -- output log messages every step if needed/wanted
        KE = 0.5 * comm.allreduce(psum(np.square(U))) / solver.Nx
        if comm.rank == 0:
            print("cycle = %7d time = %15.8e dt = %15.8e KE = %15.8e"
                  % (tstep, t_sim, dt, KE))

        # -- output snapshots and data analysis products
        if t_test >= t_spec:
            analyzer.spectral_density(U_hat, '%3.3d_u' % ispec,
                                      'velocity PSD\t%s' % Ek_fmt('u_i'))
            irfft3(comm, 1j * (K[0] * U_hat[1] - K[1] * U_hat[0]), omega[2])
            irfft3(comm, 1j * (K[2] * U_hat[0] - K[0] * U_hat[2]), omega[1])
            irfft3(comm, 1j * (K[1] * U_hat[2] - K[2] * U_hat[1]), omega[0])
            analyzer.spectral_density(omega, '%3.3d_omga' % ispec,
                                      'vorticity PSD\t%s' % Ek_fmt(r'\omega_i'))
            t_spec += dt_spec
            ispec += 1

        if t_test >= t_rst:
            writer.write_scalar('Velocity1_%3.3d.rst' % irst, U[0], np.float64)
            writer.write_scalar('Velocity2_%3.3d.rst' % irst, U[1], np.float64)
            writer.write_scalar('Velocity3_%3.3d.rst' % irst, U[2], np.float64)
            t_rst += dt_rst
            irst += 1

        # -- Update the forcing pattern
        if t_test >= t_drv:
            # call solver.computeSource_linear_forcing to compute dvScale only
            kwargs['dvScale'] = Sources[0](computeRHS=False)
            t_drv += dt_drv
            if comm.rank == 0:
                print("------ updated linear forcing pattern ------")

        # -- integrate the solution forward in time
        solver.RK4_integrate(dt, *Sources, **kwargs)

        t_sim += dt
        tstep += 1

        sys.stdout.flush()  # forces Python 3 to flush print statements

    # -------------------------------------------------------------------
    # Finalize the simulation
    irfft3(comm, 1j * (K[0] * U_hat[1] - K[1] * U_hat[0]), omega[2])
    irfft3(comm, 1j * (K[2] * U_hat[0] - K[0] * U_hat[2]), omega[1])
    irfft3(comm, 1j * (K[1] * U_hat[2] - K[2] * U_hat[1]), omega[0])
    analyzer.spectral_density(U_hat, '%3.3d_u' % ispec,
                              'velocity PSD\t%s' % Ek_fmt('u_i'))
    analyzer.spectral_density(omega, '%3.3d_omga' % ispec,
                              'vorticity PSD\t%s' % Ek_fmt(r'\omega_i'))
    writer.write_scalar('Velocity1_%3.3d.rst' % irst, U[0], np.float64)
    writer.write_scalar('Velocity2_%3.3d.rst' % irst, U[1], np.float64)
    writer.write_scalar('Velocity3_%3.3d.rst' % irst, U[2], np.float64)

    return
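# The triple irfft3 calls above compute the vorticity as the spectral-space
# curl, omega = IFFT(i K x U_hat). A hedged, serial numpy sketch of the same
# identity follows (assuming numpy's rfftn layout for U_hat and a matching
# wavenumber array K; the parallel code uses the distributed irfft3 instead):
import numpy as np

def curl_spectral(U_hat, K):
    """Return the curl of a vector field given its rFFT and wavenumbers."""
    omega_x = np.fft.irfftn(1j * (K[1] * U_hat[2] - K[2] * U_hat[1]))
    omega_y = np.fft.irfftn(1j * (K[2] * U_hat[0] - K[0] * U_hat[2]))
    omega_z = np.fft.irfftn(1j * (K[0] * U_hat[1] - K[1] * U_hat[0]))
    return omega_x, omega_y, omega_z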
def __init__(self, comm, odir, ndims, decomp, N, nh, byteswap):

    # DEFINE THE INSTANCE VARIABLES
    # "Protected" variables masked by property method

    # Global variables
    self._odir = odir
    self._comm = comm
    self._ndims = ndims
    self._byteswap = byteswap

    if decomp is None:
        decomp = list([True, ])
        decomp.extend([False] * (ndims - 1))
        self._decomp = decomp
    elif len(decomp) == ndims:
        self._decomp = decomp
    else:
        raise IndexError("Either len(decomp) must be ndims or "
                         "decomp must be None")

    if np.iterable(N):
        if len(N) == 1:
            self._nx = np.array(list(N) * ndims, dtype=int)
        elif len(N) == ndims:
            self._nx = np.array(N, dtype=int)
        else:
            raise IndexError("The length of N must be either 1 or ndims")
    else:
        self._nx = np.array([int(N)] * ndims, dtype=int)

    if nh is None:
        self._nh = np.zeros(ndims, dtype=int)
    elif len(nh) == ndims:
        self._nh = np.array(nh, dtype=int)
    else:
        raise IndexError("Either len(nh) must be ndims or nh "
                         "must be None")

    # Local subdomain variables
    self._nnx = self._nx.copy()
    self._ixs = np.zeros(ndims, dtype=int)
    self._ixe = self._nx.copy()

    if sum(self._decomp) == 1:
        # 1D domain decomposition (plates in 3D, pencils in 2D)
        self._nnx[0] = self._nx[0] // comm.size  # integer division, not float
        self._ixs[0] = self._nnx[0] * comm.rank
        self._ixe[0] = self._ixs[0] + self._nnx[0]
    else:
        raise AssertionError("mpiReader can't yet handle anything "
                             "but 1D Decomposition.")

    # MAKE ODIR, CHECKING IF IT IS A VALID PATH
    if comm.rank == 0:
        try:
            os.makedirs(odir)
        except OSError as e:
            if not os.path.isdir(odir):
                raise e
            else:
                status = e
        finally:
            if os.path.isdir(odir):
                status = 0
    else:
        status = None

    status = comm.bcast(status)
    if status != 0:
        MPI.Finalize()
        sys.exit(999)

    return
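# An illustrative check of the 1D slab decomposition computed above (a
# standalone sketch, not part of the class): when nx0 is divisible by
# comm.size, each rank owns a contiguous slab [ixs, ixe) along axis 0.
# For example, nx0 = 1024 on 4 ranks gives [0,256), [256,512), [512,768),
# [768,1024).
def slab_bounds(nx0, size, rank):
    """Return the (start, end) indices of a rank's slab along axis 0."""
    nnx0 = nx0 // size               # local slab thickness
    return nnx0 * rank, nnx0 * (rank + 1)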
vsign = 1
startsim = timeit.default_timer()

# send record to clock service
addrList = metaclient.getServerAddr()
addr = addrList[0]
metaclient.Recordtime(addr, "SIM")

for t in range(iteration):
    moveToCenter = False
    if (t >= changeVPeriod and t % changeVPeriod == 0):
        moveToCenter = True
    updateGridValueFake(gridList, moveToCenter)
    putDataToDataSpaces(gridListNew, t)

ds.finalize()
MPI.Finalize()

endsim = timeit.default_timer()
print("time span")
print(endsim - startsim)
def main():
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    global cutoff, dimensions, dataset, num_clusters, data

    if rank == 0:
        print("Enter the number of clusters you want to make:")
        num_clusters = int(input())
        with open('modified_video_game_sales.csv', newline='') as f:
            reader = csv.reader(f)
            dataset = list(reader)
        initial = []
        dataset.pop(0)   # drop the CSV header row
        data = dataset
        for i in range(num_clusters):
            initial.append(dataset[i])   # first k points seed the clusters
        num_points = len(dataset)
        dimensions = len(dataset[0])
    else:
        initial = None
        data = None

    cutoff = 0.2
    loop = 0
    compare_cutoff = True

    # NOTE: comm.scatter requires len(data) == comm.size, i.e. exactly one
    # data point per MPI rank.
    while compare_cutoff:
        loop += 1
        clusters = []
        strpt = comm.bcast(initial, root=0)
        recv = comm.scatter(data, root=0)

        # Assign this rank's point to the nearest cluster centre
        least = eucl_distance(strpt[0], recv)
        for i in range(len(strpt)):
            clusters.append([])
        lpoint = 0
        for i in range(len(strpt)):
            a = eucl_distance(strpt[i], recv)
            if a < least:
                least = a
                lpoint = i
        clusters[lpoint] = recv
        fc = comm.gather(clusters, root=0)

        if rank == 0:
            # Accumulate the per-rank assignments and recompute the centres
            nfc = []
            no = []
            for i in range(len(initial)):
                nfc.append(['0', '0'])
                no.append('0')
            for i in range(len(fc)):
                for j in range(len(fc[i])):
                    if len(fc[i][j]) != 0:
                        no[j] = int(no[j]) + 1
                        for k in range(len(fc[i][j])):
                            nfc[j][k] = float(nfc[j][k]) + float(fc[i][j][k])
            for i in range(len(nfc)):
                for j in range(len(nfc[i])):
                    nfc[i][j] = float(nfc[i][j]) / float(no[i])

            # Stop when no centre moved by more than the cutoff
            flag = 0
            for i in range(len(nfc)):
                if eucl_distance(nfc[i], initial[i]) > cutoff:
                    flag += 1
            if flag == 0:
                compare_cutoff = False
                print(nfc)
                print(fc)
                print("Execution time %s seconds"
                      % (time.time() - start_time))
                print(loop)
            else:
                initial = nfc

        # every rank must take part in this broadcast, or non-root ranks
        # never learn the loop has converged and deadlock on the scatter
        compare_cutoff = comm.bcast(compare_cutoff, root=0)

    MPI.Finalize()
    exit(0)
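# The scatter in main() above assumes len(data) == comm.size, one point per
# rank. A hedged sketch of the more general pattern (names are illustrative
# assumptions): pre-chunk the dataset with np.array_split so any number of
# points can be distributed, then let each rank loop over its own chunk.
import numpy as np

def scatter_points(comm, points):
    """Scatter an arbitrary-length list of points across all ranks."""
    chunks = None
    if comm.Get_rank() == 0:
        # array_split always yields exactly comm.size chunks, as lowercase
        # (pickle-based) scatter requires
        chunks = np.array_split(np.asarray(points, dtype=float),
                                comm.Get_size())
    return comm.scatter(chunks, root=0)   # each rank receives its own chunk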