def print_log(pre, time, gravity, E0=0.0 | nbody_system.energy,
              cpu0=0.0, wall0=0.0):

    cpu = cputime()
    wall = wallclocktime()
    N = len(gravity.particles)
    M = gravity.total_mass
    U = gravity.potential_energy
    T = gravity.kinetic_energy
    Etop = T + U
    E = Etop

    if E0 == 0 | nbody_system.energy: E0 = E
    Rvir = -0.5 * M * M / U
    Q = -T / U
    com = pa.center_of_mass(gravity.particles)
    comv = pa.center_of_mass_velocity(gravity.particles)
    if N > 15:
        dcen, rcore, rhocore \
            = pa.densitycentre_coreradius_coredens(gravity.particles)
    else:
        dcen = com
        rcore = zero
        rhocore = zero
    cmx, cmy, cmz = dcen
    lagr, mf = pa.LagrangianRadii(gravity.particles, cm=dcen)  # no units!

    print ''
    print pre + "time=", time.number
    print pre + "cpu=", cpu - cpu0
    print pre + "wall=", wall - wall0
    print pre + "Ntot=", N
    print pre + "mass=", M.number
    print pre + "Etot=", E.number
    print pre + "dE/E=", E / E0 - 1
    print pre + "Rvir=", Rvir.number
    print pre + "Qvir=", Q
    cmx, cmy, cmz = com
    print pre + "cmpos[3]= %.8f %.8f %.8f" \
        % (cmx.number, cmy.number, cmz.number)
    cmx, cmy, cmz = comv
    print pre + "cmvel[3]= %.8f %.8f %.8f" \
        % (cmx.number, cmy.number, cmz.number)
    cmx, cmy, cmz = dcen
    print pre + "dcpos[3]= %.8f %.8f %.8f" \
        % (cmx.number, cmy.number, cmz.number)
    print pre + "Rcore=", rcore.number
    print pre + "Mlagr[9]=",
    for m in mf:
        print "%.4f" % (m),
    print ''
    print pre + "Rlagr[9]=",
    for r in lagr.number:
        print "%.8f" % (r),
    print ''
    sys.stdout.flush()
    return E, cpu, wall
def print_log(pre, time, gravity, E0=0.0 | nbody_system.energy,
              cpu0=0.0, wall0=0.0):

    # Standard log output.

    cpu = cputime()
    wall = wallclocktime()
    N = len(gravity.particles)
    M = gravity.total_mass
    U = gravity.potential_energy
    T = gravity.kinetic_energy
    Etop = T + U
    E = Etop

    if E0 == 0 | nbody_system.energy: E0 = E
    Rvir = -0.5 * M * M / U
    Q = -T / U
    com = pa.center_of_mass(gravity.particles)
    comv = pa.center_of_mass_velocity(gravity.particles)
    if N >= 100:
        dcen, rcore, rhocore \
            = pa.densitycentre_coreradius_coredens(gravity.particles)
        cmx, cmy, cmz = dcen
        lagr, mf = pa.LagrangianRadii(gravity.particles, cm=dcen)  # no units!

    print ''
    print pre + "time=", time.number
    print pre + "cpu=", cpu - cpu0
    print pre + "wall=", wall - wall0
    print pre + "Ntot=", N
    print pre + "mass=", M.number
    print pre + "Etot=", E.number
    print pre + "dE/E=", E / E0 - 1
    print pre + "Rvir=", Rvir.number
    print pre + "Qvir=", Q
    cmx, cmy, cmz = com
    print pre + "cmpos[3]= %.8f %.8f %.8f" \
        % (cmx.number, cmy.number, cmz.number)
    cmx, cmy, cmz = comv
    print pre + "cmvel[3]= %.8f %.8f %.8f" \
        % (cmx.number, cmy.number, cmz.number)
    if N >= 100:
        cmx, cmy, cmz = dcen
        print pre + "dcpos[3]= %.8f %.8f %.8f" \
            % (cmx.number, cmy.number, cmz.number)
        print pre + "Rcore=", rcore.number
        print pre + "Mlagr[9]=",
        for m in mf:
            print "%.4f" % (m),
        print ''
        print pre + "Rlagr[9]=",
        for r in lagr.number:
            print "%.8f" % (r),
        print ''
    sys.stdout.flush()
    return E, cpu, wall
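# Usage sketch (illustration only, not part of the original scripts; the
# helper name log_bracketed_evolve is made up, and a `gravity` code instance
# plus the same imports as print_log above are assumed).  The intended call
# pattern, also used in run_ph4 below, is to call print_log once at the start
# of a run and feed the returned reference values back on later calls, so that
# dE/E and the cpu/wall columns are reported relative to the start of the run.
def log_bracketed_evolve(gravity, t_end):
    E0, cpu0, wall0 = print_log('init ', 0.0 | nbody_system.time, gravity)
    gravity.evolve_model(t_end)
    return print_log('final ', t_end, gravity, E0, cpu0, wall0)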
def main():
    global verbose
    global regoldindexes
    global createsysattrs

    parser = _get_parser()
    args = parser.parse_args()

    # check arguments
    if args.rng:
        try:
            args.rng = eval("slice(" + args.rng + ")")
        except Exception:
            parser.error("Error when getting the range parameter.")

    if args.chunkshape.isdigit() or args.chunkshape.startswith('('):
        args.chunkshape = eval(args.chunkshape)

    if args.complevel < 0 or args.complevel > 9:
        parser.error(
            'invalid "complevel" value, it should be in the range [0, 9]')

    # Catch the files passed as the last arguments
    src = args.src.rsplit(':', 1)
    dst = args.dst.rsplit(':', 1)
    if len(src) == 1:
        srcfile, srcnode = src[0], "/"
    else:
        srcfile, srcnode = src
    if len(dst) == 1:
        dstfile, dstnode = dst[0], "/"
    else:
        dstfile, dstnode = dst

    if srcnode == "":
        # case where filename == "filename:" instead of "filename:/"
        srcnode = "/"
    if dstnode == "":
        # case where filename == "filename:" instead of "filename:/"
        dstnode = "/"

    # Ignore the warnings for tables that contain old indexes
    # (these will be handled by the copying routines)
    warnings.filterwarnings("ignore", category=OldIndexWarning)

    # Ignore the flavors warnings during upgrading flavor operations
    if args.upgradeflavors:
        warnings.filterwarnings("ignore", category=FlavorWarning)

    # Build the Filters instance
    filter_params = (
        args.complevel,
        args.complib,
        args.shuffle,
        args.bitshuffle,
        args.fletcher32,
    )
    if filter_params == (None,) * 5 or args.keepfilters:
        filters = None
    else:
        if args.complevel is None:
            args.complevel = 0
        if args.shuffle is None:
            if args.complevel > 0:
                args.shuffle = True
            else:
                args.shuffle = False
        if args.bitshuffle is None:
            args.bitshuffle = False
        if args.bitshuffle:
            # Shuffle and bitshuffle are mutually exclusive
            args.shuffle = False
        if args.complib is None:
            args.complib = "zlib"
        if args.fletcher32 is None:
            args.fletcher32 = False
        filters = Filters(complevel=args.complevel, complib=args.complib,
                          shuffle=args.shuffle, bitshuffle=args.bitshuffle,
                          fletcher32=args.fletcher32)

    # The start, stop and step params:
    start, stop, step = None, None, 1  # Defaults
    if args.rng:
        start, stop, step = args.rng.start, args.rng.stop, args.rng.step

    # Set globals
    verbose = args.verbose
    regoldindexes = args.regoldindexes
    createsysattrs = args.createsysattrs

    # Some timing
    t1 = time.time()
    cpu1 = cputime()

    # Copy the file
    if verbose:
        print("+=+" * 20)
        print("Recursive copy:", args.recursive)
        print("Applying filters:", filters)
        if args.sortby is not None:
            print("Sorting table(s) by column:", args.sortby)
            print("Forcing a CSI creation:", args.checkCSI)
        if args.propindexes:
            print("Recreating indexes in copied table(s)")
        print("Start copying %s:%s to %s:%s" % (srcfile, srcnode,
                                                dstfile, dstnode))
        print("+=+" * 20)

    # Check whether the specified source node is a group or a leaf
    h5srcfile = open_file(srcfile, 'r')
    srcnodeobject = h5srcfile.get_node(srcnode)

    # Close the file again
    h5srcfile.close()

    stats = {'groups': 0, 'leaves': 0, 'links': 0, 'bytes': 0, 'hardlinks': 0}
    if isinstance(srcnodeobject, Group):
        copy_children(srcfile, dstfile, srcnode, dstnode,
                      title=args.title, recursive=args.recursive,
                      filters=filters, copyuserattrs=args.copyuserattrs,
                      overwritefile=args.overwritefile,
                      overwrtnodes=args.overwrtnodes, stats=stats,
                      start=start, stop=stop, step=step,
                      chunkshape=args.chunkshape, sortby=args.sortby,
                      check_CSI=args.checkCSI, propindexes=args.propindexes,
                      upgradeflavors=args.upgradeflavors,
                      use_hardlinks=True)
    else:
        # If not a Group, it should be a Leaf
        copy_leaf(srcfile, dstfile, srcnode, dstnode,
                  title=args.title, filters=filters,
                  copyuserattrs=args.copyuserattrs,
                  overwritefile=args.overwritefile,
                  overwrtnodes=args.overwrtnodes, stats=stats,
                  start=start, stop=stop, step=step,
                  chunkshape=args.chunkshape, sortby=args.sortby,
                  check_CSI=args.checkCSI, propindexes=args.propindexes,
                  upgradeflavors=args.upgradeflavors)

    # Gather some statistics
    t2 = time.time()
    cpu2 = cputime()
    tcopy = round(t2 - t1, 3)
    cpucopy = round(cpu2 - cpu1, 3)
    try:
        tpercent = int(round(cpucopy / tcopy, 2) * 100)
    except ZeroDivisionError:
        tpercent = 'NaN'

    if verbose:
        ngroups = stats['groups']
        nleaves = stats['leaves']
        nlinks = stats['links']
        nhardlinks = stats['hardlinks']
        nbytescopied = stats['bytes']
        nnodes = ngroups + nleaves + nlinks + nhardlinks

        print(
            "Groups copied:", ngroups,
            ", Leaves copied:", nleaves,
            ", Links copied:", nlinks,
            ", Hard links copied:", nhardlinks,
        )
        if args.copyuserattrs:
            print("User attrs copied")
        else:
            print("User attrs not copied")
        print("KBytes copied:", round(nbytescopied / 1024., 3))
        print("Time copying: %s s (real) %s s (cpu) %s%%" %
              (tcopy, cpucopy, tpercent))
        print("Copied nodes/sec: ", round((nnodes) / float(tcopy), 1))
        print("Copied KB/s :", int(nbytescopied / (tcopy * 1024)))
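# Worked example of the "file:node" splitting done near the top of main()
# (illustration only; the file names are made up).  rsplit(':', 1) splits on
# the last colon, and both the missing-node and empty-node cases fall back to
# the root group "/":
#
#     "data.h5:/group/table".rsplit(':', 1)  ->  ["data.h5", "/group/table"]
#     "data.h5".rsplit(':', 1)               ->  ["data.h5"]        # node -> "/"
#     "data.h5:".rsplit(':', 1)              ->  ["data.h5", ""]    # node -> "/"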
date = dates[0]
for hour in range(24):
    data = get_data(tiles, date, hour)
    hmap.plot_frame((data, date, hour))
    hmap.save_frame()
else:
    thm = movietools.ThreadedHorizMap(720, 12)
    thm.check()
    thm.setup_colorbar((vmin, vmax, cmap))
    time_read = 0.
    time_video = 0.
    time_total = 0.
    nframes = 0
    for date, hour in itertools.product(mydates, range(24)):
        t0 = cputime()
        assert ds.is_datetiles_online(tiles, date), comm.Abort()
        data = get_data(tiles, date, hour)
        t1 = cputime()
        thm.do_frame((axis, lons, lats), (data, date, hour))
        t2 = cputime()
        time_read += t1 - t0
        time_video += t2 - t1
        time_total += t2 - t0
        nframes += 1
        if True:
            print(
                f"myrank: {myrank} io:{time_read:.1f} video:{time_video:.1f} total:{time_total:.1f} nframes:{nframes} timeperframe:{time_total/nframes:.2f}"
            )
    thm.close()
    # pool.close()
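# Minimal sketch of the read/render timing split used in the loop above,
# assuming only the standard library; timed_frames, load_frame and
# render_frame are hypothetical stand-ins for the loop body, get_data and
# thm.do_frame.
import time

def timed_frames(frames, load_frame, render_frame):
    # Accumulate I/O time and rendering time separately, frame by frame.
    time_read = 0.0
    time_video = 0.0
    for frame in frames:
        t0 = time.perf_counter()
        data = load_frame(frame)        # I/O
        t1 = time.perf_counter()
        render_frame(data)              # rendering
        t2 = time.perf_counter()
        time_read += t1 - t0
        time_video += t2 - t1
    return time_read, time_video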
def run_ph4(infile = None, number_of_stars = 40,
            end_time = 10 | nbody_system.time,
            delta_t = 1 | nbody_system.time,
            n_workers = 1, use_gpu = 1, gpu_worker = 1, gpu_id = -1,
            accuracy_parameter = 0.1,
            softening_length = -1 | nbody_system.length,
            manage_encounters = 1):

    if infile != None: print "input file =", infile
    print "end_time =", end_time.number
    print "delta_t =", delta_t.number
    print "n_workers =", n_workers
    print "use_gpu =", use_gpu
    print "manage_encounters =", manage_encounters
    print "initializing the gravity module"
    sys.stdout.flush()

    # Note that there are really three GPU options to test:
    #
    #   1. use the GPU code and allow GPU use (default)
    #   2. use the GPU code but disable GPU use (-g)
    #   3. use the non-GPU code (-G)

    #print "1"; sys.stdout.flush()

    gpu = 0
    if gpu_worker == 1:
        try:
            gravity = grav(number_of_workers = n_workers,
                           redirection = "none", mode = "gpu")
                           # debugger='valgrind')
            gpu = 1
        except Exception as ex:
            print \
                '*** GPU worker code not found. Reverting to non-GPU code. ***'
            gpu = 0

    if gpu == 0:
        gravity = grav(number_of_workers = n_workers,
                       redirection = "none")
                       # debugger='valgrind')

    #print "2"; sys.stdout.flush()
    gravity.initialize_code()

    #print "3"; sys.stdout.flush()
    gravity.parameters.set_defaults()
    gravity.parameters.gpu_id = gpu_id

    #-----------------------------------------------------------------

    #print "4"; sys.stdout.flush()
    print "making a Plummer model"
    stars = new_plummer_model(number_of_stars)

    id = numpy.arange(number_of_stars)
    stars.id = id + 1

    print "setting particle masses and radii"
    stars.mass = (1.0 / number_of_stars) | nbody_system.mass
    if 0:
        scaled_mass = new_salpeter_mass_distribution_nbody(number_of_stars)
        stars.mass = scaled_mass
    stars.radius = 0.0 | nbody_system.length

    print "centering stars"
    stars.move_to_center()
    if 0:
        print "scaling stars to virial equilibrium"
        stars.scale_to_standard(
            smoothing_length_squared = gravity.parameters.epsilon_squared)

    time = 0.0 | nbody_system.time
    sys.stdout.flush()

    #-----------------------------------------------------------------

    #print "5"; sys.stdout.flush()
    if softening_length == -1 | nbody_system.length:
        eps2 = 0.25*(float(number_of_stars))**(-0.666667) \
            | nbody_system.length**2
    else:
        eps2 = softening_length*softening_length

    #print "6"; sys.stdout.flush()
    gravity.parameters.timestep_parameter = accuracy_parameter
    gravity.parameters.epsilon_squared = eps2
    gravity.parameters.use_gpu = use_gpu
    gravity.parameters.manage_encounters = manage_encounters

    print "adding particles"
    # print stars
    sys.stdout.flush()
    gravity.particles.add_particles(stars)
    gravity.commit_particles()

    print ''
    print "number_of_stars =", number_of_stars
    sys.stdout.flush()

    E0, cpu0, wall0 = print_log('', time, gravity)

    # Channel to copy values from the code to the set in memory.
    channel = gravity.particles.new_channel_to(stars)

    stopping_condition = gravity.stopping_conditions.collision_detection
    stopping_condition.enable()

    #-----------------------------------------------------------------

    cpu0 = cputime()
    t0 = 0.
    pi = math.pi
    times = [1., 2., pi, 4*pi/3, 5., 2*pi, 2*pi + pi/100, 2*pi + pi/5,
             7., 8., 3*pi, 10.]

    gravity.parameters.force_sync = 1   # stays set until explicitly unset

    for t in times:
        time = t | nbody_system.time

        print "\nEvolving to time", time
        sys.stdout.flush()

        gravity.parameters.block_steps = 0
        gravity.parameters.total_steps = 0
        gravity.evolve_model(time)

        dt = t - t0
        t0 = t
        cpu = cputime()
        dcpu = cpu - cpu0
        cpu0 = cpu

        # Ensure that the stars list is consistent with the internal
        # data in the module.

        ls = len(stars)

        # Update the bookkeeping: synchronize stars with the module data.

        try:
            gravity.update_particle_set()
            gravity.particles.synchronize_to(stars)
        except:
            pass

        # Copy values from the module to the set in memory.

        channel.copy()

        # Copy the index (ID) as used in the module to the id field in
        # memory.  The index is not copied by default, as different
        # codes may have different indices for the same particle and
        # we don't want to overwrite silently.

        channel.copy_attribute("index_in_code", "id")

        if stopping_condition.is_set():
            star1 = stopping_condition.particles(0)[0]
            star2 = stopping_condition.particles(1)[0]
            print '\nstopping condition set at time', \
                gravity.get_time().number, 'for:\n'
            print star1
            print ''
            print star2
            print ''
            raise Exception("no encounter handling")

        if len(stars) != ls:
            if 0:
                print "stars:"
                for s in stars:
                    print " ", s.id.number, s.mass.number, \
                        s.x.number, s.y.number, s.z.number
            else:
                print "number of stars =", len(stars)
            sys.stdout.flush()

        print_log('', time, gravity, E0, cpu0, wall0)

        print '@@@'
        print '@@@ t =', time.number, ' dt =', dt
        print '@@@ sync_time =', gravity.parameters.sync_time.number
        print '@@@ dcpu/dt =', dcpu/dt
        nb = gravity.parameters.block_steps
        ns = gravity.parameters.total_steps
        print '@@@ d(block_steps) =', nb, ' #/dt =', nb/dt
        print '@@@ d(total steps) =', ns, ' #/dt =', ns/dt
        #print stars
        sys.stdout.flush()

    #-----------------------------------------------------------------

    print ''
    gravity.stop()
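# Minimal driver sketch (an assumption; the harness that originally calls
# run_ph4 is not part of this excerpt).  With the defaults above, run_ph4
# builds a 40-body Plummer model and steps it through the hard-wired list of
# output times; the gpu_worker/use_gpu switches select the worker code path.
if __name__ == '__main__':
    run_ph4(number_of_stars=40, n_workers=1,
            use_gpu=0, gpu_worker=0)    # force the non-GPU code path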
def main():
    global verbose
    global regoldindexes
    global createsysattrs

    parser = _get_parser()
    args = parser.parse_args()

    # check arguments
    if args.rng:
        try:
            args.rng = eval("slice(" + args.rng + ")")
        except Exception:
            parser.error("Error when getting the range parameter.")

    if args.chunkshape.isdigit() or args.chunkshape.startswith('('):
        args.chunkshape = eval(args.chunkshape)

    if args.complevel < 0 or args.complevel > 9:
        parser.error(
            'invalid "complevel" value, it should be in the range [0, 9]'
        )

    # Catch the files passed as the last arguments
    src = args.src.rsplit(':', 1)
    dst = args.dst.rsplit(':', 1)
    if len(src) == 1:
        srcfile, srcnode = src[0], "/"
    else:
        srcfile, srcnode = src
    if len(dst) == 1:
        dstfile, dstnode = dst[0], "/"
    else:
        dstfile, dstnode = dst

    if srcnode == "":
        # case where filename == "filename:" instead of "filename:/"
        srcnode = "/"
    if dstnode == "":
        # case where filename == "filename:" instead of "filename:/"
        dstnode = "/"

    # Ignore the warnings for tables that contain old indexes
    # (these will be handled by the copying routines)
    warnings.filterwarnings("ignore", category=OldIndexWarning)

    # Ignore the flavors warnings during upgrading flavor operations
    if args.upgradeflavors:
        warnings.filterwarnings("ignore", category=FlavorWarning)

    # Build the Filters instance
    filter_params = (
        args.complevel,
        args.complib,
        args.shuffle,
        args.bitshuffle,
        args.fletcher32,
    )
    if filter_params == (None,) * 5 or args.keepfilters:
        filters = None
    else:
        if args.complevel is None:
            args.complevel = 0
        if args.shuffle is None:
            if args.complevel > 0:
                args.shuffle = True
            else:
                args.shuffle = False
        if args.bitshuffle is None:
            args.bitshuffle = False
        if args.bitshuffle:
            # Shuffle and bitshuffle are mutually exclusive
            args.shuffle = False
        if args.complib is None:
            args.complib = "zlib"
        if args.fletcher32 is None:
            args.fletcher32 = False
        filters = Filters(complevel=args.complevel, complib=args.complib,
                          shuffle=args.shuffle, bitshuffle=args.bitshuffle,
                          fletcher32=args.fletcher32)

    # The start, stop and step params:
    start, stop, step = None, None, 1  # Defaults
    if args.rng:
        start, stop, step = args.rng.start, args.rng.stop, args.rng.step

    # Set globals
    verbose = args.verbose
    regoldindexes = args.regoldindexes
    createsysattrs = args.createsysattrs

    # Some timing
    t1 = time.time()
    cpu1 = cputime()

    # Copy the file
    if verbose:
        print("+=+" * 20)
        print("Recursive copy:", args.recursive)
        print("Applying filters:", filters)
        if args.sortby is not None:
            print("Sorting table(s) by column:", args.sortby)
            print("Forcing a CSI creation:", args.checkCSI)
        if args.propindexes:
            print("Recreating indexes in copied table(s)")
        print("Start copying %s:%s to %s:%s" % (srcfile, srcnode,
                                                dstfile, dstnode))
        print("+=+" * 20)

    allow_padding = not args.dont_allow_padding

    # Check whether the specified source node is a group or a leaf
    h5srcfile = open_file(srcfile, 'r', allow_padding=allow_padding)
    srcnodeobject = h5srcfile.get_node(srcnode)

    # Close the file again
    h5srcfile.close()

    stats = {'groups': 0, 'leaves': 0, 'links': 0, 'bytes': 0, 'hardlinks': 0}
    if isinstance(srcnodeobject, Group):
        copy_children(
            srcfile, dstfile, srcnode, dstnode,
            title=args.title, recursive=args.recursive, filters=filters,
            copyuserattrs=args.copyuserattrs,
            overwritefile=args.overwritefile, overwrtnodes=args.overwrtnodes,
            stats=stats, start=start, stop=stop, step=step,
            chunkshape=args.chunkshape, sortby=args.sortby,
            check_CSI=args.checkCSI, propindexes=args.propindexes,
            upgradeflavors=args.upgradeflavors, allow_padding=allow_padding,
            use_hardlinks=True)
    else:
        # If not a Group, it should be a Leaf
        copy_leaf(
            srcfile, dstfile, srcnode, dstnode,
            title=args.title, filters=filters,
            copyuserattrs=args.copyuserattrs,
            overwritefile=args.overwritefile, overwrtnodes=args.overwrtnodes,
            stats=stats, start=start, stop=stop, step=step,
            chunkshape=args.chunkshape, sortby=args.sortby,
            check_CSI=args.checkCSI, propindexes=args.propindexes,
            upgradeflavors=args.upgradeflavors, allow_padding=allow_padding,
        )

    # Gather some statistics
    t2 = time.time()
    cpu2 = cputime()
    tcopy = round(t2 - t1, 3)
    cpucopy = round(cpu2 - cpu1, 3)
    try:
        tpercent = int(round(cpucopy / tcopy, 2) * 100)
    except ZeroDivisionError:
        tpercent = 'NaN'

    if verbose:
        ngroups = stats['groups']
        nleaves = stats['leaves']
        nlinks = stats['links']
        nhardlinks = stats['hardlinks']
        nbytescopied = stats['bytes']
        nnodes = ngroups + nleaves + nlinks + nhardlinks

        print(
            "Groups copied:", ngroups,
            ", Leaves copied:", nleaves,
            ", Links copied:", nlinks,
            ", Hard links copied:", nhardlinks,
        )
        if args.copyuserattrs:
            print("User attrs copied")
        else:
            print("User attrs not copied")
        print("KBytes copied:", round(nbytescopied / 1024., 3))
        print("Time copying: %s s (real) %s s (cpu) %s%%" % (
            tcopy, cpucopy, tpercent))
        print("Copied nodes/sec: ", round((nnodes) / float(tcopy), 1))
        print("Copied KB/s :", int(nbytescopied / (tcopy * 1024)))
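# Entry-point sketch (an assumption, not shown in the original fragment):
# when the module is executed directly, main() is the natural entry point;
# the argparse parser built by _get_parser() then reads its options and the
# src/dst arguments from sys.argv.
if __name__ == '__main__':
    main()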