# Imports and MPI setup assumed by this example (the module name adios_mpi is
# an assumption; it is used here as the "ad" alias seen throughout).
import sys
import numpy as np
from mpi4py import MPI
import adios_mpi as ad

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

config = sys.argv[1]
ad.init(config, comm)
fd = ad.open("temperature", "adios_test_mpi.bp", "w", comm)

NX = 10
groupsize = 4 + 4 + 4 + 8 * 1 * NX   # three 4-byte ints (NX, rank, size) + NX doubles
t = np.array(range(NX), dtype=np.float64) + rank*NX
ad.set_group_size(fd, groupsize)
ad.write_int(fd, "NX", NX)
ad.write_int(fd, "rank", rank)
ad.write_int(fd, "size", size)
ad.write(fd, "temperature", t)
ad.close(fd)
ad.finalize()

## Reading
if rank == 0:
    print "\n>>> Reading ...\n"

    f = ad.file("adios_test_mpi.bp", comm=MPI.COMM_SELF)
    f.printself()

    v = f.var['temperature']
    v.printself()

    val = v.read()
    print val
    assert (int(sum(sum(val))) == (size*NX-1)*(size*NX)/2)
    f.close()
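# Why the assert works: across all ranks the "temperature" values are exactly
# the integers 0 .. size*NX-1, whose sum is (size*NX-1)*(size*NX)/2. A minimal
# standalone sketch of that identity (the size/NX values here are hypothetical,
# chosen only for illustration):
import numpy as np

size, NX = 4, 10                                    # e.g. 4 ranks, 10 values each
global_temperature = np.arange(size * NX, dtype=np.float64)
expected = (size * NX - 1) * (size * NX) // 2
assert int(global_temperature.sum()) == expected    # same check as the test above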
def sosToADIOS():
    global SOS
    global config
    parseConfigFile()
    SOS = SSOS()

    printf("Initializing SOS: ...\b\b\b")
    SOS.init()
    printf("OK!\n")

    #####
    #
    # Get the maximum simulation cycle found in the database.
    #
    # NOTE: The cycleFieldName variable should match what is being used
    #       either by your application or SOSflow. If you are not using
    #       an explicit cycle value, you can use SOSflow's internal
    #       field named "frame" that is updated every time SOS_publish(...)
    #       is called. As long as you are publishing to SOS at the end
    #       of major program steps, this will give you what you want.
    #
    # NOTE: For online queries, if you want to ensure that your most
    #       current projection represents a complete set of values,
    #       and you're investigating a block-synchronous code, you can
    #       grab the current maximum and subtract one.
    #
    #
    num_rows = 0
    # Get at least one active aggregator
    lookupAggregators()

    g = None
    if config["output_adios"]:
        # ADIOS file output
        ad.init_noxml()
        g = ad.declare_group("TAU_metrics", "", ad.FLAG.YES)
        ad.define_var(g, "program_count", "", ad.DATATYPE.unsigned_integer, "", "", "")
        ad.define_var(g, "comm_rank_count", "", ad.DATATYPE.unsigned_integer, "", "", "")
        ad.define_var(g, "thread_count", "", ad.DATATYPE.unsigned_integer, "", "", "")
        ad.define_var(g, "metric_count", "", ad.DATATYPE.unsigned_integer, "", "", "")
        ad.define_var(g, "timer_count", "", ad.DATATYPE.unsigned_integer, "", "", "")
        ad.define_var(g, "timer_value_count", "", ad.DATATYPE.unsigned_integer, "", "", "")
        ad.define_var(g, "timer_values", "", ad.DATATYPE.unsigned_integer,
                      "timer_value_count,6", "timer_value_count,6", "0,0")
        ad.define_var(g, "counter_count", "", ad.DATATYPE.unsigned_integer, "", "", "")
        ad.define_var(g, "counter_value_count", "", ad.DATATYPE.unsigned_integer, "", "", "")
        ad.define_var(g, "counter_values", "", ad.DATATYPE.double,
                      "counter_value_count,5", "counter_value_count,5", "0,0")
        print "using ADIOS method:", str(config["adios_method"])
        ad.select_method(g, str(config["adios_method"]), "verbose=3", "")

    # wait for a frame to show up. Frame 0 (and maybe 1) are TAU metadata.
    # The rest should be just timers.
    next_frame = 0
    # first iteration, we are writing the file. after that, appending.
    adios_mode = "w"

    # Keep running until there are no more frames to wait for.
    # At runtime, this is a moving target, since next_frame gets updated.
    while config["aggregators"]["runtime"] or next_frame < config["aggregators"]["maxframe"]:
        # wait for the next batch of frames
        timeout = waitForServer(SOS, next_frame)
        if timeout:
            break
        print "Processing frame", next_frame
        start = time.time()
        fd = ad.open("TAU_metrics", "tau-metrics.bp", adios_mode)
        writeMetaData(SOS, next_frame, g, fd)
        writeTimerData(SOS, next_frame, g, fd)
        writeCounterData(SOS, next_frame, g, fd)
        ad.close(fd)
        # future iterations are appending, not writing
        adios_mode = "a"
        # clean up the database for long runs
        cleanDB(SOS, next_frame)
        next_frame = next_frame + 1
        end = time.time()
        print "loop time:", str(end - start)

    # finalize adios
    if config["output_adios"]:
        ad.finalize()

    # finalize SOS
    SOS.finalize()
    print " ...DONE!"
    return
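# Hypothetical sketch of what one of the per-frame helpers above (e.g.
# writeTimerData, which is not shown in this listing) might do with the open
# file descriptor. Only the ad.write_int / ad.write calls mirror the API used
# in these examples; the helper name, row source, and 6-column layout are
# illustrative assumptions, and group-size bookkeeping is omitted.
import numpy as np

def write_timer_rows(fd, rows):
    # rows: one 6-column integer tuple per timer event for the current frame
    timer_values = np.asarray(rows, dtype=np.uint32).reshape(-1, 6)
    ad.write_int(fd, "timer_value_count", timer_values.shape[0])
    ad.write(fd, "timer_values", timer_values)
    return timer_values.shape[0]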
def sosScatterplotGenerator():
    global SOS
    global config
    parseConfigFile()
    SOS = SSOS()

    printf("Initializing SOS: ...\b\b\b")
    SOS.init()
    printf("OK!\n")

    # NOTE: When allocation time is scarce, 'stride' here can be
    #       set so that intermediate cycles can be skipped, which is
    #       especially useful when there are thousands of cycles.
    #
    stride = 1

    #####
    #
    # Get the maximum simulation cycle found in the database.
    #
    # NOTE: The cycleFieldName variable should match what is being used
    #       either by your application or SOSflow. If you are not using
    #       an explicit cycle value, you can use SOSflow's internal
    #       field named "frame" that is updated every time SOS_publish(...)
    #       is called. As long as you are publishing to SOS at the end
    #       of major program steps, this will give you what you want.
    #
    # NOTE: For online queries, if you want to ensure that your most
    #       current projection represents a complete set of values,
    #       and you're investigating a block-synchronous code, you can
    #       grab the current maximum and subtract one.
    #
    cycleFieldName = "frame"
    #
    num_rows = 0
    # Get at least one active aggregator
    lookupAggregators()

    # wait for a few frames to show up. Frame 0 and 1 are TAU metadata.
    # Frame 2 represents a true iteration.
    max_cycle, maxtime = waitForServer(SOS, cycleFieldName, max(stride, 1), True)
    print "Maximum observed '" + cycleFieldName + "' value: " + str(max_cycle) + " (so far)"
    #
    sqlMaxFrame = "SELECT count(*) FROM tblpubs;"
    results, col_names = queryAllAggregators(sqlMaxFrame)
    # We got results from each aggregator, so sum them up
    rank_maxes = [int(x[0]) for x in results]
    rank_max = sum(rank_maxes)
    print "Maximum observed pub_guids: " + str(rank_max)
    #
    #####

    if config["output_adios"]:
        # ADIOS file output
        ad.init_noxml()
        g = ad.declare_group("TAU_metrics", "", ad.FLAG.YES)
        ad.define_var(g, "process_count", "", ad.DATATYPE.unsigned_integer, "", "", "")
        ad.define_var(g, "program_count", "", ad.DATATYPE.unsigned_integer, "", "", "")
        ad.define_var(g, "process_index", "", ad.DATATYPE.unsigned_integer,
                      "process_count", "process_count", "0")
        ad.define_var(g, "memory_HWM", "", ad.DATATYPE.double,
                      "process_count", "process_count", "0")
        ad.define_var(g, "memory_RSS", "", ad.DATATYPE.double,
                      "process_count", "process_count", "0")
        ad.define_var(g, "total_FLOPS", "", ad.DATATYPE.double,
                      "process_count", "process_count", "0")
        ad.define_var(g, "latest_FLOPS", "", ad.DATATYPE.double,
                      "process_count", "process_count", "0")
        #ad.define_var(g, "program_name", "", ad.DATATYPE.string,
        #              "program_count", "program_count", "0")
        ad.define_var(g, "program_index", "", ad.DATATYPE.unsigned_integer,
                      "process_count", "process_count", "0")
        ad.define_var(g, "MPI_rank", "", ad.DATATYPE.unsigned_integer,
                      "process_count", "process_count", "0")
        print "using ADIOS method:", str(config["adios_method"])
        ad.select_method(g, str(config["adios_method"]), "verbose=3", "")
    #
    #
    # EXAMPLE A: Generate .txt set for ALL simulation cycles:
    print "Generating TXT files..."
    lastX = [0.0] * (rank_max + 1)
    lastY = [0.0] * (rank_max + 1)
    lastZ = [0.0] * (rank_max + 1)
    simCycle = max_cycle
    mintime = 0.0
    # Keep running until there are no more frames to wait for.
    # At runtime, this is a moving target, since max_cycle gets updated.
    while config["aggregators"]["runtime"] or simCycle < config["aggregators"]["maxframe"]:
        print "Processing frame", simCycle
        start = time.time()
        vtkOutputFileName = generateADIOSFile(SOS, cycleFieldName, simCycle,
                                              lastX, lastY, lastZ, stride,
                                              mintime, maxtime)
        # clean up the database for long runs
        cleanDB(SOS, cycleFieldName, simCycle)
        simCycle = simCycle + stride
        # wait for the next batch of frames
        mintime = maxtime
        max_cycle, maxtime = waitForServer(SOS, cycleFieldName, simCycle, False)
        end = time.time()
        print "loop time:", str(end - start)

    #####
    #
    # Whew! All done!
    #
    # NOTE: See vtkWriter.py for more details.
    #
    if config["output_adios"]:
        ad.finalize()
    SOS.finalize()
    #
    #####
    print " ...DONE!"
    print
    return
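# Hypothetical sketch of the per-cycle writer called above (the real
# generateADIOSFile is not shown in this listing). Only the ad.open /
# ad.write_int / ad.write / ad.close calls mirror the API used in these
# examples; the argument handling and column order are illustrative
# assumptions.
import numpy as np

def generate_adios_file_sketch(filename, adios_mode, process_rows):
    # process_rows: one tuple per process for this cycle, assumed ordered as
    # (process_index, memory_HWM, memory_RSS, total_FLOPS, latest_FLOPS, MPI_rank)
    rows = np.asarray(process_rows, dtype=np.float64)
    fd = ad.open("TAU_metrics", filename, adios_mode)
    ad.write_int(fd, "process_count", rows.shape[0])
    ad.write(fd, "process_index", rows[:, 0].astype(np.uint32))
    ad.write(fd, "memory_HWM", rows[:, 1])
    ad.write(fd, "memory_RSS", rows[:, 2])
    ad.write(fd, "total_FLOPS", rows[:, 3])
    ad.write(fd, "latest_FLOPS", rows[:, 4])
    ad.write(fd, "MPI_rank", rows[:, 5].astype(np.uint32))
    ad.close(fd)
    return filename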
def sosToADIOS():
    global SOS
    global config
    global validation
    parseConfigFile()
    SOS = SSOS()

    printf("Initializing SOS: ...\b\b\b")
    SOS.init()
    printf("OK!\n")

    #####
    #
    # Get the maximum simulation cycle found in the database.
    #
    # NOTE: The cycleFieldName variable should match what is being used
    #       either by your application or SOSflow. If you are not using
    #       an explicit cycle value, you can use SOSflow's internal
    #       field named "frame" that is updated every time SOS_publish(...)
    #       is called. As long as you are publishing to SOS at the end
    #       of major program steps, this will give you what you want.
    #
    # NOTE: For online queries, if you want to ensure that your most
    #       current projection represents a complete set of values,
    #       and you're investigating a block-synchronous code, you can
    #       grab the current maximum and subtract one.
    #
    #
    num_rows = 0
    # Get at least one active aggregator
    lookupAggregators()

    g = None
    if config["output_adios"]:
        # ADIOS file output
        ad.init_noxml()
        g = ad.declare_group("TAU_metrics", "", ad.FLAG.YES)
        ad.define_var(g, "program_count", "", ad.DATATYPE.unsigned_integer, "", "", "")
        ad.define_var(g, "comm_rank_count", "", ad.DATATYPE.unsigned_integer, "", "", "")
        ad.define_var(g, "thread_count", "", ad.DATATYPE.unsigned_integer, "", "", "")
        ad.define_var(g, "event_type_count", "", ad.DATATYPE.unsigned_integer, "", "", "")
        ad.define_var(g, "timer_count", "", ad.DATATYPE.unsigned_integer, "", "", "")
        ad.define_var(g, "timer_event_count", "", ad.DATATYPE.unsigned_integer, "", "", "")
        ad.define_var(g, "event_timestamps", "", ad.DATATYPE.unsigned_long,
                      "timer_event_count,6", "timer_event_count,6", "0,0")
        ad.define_var(g, "counter_count", "", ad.DATATYPE.unsigned_integer, "", "", "")
        ad.define_var(g, "counter_event_count", "", ad.DATATYPE.unsigned_integer, "", "", "")
        ad.define_var(g, "counter_values", "", ad.DATATYPE.unsigned_long,
                      "counter_event_count,6", "counter_event_count,6", "0,0")
        ad.define_var(g, "comm_count", "", ad.DATATYPE.unsigned_integer, "", "", "")
        ad.define_var(g, "comm_timestamps", "", ad.DATATYPE.unsigned_long,
                      "comm_count,8", "comm_count,8", "0,0")
        print("using ADIOS method:", str(config["adios_method"]))
        ad.select_method(g, str(config["adios_method"]), "verbose=3", "")

    # wait for a frame to show up. Frame 0 (and maybe 1) are TAU metadata.
    # The rest should be just timers.
    next_frame = 0
    # first iteration, we are writing the file. after that, appending.
    adios_mode = "w"
    waitForServer(SOS, 0)
    buildColumnMap(SOS)

    # Keep running until there are no more frames to wait for.
    # At runtime, this is a moving target, since next_frame gets updated.
    done = False
    total_count = 0
    while (not done or total_count > 0) and (
            config["aggregators"]["runtime"] or
            next_frame < config["aggregators"]["maxframe"]):
        # wait for the next batch of frames
        if not done:
            timeout = waitForServer(SOS, next_frame + 1)
            if timeout:
                done = True
        #if len(column_map) == 0:
        #    buildColumnMap(SOS)
        print("Processing frame", next_frame)
        start = time.time()
        fd = ad.open("TAU_metrics", "tau-metrics.bp", adios_mode)
        meta_count = writeMetaData(SOS, next_frame, g, fd)
        timer_count = writeTimerData(SOS, next_frame, g, fd)
        total_count = meta_count + timer_count
        ad.close(fd)
        # future iterations are appending, not writing
        adios_mode = "a"
        print("Processed", total_count, "rows")
        if total_count == 0 and done:
            break
        next_frame = next_frame + 1
        end = time.time()
        print("loop time:", str(end - start))

    # finalize adios
    if config["output_adios"]:
        ad.finalize()

    # finalize SOS
    SOS.finalize()

    for p in validation:
        for r in validation[p]:
            for t in validation[p][r]:
                if len(validation[p][r][t]) != 0:
                    print("VALIDATION ERROR!", p, r, t, validation[p][r][t],
                          "was not exited")

    print(" ...DONE!")
    return
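# Hypothetical sketch of how the `validation` structure checked above could be
# maintained while timer events are processed (the code that fills it is not
# shown in this listing). The nested keys (program, rank, thread) and the
# ENTRY/EXIT event names are assumptions based on the final consistency check,
# which flags any timer that "was not exited".
from collections import defaultdict

validation = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))

def track_event(program, rank, thread, event, timer_name):
    stack = validation[program][rank][thread]
    if event == "ENTRY":
        stack.append(timer_name)          # timer opened on this thread
    elif event == "EXIT":
        if stack and stack[-1] == timer_name:
            stack.pop()                   # matching exit closes it
    # anything left on a stack at shutdown triggers the VALIDATION ERROR print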
config = sys.argv[1]
ad.init(config, comm)
fd = ad.open("temperature", "adios_test_mpi.bp", "w", comm)

NX = 10
groupsize = 4 + 4 + 4 + 8 * 1 * NX
t = np.array(range(NX), dtype=np.float64) + rank*NX
ad.set_group_size(fd, groupsize)
ad.write_int(fd, "NX", NX)
ad.write_int(fd, "rank", rank)
ad.write_int(fd, "size", size)
ad.write(fd, "temperature", t)
ad.close(fd)
ad.finalize()

## Reading
if rank == 0:
    print "\n>>> Reading ...\n"

    f = ad.file("adios_test_mpi.bp", comm=MPI.COMM_SELF)
    f.printself()

    v = f.vars['temperature']
    v.printself()

    val = v.read()
    print val
    assert (int(np.sum(val)) == (size*NX-1)*(size*NX)/2)
    f.close()
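# A minimal follow-on sketch, reusing only the calls shown above (ad.file,
# .vars, printself, read): dump every variable in the file rather than just
# "temperature". The helper name inspect_bp is hypothetical, and iterating
# f.vars as a name-to-handle mapping is an assumption about this binding.
def inspect_bp(filename):
    f = ad.file(filename, comm=MPI.COMM_SELF)
    for name in f.vars:
        v = f.vars[name]
        print name, ":"
        v.printself()
        print v.read()
    f.close()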