def writeCounterData(SOS, frame, adios_group, fd):
    """Query TAU_COUNTER values for one frame and write them to ADIOS.

    Builds an (N, 5) array of [program, rank, thread, counter, value] rows,
    registering any newly seen program/counter names as ADIOS attributes.

    Parameters: SOS (unused here), frame (frame number to query),
    adios_group (group for attribute definitions), fd (open ADIOS fd).
    Mutates the module-level index maps (prog_names, comm_ranks, threads,
    counters) as a side effect.
    """
    global config
    global prog_names
    global comm_ranks
    global value_names
    global threads
    global counters
    # Get the frame-specific counter data...
    start = time.time()
    sqlValsToColByRank = "select value_name, coalesce(value,0.0), prog_name, comm_rank from viewCombined where value_name like 'TAU_COUNTER:%' and frame = " + str(
        frame) + " order by prog_name, comm_rank, value_name;"
    results, col_names = queryAllAggregators(sqlValsToColByRank)
    end = time.time()
    print(end - start), "seconds for counter query"
    # one row per result: program index, rank index, thread index,
    # counter index, value.
    # FIX: np.float was a deprecated alias for the builtin float and was
    # removed in NumPy 1.24; use the explicit np.float64 dtype instead.
    values_array = np.zeros(shape=(len(results), 5), dtype=np.float64)
    index = 0
    for r in results:
        value_name = str(r[0])
        value = float(r[1])
        prog_name = str(r[2])
        comm_rank = str(r[3])
        if prog_name not in prog_names:
            # first time we see this program: assign it the next index and
            # record its name as an ADIOS attribute
            attr_name = "program_name " + str(len(prog_names))
            prog_names[prog_name] = len(prog_names)
            ad.define_attribute(adios_group, attr_name, "",
                                ad.DATATYPE.string, prog_name, "")
        # may not be necessary...
        if comm_rank not in comm_ranks:
            comm_ranks[comm_rank] = len(comm_ranks)
        # tease apart the counter name: "TAU_COUNTER:<thread>:<counter>"
        tokens = value_name.split(":", 2)
        thread = tokens[1]
        counter = tokens[2]
        if thread not in threads:
            threads[thread] = len(threads)
        if counter not in counters:
            attr_name = "counter " + str(len(counters))
            counters[counter] = len(counters)
            ad.define_attribute(adios_group, attr_name, "",
                                ad.DATATYPE.string, counter, "")
        values_array[index][0] = int(prog_names[prog_name])
        values_array[index][1] = int(comm_ranks[comm_rank])
        values_array[index][2] = int(threads[thread])
        # FIX: wrap in int() for consistency with the other index columns
        values_array[index][3] = int(counters[counter])
        values_array[index][4] = float(value)
        index = index + 1
    # now that the data is queried and in arrays, write it out to the file
    # initialize the ADIOS data
    if config["output_adios"]:
        # write the adios
        ad.write_int(fd, "program_count", len(prog_names))
        ad.write_int(fd, "comm_rank_count", len(comm_ranks))
        ad.write_int(fd, "thread_count", len(threads))
        # NOTE(review): 'metrics' is a module-level map defined elsewhere in
        # this file; read-only here so no global declaration is needed.
        ad.write_int(fd, "metric_count", len(metrics))
        ad.write_int(fd, "counter_count", len(counters))
        ad.write_int(fd, "counter_value_count", len(results))
        ad.write(fd, "counter_values", values_array)
    return
## Writing print "\n>>> Writing ... (rank = %d)\n" % rank config = "config_mpi.xml" if len(sys.argv) > 1: config = sys.argv[1] ad.init(config, comm) fd = ad.open("temperature", "adios_test_mpi.bp", "w", comm) NX = 10 groupsize = 4 + 4 + 4 + 8 * 1 * NX t = np.array(range(NX), dtype=np.float64) + rank*NX ad.set_group_size(fd, groupsize) ad.write_int(fd, "NX", NX) ad.write_int(fd, "rank", rank) ad.write_int(fd, "size", size) ad.write(fd, "temperature", t) ad.close(fd) ad.finalize() ## Reading if rank == 0: print "\n>>> Reading ...\n" f = ad.file("adios_test_mpi.bp", comm=MPI.COMM_SELF) f.printself() v = f.var['temperature']
def generateADIOSFile(SOS, cycleFieldName, simCycle, lastX, lastY, lastZ, stride, mintime, maxtime):
    """Query one simulation cycle's memory/FLOPS metrics and emit them.

    Runs four aggregator queries (memory HWM, memory RSS, a PAPI timer
    metric, a timer time metric), groups the values per (prog_name,
    comm_rank), then writes a text file and/or an ADIOS step depending on
    config["output_text"] / config["output_adios"].  Keeps running state in
    the module globals last_rss / last_fp_ops / last_time to turn running
    averages into per-timestep deltas.  Returns the per-cycle text filename.

    NOTE(review): cycleFieldName, lastX/Y/Z, stride, mintime, maxtime are
    accepted but unused in this body — presumably kept for interface
    compatibility with a caller; verify before removing.
    """
    global previous_attr
    global cached_results_dim
    global cachedName
    global config
    global last_time
    global last_fp_ops
    global last_rss
    global adios_mode
    # Get the frame-specific data...
    # because we are querying different servers, the data has the potential to
    # arrive out-of-order (prog_name, comm_rank). So after each query, sort
    # the results into a dictionary of dictionaries.
    prog_names = {}
    # do the memory first - HWM (config["events"]["counters"][0])
    start = time.time()
    sqlValsToColByRank = "select coalesce(value,0.0), prog_name, comm_rank from viewCombined where value_name = '" + str(
        config["events"]["counters"][0]) + "' and frame = " + str(
        simCycle) + ";"
    results, col_names = queryAllAggregators(sqlValsToColByRank)
    end = time.time()
    print(end - start), "seconds for frame query"
    for r in results:
        value = float(r[0])
        prog_name = str(r[1])
        comm_rank = int(r[2])
        if prog_name not in prog_names:
            prog_names[prog_name] = {}
        if comm_rank not in prog_names[prog_name]:
            prog_names[prog_name][comm_rank] = []
        # list position 0 for this (prog, rank): memory HWM
        prog_names[prog_name][comm_rank].append(value)
    # do the memory first - RSS (config["events"]["counters"][1])
    start = time.time()
    current_rss = {}
    sqlValsToColByRank = "select coalesce(value,0.0), prog_name, comm_rank from viewCombined where value_name = '" + str(
        config["events"]["counters"][1]) + "' and frame = " + str(
        simCycle) + ";"
    results, col_names = queryAllAggregators(sqlValsToColByRank)
    end = time.time()
    print(end - start), "seconds for frame query"
    index = -1
    for r in results:
        index = index + 1
        value = float(r[0])
        # change the mean value to a total value and save it
        current_rss[index] = value * simCycle
        # convert the running average to the most recent measurement.
        # NOTE(review): keyed by result-row index, which assumes row order is
        # stable across cycles — confirm the query ordering guarantees this.
        if index in last_rss:
            value = current_rss[index] - last_rss[index]
        prog_name = str(r[1])
        comm_rank = int(r[2])
        if prog_name not in prog_names:
            prog_names[prog_name] = {}
        if comm_rank not in prog_names[prog_name]:
            prog_names[prog_name][comm_rank] = []
        # list position 1: per-timestep RSS
        prog_names[prog_name][comm_rank].append(value)
    last_rss = current_rss
    # do the timer PAPI metric next (config["events"]["timers"][0])
    start = time.time()
    # Make sure we sort by prog_name, comm_rank!
    sqlValsToColByRank = "select coalesce(value,0.0), prog_name, comm_rank from viewCombined where value_name = '" + str(
        config["events"]["timers"][0]) + "' and frame = " + str(simCycle) + ";"
    results, col_names = queryAllAggregators(sqlValsToColByRank)
    end = time.time()
    print(end - start), "seconds for frame query"
    for r in results:
        value = float(r[0])
        prog_name = str(r[1])
        comm_rank = int(r[2])
        if prog_name not in prog_names:
            prog_names[prog_name] = {}
        if comm_rank not in prog_names[prog_name]:
            prog_names[prog_name][comm_rank] = []
        # list position 2: cumulative floating-point-op count
        prog_names[prog_name][comm_rank].append(value)
    # do the timer time metric next (config["events"]["timers"][1])
    start = time.time()
    sqlValsToColByRank = "select coalesce(value,0.0), prog_name, comm_rank from viewCombined where value_name = '" + str(
        config["events"]["timers"][1]) + "' and frame = " + str(simCycle) + ";"
    results, col_names = queryAllAggregators(sqlValsToColByRank)
    end = time.time()
    print(end - start), "seconds for frame query"
    for r in results:
        value = float(r[0])
        prog_name = str(r[1])
        comm_rank = int(r[2])
        if prog_name not in prog_names:
            prog_names[prog_name] = {}
        if comm_rank not in prog_names[prog_name]:
            prog_names[prog_name][comm_rank] = []
        # list position 3: cumulative time
        prog_names[prog_name][comm_rank].append(value)
    # now that the data is queried and sorted, write it out to the file
    # initialize the ADIOS data
    groupsize = 0
    # How many MPI ranks in each program?  (total process count)
    for prog_name in prog_names:
        groupsize = groupsize + len(prog_names[prog_name])
    program_count = len(prog_names)
    # pre-size the output arrays; values are overwritten below
    adios_process_index = np.array(range(groupsize), dtype=np.uint32)
    adios_memdata_hwm = np.array(range(groupsize), dtype=np.float64)
    adios_memdata_rss = np.array(range(groupsize), dtype=np.float64)
    adios_flops1 = np.array(range(groupsize), dtype=np.float64)
    adios_flops2 = np.array(range(groupsize), dtype=np.float64)
    adios_program = np.array(range(groupsize), dtype=np.uint32)
    adios_program_name = np.array(range(program_count), dtype=np.chararray)
    adios_mpi_index = np.array(range(groupsize), dtype=np.uint32)
    # zero-padded cycle number, e.g. "00042"
    cyclestr = str(simCycle)
    cyclestr = cyclestr.rjust(5, '0')
    filename = "performance.metrics." + cyclestr + ".txt"
    # regular text file
    if config["output_text"]:
        flops_out = open(config["outputdir"] + "/" + filename, 'w')
        flops_out.write(
            "Process Index, Memory HWM, Memory RSS, Total FLOPS, Latest FLOPS, Program Name, Program Index, MPI Rank\n"
        )
    # scaling factors applied to the raw op-count and time metrics
    s1 = config["events"]["timer_scaling"][0]
    s2 = config["events"]["timer_scaling"][1]
    index = -1
    prog_index = -1
    current_time = {}
    current_fp_ops = {}
    for prog_name in prog_names:
        prog_index = prog_index + 1
        adios_program_name[prog_index] = prog_name
        for comm_rank in prog_names[prog_name]:
            index = index + 1
            print prog_name, comm_rank, index, current_fp_ops, prog_names
            # positions 2 and 3 are the op count and time appended above
            current_fp_ops[index] = prog_names[prog_name][comm_rank][2] * s1
            current_time[index] = prog_names[prog_name][comm_rank][3] * s2
            flops_to_date = current_fp_ops[index] / current_time[index]
            if len(last_fp_ops) > 0:
                tmp = (current_fp_ops[index] - last_fp_ops[index])
                tmp2 = (current_time[index] - last_time[index])
                if tmp2 > 0.0:
                    # compute flops from lastest timestep
                    flops_in_last_timestep = tmp / tmp2
                else:
                    if last_time[index] > 0.0:
                        # compute flops from previous timestep
                        flops_in_last_timestep = last_fp_ops[index] / last_time[index]
                    else:
                        # something weird is happening...
                        flops_in_last_timestep = 0.0
            else:
                # compute flops from first timestep
                flops_in_last_timestep = flops_to_date
            if config["output_text"]:
                # write the sorted data to the file
                flops_out.write(str(index) + ", ")
                flops_out.write(
                    str(prog_names[prog_name][comm_rank][0]) + ", ")
                flops_out.write(
                    str(prog_names[prog_name][comm_rank][1]) + ", ")
                flops_out.write(str(flops_to_date) + ", ")
                flops_out.write(str(flops_in_last_timestep) + ", ")
                flops_out.write(prog_name + ", ")
                flops_out.write(str(prog_index) + ", ")
                flops_out.write(str(comm_rank) + "\n")
            if config["output_adios"]:
                adios_process_index[index] = index
                adios_memdata_hwm[index] = prog_names[prog_name][comm_rank][0]
                adios_memdata_rss[index] = prog_names[prog_name][comm_rank][1]
                adios_flops1[index] = flops_to_date
                adios_flops2[index] = flops_in_last_timestep
                adios_program[index] = prog_index
                adios_mpi_index[index] = comm_rank
    if config["output_text"]:
        flops_out.close()
        # the "stream" file always names the most recent per-cycle file
        stream_file = "performance.metrics.txt"
        stream_out = open(config["outputdir"] + "/" + stream_file, 'w')
        stream_out.write(filename + "\n")
        stream_out.close()
    # roll the current cycle's values into the running state
    last_time = current_time
    last_fp_ops = current_fp_ops
    if config["output_adios"]:
        # write the adios
        fd = ad.open("TAU_metrics", "tau-metrics.bp", adios_mode)
        ad.write_int(fd, "process_count", groupsize)
        ad.write_int(fd, "program_count", program_count)
        ad.write(fd, "process_index", adios_process_index)
        ad.write(fd, "memory_HWM", adios_memdata_hwm)
        ad.write(fd, "memory_RSS", adios_memdata_rss)
        ad.write(fd, "total_FLOPS", adios_flops1)
        ad.write(fd, "latest_FLOPS", adios_flops2)
        #ad.write(fd, "program_name", adios_program_name)
        ad.write(fd, "program_index", adios_program)
        ad.write(fd, "MPI_rank", adios_mpi_index)
        ad.close(fd)
        #fd.close()
    # future iterations are appending, not writing
    adios_mode = "a"
    return filename
def writeTimerData(SOS, frame, adios_group, fd):
    """Query all TAU_EVENT records for one frame and write them to ADIOS.

    Dispatches each row into one of three arrays by event kind:
      * ENTRY/EXIT  -> timer_values_array  (N, 6)
      * COUNTER     -> counter_values_array (N, 6)
      * SEND/RECV   -> comm_values_array   (N, 8)
    and validates ENTRY/EXIT nesting per (program, rank, thread) via the
    global `validation` stacks.  Returns the number of rows that actually
    belonged to `frame` (rows from other frames are skipped).
    """
    global config
    global prog_names
    global comm_ranks
    global value_names
    global threads
    global groups
    global timers
    global event_types
    global validation
    # Get the frame-specific timer data...
    start = time.time()
    #sqlValsToColByRank = "select prog_name, comm_rank, value, value_name from viewCombined where (value_name like 'TAU_EVENT_ENTRY%' or value_name like 'TAU_EVENT_EXIT%') and frame = " + str(frame) + " order by value;"
    #results, col_names = queryAllAggregators(sqlValsToColByRank)
    pub_filter = ""
    val_filter = "TAU_EVENT"
    frame_start = frame
    frame_depth = 1
    results, col_names = queryAllAggregatorsCache(pub_filter, val_filter,
                                                  frame_start, frame_depth)
    end = time.time()
    print((end - start), "seconds for event query")
    # worst-case sized; trimmed to the actual counts before writing
    timer_values_array = np.zeros(shape=(len(results), 6), dtype=np.uint64)
    counter_values_array = np.zeros(shape=(len(results), 6), dtype=np.uint64)
    comm_values_array = np.zeros(shape=(len(results), 8), dtype=np.uint64)
    timer_index = 0
    counter_index = 0
    comm_index = 0
    prog_name_index = column_map["prog_name"]
    comm_rank_index = column_map["comm_rank"]
    value_name_index = column_map["val_name"]
    value_index = column_map["val"]
    frame_index = column_map["frame"]
    time_index = column_map["time_pack"]
    # sort by timestamp value so events are written in time order
    results = sorted(results, key=itemgetter(value_index))
    total_valid = len(results)
    for r in results:
        prog_name = str(r[prog_name_index])
        comm_rank = str(r[comm_rank_index])
        value = int(r[value_index])
        value_name = str(r[value_name_index])
        row_frame = str(r[frame_index])
        #print row_frame, prog_name, comm_rank, value_name
        # the cache query may return rows from adjacent frames; skip them
        if int(row_frame) != frame:
            total_valid = total_valid - 1
            continue
        if prog_name not in validation:
            validation[prog_name] = {}
        if comm_rank not in validation[prog_name]:
            validation[prog_name][comm_rank] = {}
        if "TAU_EVENT_ENTRY" in value_name or "TAU_EVENT_EXIT" in value_name:
            if prog_name not in prog_names:
                attr_name = "program_name " + str(len(prog_names))
                prog_names[prog_name] = len(prog_names)
                ad.define_attribute(adios_group, attr_name, "",
                                    ad.DATATYPE.string, prog_name, "")
            # may not be necessary...
            if comm_rank not in comm_ranks:
                comm_ranks[comm_rank] = len(comm_ranks)
            # tease apart the event name: "TAU_EVENT_<type>:<thread>:<timer>"
            tokens = value_name.split(":", 2)
            event_type = tokens[0].replace("TAU_EVENT_", "")
            if event_type not in event_types:
                attr_name = "event_type " + str(len(event_types))
                event_types[event_type] = len(event_types)
                ad.define_attribute(adios_group, attr_name, "",
                                    ad.DATATYPE.string, event_type, "")
            thread = int(tokens[1])
            if thread not in validation[prog_name][comm_rank]:
                validation[prog_name][comm_rank][thread] = []
            timer = tokens[2]
            if thread not in threads:
                threads[thread] = len(threads)
            if timer not in timers:
                attr_name = "timer " + str(len(timers))
                timers[timer] = len(timers)
                ad.define_attribute(adios_group, attr_name, "",
                                    ad.DATATYPE.string, timer, "")
            # debug tracing of MPI point-to-point timer events
            if "MPI_Send" in value_name:
                print(value_name, thread)
            if "MPI_Recv" in value_name:
                print(value_name, thread)
            timer_values_array[timer_index][0] = int(prog_names[prog_name])
            timer_values_array[timer_index][1] = int(comm_ranks[comm_rank])
            timer_values_array[timer_index][2] = int(thread)
            timer_values_array[timer_index][3] = int(event_types[event_type])
            timer_values_array[timer_index][4] = int(timers[timer])
            timer_values_array[timer_index][5] = int(value)
            timer_index = timer_index + 1
            # validate ENTRY/EXIT nesting with a per-thread stack
            if "TAU_EVENT_ENTRY" in value_name:
                validation[prog_name][comm_rank][thread].append(timer)
            else:
                if len(validation[prog_name][comm_rank][thread]) == 0:
                    print("VALIDATION ERROR! empty stack", prog_name,
                          comm_rank, thread, timer)
                    #sys.exit()
                else:
                    current_timer = validation[prog_name][comm_rank][
                        thread].pop()
                    if current_timer != timer:
                        print("VALIDATION ERROR!", value,
                              prog_names[prog_name], comm_rank, thread,
                              timers[timer], "!= current: ",
                              timers[current_timer])
        elif "TAU_EVENT_COUNTER" in value_name:
            # convert the timestamp from seconds to usec
            timestamp = float(r[time_index]) * 1000000
            if prog_name not in prog_names:
                attr_name = "program_name " + str(len(prog_names))
                prog_names[prog_name] = len(prog_names)
                ad.define_attribute(adios_group, attr_name, "",
                                    ad.DATATYPE.string, prog_name, "")
            # may not be necessary...
            if comm_rank not in comm_ranks:
                comm_ranks[comm_rank] = len(comm_ranks)
            # tease apart the event name: "TAU_EVENT_COUNTER:<thread>:<counter>"
            tokens = value_name.split(":", 2)
            thread = tokens[1]
            counter = tokens[2]
            if thread not in threads:
                threads[thread] = len(threads)
            # NOTE(review): `counters` is a module-level dict mutated in
            # place, so no `global` declaration is required here.
            if counter not in counters:
                attr_name = "counter " + str(len(counters))
                counters[counter] = len(counters)
                ad.define_attribute(adios_group, attr_name, "",
                                    ad.DATATYPE.string, counter, "")
            counter_values_array[counter_index][0] = int(prog_names[prog_name])
            counter_values_array[counter_index][1] = int(comm_ranks[comm_rank])
            counter_values_array[counter_index][2] = int(thread)
            counter_values_array[counter_index][3] = int(counters[counter])
            counter_values_array[counter_index][4] = int(value)
            counter_values_array[counter_index][5] = int(timestamp)
            counter_index = counter_index + 1
        elif "TAU_EVENT_SEND" in value_name or "TAU_EVENT_RECV" in value_name:
            if prog_name not in prog_names:
                attr_name = "program_name " + str(len(prog_names))
                prog_names[prog_name] = len(prog_names)
                ad.define_attribute(adios_group, attr_name, "",
                                    ad.DATATYPE.string, prog_name, "")
            # may not be necessary...
            if comm_rank not in comm_ranks:
                comm_ranks[comm_rank] = len(comm_ranks)
            # tease apart the event name
            tokens = value_name.split(":", 2)
            event_type = tokens[0].replace("TAU_EVENT_", "")
            if event_type not in event_types:
                attr_name = "event_type " + str(len(event_types))
                event_types[event_type] = len(event_types)
                ad.define_attribute(adios_group, attr_name, "",
                                    ad.DATATYPE.string, event_type, "")
            # re-split: "<...>:<thread>:<tag>:<partner>:<bytes>"
            tokens = value_name.split(":", 4)
            thread = tokens[1]
            tag = tokens[2]
            partner = tokens[3]
            num_bytes = tokens[4]
            if thread not in threads:
                threads[thread] = len(threads)
            comm_values_array[comm_index][0] = int(prog_names[prog_name])
            comm_values_array[comm_index][1] = int(comm_ranks[comm_rank])
            comm_values_array[comm_index][2] = int(thread)
            comm_values_array[comm_index][3] = int(event_types[event_type])
            comm_values_array[comm_index][4] = int(tag)
            comm_values_array[comm_index][5] = int(partner)
            comm_values_array[comm_index][6] = int(num_bytes)
            comm_values_array[comm_index][7] = int(value)
            comm_index = comm_index + 1
        else:
            print("ERROR! unknown event:", prog_name, comm_rank, value_name)
    # now that the data is queried and in arrays, write it out to the file
    # initialize the ADIOS data
    if config["output_adios"] and (timer_index > 0 or counter_index > 0
                                   or comm_index > 0):
        #if config["output_adios"]:
        # write the adios
        ad.write_int(fd, "program_count", len(prog_names))
        ad.write_int(fd, "comm_rank_count", len(comm_ranks))
        ad.write_int(fd, "thread_count", len(threads))
        ad.write_int(fd, "timer_count", len(timers))
        ad.write_int(fd, "event_type_count", len(event_types))
        ad.write_int(fd, "timer_event_count", timer_index)
        ad.write_int(fd, "counter_count", len(counters))
        ad.write_int(fd, "counter_event_count", counter_index)
        ad.write_int(fd, "comm_count", comm_index)
        # FIX: np.resize returns a new array; the results were previously
        # discarded, so the untrimmed full-size arrays were written out.
        # Assign the trimmed arrays back before writing.
        timer_values_array = np.resize(timer_values_array, (timer_index, 6))
        counter_values_array = np.resize(counter_values_array,
                                         (counter_index, 6))
        comm_values_array = np.resize(comm_values_array, (comm_index, 8))
        ad.write(fd, "event_timestamps", timer_values_array)
        ad.write(fd, "counter_values", counter_values_array)
        ad.write(fd, "comm_timestamps", comm_values_array)
    return total_valid
## Writing print "\n>>> Writing ... (rank = %d)\n" % rank config = "config_mpi.xml" if len(sys.argv) > 1: config = sys.argv[1] ad.init(config, comm) fd = ad.open("temperature", "adios_test_mpi.bp", "w", comm) NX = 10 groupsize = 4 + 4 + 4 + 8 * 1 * NX t = np.array(range(NX), dtype=np.float64) + rank*NX ad.set_group_size(fd, groupsize) ad.write_int(fd, "NX", NX) ad.write_int(fd, "rank", rank) ad.write_int(fd, "size", size) ad.write(fd, "temperature", t) ad.close(fd) ad.finalize() ## Reading if rank == 0: print "\n>>> Reading ...\n" f = ad.file("adios_test_mpi.bp", comm=MPI.COMM_SELF) f.printself() v = f.vars['temperature']
ad.allocate_buffer(ad.BUFFER_ALLOC_WHEN.NOW, 10) ad.init_noxml(MPI.COMM_WORLD) g = ad.declare_group("temperature", "", 1) ad.define_var(g, "NX", "", ad.DATATYPE.integer, "", "", "") ad.define_var(g, "size", "", ad.DATATYPE.integer, "", "", "") ad.define_var(g, "temperature", "", ad.DATATYPE.double, "size,NX", "size,NX", "0,0") msg = str(datetime.datetime.now()) ad.define_attribute(g, "datetime", "", ad.DATATYPE.string, msg, "") print ">>> Method:", method, init ad.select_method(g, method, init, "") ## Writing for i in range(5): print ">>> step:", i fd = ad.open("temperature", "temp.bp", "a") NX = 10 size = 2 groupsize = 4 + 4 + 8 * size * NX t = np.array(range(NX * size), dtype=np.float64) + 100 * i tt = t.reshape((size, NX)) ad.set_group_size(fd, groupsize) ad.write_int(fd, "NX", NX) ad.write_int(fd, "size", size) ad.write(fd, "temperature", tt) ad.close(fd) ad.finalize() print ">>> Done."
def writeCommData(SOS, frame, adios_group, fd):
    """Query TAU_EVENT_SEND/RECV records for one frame and write to ADIOS.

    Builds an (N, 8) uint64 array of
    [program, rank, thread, event_type, tag, partner, bytes, timestamp]
    rows, registering any newly seen program/event-type names as ADIOS
    attributes.  Mutates the module-level index maps as a side effect.
    """
    global config
    global prog_names
    global comm_ranks
    global value_names
    global threads
    global groups
    global timers
    global event_types
    # Get the frame-specific timer data...
    start = time.time()
    sqlValsToColByRank = "select prog_name, comm_rank, value, value_name from viewCombined where (value_name like 'TAU_EVENT_SEND%' or value_name like 'TAU_EVENT_RECV%') and frame = " + str(
        frame) + " order by value;"
    results, col_names = queryAllAggregators(sqlValsToColByRank)
    end = time.time()
    print(end - start), "seconds for communication query"
    values_array = np.zeros(shape=(len(results), 8), dtype=np.uint64)
    index = 0
    for r in results:
        prog_name = str(r[0])
        comm_rank = str(r[1])
        # FIX: long() is Python-2-only; int() behaves identically there
        # (auto-promotes to arbitrary precision) and also works on Python 3.
        value = int(r[2])
        value_name = str(r[3])
        if prog_name not in prog_names:
            attr_name = "program_name " + str(len(prog_names))
            prog_names[prog_name] = len(prog_names)
            ad.define_attribute(adios_group, attr_name, "",
                                ad.DATATYPE.string, prog_name, "")
        # may not be necessary...
        if comm_rank not in comm_ranks:
            comm_ranks[comm_rank] = len(comm_ranks)
        # tease apart the event name: first the type prefix...
        tokens = value_name.split(":", 2)
        event_type = tokens[0].replace("TAU_EVENT_", "")
        if event_type not in event_types:
            attr_name = "event_type " + str(len(event_types))
            event_types[event_type] = len(event_types)
            ad.define_attribute(adios_group, attr_name, "",
                                ad.DATATYPE.string, event_type, "")
        # ...then the full "<type>:<thread>:<tag>:<partner>:<bytes>" form
        tokens = value_name.split(":", 4)
        thread = tokens[1]
        tag = tokens[2]
        partner = tokens[3]
        num_bytes = tokens[4]
        if thread not in threads:
            threads[thread] = len(threads)
        values_array[index][0] = int(prog_names[prog_name])
        values_array[index][1] = int(comm_ranks[comm_rank])
        values_array[index][2] = int(threads[thread])
        values_array[index][3] = int(event_types[event_type])
        values_array[index][4] = int(tag)
        values_array[index][5] = int(partner)
        values_array[index][6] = int(num_bytes)
        values_array[index][7] = int(value)
        index = index + 1
    # now that the data is queried and in arrays, write it out to the file
    # initialize the ADIOS data
    if config["output_adios"]:
        # write the adios
        ad.write_int(fd, "program_count", len(prog_names))
        ad.write_int(fd, "comm_rank_count", len(comm_ranks))
        ad.write_int(fd, "thread_count", len(threads))
        ad.write_int(fd, "comm_count", len(results))
        if len(results) > 0:
            ad.write(fd, "comm_timestamps", values_array)
    return
def writeCounterData(SOS, frame, adios_group, fd):
    """Query TAU_EVENT_COUNTER records for one frame and write to ADIOS.

    Builds an (N, 6) uint64 array of
    [program, rank, thread, counter, value, timestamp_usec] rows ordered by
    pack time, registering any newly seen program/counter names as ADIOS
    attributes.  Mutates the module-level index maps as a side effect.

    NOTE(review): this shadows an earlier definition of writeCounterData in
    the same file; the later definition wins at import time.
    """
    global config
    global prog_names
    global comm_ranks
    global value_names
    global threads
    global counters
    # Get the frame-specific counter data...
    start = time.time()
    sqlValsToColByRank = "select prog_name, comm_rank, value, value_name, time_pack from viewCombined where (value_name like 'TAU_EVENT_COUNTER%') and frame = " + str(
        frame) + " order by time_pack;"
    results, col_names = queryAllAggregators(sqlValsToColByRank)
    end = time.time()
    print(end - start), "seconds for event query"
    values_array = np.zeros(shape=(len(results), 6), dtype=np.uint64)
    index = 0
    for r in results:
        prog_name = str(r[0])
        comm_rank = str(r[1])
        # FIX: long() is Python-2-only; int() behaves identically there
        # (auto-promotes to arbitrary precision) and also works on Python 3.
        value = int(r[2])
        value_name = str(r[3])
        # convert the timestamp from seconds to usec
        timestamp = float(r[4]) * 1000000
        if prog_name not in prog_names:
            attr_name = "program_name " + str(len(prog_names))
            prog_names[prog_name] = len(prog_names)
            ad.define_attribute(adios_group, attr_name, "",
                                ad.DATATYPE.string, prog_name, "")
        # may not be necessary...
        if comm_rank not in comm_ranks:
            comm_ranks[comm_rank] = len(comm_ranks)
        # tease apart the event name: "TAU_EVENT_COUNTER:<thread>:<counter>"
        tokens = value_name.split(":", 2)
        thread = tokens[1]
        counter = tokens[2]
        if thread not in threads:
            threads[thread] = len(threads)
        if counter not in counters:
            attr_name = "counter " + str(len(counters))
            counters[counter] = len(counters)
            ad.define_attribute(adios_group, attr_name, "",
                                ad.DATATYPE.string, counter, "")
        values_array[index][0] = int(prog_names[prog_name])
        values_array[index][1] = int(comm_ranks[comm_rank])
        values_array[index][2] = int(threads[thread])
        values_array[index][3] = int(counters[counter])
        values_array[index][4] = int(value)
        values_array[index][5] = int(timestamp)
        index = index + 1
    # now that the data is queried and in arrays, write it out to the file
    # initialize the ADIOS data
    if config["output_adios"]:
        # write the adios
        # these get written when the comm data is written
        # ad.write_int(fd, "program_count", len(prog_names))
        # ad.write_int(fd, "comm_rank_count", len(comm_ranks))
        # ad.write_int(fd, "thread_count", len(threads))
        ad.write_int(fd, "counter_count", len(counters))
        ad.write_int(fd, "counter_event_count", len(results))
        if len(results) > 0:
            ad.write(fd, "counter_values", values_array)
    return
## Init ad.allocate_buffer (ad.BUFFER_ALLOC_WHEN.NOW, 10); ad.init_noxml(MPI.COMM_WORLD) g = ad.declare_group("temperature", "", 1) ad.define_var(g, "NX", "", ad.DATATYPE.integer, "", "", "") ad.define_var(g, "size", "", ad.DATATYPE.integer, "", "", "") ad.define_var(g, "temperature", "", ad.DATATYPE.double, "size,NX", "size,NX", "0,0") msg = str(datetime.datetime.now()) ad.define_attribute(g, "datetime", "", ad.DATATYPE.string, msg, "") print ">>> Method:", method, init ad.select_method(g, method, init, "") ## Writing for i in range(5): print ">>> step:", i fd = ad.open("temperature", "temp.bp", "a") NX = 10 size = 2 groupsize = 4 + 4 + 8 * size * NX t = np.array(range(NX*size), dtype=np.float64) + 100*i tt = t.reshape((size, NX)) ad.set_group_size(fd, groupsize) ad.write_int(fd, "NX", NX) ad.write_int(fd, "size", size) ad.write(fd, "temperature", tt) ad.close(fd) ad.finalize() print ">>> Done."