# --- Fragment reconstructed from a whitespace-mangled paste; line breaks inferred. ---

# NOTE(review): this usage message + sys.exit(1) almost certainly belong inside
# an argument-count check (e.g. `if len(sys.argv) <= 2:`) whose header lies
# outside this chunk -- as written at top level the script would always exit.
# Confirm against the full file.
print( " " + sys.argv[0] + " [jobdata mule tag for y axis] [output_filename.pdf] [jobdir1] [jobdir2] ... [jobdirN]" )
print("")
sys.exit(1)

if len(sys.argv) > 3:
    # Load Jobs specified via program parameters
    jd = JobsData(job_dirs=sys.argv[3:])
else:
    # Load all Jobs
    jd = JobsData()

# Consolidate data...
jdc = JobsDataConsolidate(jd)

# ... which belongs to the same time integration method
jdc_groups = jdc.create_groups(['runtime.timestepping_method'])

#
# Filter to exclude data which indicates instabilities
#
def data_filter(x, y, jd):
    # Keep (do not filter) data points that carry no y value at all
    if y == None:
        return True

    # Only apply the instability threshold to runs of at most one simulated day
    if 'runtime.max_simulation_time' in jd:
        if jd['runtime.max_simulation_time'] <= 24 * 60 * 60:
            # TODO(review): body truncated at the end of this chunk -- the
            # original presumably returns True below to drop values > 100
            # (i.e. unstable runs). Restore from the full file.
            if y > 100:
# --- Fragment reconstructed from a whitespace-mangled paste; line breaks inferred. ---

from mule.plotting.Plotting import *
from mule.postprocessing.JobsData import *
from mule.postprocessing.JobsDataConsolidate import *

import matplotlib.pyplot as plt
from matplotlib.lines import Line2D

# Job-data keys used to group jobs: one group per (method, order) pair
groups = ['runtime.timestepping_method', 'runtime.timestepping_order']

# Mule job-data tags providing the y-axis values to plot
tagnames_y = [
    'plane_data_diff_prog_u.res_norm_linf',
]

# Load all jobs from the current directory and consolidate them
j = JobsData(verbosity=0)
c = JobsDataConsolidate(j)

print("")
print("Groups:")
job_groups = c.create_groups(groups)
for key, g in job_groups.items():
    print(" + " + key)

# Filter out errors beyond this value!
def data_filter(x, y, jobdata):
    # Keep (do not filter) data points that carry no y value
    if y == None:
        return True

    # Filter out NaNs for wallclock time studies
    # NaNs require significantly more computation time
    # TODO(review): function body truncated at the end of this chunk --
    # restore the remaining filter logic from the full file.
# --- Fragment reconstructed from a whitespace-mangled paste; line breaks inferred. ---

# NOTE(review): this chunk opens mid-expression -- the start of the list
# comprehension below (and the `def` of its enclosing function, which copies
# `data` while dropping the last column) lies outside this chunk.
            for j in range(len(data))]

    # Copy all but the last column of each row into the new table
    for j in range(len(data)):
        for i in range(len(data[0]) - 1):
            data_new[j][i] = data[j][i]

    return data_new


# Number of ensemble members to load (job directories tagged ensemble00..ensemble09)
num_ensembles = 10

ensemble_data = []
for ensemble_id in range(num_ensembles):
    # Load every job directory of this ensemble member; the member id is a
    # two-digit, zero-padded component of the directory name.
    j = JobsData('job_bench_*ensemble' + str(ensemble_id).zfill(2) + '*', verbosity=100)
    c = JobsDataConsolidate(j)

    # Tabulate with ranks on one axis; NOTE(review): `bar_data` is defined
    # outside this chunk -- presumably the tag name of the bar values.
    d = JobsData_DataTable(j, 'parallelization.num_ranks', bar_data)
    data = d.get_data_float()

    # Debug toggle left by the author: flip to `if False:` to skip this step
    if True:
    #if False:
        """
        Add last column 'nl_timestepping'
        """
        # Widen each row by one column (filled below), then copy existing data
        data_new = [[None for i in range(len(data[0]) + 1)] for j in range(len(data))]
        for j in range(len(data)):
            for i in range(len(data[0])):
                data_new[j][i] = data[j][i]
        # TODO(review): chunk truncated here -- the original presumably fills
        # the appended 'nl_timestepping' column after this copy loop.