def custom_plot3(sph_schm, sph_schm_legend, bins=20, sz=(19.2, 14.4), save=False):
    fig, axs = plt.subplots(2, 2, figsize=sz)
    i = 0
    for schm in sph_schm:
        file_loc = file_base + '/Outputs/' + schm
        leg = sph_schm_legend[schm]
        files = get_files(file_loc)
        data = load(files[-1])['arrays']['fluid']
        x, y, lmda, DRh = data.get('x', 'y', 'lmda', 'DRh')
        if i <= 1:
            plot_loghist(DRh, ax=axs[0, i % 2], bins=bins)
            axs[0, i % 2].set_title(leg, fontsize=20)
        else:
            plot_loghist(DRh, ax=axs[1, i % 2], bins=bins)
            axs[1, i % 2].set_title(leg, fontsize=20)
        i += 1
    fig.suptitle(figTitle3, fontsize=24)
    fig.tight_layout()
    fig.subplots_adjust(top=0.9)
    if save:
        tle = file_base + '/TGV_DRH_plot' + savefig_additional + '.png'
        fig.savefig(tle, dpi=400)

def combined_loghist(file_loc, ax=None, cond=0.0, Bins=20, **plot_kwargs):
    if ax is None:
        ax = plt.gca()
    files = get_files(file_loc)
    df = np.array([])
    cnt = 0
    # Skip the first output file (initial state) and accumulate DRh
    # from all the remaining files.
    for solver_data, fluid in iter_output(files, 'fluid'):
        if cnt == 0:
            cnt = 1
        else:
            df = np.concatenate([df, fluid.DRh])
            cnt += 1
    # cnt now counts only the files that contributed to df.
    cnt -= 1
    hist, bins = np.histogram(df, bins=Bins)
    hist_norm = hist / cnt
    width = 0.9 * (bins[1] - bins[0])
    center = (bins[:-1] + bins[1:]) / 2
    # ax.bar(center, hist_norm, align='center', width=width, log=True)
    plot_loghist(df, bins=Bins, ax=ax, log=True)
    # Percentage of samples with DRh at or below the given threshold.
    temp = (df <= cond).sum()
    res = round(temp * 100 / len(df), 3)
    return res

def post_processing(folder):
    files = get_files(folder)
    t = []
    x = []
    y = []
    for file in files:
        current_t, xcm, ycm = get_com(file)
        if current_t > 0:
            current_t = current_t - 0.05
            t.append(current_t)
            x.append(xcm)
            y.append(ycm)
    print("Done")
    my_data = genfromtxt('xcom_dataset.csv', delimiter=',')
    plt.plot(t, x, label='x/L DEM')
    plt.plot(my_data[:, 0], my_data[:, 1], label="x/L experiment")
    my_data = genfromtxt('ycom_dataset.csv', delimiter=',')
    plt.plot(my_data[:, 0], my_data[:, 1], label="y/L experiment")
    plt.plot(t, y, label='y/L DEM')
    plt.xlabel("time")
    plt.ylabel("centre of mass (x, y) / L")
    plt.legend()
    # plt.show()
    plt.savefig('/Users/harishraja/iitbreport/doc/rnd/' + str(folder[:-1]) + '.png')

def __init__(self, path, cache=True):
    self.path = path
    self.paths_list = get_files(path)
    # Caching
    #
    # Note: Caching is only used by get_frame and widget handlers.
    if cache:
        self.cache = {}
    else:
        self.cache = None

def post_processing(self):
    files = get_files('test_4_output')
    t = []
    y1 = []
    for solver_data, sand, wall in iter_output(files, 'sand', 'wall'):
        t.append(solver_data['t'])
        y1.append(sand.wz[0])
    print("Done")
    target = open('output.txt', 'a+')
    target.write(str(y1[-1]))
    target.close()
    print(y1[-1])

def post_processing(self):
    files = get_files('big_simulation_output')
    t = []
    y1 = []
    for solver_data, cube in iter_output(files, 'cube'):
        t.append(solver_data['t'])
        y1.append(cube.cm[1])
    print("Done")
    dat = np.array([t, y1])
    dat = dat.T
    print(dat)
    np.savetxt('data.txt', dat, delimiter=',')

def post_processing(self):
    files = get_files('test_7_output')
    t = []
    y1 = []
    y2 = []
    for solver_data, al, copper in iter_output(files, 'al', 'copper'):
        t.append(solver_data['t'])
        y1.append(-al.wz[0])
        y2.append(-copper.wz[0])
    print("Done")
    print(y1)
    print(y2)

def post_processing(self):
    files = get_files('stacked-particles_output')
    t = []
    y1 = []
    y2 = []
    for solver_data, sand in iter_output(files, 'sand'):
        t.append(solver_data['t'])
        y1.append(sand.y[0])
        y2.append(sand.y[1])
    print("Done")
    plt.plot(t, y1)
    # plt.plot(t, y2)
    plt.show()

def post_processing(self):
    files = get_files('test_3_output')
    t = []
    y1 = []
    y2 = []
    for solver_data, mg, al in iter_output(files, 'mg', 'al'):
        t.append(solver_data['t'])
        y2.append(-mg.u[0])
        y1.append(-al.u[0])
    print("Done")
    print(y1)
    print(y1[-1] / 3.9)
    print(y2[-1] / 3.9)

def post_processing(self):
    files = get_files('dcdem_output')
    t = []
    y1 = []
    y2 = []
    for solver_data, glass, lime in iter_output(files, 'glass', 'lime'):
        t.append(solver_data['t'])
        y1.append(-glass.fx[0])
        y2.append(-lime.fx[0])
    print("Done")
    plt.plot(t, y1)
    plt.plot(t, y2)
    plt.title("Force in overlap")
    plt.xlabel("time")
    plt.ylabel("Force")
    plt.show()

def post_processing(self):
    files = get_files('test_2_output')
    t = []
    y1 = []
    y2 = []
    for solver_data, mg, al in iter_output(files, 'mg', 'al'):
        t.append(solver_data['t'])
        y1.append(-mg.fx[0])
        y2.append(-al.fx[0])
    print("Done")
    plt.plot(t, y1)
    plt.plot(t, y2)
    plt.title("Force in overlap")
    plt.xlabel("time")
    plt.ylabel("Force")
    plt.show()

def find_sim_dirs(path, sim_paths_list=None):
    '''Find all the directories in a given directory that contain pysph
    output files.
    '''
    if sim_paths_list is None:
        sim_paths_list = []
    path = os.path.abspath(path)
    sim_files = get_files(path)
    if len(sim_files) != 0:
        sim_paths_list.append(path)
    else:
        # No output files here: recurse into the (non-hidden) sub-directories.
        files = os.listdir(path)
        files = [os.path.join(path, f) for f in files if not f.startswith('.')]
        files = [f for f in files if os.path.isdir(f)]
        for f in files:
            sim_paths_list = find_sim_dirs(f, sim_paths_list)
    return sim_paths_list

def run(options):
    for fname in options.inputfile:
        if os.path.isdir(fname):
            files = get_files(fname)
            options.inputfile.extend(files)
            continue
        data = load(fname)
        particles = []
        for ptype, pdata in data['arrays'].items():
            particles.append(pdata)
        filename = os.path.splitext(fname)[0]
        outdir = options.outdir
        if outdir is not None:
            if not os.path.exists(outdir):
                os.makedirs(outdir)
            filename = os.path.join(outdir, os.path.basename(filename))
        dump_vtk(filename, particles, scalars=options.scalars,
                 velocity=['u', 'v', 'w'])

def cull(src_path, c):
    src_path = os.path.abspath(src_path)
    sim_paths_list = find_sim_dirs(src_path)
    initial_size = find_dir_size(src_path)
    for path in sim_paths_list:
        files = get_files(path)
        n = len(files)
        # Keep every c-th output file (indices 0, c, 2c, ...) and delete the rest.
        del_files = [files[i] for i in set(range(n)) - set(range(0, n, c))]
        if len(del_files) != 0:
            for f in del_files:
                os.remove(f)
    final_size = find_dir_size(src_path)
    print("Initial size of the directory was: " + str(initial_size) + " bytes")
    print("Final size of the directory is: " + str(final_size) + " bytes")
    return

def custom_plot2(sph_schm, sph_schm_legend, sz=(19.2, 14.4), save=False):
    from pysph.solver.utils import load, iter_output, get_files
    cnt = 0
    for schm in sph_schm:
        file_loc = file_base + '/Outputs/' + schm
        leg = sph_schm_legend[schm]
        files = get_files(file_loc)
        data = load(files[-1])['arrays']['fluid']
        x, y, lmda, DRh = data.get('x', 'y', 'lmda', 'DRh')
        xlabel, ylabel = r'$\dfrac{|\delta \mathbf{\hat{r}_i}|}{\Delta x_i}$', r'$\lambda$'
        title = 'Taylor-Green Vortex | ' + leg
        if save:
            tle = file_base + '/TGV_jointplot-' + str(cnt) + savefig_additional + '.png'
            cnt += 1
            jointplot_semilogx(y=lmda, x=DRh, ylabel=ylabel, xlabel=xlabel,
                               title=title, save=tle, kind='reg', height=8)
        else:
            jointplot_semilogx(y=lmda, x=DRh, ylabel=ylabel, xlabel=xlabel,
                               title=title, save=save, kind='reg', height=8)

def post_processing(self):
    files = get_files('test_6_output')
    t = []
    u1 = []
    v1 = []
    u2 = []
    v2 = []
    for solver_data, al, nylon in iter_output(files, 'al', 'nylon'):
        t.append(solver_data['t'])
        u1.append(-al.u[0])
        v1.append(-al.v[0])
        u2.append(-nylon.u[0])
        v2.append(-nylon.v[0])
    print("Done")
    # aluminium
    print('aluminium')
    print(u1)
    print(v1)
    print(v1[-1] / u1[-1])
    # nylon
    print('nylon')
    print(u2)
    print(v2)
    print(v2[-1] / u2[-1])

def test_get_files(self):
    self.assertEqual(get_files(self.dirname), self.files)
    self.assertEqual(get_files(self.dirname, fname=self.fname), self.files)
    self.assertEqual(
        get_files(self.dirname, fname=self.fname, endswith=('npz', 'hdf5')),
        self.files
    )

KEnorm = np.average(temp_a.m*0.5*vmag*vmag)
temp_b = load('a10_b_output/a10_b_0.npz')['arrays']['fluid']
u, v = temp_b.u, temp_b.v
norm_b = np.max((u**2 + v**2)**0.5)
temp_c = load('deep_output/deep_0.hdf5')['arrays']['fluid']
u, v = temp_c.u, temp_c.v
norm_c = np.max((u**2 + v**2)**0.5)

for fname in get_files('a10_a_output/'):
    pa = load(fname)
    u = pa['arrays']['fluid'].u
    v = pa['arrays']['fluid'].v
    vmag = (u**2 + v**2)**0.5
    m = pa['arrays']['fluid'].m
    KE_a.append(np.average(0.5*m*vmag*vmag)/KEnorm)
    vmax = np.max((u**2 + v**2)**0.5)
    vmax_a.append(vmax/norm_a)
    time_a.append(pa['solver_data']['t'])

for fname in get_files('a10_b_output/'):
    pa = load(fname)

def run_example(self, filename, nprocs=2, timeout=300, atol=1e-14,
                serial_kwargs=None, extra_parallel_kwargs=None):
    """Run an example and compare the results in serial and parallel.

    Parameters
    ----------
    filename : str
        The name of the file to run.
    nprocs : int
        Number of processors to use for the parallel run.
    timeout : float
        Time in seconds to wait for execution before an error is raised.
    atol : float
        Absolute tolerance for differences between the runs.
    serial_kwargs : dict
        The options to pass for a serial run.  Note that if the value of a
        particular key is None, the option is simply set and no value is
        passed.  For example if `openmp=None`, then `--openmp` is used.
    extra_parallel_kwargs : dict
        The extra options to pass for the parallel run.
    """
    if serial_kwargs is None:
        serial_kwargs = {}
    if extra_parallel_kwargs is None:
        extra_parallel_kwargs = {}
    parallel_kwargs = dict(serial_kwargs)
    parallel_kwargs.update(extra_parallel_kwargs)
    prefix = os.path.splitext(os.path.basename(filename))[0]
    # dir1 is for the serial run
    dir1 = tempfile.mkdtemp()
    serial_kwargs.update(fname=prefix, directory=dir1)
    # dir2 is for the parallel run
    dir2 = tempfile.mkdtemp()
    parallel_kwargs.update(fname=prefix, directory=dir2)
    serial_args = self._kwargs_to_command_line(serial_kwargs)
    parallel_args = self._kwargs_to_command_line(parallel_kwargs)
    try:
        # run the example script in serial
        run_parallel_script.run(filename=filename, args=serial_args,
                                nprocs=1, timeout=timeout, path=MY_DIR)
        # run the example script in parallel
        run_parallel_script.run(filename=filename, args=parallel_args,
                                nprocs=nprocs, timeout=timeout, path=MY_DIR)
        # get the serial and parallel results
        dir1path = os.path.abspath(dir1)
        dir2path = os.path.abspath(dir2)

        # load the serial output
        file = get_files(dirname=dir1path, fname=prefix)[-1]
        serial = load(file)
        serial = serial['arrays']['fluid']

        # load the parallel output
        file = get_files(dirname=dir2path, fname=prefix)[-1]
        parallel = load(file)
        parallel = parallel['arrays']['fluid']
    finally:
        shutil.rmtree(dir1, True)
        shutil.rmtree(dir2, True)

    # test
    self._test(serial, parallel, atol, nprocs)

def load(self):
    self.files = files = utils.get_files(self.dirname, self.fname, self.endswith)
    self.nfiles = len(files)

u, v = temp_a.u, temp_a.v
norm_a = np.max((u**2 + v**2)**0.5)
vmag = (v**2 + u**2)**0.5
KEnorm = np.average(temp_a.m * 0.5 * vmag * vmag)

temp_b = load(
    'TVFoutput/b/vortex_spin_down_output/vortex_spin_down_0.hdf5'
)['arrays']['fluid']
u, v = temp_b.u, temp_b.v
norm_b = np.max((u**2 + v**2)**0.5)

temp_c = load(
    'TVFoutput/c/vortex_spin_down_output/vortex_spin_down_0.hdf5'
)['arrays']['fluid']
u, v = temp_c.u, temp_c.v
norm_c = np.max((u**2 + v**2)**0.5)

for fname in get_files('TVFoutput/a/vortex_spin_down_output/'):
    pa = load(fname)
    u = pa['arrays']['fluid'].u
    v = pa['arrays']['fluid'].v
    vmag = (u**2 + v**2)**0.5
    m = pa['arrays']['fluid'].m
    KE_a.append(np.average(0.5 * m * vmag * vmag) / KEnorm)
    vmax = np.max((u**2 + v**2)**0.5)
    vmax_a.append(vmax / norm_a)
    time_a.append(pa['solver_data']['t'])

for fname in get_files('TVFoutput/b/vortex_spin_down_output/'):
    pa = load(fname)
    u = pa['arrays']['fluid'].u
    v = pa['arrays']['fluid'].v
    vmax = np.max((u**2 + v**2)**0.5)