def compute_direction(cls):
    """Compute the model update direction from stored function/gradient values.

    Reads 'm_new', 'f_new', 'g_new' from cls.path; writes the search
    direction 'p_new' and the directional derivative 's_new'.
    """
    unix.cd(cls.path)
    m_new = loadnpy('m_new')
    f_new = loadtxt('f_new')
    g_new = loadnpy('g_new')

    if PAR.SCHEME == 'GradientDescent':
        # steepest descent: follow the negative gradient
        p_new = -g_new

    elif PAR.SCHEME == 'ConjugateGradient':
        # nonlinear conjugate gradient update
        p_new = cls.NLCG.compute()

    elif PAR.SCHEME == 'QuasiNewton':
        # L-BFGS update; the first iteration has no history yet
        if cls.iter == 1:
            p_new = -g_new
        else:
            cls.LBFGS.update()
            p_new = -cls.LBFGS.solve()

    # save results
    unix.cd(cls.path)
    savenpy('p_new', p_new)
    savetxt('s_new', np.dot(g_new, p_new))
def smooth(self, input_path='', output_path='', parameters=[], span=0.):
    """Smooth kernels by convolving them with a Gaussian.

    Wrapper over the xsmooth_sem utility.
    """
    if not exists(input_path):
        raise Exception
    if not exists(output_path):
        unix.mkdir(output_path)

    unix.cd(self.cwd)

    # xsmooth_sem needs mesh/coordinate files alongside the kernels
    mesh_files = []
    for pattern in ['proc??????_x.bin',
                    'proc??????_z.bin',
                    'proc??????_NSPEC_ibool.bin',
                    'proc??????_jacobian.bin']:
        mesh_files += glob(PATH.MODEL_INIT + '/' + pattern)
    for mesh_file in mesh_files:
        unix.cp(mesh_file, input_path)

    # apply smoothing operator
    for name in parameters or self.parameters:
        print(' smoothing', name)
        call_solver(system.mpiexec(),
                    PATH.SPECFEM_BIN + '/' + 'xsmooth_sem '
                    + str(span) + ' '
                    + str(span) + ' '
                    + name + '_kernel' + ' '
                    + input_path + '/ '
                    + output_path + '/ F',
                    output=output_path + '/smooth_' + name + '.log')
    print('')

    # rename output files (xsmooth_sem appends '_smooth')
    unix.rename('_smooth', '', glob(output_path + '/*'))
def finalize_search(cls):
    """Clean the line-search working directory and write the updated model."""
    unix.cd(cls.path)
    m0 = loadnpy('m_new')
    p = loadnpy('p_new')
    x = cls.step_lens()
    f = cls.func_vals()

    # clean working directory
    for fname in ['alpha', 'm_try', 'f_try']:
        unix.rm(fname)
    if cls.iter > 1:
        for prefix in ['m', 'f', 'g', 'p', 's']:
            unix.rm(prefix + '_old')
    for prefix in ['m', 'f', 'g', 'p', 's']:
        unix.mv(prefix + '_new', prefix + '_old')

    # write updated model: take the trial step with the lowest misfit
    alpha = x[f.argmin()]
    savetxt('alpha', alpha)
    savenpy('m_new', m0 + p * alpha)
    savetxt('f_new', f.min())
    cls.writer([], [], [])
def clip(self, path='', parameters=[], minval=-np.inf, maxval=np.inf):
    """Clip kernels to the range [minval, maxval].

    Wrapper over the xclip_sem utility.

    :param path: directory containing the kernel files
    :param parameters: material parameters to clip; falls back to
        self.parameters when empty
    """
    assert exists(path)
    assert len(parameters) > 0
    unix.cd(self.getpath)

    # FIX: honor the 'parameters' argument -- previously self.parameters
    # was always used, silently ignoring the argument asserted above
    for name in parameters or self.parameters:
        self.mpirun(
            PATH.SPECFEM_BIN +'/'+ 'xclip_sem '
            + str(minval) + ' '
            + str(maxval) + ' '
            + name + '_kernel' + ' '
            + path + '/ '
            + path + '/ ')

    # move input files to a '_noclip' backup directory
    src = path
    dst = path + '_noclip'
    unix.mkdir(dst)
    for name in parameters or self.parameters:
        unix.mv(glob(src+'/*'+name+'.bin'), dst)

    # rename output files (xclip_sem appends '_clip')
    unix.rename('_clip', '', glob(src+'/*'))
def generate_mesh(self, model_path=None, model_name=None, model_type='gll'):
    """Perform meshing and database generation."""
    assert model_name
    assert model_type

    self.initialize_solver_directories()
    unix.cd(self.cwd)

    # only user-supplied GLL models are supported
    if model_type != 'gll':
        raise NotImplementedError

    assert exists(model_path)
    self.check_mesh_properties(model_path)
    unix.cp(glob(model_path + '/' + '*'), self.model_databases)
    call_solver(system.mpiexec(), 'bin/xmeshfem3D')
    if self.taskid == 0:
        self.export_model(PATH.OUTPUT + '/' + model_name)
def generate_data(self, **model_kwargs):
    """Generate 'observed' data (meshing and database generation first)."""
    self.generate_mesh(**model_kwargs)

    unix.cd(self.cwd)
    setpar('SIMULATION_TYPE', '1')
    setpar('SAVE_FORWARD', '.false.')
    call_solver(system.mpiexec(), 'bin/xmeshfem2D', output='mesher.log')
    call_solver(system.mpiexec(), 'bin/xspecfem2D', output='solver.log')

    if PAR.FORMAT in ['SU', 'su']:
        # work around SPECFEM2D's version-dependent output file names
        # (some versions write *single_d.su instead of *single.su)
        unix.rename('single_d.su', 'single.su', glob('OUTPUT_FILES/*.su'))
        unix.mv(glob('OUTPUT_FILES/*.su'), 'traces/obs')

    if PAR.SAVETRACES:
        self.export_traces(PATH.OUTPUT + '/' + 'traces/obs')
def submit(self, workflow):
    """Submit a PBS job for the workflow."""
    unix.mkdir(PATH.OUTPUT)
    unix.cd(PATH.OUTPUT)

    # save current state
    self.checkpoint()

    # construct resource list
    # FIX: use floor division -- under Python 3 '/' yields floats, which
    # breaks the '== 0' checks and the %d formatting below; '//' is
    # identical to '/' for Python 2 ints, so this is backward-compatible
    nodes = PAR.NTASK // PAR.NODESIZE
    cores = PAR.NTASK % PAR.NODESIZE
    hours = PAR.WALLTIME // 60
    minutes = PAR.WALLTIME % 60
    resources = 'walltime=%02d:%02d:00 ' % (hours, minutes)
    if nodes == 0:
        resources += ',nodes=1:ppn=%d' % cores
    elif cores == 0:
        resources += ',nodes=%d:ppn=16' % nodes
    else:
        resources += ',nodes=%d:ppn=16+1:ppn=%d' % (nodes, cores)

    # construct arguments list
    unix.run('qsub '
             + '-N %s ' % PAR.TITLE
             + '-o %s ' % (PATH.SUBMIT + '/' + 'output.log')
             + '-l %s ' % resources
             + '-j %s ' % 'oe'
             + findpath('system') + '/' + 'pbs/wrapper_qsub '
             + PATH.OUTPUT,
             shell=True)
def run(self, classname, funcname, hosts='all', **kwargs):
    """Run tasks in serial or parallel on specified hosts."""
    self.checkpoint()
    self.save_kwargs(classname, funcname, kwargs)

    # select wrapper script and process count per hosts setting
    if hosts == 'all':
        nproc = 'mpiexec -n {} '.format(PAR.NTASK)
        wrapper = 'run_mpi'
    elif hosts == 'head':
        nproc = 'mpiexec -n 1 '
        wrapper = 'run_mpi_head'
    else:
        raise KeyError('Hosts parameter not set/recognized.')

    unix.cd(join(findpath('seisflows.system'), 'wrappers'))
    unix.run(nproc
             + PAR.MPIARGS + ' '
             + wrapper + ' '
             + PATH.OUTPUT + ' '
             + classname + ' '
             + funcname)
def export_kernels(self, path):
    """Export event kernels, normalizing kernel names (alpha->vp, beta->vs).

    :param path: destination directory; kernels land in
        path/kernels/<basename(self.getpath)>
    """
    unix.cd(self.kernel_databases)

    # work around conflicting name conventions
    files = []
    files += glob('*proc??????_alpha_kernel.bin')
    files += glob('*proc??????_alpha[hv]_kernel.bin')
    files += glob('*proc??????_reg1_alpha_kernel.bin')
    files += glob('*proc??????_reg1_alpha[hv]_kernel.bin')
    unix.rename('alpha', 'vp', files)

    files = []
    files += glob('*proc??????_beta_kernel.bin')
    files += glob('*proc??????_beta[hv]_kernel.bin')
    files += glob('*proc??????_reg1_beta_kernel.bin')
    files += glob('*proc??????_reg1_beta[hv]_kernel.bin')
    unix.rename('beta', 'vs', files)

    # hack to deal with problems on parallel filesystem
    unix.mkdir(join(path, 'kernels'), noexit=True)
    unix.mkdir(join(path, 'kernels', basename(self.getpath)))

    # FIX: dropped the spurious os.path.join() around the glob list -- a
    # single-argument join() returns its argument unchanged, so it only
    # "worked" by accident
    src = glob('*_kernel.bin')
    dst = join(path, 'kernels', basename(self.getpath))
    unix.mv(src, dst)
def combine(self, path=''):
    """Combine (sum) SPECFEM3D event kernels via xsum_kernels."""
    unix.cd(self.getpath)

    # create temporary files and directories
    event_dirs = unix.ls(path)
    with open('kernels_list.txt', 'w') as listing:
        listing.write('\n'.join(event_dirs) + '\n')
    unix.mkdir('INPUT_KERNELS')
    unix.mkdir('OUTPUT_SUM')
    for event in event_dirs:
        unix.ln(path + '/' + event, 'INPUT_KERNELS' + '/' + event)

    # sum kernels
    self.mpirun(PATH.SOLVER_BINARIES + '/' + 'xsum_kernels')
    unix.mv('OUTPUT_SUM', path + '/' + 'sum')

    # remove temporary files and directories
    unix.rm('INPUT_KERNELS')
    unix.rm('kernels_list.txt')
    unix.cd(path)
def submit(self, workflow):
    """ Submits job (Python 2 implementation; note the 'print args' below) """
    unix.mkdir(PATH.OUTPUT)
    unix.cd(PATH.OUTPUT)

    # save current state
    save_objects('SeisflowsObjects')
    save_parameters('SeisflowsParameters.json')
    save_paths('SeisflowsPaths.json')

    # NOTE(review): node size is hard-coded to 16 cores here, unlike other
    # system interfaces that use PAR.NODESIZE -- confirm cluster layout
    nodes = PAR.NTASK / 16
    cores = PAR.NTASK % 16
    hours = PAR.WALLTIME / 60
    minutes = PAR.WALLTIME % 60

    # construct resource list
    resources = 'walltime=%02d:%02d:00 ' % (hours, minutes)
    if nodes == 0:
        resources += ',nodes=1:ppn=%d' % cores
    elif cores == 0:
        resources += ',nodes=%d:ppn=16' % nodes
    else:
        resources += ',nodes=%d:ppn=16+1:ppn=%d' % (nodes, cores)

    # construct qsub command line
    args = ('qsub '
            + '-N %s ' % PAR.TITLE
            + '-o %s ' % (PATH.SUBMIT + '/' + 'output.log')
            + '-l %s ' % resources
            + '-j %s ' % 'oe'
            + findpath('system') + '/' + 'pbs/wrapper_qsub '
            + PATH.OUTPUT)
    print args  # DEBUG

    subprocess.call(args, shell=1)
def generate_mesh(self, model_path=None, model_name=None, model_type='gll'):
    """ Performs meshing and database generation

    :param model_path: directory containing model files (used for 'gll' only)
    :param model_name: name under which the model is exported
    :param model_type: one of 'gll', 'sep', 'default', 'tomo'
    """
    assert (model_name)
    assert (model_type)

    self.initialize_solver_directories()
    unix.cd(self.getpath)

    if model_type == 'gll':
        assert (exists(model_path))
        # copy user-supplied GLL model files into the databases directory
        unix.cp(glob(model_path + '/' + '*'), self.databases)
    elif model_type == 'sep':
        # NOTE(review): not handled -- the meshers below still run without
        # importing a model; confirm whether this should raise instead
        pass
    elif model_type == 'default':
        # default model is presumably defined by the solver's own input
        # files -- TODO confirm
        pass
    elif model_type == 'tomo':
        # NOTE(review): not handled -- see note for 'sep' above
        pass

    self.mpirun('bin/xmeshfem3D')
    self.mpirun('bin/xgenerate_databases')
    self.export_model(PATH.OUTPUT + '/' + model_name)
def combine(self, input_path='', output_path='', parameters=[]):
    """Sum individual source contributions (wrapper over xcombine_sem)."""
    if not exists(input_path):
        raise Exception
    if not exists(output_path):
        unix.mkdir(output_path)

    unix.cd(self.cwd)

    all_names = self.check_source_names()
    subset = [all_names[idx] for idx in self._source_subset]
    with open('kernel_paths', 'w') as listing:
        for source in subset:
            listing.write(join(input_path, source) + '\n')

    # SAGA component - include contributions from reference gradient
    leftover = list(set(self._source_names) - set(subset))
    with open('kernel_paths', 'a') as listing:
        for source in leftover:
            listing.write(join(PATH.GRAD_AGG, source) + '\n')

    for name in parameters or self.parameters:
        call_solver(
            system.mpiexec(),
            PATH.SPECFEM_BIN + '/' + 'xcombine_sem '
            + name + '_kernel' + ' '
            + 'kernel_paths' + ' '
            + output_path)
def update(self):
    """ Updates L-BFGS algorithm history

    Stores the newest model/gradient difference pair (s, y) in the
    memory-mapped history matrices S and Y, shifting older columns right.
    Column 0 always holds the most recent pair.

    :return: the updated history matrices (S, Y) as numpy memmaps
    """
    unix.cd(self.path)

    s = self.load("m_new") - self.load("m_old")  # model step
    y = self.load("g_new") - self.load("g_old")  # gradient change

    m = len(s)       # model dimension
    n = self.memory  # maximum number of stored pairs

    if self.memory_used == 0:
        # first update: create the on-disk history files
        S = np.memmap("LBFGS/S", mode="w+", dtype="float32", shape=(m, n))
        Y = np.memmap("LBFGS/Y", mode="w+", dtype="float32", shape=(m, n))
        S[:, 0] = s
        Y[:, 0] = y
        self.memory_used = 1
    else:
        # shift history one column to the right, then insert the new pair
        # at column 0 (the oldest pair falls off the end)
        S = np.memmap("LBFGS/S", mode="r+", dtype="float32", shape=(m, n))
        Y = np.memmap("LBFGS/Y", mode="r+", dtype="float32", shape=(m, n))
        S[:, 1:] = S[:, :-1]
        Y[:, 1:] = Y[:, :-1]
        S[:, 0] = s
        Y[:, 0] = y
        if self.memory_used < self.memory:
            self.memory_used += 1

    return S, Y
def write_receivers(self):
    """Write receiver (STATIONS) information for the solver."""
    unix.cd(self.getpath)

    # instruct the solver to use the existing STATIONS file
    setpar('use_existing_STATIONS', '.true.')

    _, headers = preprocess.load('traces/obs')
    solvertools.write_receivers(headers.nr, headers.rx, headers.rz)
def smooth(self, path='', tag='gradient', span=0.):
    """ smooths SPECFEM3D kernels

    Smooths only inversion parameters; other model parameters are copied
    through unchanged. (Python 2 print statements.)
    """
    unix.cd(self.getpath)

    # list kernels: pair each model parameter with a flag indicating
    # whether it is an inversion parameter (and hence should be smoothed)
    kernels = []
    for name in self.model_parameters:
        if name in self.inversion_parameters:
            flag = True
        else:
            flag = False
        kernels = kernels + [[name, flag]]

    # smooth kernels
    for name, flag in kernels:
        if flag:
            print ' smoothing', name
            self.mpirun(
                PATH.SOLVER_BINARIES + '/' + 'xsmooth_sem '
                + str(span) + ' '
                + str(span) + ' '
                + name + ' '
                + path + '/' + tag + '/ '
                + path + '/' + tag + '/ ')

    # move kernels: smoothed ones are moved to a '_nosmooth' backup,
    # unsmoothed ones are copied so both directories stay complete
    src = path + '/' + tag
    dst = path + '/' + tag + '_nosmooth'
    unix.mkdir(dst)
    for name, flag in kernels:
        if flag:
            unix.mv(glob(src + '/*' + name + '.bin'), dst)
        else:
            unix.cp(glob(src + '/*' + name + '.bin'), dst)
    # xsmooth_sem appends '_smooth' to output names; strip it
    unix.rename('_smooth', '', glob(src + '/*'))
    print ''

    unix.cd(path)
def smooth(self, path='', parameters=[], span=0.): """ Smooths kernels by convolving them with a Gaussian. Wrapper over xsmooth_sem utility. """ assert exists(path) assert len(parameters) > 0 # apply smoothing operator unix.cd(self.getpath) for name in parameters: print ' smoothing', name self.mpirun( PATH.SPECFEM_BIN +'/'+ 'xsmooth_sem ' + str(span) + ' ' + str(span) + ' ' + name + '_kernel' + ' ' + path + '/ ' + path + '/ ', output=self.getpath+'/'+'OUTPUT_FILES/output_smooth_sem.txt') print '' # move input files src = path dst = path + '_nosmooth' unix.mkdir(dst) for name in self.parameters: unix.mv(glob(src+'/*'+name+'.bin'), dst) # rename output files unix.rename('_smooth', '', glob(src+'/*'))
def update_w(self, chi, kk):
    """ Updates SRVM algorithm history

    Applies the stored sequence of rank-one updates to the vector chi.

    :param chi: input vector
    :param kk: current iteration index
    :return: chi after application of the stored updates
    """
    self.path = PATH.OPTIMIZE
    unix.cd(self.path)

    # mm: maximum number of stored history entries
    mm = 500
    Shat_chi = chi
    for ii in range(mm):
        # jj: absolute index of the history entry; entries with jj <= 0
        # predate the first iteration and are skipped
        jj = ii + 1 + kk - mm
        if jj > 0:
            # load scalar a, scalar nu, and vector w for entry jj
            # (copied to fixed names 'A'/'Nu'/'W' before loading)
            unix.cp('a_%04d' % jj, 'A')
            a = self.loadtxt('A')
            unix.cp('nu_%04d' % jj, 'Nu')
            nu = self.loadtxt('Nu')
            unix.cp('w_%04d' % jj, 'W')
            wtemp = self.load('W')
            # rank-one update: chi <- chi - (w . chi) * nu/a * w
            xtemp = self.dot(wtemp, Shat_chi)
            Shat_chi = Shat_chi - xtemp * nu / a * wtemp
    return Shat_chi
def generate_data(self, **model_kwargs): """ Generates data in the synthetic-synthetic comparison case. Not for use in the real-data problem. Differs from specfem3d.nz in that it automatically calls generate mesh for the true model, rather than passing them in as kwargs. """ # Prepare for the forward simulation self.generate_mesh(**model_kwargs) print 'specfem3d_nz.generate data' unix.cd(self.cwd) setpar('SIMULATION_TYPE', '1') setpar('SAVE_FORWARD', '.true.') setpar('ATTENUATION ', '.true.') call_solver(system.mpiexec(), 'bin/xspecfem3D') # seismic unix format if PAR.FORMAT in ['SU', 'su']: src = glob('OUTPUT_FILES/*_d?_SU') dst = 'traces/obs' unix.mv(src, dst) # ascii sem output format elif PAR.FORMAT == "ascii": src = glob('OUTPUT_FILES/*sem?') dst = 'traces/obs' unix.mv(src, dst) if PAR.SAVETRACES: self.export_traces(PATH.OUTPUT + '/' + 'traces/obs')
def run(self, classname, funcname, hosts='all', **kwargs):
    """Run tasks in serial or parallel on specified hosts."""
    # to avoid cryptic MPI messages, use "--mca_warn_on_fork 0" as the
    # default value for MPIARGS, and use subprocess.call rather than
    # call_catch to invoke mpiexec
    self.checkpoint()
    self.save_kwargs(classname, funcname, kwargs)

    if hosts == 'all':
        nproc_flag = '-n %d ' % PAR.NTASK
        wrapper = 'run_mpi'
    elif hosts == 'head':
        nproc_flag = '-n 1 '
        wrapper = 'run_mpi_head'
    else:
        raise KeyError('Hosts parameter not set/recognized.')

    unix.cd(join(findpath('seisflows.system'), 'wrappers'))
    subprocess.call(PAR.MPIEXEC + ' '
                    + nproc_flag
                    + PAR.MPIARGS + ' '
                    + wrapper + ' '
                    + PATH.OUTPUT + ' '
                    + classname + ' '
                    + funcname,
                    shell=True)
def smooth(self, input_path='', output_path='', parameters=[], span=0.):
    """ Smooths kernels by convolving them with a Gaussian.  Wrapper over
    xsmooth_sem utility.

    :param input_path: directory containing kernels to smooth
    :param output_path: directory receiving smoothed kernels
    :param parameters: material parameters; falls back to self.parameters
    :param span: Gaussian smoothing length
    """
    if not exists(input_path):
        raise Exception
    if not exists(output_path):
        unix.mkdir(output_path)

    # apply smoothing operator
    unix.cd(self.cwd)
    for name in parameters or self.parameters:
        print ' smoothing', name
        call_solver(
            system.mpiexec(),
            PATH.SPECFEM_BIN +'/'+ 'xsmooth_sem '
            + str(span) + ' '
            + str(span) + ' '
            + name + '_kernel' + ' '
            + input_path + '/ '
            + output_path + '/ ',
            output='/dev/null')
    print ''

    # rename output files (xsmooth_sem appends '_smooth')
    files = glob(output_path+'/*')
    unix.rename('_smooth', '', files)
def initialize_solver_directories(self):
    """Create the directory structure expected by SPECFEM3D_GLOBE, copy
    executables, and prepare input files.

    Executables must be supplied by the user, as there is currently no
    mechanism to compile them automatically from source.
    """
    unix.mkdir(self.getpath)
    unix.cd(self.getpath)

    # create directory structure
    for directory in ['bin', 'DATA',
                      'traces/obs', 'traces/syn', 'traces/adj',
                      self.databases]:
        unix.mkdir(directory)

    # copy executables
    unix.cp(glob(PATH.SOLVER_BINARIES + '/' + '*'), 'bin/')

    # copy input files
    unix.cp(glob(PATH.SOLVER_FILES + '/' + '*'), 'DATA/')
def run(self, classname, funcname, hosts='all', **kwargs):
    """Run tasks in serial or parallel on specified hosts."""
    # to avoid cryptic MPI messages, use "--mca_warn_on_fork 0" as the
    # default value for MPIARGS, and use subprocess.call rather than
    # call_catch to invoke mpiexec
    self.checkpoint()
    self.save_kwargs(classname, funcname, kwargs)

    # dispatch table: hosts setting -> (process-count flag, wrapper script)
    dispatch = {'all': ('-n %d ' % PAR.NTASK, 'run_mpi'),
                'head': ('-n 1 ', 'run_mpi_head')}
    if hosts not in dispatch:
        raise KeyError('Hosts parameter not set/recognized.')
    nproc_flag, wrapper_script = dispatch[hosts]

    unix.cd(join(findpath('seisflows.system'), 'wrappers'))
    subprocess.call(PAR.MPIEXEC + ' '
                    + nproc_flag
                    + PAR.MPIARGS + ' '
                    + wrapper_script + ' '
                    + PATH.OUTPUT + ' '
                    + classname + ' '
                    + funcname,
                    shell=True)
def smooth(self, path='', parameters=[], span=0.):
    """ Smooths kernels by convolving them with a Gaussian.  Wrapper over
    xsmooth_sem utility.

    :param path: directory containing kernel files
    :param parameters: material parameters; falls back to self.parameters
    :param span: Gaussian smoothing length
    """
    assert exists(path)
    assert len(parameters) > 0

    # apply smoothing operator
    unix.cd(self.getpath)
    for name in parameters or self.parameters:
        print ' smoothing', name
        call_solver(
            system.mpiexec(),
            PATH.SPECFEM_BIN +'/'+ 'xsmooth_sem '
            + str(span) + ' '
            + str(span) + ' '
            + name + '_kernel' + ' '
            + path + '/ '
            + path + '/ ',
            output=self.getpath+'/'+'OUTPUT_FILES/output_smooth_sem.txt')
    print ''

    # move input files to a '_nosmooth' backup directory
    src = path
    dst = path + '_nosmooth'
    unix.mkdir(dst)
    for name in parameters or self.parameters:
        unix.mv(glob(src+'/*'+name+'_kernel.bin'), dst)

    # rename output files (xsmooth_sem appends '_smooth')
    unix.rename('_smooth', '', glob(src+'/*'))
def generate_mesh(self, model_path=None, model_name=None, model_type='gll'):
    """ Performs meshing and database generation

    :param model_path: directory containing GLL model files
    :param model_name: name under which the model is exported
    :param model_type: only 'gll' is implemented
    """
    assert(model_name)
    assert(model_type)

    self.initialize_solver_directories()
    unix.cd(self.getpath)

    if model_type in ['gll']:
        # warn when the Par_file disagrees with the requested model type
        par = getpar('MODEL').strip()
        if par != 'gll':
            if self.getnode == 0:
                print 'WARNING: Unexpected Par_file setting:'
                print 'MODEL =', par

        assert(exists(model_path))
        self.check_mesh_properties(model_path)

        # copy model files into the databases directory
        src = glob(model_path +'/'+ '*')
        dst = self.model_databases
        unix.cp(src, dst)

        self.call('bin/xmeshfem3D')
        self.call('bin/xgenerate_databases')
        self.export_model(PATH.OUTPUT +'/'+ model_name)
    else:
        raise NotImplementedError
def submit(self, workflow):
    """Submit a PBS job for the workflow."""
    unix.mkdir(PATH.OUTPUT)
    unix.cd(PATH.OUTPUT)

    # save current state
    self.checkpoint()

    # construct resource list
    # FIX: floor division -- under Python 3 '/' returns floats, breaking
    # the '== 0' tests and the %d formatting; '//' behaves identically on
    # Python 2 ints, so this is backward-compatible
    nodes = PAR.NTASK//PAR.NODESIZE
    cores = PAR.NTASK%PAR.NODESIZE
    hours = PAR.WALLTIME//60
    minutes = PAR.WALLTIME%60
    resources = 'walltime=%02d:%02d:00 '%(hours, minutes)
    if nodes == 0:
        resources += ',nodes=1:ppn=%d'%cores
    elif cores == 0:
        resources += ',nodes=%d:ppn=16'%nodes
    else:
        resources += ',nodes=%d:ppn=16+1:ppn=%d'%(nodes, cores)

    # construct arguments list
    unix.run('qsub '
             + '-N %s '%PAR.TITLE
             + '-o %s '%(PATH.SUBMIT +'/'+ 'output.log')
             + '-l %s '%resources
             + '-j %s '%'oe'
             + findpath('system') +'/'+ 'pbs/wrapper_qsub '
             + PATH.OUTPUT,
             shell=True)
def solve(self):
    """ Applies the L-BFGS two-loop recursion to the current gradient.

    History is stored with the most recent (s, y) pair in column 0.
    Returns the (un-negated) search direction vector r; if -r is not a
    descent direction, restarts and returns the raw gradient g instead
    (the caller negates the return value).
    """
    unix.cd(self.path)
    g = self.load('g_new')
    q = np.copy(g)
    n = len(q)

    unix.cd('LBFGS')
    S = np.memmap('S', mode='r', dtype='float32', shape=(n, self.kmax))
    Y = np.memmap('Y', mode='r', dtype='float32', shape=(n, self.kmax))
    k = loadtxt('k')  # number of stored pairs

    rh = np.zeros(k)  # rho_i = 1 / (y_i . s_i)
    al = np.zeros(k)  # alpha_i

    # first loop: newest to oldest (column 0 is most recent)
    for i in range(0, k):
        rh[i] = 1 / np.dot(Y[:, i], S[:, i])
        al[i] = rh[i] * np.dot(S[:, i], q)
        q = q - al[i] * Y[:, i]

    # initial Hessian scaling gamma = (s.y)/(y.y) from the newest pair
    sty = np.dot(Y[:, 0], S[:, 0])
    yty = np.dot(Y[:, 0], Y[:, 0])
    r = sty / yty * q

    # second loop: oldest to newest
    for i in range(k - 1, -1, -1):
        be = rh[i] * np.dot(Y[:, i], r)
        r = r + S[:, i] * (al[i] - be)

    # check for ill conditioning: -r must be a descent direction
    if np.dot(g, -r) >= 0:
        self.restart()
        return g
    return r
def compute_step(cls):
    """ Computes next trial step length

    Reads the current model, search direction, and line-search history
    from cls.path, then writes the trial step 'alpha' and trial model
    'm_try'.
    """
    unix.cd(cls.path)
    m0 = loadnpy('m_new')
    p = loadnpy('p_new')
    f0 = loadtxt('f_new')
    g0 = loadtxt('s_new')

    x = cls.step_lens()   # step lengths evaluated so far
    f = cls.func_vals()   # corresponding function values

    # compute trial step length
    if PAR.SRCHTYPE == 'Backtrack':
        alpha = lib.backtrack2(f0, g0, x[1], f[1], b1=0.1, b2=0.5)

    elif PAR.SRCHTYPE == 'Bracket':
        FACTOR = 2.
        if any(f[1:] < f[0]) and (f[-2] < f[-1]):
            # minimum bracketed: interpolate
            alpha = lib.polyfit2(x, f)
        elif any(f[1:] < f[0]):
            # misfit still decreasing: expand the step
            alpha = loadtxt('alpha') * FACTOR
        else:
            # no decrease yet: contract the step
            alpha = loadtxt('alpha') * FACTOR**-1

    elif PAR.SRCHTYPE == 'Fixed':
        # FIX: the original referenced an undefined name 'step' (NameError
        # at runtime); use the count of step lengths evaluated so far
        # instead -- TODO confirm this matches the intended fixed schedule
        alpha = cls.step_ratio * len(x) * PAR.STEPLEN

    else:
        raise ValueError

    # write trial model
    savetxt('alpha', alpha)
    savenpy('m_try', m0 + p * alpha)
def write_receivers(self):
    """Write receiver (STATIONS) information for the solver."""
    unix.cd(self.cwd)

    # instruct the solver to use the existing STATIONS file
    setpar('use_existing_STATIONS', '.true.')

    _, headers = preprocess.load('traces/obs')
    solvertools.write_receivers(headers.nr, headers.rx, headers.rz)
def combine(self, input_path='', output_path='', parameters=[]):
    """Sum individual source contributions (wrapper over xcombine_sem)."""
    if not exists(input_path):
        raise Exception
    if not exists(output_path):
        unix.mkdir(output_path)

    unix.cd(self.cwd)

    names = self.check_source_names()
    chosen = [names[i] for i in self._source_subset]
    with open('kernel_paths', 'w') as paths_file:
        paths_file.writelines(
            [join(input_path, src) + '\n' for src in chosen])

    # SAGA component - include contributions from reference gradient
    rest = list(set(self._source_names) - set(chosen))
    with open('kernel_paths', 'a') as paths_file:
        paths_file.writelines(
            [join(PATH.GRAD_AGG, src) + '\n' for src in rest])

    for name in parameters or self.parameters:
        call_solver(
            system.mpiexec(),
            PATH.SPECFEM_BIN +'/'+ 'xcombine_sem '
            + name + '_kernel' + ' '
            + 'kernel_paths' + ' '
            + output_path)
def combine_vol_data(self, output_path='', quantity=''): """ This does not work Call Specfems executable combine_vol_data_vtk on kernels or model files """ if not exists(output_path): unix.mkdir(output_path) # This should probably be moved to its own function # def import_kernels() unix.cd(self.cwd) src = glob(join(PATH.GRAD, self.source_name, "*{}*".format(quantity))) dst = join(self.cwd, "kernels") unix.mkdir(dst) unix.ln(src=src, dst=dst) solver_call = " ".join([ PATH.SPECFEM_BIN + '/' + 'xcombine_vol_data_vtk', 0, # NPROC_START PAR.NPROC, # NPROC_END quantity, # QUANTITY dst, # DIR_IN dst, # DIR_OUT, we will rename the files first 0 # GPU ACCEL ]) call_solver(system_mpiexec(), solver_call) unix.rm(dst) print ''
def submit(self, workflow):
    """Submit a PBS job for the workflow."""
    unix.mkdir(PATH.OUTPUT)
    unix.cd(PATH.OUTPUT)

    # save current state
    self.checkpoint()

    # construct resource list
    full_nodes = int(PAR.NTASK / PAR.NODESIZE)
    extra_cores = PAR.NTASK % PAR.NODESIZE
    hours = int(PAR.WALLTIME / 60)
    minutes = PAR.WALLTIME % 60

    resources = 'walltime=%02d:%02d:00' % (hours, minutes)
    if full_nodes == 0:
        resources += ',mem=%dgb,nodes=1:ppn=%d' % (PAR.MEMORY, extra_cores)
    elif extra_cores == 0:
        resources += ',mem=%dgb,nodes=%d:ppn=%d' % (
            PAR.MEMORY, full_nodes, PAR.NODESIZE)
    else:
        resources += ',mem=%dgb,nodes=%d:ppn=%d+1:ppn=%d' % (
            PAR.MEMORY, full_nodes, PAR.NODESIZE, extra_cores)

    # construct arguments list
    call('qsub '
         + '%s ' % PAR.PBSARGS
         + '-N %s ' % PAR.TITLE
         + '-o %s ' % (PATH.SUBMIT + '/' + 'output.log')
         + '-l %s ' % resources
         + '-j %s ' % 'oe'
         + findpath('seisflows.system') + '/' + 'wrappers/submit '
         + '-F %s ' % PATH.OUTPUT)
def generate_mesh(self, model_path=None, model_name=None, model_type='gll'):
    """Perform meshing and database generation.

    Copies model files into DATA and exports the model on task 0.
    """
    # model name and type must be provided
    assert model_name
    assert model_type

    self.initialize_solver_directories()
    unix.cd(self.cwd)
    assert exists(model_path)  # model directory must exist

    # fill _mesh_properties: number of integration points, number of
    # procs used, and the coordinates of the points
    self.check_mesh_properties(model_path)

    # copy the model files (e.g. proc000023_vp.bin ...) into DATA
    unix.cp(glob(join(model_path, '*')), join(self.cwd, 'DATA'))

    # export the model into the output folder
    if self.taskid == 0:
        self.export_model(PATH.OUTPUT + '/' + model_name)
def update(self):
    """ Updates L-BFGS algorithm history

    Inserts the newest model/gradient difference pair (s, y) at column 0
    of the on-disk history matrices S and Y, shifting older pairs right.

    :return: the updated history matrices (S, Y) as numpy memmaps
    """
    unix.cd(self.path)

    s = self.load('m_new') - self.load('m_old')  # model step
    y = self.load('g_new') - self.load('g_old')  # gradient change

    m = len(s)       # model dimension
    n = self.memory  # maximum number of stored pairs

    if self.memory_used == 0:
        # first update: create the on-disk history files
        S = np.memmap('LBFGS/S', mode='w+', dtype='float32', shape=(m, n))
        Y = np.memmap('LBFGS/Y', mode='w+', dtype='float32', shape=(m, n))
        S[:, 0] = s
        Y[:, 0] = y
        self.memory_used = 1
    else:
        # shift history one column right, then insert the new pair at
        # column 0 (the oldest pair falls off the end)
        S = np.memmap('LBFGS/S', mode='r+', dtype='float32', shape=(m, n))
        Y = np.memmap('LBFGS/Y', mode='r+', dtype='float32', shape=(m, n))
        S[:, 1:] = S[:, :-1]
        Y[:, 1:] = Y[:, :-1]
        S[:, 0] = s
        Y[:, 0] = y
        if self.memory_used < self.memory:
            self.memory_used += 1

    return S, Y
def __call__(self):
    """ Returns L-BFGS search direction

    :return: (direction, status) -- status is 0 for a normal L-BFGS step
        and nonzero when the algorithm fell back to steepest descent
    """
    self.iter += 1
    print '\tComputing search direction using L-BFGS optimization schema'
    unix.cd(self.path)
    g = self.load('g_new')

    # first iteration: no history yet, use steepest descent
    if self.iter == 1:
        return -g, 0
    # periodic restart once maxiter iterations have accumulated
    elif self.iter > self.maxiter:
        print 'restarting LBFGS... [periodic restart]'
        self.restart()
        return -g, 1

    S, Y = self.update()
    q = self.apply(g, S, Y)

    status = self.check_status(g, q)
    if status != 0:
        # not an acceptable direction: discard history, use steepest descent
        self.restart()
        return -g, status
    else:
        return -q, status
def generate_mesh(self, model_path=None, model_name=None, model_type='gll'):
    """ Performs meshing and database generation

    NOTE(review): unlike other solver interfaces in this file, this does
    not call initialize_solver_directories() first, and runs only
    xgenerate_databases (no xmeshfem3D) -- confirm intended.
    """
    print 'specfem3d_nz.generate mesh'
    assert (model_name)
    assert (model_type)

    unix.cd(self.cwd)

    if model_type in ['gll']:
        # warn when the Par_file disagrees with the requested model type
        par = getpar('MODEL').strip()
        if par != 'gll':
            if self.taskid == 0:
                print 'WARNING: Unexpected Par_file setting:'
                print 'MODEL =', par

        assert (exists(model_path))
        self.check_mesh_properties(model_path)

        # copy model files into the databases directory
        src = glob(model_path + '/' + '*')
        dst = self.model_databases
        unix.cp(src, dst)

        call_solver(system.mpiexec(), 'bin/xgenerate_databases')

        if self.taskid == 0:
            self.export_model(PATH.OUTPUT + '/' + model_name)
    else:
        raise NotImplementedError
def clip(self, path='', parameters=[], minval=-np.inf, maxval=np.inf):
    """ Clips kernels to the range [minval, maxval].  Wrapper over
    xclip_sem utility.

    :param path: directory containing kernel files
    :param parameters: material parameters; falls back to self.parameters
    """
    assert exists(path)
    assert len(parameters) > 0
    unix.cd(self.getpath)

    for name in parameters or self.parameters:
        call_solver(
            # FIX: call system.mpiexec() -- the original passed the
            # function object itself instead of its return value, as every
            # other call_solver() invocation in this file does
            system.mpiexec(),
            PATH.SPECFEM_BIN +'/'+ 'xclip_sem '
            + str(minval) + ' '
            + str(maxval) + ' '
            + name + '_kernel' + ' '
            + path + '/ '
            + path + '/ ')

    # move input files to a '_noclip' backup directory
    src = path
    dst = path + '_noclip'
    unix.mkdir(dst)
    for name in parameters or self.parameters:
        unix.mv(glob(src+'/*'+name+'.bin'), dst)

    # rename output files (xclip_sem appends '_clip')
    unix.rename('_clip', '', glob(src+'/*'))
def smooth(self, input_path='', output_path='', parameters=[], span=0.):
    """ Smooths kernels by convolving them with a Gaussian.  Wrapper over
    xsmooth_sem utility.

    :param input_path: directory containing kernels to smooth
    :param output_path: directory receiving smoothed kernels
    :param parameters: material parameters; falls back to self.parameters
    :param span: Gaussian smoothing length
    """
    if not exists(input_path):
        raise Exception
    if not exists(output_path):
        unix.mkdir(output_path)

    # apply smoothing operator
    unix.cd(self.cwd)
    for name in parameters or self.parameters:
        print ' smoothing', name
        call_solver(system.mpiexec(),
                    PATH.SPECFEM_BIN + '/' + 'xsmooth_sem '
                    + str(span) + ' '
                    + str(span) + ' '
                    + name + '_kernel' + ' '
                    + input_path + '/ '
                    + output_path + '/ ',
                    output='/dev/null')
    print ''

    # rename output files (xsmooth_sem appends '_smooth')
    files = glob(output_path + '/*')
    unix.rename('_smooth', '', files)
def submit(self, workflow):
    """Submit the workflow as a PBS Pro job."""
    unix.mkdir(PATH.OUTPUT)
    unix.cd(PATH.OUTPUT)
    unix.mkdir(PATH.SUBMIT+'/'+'output.pbs')
    self.checkpoint()

    hours, minutes = PAR.WALLTIME/60, PAR.WALLTIME%60
    walltime = 'walltime=%02d:%02d:00 ' % (hours, minutes)

    ncpus = PAR.NODESIZE
    mpiprocs = PAR.NODESIZE

    # prepare qsub arguments
    unix.run(
        'qsub '
        + '%s ' % PAR.PBSARGS
        + '-l select=1:ncpus=%d:mpiprocs=%d ' % (ncpus, mpiprocs)
        + '-l %s ' % walltime
        + '-N %s ' % PAR.TITLE
        + '-j %s '%'oe'
        + '-o %s ' % (PATH.SUBMIT+'/'+'output.log')
        + '-V '
        + ' -- '
        + findpath('seisflows.system') +'/'+ 'wrappers/submit '
        + PATH.OUTPUT)
def generate_mesh(self, model_path=None, model_name=None, model_type='gll'):
    """ Performs meshing and database generation

    :param model_path: directory containing GLL model files
    :param model_name: name under which the model is exported
    :param model_type: only 'gll' is implemented
    """
    assert(model_name)
    assert(model_type)

    self.initialize_solver_directories()
    unix.cd(self.getpath)

    if model_type in ['gll']:
        # warn when the Par_file disagrees with the requested model type
        par = getpar('MODEL').strip()
        if par != 'gll':
            if self.getnode == 0:
                print 'WARNING: Unexpected Par_file setting:'
                print 'MODEL =', par

        assert(exists(model_path))
        self.check_mesh_properties(model_path)

        # copy model files into the databases directory
        src = glob(model_path +'/'+ '*')
        dst = self.model_databases
        unix.cp(src, dst)

        self.mpirun('bin/xmeshfem3D')
        self.mpirun('bin/xgenerate_databases')
        self.export_model(PATH.OUTPUT +'/'+ model_name)
    else:
        raise NotImplementedError
def submit(self, workflow):
    """Submit a PBS job for the workflow."""
    unix.mkdir(PATH.OUTPUT)
    unix.cd(PATH.OUTPUT)

    # save current state
    self.checkpoint()

    # construct resource list
    whole_nodes = int(PAR.NTASK / PAR.NODESIZE)
    leftover_cores = PAR.NTASK % PAR.NODESIZE
    hrs = int(PAR.WALLTIME / 60)
    mins = PAR.WALLTIME % 60

    resources = 'walltime=%02d:%02d:00'%(hrs, mins)
    if whole_nodes == 0:
        resources += ',mem=%dgb,nodes=1:ppn=%d'%(PAR.MEMORY, leftover_cores)
    elif leftover_cores == 0:
        resources += ',mem=%dgb,nodes=%d:ppn=%d'%(PAR.MEMORY, whole_nodes,
                                                  PAR.NODESIZE)
    else:
        resources += ',mem=%dgb,nodes=%d:ppn=%d+1:ppn=%d'%(PAR.MEMORY,
                                                           whole_nodes,
                                                           PAR.NODESIZE,
                                                           leftover_cores)

    # construct arguments list
    unix.run('qsub '
             + '-N %s '%PAR.TITLE
             + '-o %s '%(PATH.SUBMIT +'/'+ 'output.log')
             + '-l %s '%resources
             + '-j %s '%'oe'
             + findpath('system') +'/'+ 'wrappers/submit '
             + '-F %s '%PATH.OUTPUT)
def adjoint(self):
    """Perform an adjoint simulation. Must launch from /bin."""
    unix.cd(join(self.getpath, 'bin'))
    super(ewf2d, self).mpirun('./xewf2d', PATH.SUBMIT + '/dump')
    unix.cd(PATH.SUBMIT)
def submit(self, workflow):
    """Set up the output directory and run the workflow directly."""
    unix.mkdir(PATH.OUTPUT)
    unix.cd(PATH.OUTPUT)
    self.checkpoint()
    workflow.main()
def data_filenames(self):
    """ Returns observed-data filenames for the current source.

    :return: a list containing one list of filenames (ASCII format only)
    """
    unix.cd(self.cwd)
    unix.cd('traces/obs')

    # FIX: removed leftover debug statement (print 'made it here')
    if PAR.FORMAT in ['ASCII', 'ascii']:
        filenames = []
        for channel in PAR.CHANNELS:
            filenames += glob('*.??%s.sem.ascii' % channel)
        return [filenames]
    # NOTE(review): other formats fall through and return None -- confirm
    # callers handle this
def setup(cls):
    """Prepare the optimization directory and save the initial model."""
    unix.mkdir(cls.path)
    unix.cd(cls.path)

    optimize.check()
    optimize.setup()

    unix.cd(cls.path)
    savenpy('m_new', problem.model_init())
def export_kernels(self, path):
    """Export event kernels to path/kernels/<source_name>."""
    unix.cd(self.kernel_databases)

    # work around conflicting name conventions
    self.rename_kernels()

    kernel_files = glob('*_kernel.bin')
    destination = join(path, 'kernels', self.source_name)
    unix.mkdir(destination)
    unix.mv(kernel_files, destination)
def eval_grad(self, path='', export_traces=False):
    """Evaluate the gradient by carrying out an adjoint simulation.

    Adjoint traces must already be in place.
    """
    unix.cd(self.getpath)

    self.adjoint()
    self.export_kernels(path)
    if export_traces:
        self.export_traces(path, prefix='traces/adj')
def restart(self):
    """Discard L-BFGS history and reset counters."""
    self.iter = 1
    self.memory_used = 0

    unix.cd(self.path)
    # zero out the on-disk history matrices in place
    for fname in ("LBFGS/S", "LBFGS/Y"):
        history = np.memmap(fname, mode="r+")
        history[:] = 0.0
def restart(self):
    """Discard L-BFGS history and reset counters."""
    self.iter = 1
    self.memory_used = 0

    unix.cd(self.path)
    # zero out the on-disk history matrices in place
    S = np.memmap('LBFGS/S', mode='r+')
    S[:] = 0.
    Y = np.memmap('LBFGS/Y', mode='r+')
    Y[:] = 0.
def generate_data(self, **model_kwargs):
    """Generate 'observed' data by running the forward solver."""
    self.generate_mesh(**model_kwargs)

    unix.cd(self.getpath)
    setpar('SIMULATION_TYPE', '1')
    setpar('SAVE_FORWARD', '.true.')
    self.call('bin/xspecfem3D')

    unix.mv(self.data_wildcard, 'traces/obs')
    self.export_traces(PATH.OUTPUT, 'traces/obs')
def apply_hess(self, path=''):
    """Compute the action of the Hessian on a given model vector."""
    solver = sys.modules['seisflows_solver']

    unix.cd(solver.cwd)
    solver.import_model(path)
    unix.mkdir('traces/lcg')

    solver.forward('traces/lcg')
    self.prepare_apply_hess(solver.cwd)
    solver.adjoint()
    solver.export_kernels(path)
def data_filenames(self):
    """Return observed-data (SU) filenames for the current source."""
    unix.cd(self.cwd+'/'+'traces/obs')

    # only Seismic Unix output is supported
    if PAR.FORMAT not in ['SU', 'su']:
        raise NotImplementedError

    if not PAR.CHANNELS:
        return sorted(glob('*_d?_SU'))

    found = []
    for channel in PAR.CHANNELS:
        found += sorted(glob('*_d'+channel+'_SU'))
    return found
def combine(self, path='', parameters=[]):
    """Combine SPECFEM2D kernels from all sources."""
    unix.cd(self.getpath)

    source_dirs = self.check_source_names()
    with open('kernel_paths', 'w') as listing:
        for source in source_dirs:
            listing.write(join(path, source)+'\n')

    self.mpirun(
        'bin/xcombine_rho_vp_vs '
        + 'kernel_paths' + ' '
        + path +'/'+ 'sum')
def apply_hess(self, path=''):
    """Compute the action of the Hessian on a given model vector.

    (A gradient evaluation must have already been carried out.)

    :param path: directory to which output files are exported
    """
    unix.cd(self.cwd)
    self.import_model(path)
    unix.mkdir('traces/lcg')

    self.forward('traces/lcg')
    preprocess.prepare_apply_hess(self.cwd)
    self.adjoint()
    self.export_kernels(path)
def eval_func(self, path='', export_traces=False):
    """Evaluate the misfit function: run a forward simulation and compare
    observations with synthetics.
    """
    unix.cd(self.getpath)

    self.import_model(path)
    self.forward()
    unix.mv(self.data_wildcard, 'traces/syn')
    preprocess.prepare_eval_grad(self.getpath)

    self.export_residuals(path)
    if export_traces:
        self.export_traces(path, prefix='traces/syn')
def apply_hess(self, path=''):
    """Compute the action of the Hessian on a given model vector.

    A gradient evaluation must already have been carried out.
    """
    unix.cd(self.getpath)
    unix.mkdir('traces/lcg')

    self.import_model(path)
    self.forward()
    unix.mv(self.data_wildcard, 'traces/lcg')

    preprocess.prepare_apply_hess(self.getpath)
    self.adjoint()
    self.export_kernels(path)
def data_filenames(self):
    """ Returns observed-data filenames for the current source.

    With PAR.CHANNELS set, filenames are constructed directly; otherwise
    they are globbed from traces/obs.
    """
    if PAR.CHANNELS:
        if PAR.FORMAT in ['SU', 'su']:
            filenames = []
            for channel in PAR.CHANNELS:
                filenames += ['U%s_file_single.su' % channel]
            return filenames
        # FIX: previously fell through and returned None for unsupported
        # formats; raise explicitly, consistent with the other solver
        # interfaces in this file
        raise NotImplementedError
    else:
        unix.cd(self.cwd)
        unix.cd('traces/obs')
        if PAR.FORMAT in ['SU', 'su']:
            return glob('U?_file_single.su')
        raise NotImplementedError