class tikhonov2(loadclass('postprocess', 'regularize')):
    """ Adds second-order Tikhonov regularization to the base class

      Available options include zeroth-, first-, and second-order Tikhonov
      and total variation regularization. While the underlying theory is
      classical, these options are experimental in the sense that their
      application to unstructured numerical grids is quite new.

      SO FAR, CAN ONLY BE USED FOR 2D WAVEFORM INVERSION.
    """

    def check(self):
        """ Checks parameters and paths
        """
        super(tikhonov2, self).check()

        if 'CREEPING' not in PAR:
            setattr(PAR, 'CREEPING', False)

        if not PAR.LAMBDA:
            raise ValueError

    def nabla(self, mesh, m, g):
        if PAR.CREEPING:
            G, grid = mesh2grid(g, mesh)
            DG = nabla(G, order=2)
            dg = grid2mesh(DG, grid, mesh)
            return -dg/np.mean(m)
        else:
            M, grid = mesh2grid(m, mesh)
            DM = nabla(M, order=2)
            dm = grid2mesh(DM, grid, mesh)
            return dm/np.mean(m)
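# Illustration (not part of SeisFlows): nabla(..., order=2) above is expected
# to return second spatial derivatives of the gridded field, i.e. a discrete
# Laplacian; mesh2grid/grid2mesh handle interpolation between the unstructured
# mesh and a regular grid. A minimal numpy sketch of that second-order term,
# assuming a unit-spaced regular grid (the name `laplacian` and the edge
# padding are illustrative choices):
import numpy as np

def laplacian(F):
    """ Five-point discrete Laplacian, edges padded by replication """
    F = np.pad(F, 1, mode='edge')
    return (F[:-2, 1:-1] + F[2:, 1:-1] +
            F[1:-1, :-2] + F[1:-1, 2:] - 4.*F[1:-1, 1:-1])

# example: the curvature of a quadratic profile is roughly constant
M = np.outer(np.linspace(0., 1., 8)**2, np.ones(8))
print(laplacian(M))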
class total_variation(loadclass('postprocess', 'regularize')):
    """ Adds total variation regularization to the base class

      Available options include zeroth-, first-, and second-order Tikhonov
      and total variation regularization. While the underlying theory is
      classical, these options are experimental in the sense that their
      application to unstructured numerical grids is quite new.

      SO FAR, CAN ONLY BE USED FOR 2D WAVEFORM INVERSION.
    """

    def check(self):
        """ Checks parameters and paths
        """
        super(total_variation, self).check()

        if not PAR.LAMBDA:
            raise ValueError

        if 'EPSILON' not in PAR:
            setattr(PAR, 'EPSILON', 0.)

    def nabla(self, mesh, m, g):
        M, grid = mesh2grid(g, mesh)
        DM = tv(M, epsilon=PAR.EPSILON)
        dm = grid2mesh(DM, grid, mesh)
        return dm/np.mean(m)
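# Illustration (not part of SeisFlows): the tv() helper above comes from
# seisflows' array tools. A standalone numpy sketch of the standard smoothed
# total-variation gradient it approximates, -div(grad m / sqrt(|grad m|^2 +
# epsilon)), on a unit-spaced regular grid:
import numpy as np

def tv_gradient(M, epsilon=1.e-6):
    """ Gradient of sum(sqrt(|grad m|^2 + epsilon)) on a regular grid """
    gx, gz = np.gradient(M)
    norm = np.sqrt(gx**2 + gz**2 + epsilon)
    dgx = np.gradient(gx/norm)[0]   # d/dx of normalized x-component
    dgz = np.gradient(gz/norm)[1]   # d/dz of normalized z-component
    return -(dgx + dgz)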
class tikhonov0(loadclass('postprocess', 'regularize')):
    """ Adds zeroth-order Tikhonov (damping) regularization to the base class

      Available options include zeroth-, first-, and second-order Tikhonov
      and total variation regularization. While the underlying theory is
      classical, these options are experimental in the sense that their
      application to unstructured numerical grids is quite new.

      SO FAR, CAN ONLY BE USED FOR 2D WAVEFORM INVERSION.
    """

    def check(self):
        """ Checks parameters and paths
        """
        super(tikhonov0, self).check()

        if not PAR.LAMBDA:
            raise ValueError

    def nabla(self, mesh, m, g):
        return m/np.mean(m)
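# Illustration (not part of SeisFlows): the three classes above are selected
# through the usual parameters file. A minimal sketch of the relevant entries,
# with key names taken from the check() methods and values purely illustrative:
#
#   POSTPROCESS = 'total_variation'   # or 'tikhonov0', 'tikhonov2'
#   LAMBDA = 1.e-3                    # regularization weight; must be nonzero
#   EPSILON = 1.e-6                   # total variation smoothing constant
#   CREEPING = False                  # tikhonov2 gradient variant
#   FIXRADIUS = 7.5                   # near-field correction radius, in GLL points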
class tiger_lg(loadclass('system', 'slurm_lg')):
    """ Specially designed system interface for tiger.princeton.edu

      By hiding environment details behind a python interface layer, these
      classes provide a consistent command set across different computing
      environments.

      For more information, see
      http://seisflows.readthedocs.org/en/latest/manual/manual.html#system-interfaces
    """

    def check(self):
        """ Checks parameters and paths
        """
        if 'TITLE' not in PAR:
            setattr(PAR, 'TITLE', unix.basename(abspath('.')))

        if 'SUBTITLE' not in PAR:
            setattr(PAR, 'SUBTITLE', unix.basename(abspath('..')))

        if 'GLOBAL' not in PATH:
            setattr(PATH, 'GLOBAL',
                    join('/scratch/gpfs', unix.whoami(), PAR.SUBTITLE, PAR.TITLE))

        if 'LOCAL' not in PATH:
            setattr(PATH, 'LOCAL', '')

        # the parent class slurm_lg requires NODESIZE; on tiger there are
        # 16 processors per node
        if 'NODESIZE' not in PAR:
            setattr(PAR, 'NODESIZE', 16)

        super(tiger_lg, self).check()

    def submit(self, *args, **kwargs):
        """ Submits job
        """
        unix.ln(PATH.GLOBAL, PATH.SUBMIT + '/' + 'scratch')
        super(tiger_lg, self).submit(*args, **kwargs)
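# Illustration (not part of SeisFlows): what the check() defaults above compose
# for the global scratch path on tiger; getpass.getuser() stands in for
# unix.whoami(), and the printed path is hypothetical:
import getpass
from os.path import abspath, basename, join

title = basename(abspath('.'))       # PAR.TITLE default
subtitle = basename(abspath('..'))   # PAR.SUBTITLE default
print(join('/scratch/gpfs', getpass.getuser(), subtitle, title))
# e.g. /scratch/gpfs/jsmith/myproject/run01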
def test_load_extension_module(self):
    # Get a class from one of the seisflows modules
    cls = tools.loadclass('system', 'tiger_sm')
    # Check that we can instantiate the class.
    self.assertIsInstance(cls(), cls)
def test_load_base_module(self):
    # Get a class from one of the seisflows modules
    cls = tools.loadclass('system', 'serial')
    # Check that we can instantiate the class.
    self.assertIsInstance(cls(), cls)
def test_noargs(self):
    self.assertEqual(tools.Null, tools.loadclass())
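# Illustration (not part of SeisFlows): a minimal sketch of the dynamic-import
# pattern the tests above exercise -- loadclass imports a module by name and
# returns the class of the same name, falling back to a Null placeholder when
# called with no arguments. This is a sketch, not the actual implementation:
import importlib

class Null(object):
    """ Placeholder class returned when no module is requested """
    def __init__(self, *args, **kwargs):
        pass

def loadclass_sketch(*names):
    if not names:
        return Null
    # e.g. ('system', 'serial') -> class 'serial' in seisflows.system.serial
    module = importlib.import_module('seisflows.' + '.'.join(names))
    return getattr(module, names[-1])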
class specfem2d(loadclass('solver', 'base')):
    """ Python interface for SPECFEM2D

      See base class for method descriptions
    """
    if PAR.MATERIALS == 'LegacyAcoustic':
        parameters = []
        parameters += ['vs']

    def check(self):
        """ Checks parameters and paths
        """
        super(specfem2d, self).check()

        # check time stepping parameters
        if 'NT' not in PAR:
            raise Exception
        if 'DT' not in PAR:
            raise Exception
        if 'F0' not in PAR:
            raise Exception

    def check_solver_parameter_files(self):
        """ Checks solver parameters
        """
        nt = getpar('nt', cast=int)
        dt = getpar('deltat', cast=float)
        f0 = getpar('f0', file='DATA/SOURCE', cast=float)

        if nt != PAR.NT:
            if self.getnode == 0:
                print "WARNING: nt != PAR.NT"
            setpar('nt', PAR.NT)

        if dt != PAR.DT:
            if self.getnode == 0:
                print "WARNING: dt != PAR.DT"
            setpar('deltat', PAR.DT)

        if f0 != PAR.F0:
            if self.getnode == 0:
                print "WARNING: f0 != PAR.F0"
            setpar('f0', PAR.F0, file='DATA/SOURCE')

        if self.mesh.nproc != PAR.NPROC:
            if self.getnode == 0:
                print 'WARNING: mesh.nproc != PAR.NPROC'

        if 'MULTIPLES' in PAR:
            if PAR.MULTIPLES:
                setpar('absorbtop', '.false.')
            else:
                setpar('absorbtop', '.true.')

    def generate_data(self, **model_kwargs):
        """ Generates data
        """
        self.generate_mesh(**model_kwargs)

        unix.cd(self.getpath)
        setpar('SIMULATION_TYPE', '1')
        setpar('SAVE_FORWARD', '.true.')
        self.mpirun('bin/xmeshfem2D')
        self.mpirun('bin/xspecfem2D', output='log.solver')

        unix.mv(self.data_wildcard, 'traces/obs')
        self.export_traces(PATH.OUTPUT, 'traces/obs')

    def generate_mesh(self, model_path=None, model_name=None, model_type='gll'):
        """ Performs meshing and database generation
        """
        assert model_name
        assert model_type

        self.initialize_solver_directories()
        unix.cd(self.getpath)

        assert exists(model_path)
        self.check_mesh_properties(model_path)

        src = glob(join(model_path, '*'))
        dst = join(self.getpath, 'DATA')
        unix.cp(src, dst)

        self.export_model(PATH.OUTPUT + '/' + model_name)

    ### low-level solver interface

    def forward(self):
        """ Calls SPECFEM2D forward solver
        """
        setpar('SIMULATION_TYPE', '1')
        setpar('SAVE_FORWARD', '.true.')
        self.mpirun('bin/xmeshfem2D')
        self.mpirun('bin/xspecfem2D')

    def adjoint(self):
        """ Calls SPECFEM2D adjoint solver
        """
        setpar('SIMULATION_TYPE', '3')
        setpar('SAVE_FORWARD', '.false.')
        unix.rm('SEM')
        unix.ln('traces/adj', 'SEM')
        self.mpirun('bin/xmeshfem2D')
        self.mpirun('bin/xspecfem2D')

    ### postprocessing utilities

    def smooth(self, path='', parameters='dummy', span=0.):
        """ Smooths SPECFEM2D kernels by convolving them with a Gaussian
        """
        from seisflows.tools.array import meshsmooth, stack

        #assert parameters == self.parameters
        # implementing nproc > 1 would be straightforward, but a bit tedious
        #assert self.mesh.nproc == 1

        kernels = self.load(path, suffix='_kernel')
        if not span:
            return kernels

        # set up grid
        _, x = loadbypar(PATH.MODEL_INIT, ['x'], 0)
        _, z = loadbypar(PATH.MODEL_INIT, ['z'], 0)
        mesh = stack(x[0], z[0])

        for key in self.parameters:
            kernels[key] = [meshsmooth(kernels[key][0], mesh, span)]

        unix.rm(path + '_nosmooth')
        unix.mv(path, path + '_nosmooth')
        self.save(path, kernels, suffix='_kernel')

    ### file transfer utilities

    def import_model(self, path):
        src = glob(path + '/' + 'model/*')
        dst = join(self.getpath, 'DATA/')
        unix.cp(src, dst)

    def export_model(self, path):
        if self.getnode == 0:
            unix.mkdir(path)
            src = glob(join(self.getpath, 'DATA/*.bin'))
            dst = path
            unix.cp(src, dst)

    ### input file writers

    def write_parameters(self):
        unix.cd(self.getpath)
        solvertools.write_parameters(vars(PAR))

    def write_receivers(self):
        unix.cd(self.getpath)
        key = 'use_existing_STATIONS'
        val = '.true.'
        setpar(key, val)
        _, h = preprocess.load('traces/obs')
        solvertools.write_receivers(h.nr, h.rx, h.rz)

    def write_sources(self):
        unix.cd(self.getpath)
        _, h = preprocess.load(dir='traces/obs')
        solvertools.write_sources(vars(PAR), h)

    ### utility functions

    def mpirun(self, script, output='/dev/null'):
        """ Wrapper for mpirun
        """
        with open(output, 'w') as f:
            subprocess.call(script, shell=True, stdout=f)

    ### miscellaneous

    @property
    def data_wildcard(self):
        return glob('OUTPUT_FILES/U?_file_single.su')
        #return glob('OUTPUT_FILES/*semd')

    @property
    def model_databases(self):
        return join(self.getpath, 'DATA')

    @property
    def kernel_databases(self):
        return join(self.getpath, 'OUTPUT_FILES')

    @property
    def source_prefix(self):
        return 'SOURCE'

    def combine(self, path='', parameters=[]):
        """ Sums individual source contributions. Wrapper over xcombine_sem
            utility.
        """
        unix.cd(self.getpath)

        names = self.check_source_names()
        with open('kernel_paths', 'w') as f:
            f.writelines([join(path, dir) + '\n' for dir in names])

        unix.mkdir(path + '/' + 'sum')
        for name in parameters:
            self.mpirun(
                PATH.SPECFEM_BIN + '/' + 'xcombine_sem '
                + name + '_kernel' + ' '
                + 'kernel_paths' + ' '
                + path + '/' + 'sum')
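# Illustration (not part of SeisFlows): check_solver_parameter_files above
# keeps the SPECFEM2D Par_file and SOURCE file in sync with PAR. The real
# getpar/setpar helpers live in seisflows' seistools; this standalone sketch
# shows the kind of 'name = value' substitution they perform, minus the edge
# cases the real implementations handle:
def getpar_sketch(key, file='DATA/Par_file', cast=str):
    """ Reads a 'key = value' entry from a SPECFEM-style parameter file """
    with open(file) as f:
        for line in f:
            if line.strip().startswith(key):
                return cast(line.split('=')[1].split('#')[0].strip())
    raise KeyError(key)

def setpar_sketch(key, val, file='DATA/Par_file'):
    """ Rewrites a 'key = value' entry in place """
    with open(file) as f:
        lines = f.readlines()
    with open(file, 'w') as f:
        for line in lines:
            if line.strip().startswith(key):
                line = '%s = %s\n' % (key, val)
            f.write(line)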
class slurm_lg(loadclass('system', 'base')):
    """ An interface through which to submit workflows, run tasks in serial
      or parallel, and perform other system functions.

      By hiding environment details behind a python interface layer, these
      classes provide a consistent command set across different computing
      environments.

      For more information, see
      http://seisflows.readthedocs.org/en/latest/manual/manual.html#system-interfaces
    """

    def check(self):
        """ Checks parameters and paths
        """
        if 'TITLE' not in PAR:
            setattr(PAR, 'TITLE', unix.basename(abspath('..')))

        if 'SUBTITLE' not in PAR:
            setattr(PAR, 'SUBTITLE', unix.basename(abspath('.')))

        # check parameters
        if 'WALLTIME' not in PAR:
            setattr(PAR, 'WALLTIME', 30.)

        if 'STEPTIME' not in PAR:
            setattr(PAR, 'STEPTIME', 30.)

        if 'SLEEPTIME' not in PAR:
            setattr(PAR, 'SLEEPTIME', 1.)

        if 'VERBOSE' not in PAR:
            setattr(PAR, 'VERBOSE', 1)

        if 'NTASK' not in PAR:
            raise ParameterError(PAR, 'NTASK')

        if 'NPROC' not in PAR:
            raise ParameterError(PAR, 'NPROC')

        if 'NODESIZE' not in PAR:
            raise ParameterError(PAR, 'NODESIZE')

        # check paths
        if 'GLOBAL' not in PATH:
            setattr(PATH, 'GLOBAL', join(abspath('.'), 'scratch'))

        if 'LOCAL' not in PATH:
            setattr(PATH, 'LOCAL', None)

        if 'SUBMIT' not in PATH:
            setattr(PATH, 'SUBMIT', unix.pwd())

        if 'OUTPUT' not in PATH:
            setattr(PATH, 'OUTPUT', join(PATH.SUBMIT, 'output'))

        if 'SYSTEM' not in PATH:
            setattr(PATH, 'SYSTEM', join(PATH.GLOBAL, 'system'))

    def submit(self, workflow):
        """ Submits workflow
        """
        unix.mkdir(PATH.OUTPUT)
        unix.cd(PATH.OUTPUT)
        unix.mkdir(PATH.SUBMIT + '/' + 'output.slurm')

        self.checkpoint()

        # submit workflow
        unix.run('sbatch '
                 + '--job-name=%s ' % PAR.SUBTITLE
                 + '--output=%s ' % (PATH.SUBMIT + '/' + 'output.log')
                 + '--ntasks-per-node=%d ' % PAR.NODESIZE
                 + '--nodes=%d ' % 1
                 + '--time=%d ' % PAR.WALLTIME
                 + findpath('system') + '/' + 'slurm/wrapper_sbatch '
                 + PATH.OUTPUT)

    def run(self, classname, funcname, hosts='all', **kwargs):
        """ Runs tasks in serial or parallel on specified hosts
        """
        self.checkpoint()
        self.save_kwargs(classname, funcname, kwargs)

        jobs = self._launch(classname, funcname, hosts)
        while True:
            time.sleep(60.*PAR.SLEEPTIME)
            self._timestamp()
            isdone, jobs = self._status(classname, funcname, jobs)
            if isdone:
                return

    def mpiargs(self):
        return 'srun '

    def getnode(self):
        """ Gets id of currently running task
        """
        try:
            return int(os.getenv('SEISFLOWS_TASK_ID'))
        except:
            try:
                return int(os.getenv('SLURM_ARRAY_TASK_ID'))
            except:
                raise Exception("TASK_ID environment variable not defined.")

    ### private methods

    def _launch(self, classname, funcname, hosts='all'):
        unix.mkdir(PATH.SYSTEM)

        # prepare sbatch arguments
        if hosts == 'all':
            args = ('--array=%d-%d ' % (0, PAR.NTASK - 1)
                    + '--output=%s ' % (PATH.SUBMIT + '/' + 'output.slurm/' + '%A_%a'))
        elif hosts == 'head':
            args = ('--array=%d-%d ' % (0, 0)
                    + '--output=%s ' % (PATH.SUBMIT + '/' + 'output.slurm/' + '%j'))
            #+ ('--export=SEISFLOWS_TASK_ID=%s ' % 0)

        # submit job
        with open(PATH.SYSTEM + '/' + 'job_id', 'w') as f:
            subprocess.call(
                'sbatch '
                + '--job-name=%s ' % PAR.TITLE
                + '--nodes=%d ' % math.ceil(PAR.NPROC/float(PAR.NODESIZE))
                + '--ntasks-per-node=%d ' % PAR.NODESIZE
                + '--time=%d ' % PAR.STEPTIME
                + args
                + findpath('system') + '/' + 'slurm/wrapper_srun '
                + PATH.OUTPUT + ' '
                + classname + ' '
                + funcname + ' ',
                shell=True, stdout=f)

        # retrieve job ids
        with open(PATH.SYSTEM + '/' + 'job_id', 'r') as f:
            line = f.readline()
            job = line.split()[-1].strip()
        if hosts == 'all' and PAR.NTASK > 1:
            nn = range(PAR.NTASK)
            return [job + '_' + str(ii) for ii in nn]
        else:
            return [job]

    def _status(self, classname, funcname, jobs):
        """ Determines completion status of one or more jobs
        """
        states = []
        for job in jobs:
            state = self._query(job)
            if state in ['COMPLETED']:
                states += [1]
            else:
                states += [0]
            if state in ['FAILED', 'NODE_FAIL', 'TIMEOUT']:
                print msg.TaskError_SLURM % (classname, funcname, job)
                sys.exit(-1)
        isdone = all(states)
        return isdone, jobs

    def _query(self, jobid):
        """ Queries job state from SLURM database
        """
        with open(PATH.SYSTEM + '/' + 'job_status', 'w') as f:
            subprocess.call('sacct -n -o state -j ' + jobid,
                            shell=True, stdout=f)
        with open(PATH.SYSTEM + '/' + 'job_status', 'r') as f:
            line = f.readline()
            state = line.strip()
        return state

    ### utility functions

    def _timestamp(self):
        with open(PATH.SYSTEM + '/' + 'timestamps', 'a') as f:
            line = time.strftime('%H:%M:%S') + '\n'
            f.write(line)

    def save_kwargs(self, classname, funcname, kwargs):
        kwargspath = join(PATH.OUTPUT, 'SeisflowsObjects', classname + '_kwargs')
        kwargsfile = join(kwargspath, funcname + '.p')
        unix.mkdir(kwargspath)
        saveobj(kwargsfile, kwargs)
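# Illustration (not part of SeisFlows): run() above implements a simple
# submit-and-poll pattern over sbatch job arrays and sacct. The control flow,
# condensed into a self-contained function with the SLURM query stubbed out:
import time

def poll_until_done(jobs, query, sleeptime=60.):
    """ Blocks until all jobs report COMPLETED; aborts on failure states """
    while True:
        time.sleep(sleeptime)
        states = [query(job) for job in jobs]
        if any(s in ('FAILED', 'NODE_FAIL', 'TIMEOUT') for s in states):
            raise RuntimeError('task failed')
        if all(s == 'COMPLETED' for s in states):
            return

# usage with a stubbed query:
# poll_until_done(['1234_0', '1234_1'], lambda job: 'COMPLETED', sleeptime=0.)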
from seisflows.tools.config import loadclass, loadvars, ConfigObj, ParameterObj, Null

OBJ = ConfigObj('SeisflowsObjects')
PAR = ParameterObj('SeisflowsParameters')
PATH = ParameterObj('SeisflowsPaths')

# run test
if __name__ == '__main__':
    PAR.update(loadvars('parameters', '.'))
    PATH.update(loadvars('paths', '.'))

    register = OBJ.register

    system = loadclass('system', PAR.SYSTEM)()
    register('system', system)

    preprocess = Null()
    register('preprocess', preprocess)

    solver = Null()
    register('solver', solver)

    postprocess = Null()
    register('postprocess', postprocess)

    optimize = Null()
    register('optimize', optimize)

    workflow = loadclass('workflow', 'test_system')()
class specfem3d_globe(loadclass('solver', 'base')):
    """ Python interface for SPECFEM3D_GLOBE

      See base class for method descriptions
    """
    if PAR.MATERIALS in ['Isotropic']:
        parameters = []
        parameters += ['vp']
        parameters += ['vs']
    else:
        parameters = []
        parameters += ['vpv']
        parameters += ['vph']
        parameters += ['vsv']
        parameters += ['vsh']
        parameters += ['eta']

    def check(self):
        """ Checks parameters and paths
        """
        super(specfem3d_globe, self).check()

    def generate_data(self, **model_kwargs):
        """ Generates data
        """
        self.generate_mesh(**model_kwargs)

        unix.cd(self.getpath)
        setpar('SIMULATION_TYPE', '1')
        setpar('SAVE_FORWARD', '.true.')
        self.mpirun('bin/xspecfem3D')

        unix.mv(self.data_wildcard, 'traces/obs')
        self.export_traces(PATH.OUTPUT, 'traces/obs')

    def generate_mesh(self, model_path=None, model_name=None, model_type='gll'):
        """ Performs meshing and database generation
        """
        assert model_name
        assert model_type

        self.initialize_solver_directories()
        unix.cd(self.getpath)

        if model_type == 'gll':
            assert exists(model_path)
            self.check_mesh_properties(model_path)

            unix.cp(glob(model_path + '/' + '*'), self.model_databases)

            self.mpirun('bin/xmeshfem3D')
            self.export_model(PATH.OUTPUT + '/' + model_name)
        else:
            raise NotImplementedError

    ### model input/output

    def load(self, path, prefix='reg1_', suffix='', verbose=False):
        """ Reads SPECFEM model or kernel

          Models are stored in Fortran binary format and separated into
          multiple files according to material parameter and processor rank.
        """
        model = Model(self.parameters)
        minmax = Minmax(self.parameters)

        for iproc in range(self.mesh.nproc):
            # read database files
            keys, vals = loadbypar(path, self.parameters, iproc, prefix, suffix)
            for key, val in zip(keys, vals):
                model[key] += [val]

            minmax.update(keys, vals)

        if verbose:
            minmax.write(path, logpath=PATH.SUBMIT)

        return model

    def save(self, path, model, prefix='reg1_', suffix=''):
        """ Writes SPECFEM3D_GLOBE transversely isotropic model
        """
        unix.mkdir(path)

        for iproc in range(self.mesh.nproc):
            for key in ['vpv', 'vph', 'vsv', 'vsh', 'eta']:
                if key in self.parameters:
                    savebin(model[key][iproc], path, iproc, prefix + key + suffix)
                elif 'kernel' in suffix:
                    pass
                else:
                    src = PATH.OUTPUT + '/' + 'model_init'
                    dst = path
                    copybin(src, dst, iproc, prefix + key + suffix)

            if 'rho' in self.parameters:
                savebin(model['rho'][iproc], path, iproc, prefix + 'rho' + suffix)
            elif 'kernel' in suffix:
                pass
            else:
                src = PATH.OUTPUT + '/' + 'model_init'
                dst = path
                copybin(src, dst, iproc, prefix + 'rho' + suffix)

    ### low-level solver interface

    def forward(self):
        """ Calls SPECFEM3D_GLOBE forward solver
        """
        solvertools.setpar('SIMULATION_TYPE', '1')
        solvertools.setpar('SAVE_FORWARD', '.true.')
        self.mpirun('bin/xspecfem3D')
        unix.mv(self.data_wildcard, 'traces/syn')

    def adjoint(self):
        """ Calls SPECFEM3D_GLOBE adjoint solver
        """
        solvertools.setpar('SIMULATION_TYPE', '3')
        solvertools.setpar('SAVE_FORWARD', '.false.')
        unix.rm('SEM')
        unix.ln('traces/adj', 'SEM')
        self.mpirun('bin/xspecfem3D')

    ### utility functions

    @property
    def data_wildcard(self):
        return glob('OUTPUT_FILES/*.sem.ascii')

    @property
    def kernel_databases(self):
        return join(self.getpath, 'OUTPUT_FILES/DATABASES_MPI')

    @property
    def model_databases(self):
        return join(self.getpath, 'OUTPUT_FILES/DATABASES_MPI')

    @property
    def source_prefix(self):
        return 'CMTSOLUTION'
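# Illustration (not part of SeisFlows): load/save above read and write the
# Fortran unformatted binaries SPECFEM produces, one file per material
# parameter per processor rank. A minimal numpy sketch of reading a single
# record, assuming 4-byte record markers around float32 data (the actual
# loadbypar/savebin/copybin helpers also handle file naming and writing):
import numpy as np

def read_fortran_record_sketch(filename):
    """ Reads one unformatted Fortran record of float32 values """
    with open(filename, 'rb') as f:
        nbytes = np.fromfile(f, dtype='int32', count=1)[0]  # leading marker
        return np.fromfile(f, dtype='float32', count=nbytes//4)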
class slurm_sm(loadclass('system', 'base')):
    """ An interface through which to submit workflows, run tasks in serial
      or parallel, and perform other system functions.

      By hiding environment details behind a python interface layer, these
      classes provide a consistent command set across different computing
      environments.

      For more information, see
      http://seisflows.readthedocs.org/en/latest/manual/manual.html#system-interfaces
    """

    def check(self):
        """ Checks parameters and paths
        """
        if 'TITLE' not in PAR:
            setattr(PAR, 'TITLE', unix.basename(abspath('..')))

        if 'SUBTITLE' not in PAR:
            setattr(PAR, 'SUBTITLE', unix.basename(abspath('.')))

        # check parameters
        if 'WALLTIME' not in PAR:
            setattr(PAR, 'WALLTIME', 30.)

        if 'VERBOSE' not in PAR:
            setattr(PAR, 'VERBOSE', 1)

        if 'NPROC' not in PAR:
            raise ParameterError(PAR, 'NPROC')

        if 'NTASK' not in PAR:
            raise ParameterError(PAR, 'NTASK')

        # check paths
        if 'GLOBAL' not in PATH:
            setattr(PATH, 'GLOBAL', join(abspath('.'), 'scratch'))

        if 'LOCAL' not in PATH:
            setattr(PATH, 'LOCAL', None)

        if 'SUBMIT' not in PATH:
            setattr(PATH, 'SUBMIT', unix.pwd())

        if 'OUTPUT' not in PATH:
            setattr(PATH, 'OUTPUT', join(PATH.SUBMIT, 'output'))

    def submit(self, workflow):
        """ Submits workflow
        """
        unix.mkdir(PATH.OUTPUT)
        unix.cd(PATH.OUTPUT)
        self.checkpoint()

        # submit workflow
        unix.run('sbatch '
                 + '--job-name=%s ' % PAR.SUBTITLE
                 + '--output=%s ' % (PATH.SUBMIT + '/' + 'output.log')
                 + '--cpus-per-task=%d ' % PAR.NPROC
                 + '--ntasks=%d ' % PAR.NTASK
                 + '--time=%d ' % PAR.WALLTIME
                 + findpath('system') + '/' + 'slurm/wrapper_sbatch '
                 + PATH.OUTPUT)

    def run(self, classname, funcname, hosts='all', **kwargs):
        """ Runs tasks in serial or parallel on specified hosts
        """
        self.checkpoint()
        self.save_kwargs(classname, funcname, kwargs)

        if hosts == 'all':
            # run on all available nodes
            unix.run('srun '
                     + '--wait=0 '
                     + join(findpath('system'), 'slurm/wrapper_srun ')
                     + PATH.OUTPUT + ' '
                     + classname + ' '
                     + funcname)
        elif hosts == 'head':
            # run on head node
            unix.run('srun '
                     + '--wait=0 '
                     + join(findpath('system'), 'slurm/wrapper_srun_head ')
                     + PATH.OUTPUT + ' '
                     + classname + ' '
                     + funcname)

    def getnode(self):
        """ Gets id of currently running task
        """
        gid = os.getenv('SLURM_GTIDS').split(',')
        lid = int(os.getenv('SLURM_LOCALID'))
        return int(gid[lid])

    def mpiargs(self):
        return 'mpirun -np %d ' % PAR.NPROC

    def save_kwargs(self, classname, funcname, kwargs):
        kwargspath = join(PATH.OUTPUT, 'SeisflowsObjects', classname + '_kwargs')
        kwargsfile = join(kwargspath, funcname + '.p')
        unix.mkdir(kwargspath)
        saveobj(kwargsfile, kwargs)
class regularize(loadclass('postprocess', 'base')):
    """ Adds regularization options to base class

      Available options include zeroth-, first-, and second-order Tikhonov
      and total variation regularization. While the underlying theory is
      classical, these options are experimental in the sense that their
      application to unstructured numerical grids is quite new.

      SO FAR, CAN ONLY BE USED FOR 2D WAVEFORM INVERSION.

      Prior to regularizing the gradient, near-field artifacts must be
      corrected. The "FIXRADIUS" parameter specifies the radius, in number
      of GLL points, within which the correction is applied.
    """

    def check(self):
        """ Checks parameters and paths
        """
        super(regularize, self).check()

        if 'FIXRADIUS' not in PAR:
            setattr(PAR, 'FIXRADIUS', 7.5)

        if 'LAMBDA' not in PAR:
            setattr(PAR, 'LAMBDA', 0.)

    def write_gradient(self, path):
        super(regularize, self).write_gradient(path)

        g = self.regularize(path)
        self.save(path, g, backup='noregularize')

    def process_kernels(self, path, parameters):
        """ Processes kernels in accordance with parameter settings
        """
        fullpath = path + '/' + 'kernels'
        assert exists(path)

        if exists(fullpath + '/' + 'sum'):
            unix.mv(fullpath + '/' + 'sum', fullpath + '/' + 'sum_nofix')

        # mask sources and receivers
        system.run('postprocess', 'fix_near_field', hosts='all',
                   path=fullpath)

        system.run('solver', 'combine', hosts='head',
                   path=fullpath, parameters=parameters)

    def fix_near_field(self, path=''):
        """ Masks kernel artifacts near sources and receivers
        """
        import preprocess
        preprocess.setup()

        name = solver.check_source_names()[solver.getnode]
        fullpath = path + '/' + name
        g = solver.load(fullpath, suffix='_kernel')
        if not PAR.FIXRADIUS:
            return

        mesh = self.getmesh()
        x, z = self.getxz()

        lx = x.max() - x.min()
        lz = z.max() - z.min()
        nn = x.size
        nx = np.around(np.sqrt(nn*lx/lz))
        nz = np.around(np.sqrt(nn*lz/lx))
        dx = lx/nx
        dz = lz/nz

        sigma = 0.5*PAR.FIXRADIUS*(dx + dz)
        _, h = preprocess.load(solver.getpath + '/' + 'traces/obs')

        # mask sources
        mask = np.exp(-0.5*((x - h.sx[0])**2. + (z - h.sy[0])**2.)/sigma**2.)
        for key in solver.parameters:
            weight = np.sum(mask*g[key][0])/np.sum(mask)
            g[key][0] *= 1. - mask
            g[key][0] += mask*weight

        # mask receivers
        for ir in range(h.nr):
            mask = np.exp(-0.5*((x - h.rx[ir])**2. + (z - h.ry[ir])**2.)/sigma**2.)
            for key in solver.parameters:
                weight = np.sum(mask*g[key][0])/np.sum(mask)
                g[key][0] *= 1. - mask
                g[key][0] += mask*weight

        solver.save(fullpath, g, suffix='_kernel')

    def regularize(self, path):
        assert exists(path)

        g = solver.load(path + '/' + 'gradient', suffix='_kernel')
        if not PAR.LAMBDA:
            return solver.merge(g)

        m = solver.load(path + '/' + 'model')
        mesh = self.getmesh()

        for key in solver.parameters:
            for iproc in range(PAR.NPROC):
                g[key][iproc] += PAR.LAMBDA *\
                    self.nabla(mesh, m[key][iproc], g[key][iproc])

        return solver.merge(g)

    def nabla(self, mesh, m, g):
        raise NotImplementedError("Must be implemented by subclass.")

    def getmesh(self):
        model_path = PATH.OUTPUT + '/' + 'model_init'
        try:
            m = solver.load(model_path)
            x = m['x'][0]
            z = m['z'][0]
            mesh = stack(x, z)
        except:
            from seisflows.seistools.io import loadbin
            x = loadbin(model_path, 0, 'x')
            z = loadbin(model_path, 0, 'z')
            mesh = stack(x, z)
        return mesh

    def getxz(self):
        model_path = PATH.OUTPUT + '/' + 'model_init'
        try:
            m = solver.load(model_path)
            x = m['x'][0]
            z = m['z'][0]
        except:
            from seisflows.seistools.io import loadbin
            x = loadbin(model_path, 0, 'x')
            z = loadbin(model_path, 0, 'z')
        return x, z
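# Illustration (not part of SeisFlows): fix_near_field above replaces kernel
# values near each source and receiver with a locally averaged value, using a
# Gaussian taper whose width is set by FIXRADIUS. The masking step, extracted
# into a standalone numpy function:
import numpy as np

def gaussian_mask_sketch(x, z, x0, z0, sigma, kernel):
    """ Replaces kernel values near (x0, z0) by a weighted local average """
    mask = np.exp(-0.5*((x - x0)**2 + (z - z0)**2)/sigma**2)
    weight = np.sum(mask*kernel)/np.sum(mask)
    return kernel*(1. - mask) + mask*weight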
from seisflows.tools.config import ConfigObj, ParameterObj, loadclass

OBJ = ConfigObj('SeisflowsObjects')
PAR = ParameterObj('SeisflowsParameters')
PATH = ParameterObj('SeisflowsPaths')

# ensure number of processors per source is defined
if 'NPROC' not in PAR:
    raise Exception

# there are 16 processors per node on tiger
if 'NPROC_PER_NODE' in PAR:
    assert PAR.NPROC_PER_NODE == 16
else:
    PAR.NPROC_PER_NODE = 16

# if nproc per source exceeds nproc per node, use tiger_lg;
# otherwise, use tiger_sm
if PAR.NPROC > PAR.NPROC_PER_NODE:
    tiger = loadclass('system', 'tiger_lg')
else:
    tiger = loadclass('system', 'tiger_sm')
class specfem3d(loadclass('solver', 'base')):
    """ Python interface for SPECFEM3D

      See base class for method descriptions
    """

    def check(self):
        """ Checks parameters and paths
        """
        super(specfem3d, self).check()

        # check time stepping parameters
        if 'NT' not in PAR:
            raise Exception
        if 'DT' not in PAR:
            raise Exception
        if 'F0' not in PAR:
            raise Exception

    def generate_data(self, **model_kwargs):
        """ Generates data
        """
        self.generate_mesh(**model_kwargs)

        unix.cd(self.getpath)
        setpar('SIMULATION_TYPE', '1')
        setpar('SAVE_FORWARD', '.true.')
        self.mpirun('bin/xspecfem3D')

        unix.mv(self.data_wildcard, 'traces/obs')
        self.export_traces(PATH.OUTPUT, 'traces/obs')

    def generate_mesh(self, model_path=None, model_name=None, model_type='gll'):
        """ Performs meshing and database generation
        """
        assert model_name
        assert model_type

        self.initialize_solver_directories()
        unix.cd(self.getpath)

        if model_type in ['gll']:
            par = getpar('MODEL').strip()
            if par != 'gll':
                if self.getnode == 0:
                    print 'WARNING: Unexpected Par_file setting:'
                    print 'MODEL =', par

            assert exists(model_path)
            self.check_mesh_properties(model_path)

            src = glob(model_path + '/' + '*')
            dst = self.model_databases
            unix.cp(src, dst)

            self.mpirun('bin/xmeshfem3D')
            self.mpirun('bin/xgenerate_databases')

            self.export_model(PATH.OUTPUT + '/' + model_name)
        else:
            raise NotImplementedError

    ### low-level solver interface

    def forward(self):
        """ Calls SPECFEM3D forward solver
        """
        setpar('SIMULATION_TYPE', '1')
        setpar('SAVE_FORWARD', '.true.')
        self.mpirun('bin/xgenerate_databases')
        self.mpirun('bin/xspecfem3D')

    def adjoint(self):
        """ Calls SPECFEM3D adjoint solver
        """
        setpar('SIMULATION_TYPE', '3')
        setpar('SAVE_FORWARD', '.false.')
        unix.rm('SEM')
        unix.ln('traces/adj', 'SEM')
        self.mpirun('bin/xspecfem3D')

    ### input file writers

    def check_solver_parameter_files(self):
        """ Checks solver parameters
        """
        nt = getpar('NSTEP', cast=int)
        dt = getpar('DT', cast=float)

        if nt != PAR.NT:
            if self.getnode == 0:
                print "WARNING: nt != PAR.NT"
            setpar('NSTEP', PAR.NT)

        if dt != PAR.DT:
            if self.getnode == 0:
                print "WARNING: dt != PAR.DT"
            setpar('DT', PAR.DT)

        if self.mesh.nproc != PAR.NPROC:
            if self.getnode == 0:
                print 'WARNING: mesh.nproc != PAR.NPROC'

        if 'MULTIPLES' in PAR:
            raise NotImplementedError

    def write_parameters(self):
        unix.cd(self.getpath)
        solvertools.write_parameters(vars(PAR))

    def write_receivers(self):
        unix.cd(self.getpath)
        key = 'use_existing_STATIONS'
        val = '.true.'
        setpar(key, val)
        _, h = preprocess.load('traces/obs')
        solvertools.write_receivers(h.nr, h.rx, h.rz)

    def write_sources(self):
        unix.cd(self.getpath)
        _, h = preprocess.load(dir='traces/obs')
        solvertools.write_sources(vars(PAR), h)

    ### miscellaneous

    @property
    def data_wildcard(self):
        return glob('OUTPUT_FILES/*SU')

    @property
    def kernel_databases(self):
        return join(self.getpath, 'OUTPUT_FILES/DATABASES_MPI')

    @property
    def model_databases(self):
        return join(self.getpath, 'OUTPUT_FILES/DATABASES_MPI')

    @property
    def source_prefix(self):
        return 'FORCESOLUTION'
from seisflows.tools.config import loadclass, loadvars, ConfigObj, ParameterObj, Null

OBJ = ConfigObj('SeisflowsObjects')
PAR = ParameterObj('SeisflowsParameters')
PATH = ParameterObj('SeisflowsPaths')

# run test
if __name__ == '__main__':
    PAR.update(loadvars('parameters', '.'))
    PATH.update(loadvars('paths', '.'))

    register = OBJ.register

    system = loadclass('system', PAR.SYSTEM)()
    register('system', system)

    preprocess = Null()
    register('preprocess', preprocess)

    solver = Null()
    register('solver', solver)

    postprocess = Null()
    register('postprocess', postprocess)

    optimize = loadclass('optimize', PAR.OPTIMIZE)()
    register('optimize', optimize)

    workflow = loadclass('workflow', 'test_optimize')()
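# Illustration (not part of SeisFlows): both test drivers read PAR and PATH
# from plain python files in the working directory via loadvars. A minimal
# sketch of the parameters file this optimization test expects -- key names
# are taken from the script above, values are illustrative:
#
#   SYSTEM = 'serial'
#   OPTIMIZE = 'LBFGS'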
import numpy as np

from seisflows.tools import unix
from seisflows.tools.array import loadnpy, savenpy
from seisflows.tools.code import exists
from seisflows.tools.config import SeisflowsParameters, SeisflowsPaths, \
    loadclass, ParameterError

PAR = SeisflowsParameters()
PATH = SeisflowsPaths()

import solver
import postprocess

migration = loadclass('workflow', 'migration')()


class test_postprocess(object):
    """ Postprocessing test class
    """

    def check(self):
        """ Checks parameters and paths
        """
        migration.check()

        if 'INPUT' not in PATH:
            setattr(PATH, 'INPUT', None)

    def main(self):
        """ Writes gradient of objective function
        """
from seisflows.tools.config import loadclass
from seisflows.tools.config import ParameterError, SeisflowsParameters, SeisflowsPaths

PAR = SeisflowsParameters()
PATH = SeisflowsPaths()

# ensure number of processors per source is defined
if 'NPROC' not in PAR:
    raise Exception

# there are 16 processors per node on tiger
if 'NODESIZE' in PAR:
    assert PAR.NODESIZE == 16
else:
    PAR.NODESIZE = 16

# if nproc per source exceeds nproc per node, use tiger_lg;
# otherwise, use tiger_sm
if PAR.NPROC > PAR.NODESIZE:
    tiger = loadclass('system', 'tiger_lg')
else:
    tiger = loadclass('system', 'tiger_sm')
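# Illustration (not part of SeisFlows): the branch above selects the
# single-node interface whenever one source fits on a node. With hypothetical
# values:
NODESIZE = 16
for NPROC in (4, 16, 64):
    print('%d -> %s' % (NPROC, 'tiger_lg' if NPROC > NODESIZE else 'tiger_sm'))
# 4 -> tiger_sm, 16 -> tiger_sm, 64 -> tiger_lg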