def setup(self):
    """Set up nonlinear optimization machinery.

    Creates the optimization scratch directory, attaches output writers,
    instantiates the configured descent algorithm, and seeds the starting
    model vector 'm_new' when an initial model is available.
    """
    unix.mkdir(PATH.OPTIMIZE)

    # output writers for convergence and step-length logs
    self.writer = Writer(path=PATH.OUTPUT)
    self.stepwriter = StepWriter(path=PATH.SUBMIT)

    # instantiate the requested nonlinear solver
    if PAR.SCHEME in ["NLCG"]:
        self.NLCG = NLCG(
            path=PATH.OPTIMIZE,
            maxiter=PAR.NLCGMAX,
            thresh=PAR.NLCGTHRESH,
            precond=self.precond)
    elif PAR.SCHEME in ["LBFGS"]:
        self.LBFGS = LBFGS(
            path=PATH.OPTIMIZE,
            memory=PAR.LBFGSMEM,
            maxiter=PAR.LBFGSMAX,
            thresh=PAR.LBFGSTHRESH,
            precond=self.precond)

    # write initial model as the merged vector 'm_new'
    if exists(PATH.MODEL_INIT):
        initial = solver.load(PATH.MODEL_INIT)
        savenpy(join(PATH.OPTIMIZE, "m_new"), solver.merge(initial))
def compute_step(cls):
    """Compute the next trial step length for the line search.

    Reads the current model, search direction, and line-search history
    from the working directory, chooses a new step length ``alpha``
    according to PAR.SRCHTYPE, then writes 'alpha' and the trial model
    'm_try' back to disk.

    Raises:
        ValueError: if PAR.SRCHTYPE is not a recognized search type.
    """
    unix.cd(cls.path)
    m0 = loadnpy('m_new')   # current model
    p = loadnpy('p_new')    # search direction
    f0 = loadtxt('f_new')   # objective value at m0
    g0 = loadtxt('s_new')   # slope along p at m0
    x = cls.step_lens()     # step lengths tried so far (x[0] == 0)
    f = cls.func_vals()     # corresponding objective values

    # compute trial step length
    if PAR.SRCHTYPE == 'Backtrack':
        alpha = lib.backtrack2(f0, g0, x[1], f[1], b1=0.1, b2=0.5)

    elif PAR.SRCHTYPE == 'Bracket':
        FACTOR = 2.
        if any(f[1:] < f[0]) and (f[-2] < f[-1]):
            # minimum is bracketed: parabolic interpolation
            alpha = lib.polyfit2(x, f)
        elif any(f[1:] < f[0]):
            # still decreasing: extend the bracket
            alpha = loadtxt('alpha') * FACTOR
        else:
            # no decrease yet: shrink the step
            alpha = loadtxt('alpha') * FACTOR ** -1

    elif PAR.SRCHTYPE == 'Fixed':
        # BUG FIX: 'step' was previously undefined here (NameError).
        # Use the number of trial steps taken so far, inferred from the
        # search history -- TODO confirm this matches the intended schedule.
        step = len(x) - 1
        alpha = cls.step_ratio * (step + 1) * PAR.STEPLEN

    else:
        # previously raised a bare ValueError with no context
        raise ValueError('unknown line search type: %s' % PAR.SRCHTYPE)

    # write trial model
    savetxt('alpha', alpha)
    savenpy('m_try', m0 + p * alpha)
def initialize_io_machinery(self):
    """Write mesh files expected by input/output methods.

    On the head node only, dumps per-process arrays for every model
    parameter not being inverted (plus coordinates 'x', 'z') under
    PATH.GLOBAL/mesh, and seeds PATH.OPTIMIZE/m_new with the merged
    initial model if it does not yet exist.
    """
    if system.getnode() == 0:
        model_set = set(self.model_parameters)
        inversion_set = set(self.inversion_parameters)
        parts = self.load(PATH.MODEL_INIT)

        # previously wrapped in 'try/except: raise Exception', which only
        # obscured the underlying error -- access the path directly
        path = PATH.GLOBAL + '/' + 'mesh'
        if not exists(path):
            for key in list(setdiff(model_set, inversion_set)) + ['x', 'z']:
                unix.mkdir(path + '/' + key)
                # per-process dump nested inside the key loop so each key
                # gets its own files (previously the loop nesting was
                # ambiguous and 'key' could dangle from the last iteration)
                for proc in range(PAR.NPROC):
                    # 'wb': np.save requires a binary-mode file object
                    # (text mode fails on Python 3)
                    with open(path + '/' + key + '/' + '%06d' % proc,
                              'wb') as file:
                        np.save(file, parts[key][proc])

        try:
            path = PATH.OPTIMIZE + '/' + 'm_new'
        except Exception:
            # PATH.OPTIMIZE not configured -- nothing more to do
            # (narrowed from a bare 'except')
            return
        if not exists(path):
            savenpy(path, self.merge(parts))
def setup(self):
    """Set up nonlinear optimization machinery.

    Prepares the scratch directory and writers, constructs the chosen
    descent algorithm, and writes the merged initial model when present.
    """
    unix.mkdir(PATH.OPTIMIZE)

    # prepare output writers
    self.writer = Writer(path=PATH.OUTPUT)
    self.stepwriter = StepWriter(path=PATH.SUBMIT)

    # prepare algorithm machinery
    if PAR.SCHEME in ['NLCG']:
        self.NLCG = NLCG(path=PATH.OPTIMIZE,
                         maxiter=PAR.NLCGMAX,
                         thresh=PAR.NLCGTHRESH,
                         precond=self.precond())
    elif PAR.SCHEME in ['LBFGS']:
        self.LBFGS = LBFGS(path=PATH.OPTIMIZE,
                           memory=PAR.LBFGSMEM,
                           maxiter=PAR.LBFGSMAX,
                           thresh=PAR.LBFGSTHRESH,
                           precond=self.precond())

    # write initial model
    if exists(PATH.MODEL_INIT):
        import solver
        model = solver.load(PATH.MODEL_INIT)
        savenpy(join(PATH.OPTIMIZE, 'm_new'), solver.merge(model))
def compute_direction(cls):
    """Compute the model update direction from stored function and
    gradient values, and save it as 'p_new' with its slope 's_new'."""
    unix.cd(cls.path)
    # m_new and f_new are loaded but not referenced below; presumably kept
    # so a missing file fails fast here -- verify before removing
    m_new = loadnpy('m_new')
    f_new = loadtxt('f_new')
    g_new = loadnpy('g_new')

    if PAR.SCHEME == 'GradientDescent':
        direction = -g_new
    elif PAR.SCHEME == 'ConjugateGradient':
        # NLCG update
        direction = cls.NLCG.compute()
    elif PAR.SCHEME == 'QuasiNewton':
        # L-BFGS update; steepest descent on the first iteration,
        # before any curvature information exists
        if cls.iter == 1:
            direction = -g_new
        else:
            cls.LBFGS.update()
            direction = -cls.LBFGS.solve()

    # save results
    unix.cd(cls.path)
    savenpy('p_new', direction)
    savetxt('s_new', np.dot(g_new, direction))
def finalize_search(cls):
    """Clean the working directory and write the updated model.

    Rotates the *_new files to *_old, selects the best step length from
    the search history, and writes the accepted model and misfit.
    """
    unix.cd(cls.path)
    m0 = loadnpy('m_new')
    p = loadnpy('p_new')
    x = cls.step_lens()
    f = cls.func_vals()

    # remove trial-step scratch files
    for scratch in ('alpha', 'm_try', 'f_try'):
        unix.rm(scratch)

    # rotate previous-iteration files
    if cls.iter > 1:
        for name in ('m', 'f', 'g', 'p', 's'):
            unix.rm(name + '_old')
    for name in ('m', 'f', 'g', 'p', 's'):
        unix.mv(name + '_new', name + '_old')

    # accept the step with the smallest misfit
    alpha = x[f.argmin()]
    savetxt('alpha', alpha)
    savenpy('m_new', m0 + alpha * p)
    savetxt('f_new', f.min())

    cls.writer([], [], [])
def setup(cls):
    """Create and enter the working directory, initialize the optimizer,
    and write the starting model as 'm_new'."""
    unix.mkdir(cls.path)
    unix.cd(cls.path)

    optimize.check()
    optimize.setup()

    unix.cd(cls.path)
    savenpy('m_new', problem.model_init())
def compute_direction_newton(cls):
    """Compute a Newton search direction via iterated linearized-CG steps.

    Each inner iteration evaluates the gradient at the current CG model
    'm_lcg', writes it as 'g_lcg', and lets the optimizer advance until it
    reports convergence or PAR.LCGMAX iterations are exhausted.
    """
    optimize.initialize_newton()
    for _ in range(PAR.LCGMAX):
        trial_model = loadnpy('m_lcg')
        savenpy('g_lcg', problem.grad(trial_model))
        if optimize.iterate_newton():
            break
def evaluate_gradient(cls):
    """Evaluate the objective function and gradient at the current model,
    then update the SRVM machinery if that optimizer is selected."""
    model = loadnpy('m_new')
    misfit = problem.func(model)
    gradient = problem.grad(model)

    savetxt('f_new', misfit)
    savenpy('g_new', gradient)

    if PAR.OPTIMIZE in ['SRVM']:
        optimize.update_SRVM()
def setup(cls):
    """Prepare the working directory and write the initial model."""
    unix.mkdir(cls.path)
    unix.cd(cls.path)
    optimize.check()
    optimize.setup()
    # re-enter the working directory (optimize.setup() may change cwd)
    unix.cd(cls.path)
    initial_model = problem.model_init()
    savenpy('m_new', initial_model)
def initialize_search(cls):
    """Determine the initial step length for the line search and write
    the first trial model 'm_try'."""
    unix.cd(cls.path)

    s_new = loadtxt('s_new')
    f_new = loadtxt('f_new')
    if cls.iter > 1:
        s_old = loadtxt('s_old')
        # f_old is loaded but not used below -- presumably a fail-fast
        # existence check; verify before removing
        f_old = loadtxt('f_old')
        alpha = loadtxt('alpha')
    m = loadnpy('m_new')
    p = loadnpy('p_new')

    # reset the line search history
    cls.search_history = [[0., f_new]]
    cls.isdone = 0
    cls.isbest = 0
    cls.isbrak = 0

    # scale-invariant ratio between model and direction magnitudes
    model_norm = max(abs(m))
    direction_norm = max(abs(p))
    cls.step_ratio = float(model_norm / direction_norm)

    if cls.iter == 1:
        assert PAR.STEPLEN != 0.
        alpha = PAR.STEPLEN * cls.step_ratio
    elif PAR.SRCHTYPE in ['Bracket'] or \
            PAR.SCHEME in ['GradientDescent', 'ConjugateGradient']:
        # rescale the previous step by the change in slope
        alpha *= 2. * s_old / s_new
    else:
        alpha = 1.

    # ad hoc scaling
    if PAR.ADHOCSCALING:
        alpha *= PAR.ADHOCSCALING

    # limit maximum step length
    if PAR.STEPMAX > 0. and alpha / cls.step_ratio > PAR.STEPMAX:
        alpha = PAR.STEPMAX * cls.step_ratio

    # write trial model
    savenpy('m_try', m + alpha * p)
    savetxt('alpha', alpha)

    cls.writer(cls.iter, 0., f_new)
def evaluate_gradient(self):
    """Perform an adjoint simulation to evaluate the gradient.

    Runs the solver's gradient evaluation on all hosts, postprocesses the
    kernels into a gradient, and stores the merged vector as 'g_new'.
    """
    system.run('solver', 'eval_grad',
               hosts='all',
               path=PATH.GRAD,
               export_traces=divides(optimize.iter, PAR.SAVETRACES))

    postprocess.write_gradient(path=PATH.GRAD)

    src = join(PATH.GRAD, 'gradient')
    dst = join(PATH.OPTIMIZE, 'g_new')
    savenpy(dst, solver.merge(solver.load(src, suffix='_kernel')))
def evaluate_gradient(self):
    """Run the adjoint simulation and store the resulting gradient."""
    # launch adjoint runs on all hosts; traces exported on save iterations
    system.run('solver', 'eval_grad', hosts='all', path=PATH.GRAD,
               export_traces=divides(optimize.iter, PAR.SAVETRACES))

    # postprocess event kernels into a gradient
    postprocess.write_gradient(path=PATH.GRAD)

    # merge the processed gradient into a single vector for the optimizer
    gradient = solver.load(join(PATH.GRAD, 'gradient'), suffix='_kernel')
    savenpy(join(PATH.OPTIMIZE, 'g_new'), solver.merge(gradient))
def write_gradient(self, path):
    """Write the gradient of the objective function.

    Combines and processes event kernels found under *path*, optionally
    scales the result by PAR.SCALE, then writes the gradient both in
    solver format (PATH.GRAD/gradient) and as a merged vector
    (PATH.OPTIMIZE/g_new).

    Args:
        path: directory containing the summed event kernels.

    Raises:
        ParameterError: if PATH.OPTIMIZE is not configured.
        Exception: if *path* does not exist.
    """
    # check parameters
    if 'OPTIMIZE' not in PATH:
        raise ParameterError(PATH, 'OPTIMIZE')

    # check input arguments (previously raised a message-less Exception)
    if not exists(path):
        raise Exception('gradient input path does not exist: %s' % path)

    self.combine_kernels(path)
    self.process_kernels(path)

    g = solver.merge(solver.load(
        path + '/' + 'kernels/sum',
        suffix='_kernel',
        verbose=True))

    # apply optional scaling. Short-circuit on falsy PAR.SCALE first so an
    # unset (None) value no longer raises TypeError inside float().
    if PAR.SCALE and float(PAR.SCALE) != 1.:
        g *= PAR.SCALE

    # write gradient
    solver.save(PATH.GRAD + '/' + 'gradient', solver.split(g),
                suffix='_kernel')
    savenpy(PATH.OPTIMIZE + '/' + 'g_new', g)

    # best effort: derive anisotropy azimuth from Gc/Gs components.
    # NOTE(review): g is a merged vector at this point, so g['Gs'] likely
    # raises and this block silently no-ops -- confirm whether the split
    # (dict) form was intended here.
    try:
        for iproc in range(PAR.NPROC):
            y = g['Gs'][iproc]
            x = - g['Gc'][iproc]
            t = 0.5 * np.arctan2(y, x)
            filename = 'proc%06d_%s.bin' % (iproc, 'azimuth')
            savebin(t, PATH.GRAD + '/' + filename)
    except Exception:
        # narrowed from a bare 'except': still best-effort, but no longer
        # swallows KeyboardInterrupt/SystemExit
        pass
def smooth(self, precond=False, span=0.):
    """Process the gradient.

    Optionally applies a preconditioner, grid-smooths each parameter
    kernel over *span*, writes the smoothed kernels to PATH.GRAD, and
    stores the merged result as PATH.OPTIMIZE/g_new.
    """
    if precond:
        print('Applying preconditioner..')
        weight = self.load(join(PATH.GRAD, 'precond.bin'))
        weight = weight.reshape((p.nz, p.nx))
    else:
        # multiplicative identity: no preconditioning
        weight = 1

    for par in self.parameters:
        kernel = self.load(join(PATH.GRAD, par + '_kernel.bin'))
        kernel = kernel.reshape((p.nz, p.nx))
        smoothed = gridsmooth(kernel * weight, span)
        self.save(join(PATH.GRAD, par + '_smooth_kernel.bin'), smoothed)

    g_new = self.merge(PATH.GRAD, '_smooth_kernel.bin')
    savenpy(join(PATH.OPTIMIZE, 'g_new'), g_new)
def save(self, filename, v):
    """Write vector *v* into the optimization directory under *filename*."""
    savenpy('%s/%s' % (PATH.OPTIMIZE, filename), v)
def save(self, filename, array):
    """Write *array* to disk under PATH.OPTIMIZE/<filename>."""
    target = PATH.OPTIMIZE + '/' + filename
    savenpy(target, array)
def save(self, filename, array):
    """Persist *array* inside the optimization scratch directory."""
    savenpy('/'.join([PATH.OPTIMIZE, filename]), array)
def evaluate_gradient(cls):
    """Evaluate the objective function and its gradient at the current
    model, writing the results as 'f_new' and 'g_new'."""
    model = loadnpy('m_new')
    misfit = problem.func(model)
    gradient = problem.grad(model)
    savetxt('f_new', misfit)
    savenpy('g_new', gradient)
def evaluate_gradient(cls):
    """Compute function value and gradient for the current model state."""
    current = loadnpy('m_new')
    fval = problem.func(current)
    gvec = problem.grad(current)
    savetxt('f_new', fval)
    savenpy('g_new', gvec)