def __init__(self, objective, memory_length=None, reset_on_new_inner_loop_call=True, *args, **kwargs):
    """Set up the limited-memory optimizer.

    objective                    -- objective function handed to OptimizationBase
    memory_length                -- maximum number of stored updates; None keeps
                                    an unbounded history (collections.deque uses
                                    None to indicate no length)
    reset_on_new_inner_loop_call -- when True, stored memory is cleared whenever
                                    a new inner loop starts
    """
    OptimizationBase.__init__(self, objective, *args, **kwargs)

    # No step length or model from a previous iteration exists yet.
    self.prev_alpha = None
    self.prev_model = None

    self.memory_length = memory_length
    self.reset_on_new_inner_loop_call = reset_on_new_inner_loop_call

    # Start from a clean history.
    self._reset_memory()
def __init__(self, objective, reset_length=None, beta_style='fletcher-reeves', *args, **kwargs):
    """Initialize the nonlinear conjugate-gradient optimizer.

    Parameters
    ----------
    objective : object
        Objective passed through to OptimizationBase.
    reset_length : int, optional
        Iterations between direction resets; None means never reset.
    beta_style : {'fletcher-reeves', 'polak-ribiere'}
        Formula used to compute the CG beta coefficient.

    Raises
    ------
    ValueError
        If ``beta_style`` is not one of the supported methods.
    """
    # Fail fast: validate the argument before touching any state so an
    # invalid value never leaves a half-initialized object behind.
    if beta_style not in ('fletcher-reeves', 'polak-ribiere'):
        raise ValueError('Invalid beta computation method.')

    OptimizationBase.__init__(self, objective, *args, **kwargs)

    self.prev_alpha = None
    self.reset_length = reset_length
    # History required to build the next conjugate direction.
    self.prev_gradient = None
    self.prev_direction = None
    self.beta_style = beta_style
def __init__(self, objective, memory_length=None, reset_on_new_inner_loop_call=True,
             proj_op=None, maxiter_PGD=1000, maxiter_linesearch_PGD=100,
             PQN_start_iteration=5, *args, **kwargs):
    """Set up the projected quasi-Newton optimizer.

    memory_length                -- maximum number of stored updates; None keeps
                                    an unbounded history (collections.deque uses
                                    None to indicate no length)
    reset_on_new_inner_loop_call -- clear stored memory when a new inner loop starts
    proj_op                      -- projection operator applied to iterates
    maxiter_PGD                  -- iteration cap for the projected-gradient solve
    maxiter_linesearch_PGD       -- iteration cap for the PGD line search
    PQN_start_iteration          -- iteration at which PQN takes over
    """
    OptimizationBase.__init__(self, objective, *args, **kwargs)

    # Nothing carried over from a previous iteration yet.
    self.prev_alpha = None
    self.prev_model = None

    self.memory_length = memory_length
    self.reset_on_new_inner_loop_call = reset_on_new_inner_loop_call

    self.proj_op = proj_op
    self.maxiter_PGD = maxiter_PGD
    self.maxiter_linesearch_PGD = maxiter_linesearch_PGD
    self.PQN_start_iteration = PQN_start_iteration

    # Start from a clean history.
    self._reset_memory()
def __init__(self, objective, krylov_maxiter=50, *args, **kwargs):
    """Initialize the optimizer.

    krylov_maxiter -- cap on the number of Krylov-solver iterations.
    """
    OptimizationBase.__init__(self, objective, *args, **kwargs)
    self.krylov_maxiter = krylov_maxiter
def __init__(self, objective, *args, **kwargs):
    """Initialize the optimizer; no previous step length is known yet."""
    OptimizationBase.__init__(self, objective, *args, **kwargs)
    self.prev_alpha = None
def __init__(self, objective, proj_op=None, alpha0=0.001, *args, **kwargs):
    """Initialize the projected optimizer.

    proj_op -- optional projection operator applied to iterates
    alpha0  -- initial step length
    """
    OptimizationBase.__init__(self, objective, *args, **kwargs)

    self.prev_alpha = None
    self.proj_op = proj_op
    self.alpha0 = alpha0
def __init__(self, objective, shots, hessiantype, krylov_maxiter=50, n_realizations=250, noise_type='white-noise', n_nodes=0, sparse_mem_ensemble_avg=False, timing=False, use_diag_preconditioner=True, *args, **kwargs): OptimizationBase.__init__(self, objective, *args, **kwargs) #Certain Hessiantypes (Target Oriented) require some extra work if you want to use them for inversion, as the complete Hessian has many rows of 0.0? But then again, any Hessian has nullspace and we apply damping. Think about it more self.krylov_maxiter = krylov_maxiter self.n_realizations = n_realizations self.n_nodes = n_nodes self.use_diag_preconditioner = use_diag_preconditioner self.timing = timing self.used = False #THIS FLAG TRACKS WHETHER THE STOCHASTIC HESSIAN HAS BEEN CALCULATED BEFORE ALREADY. IF SO, WE NEED TO REGENERATE THE NOISE SUPERSHOTS BECAUSE OTHERWISE THE STOCHASTIC HESSIAN WILL BE EXACTLY THE SAME self.noise_type = noise_type self.sparse_mem_ensemble_avg = sparse_mem_ensemble_avg #Using a sparse ensemble average matrix is slower because indexing takes more time. But it does not take (nx*nz)**2 elements in memory. For large problems the dense ensemble average size can become problematic. A dense matrix is normally used, but only the elements for which the Hessian will be computed are filled with non-zeros if noise_type == 'white-noise': self.noisesource = WhiteNoiseSource self.autocorrelation = 2.0 #expected value is 2.0 (variance of real and variance of imaginary part) elif noise_type == 'const-amp-unif-distr-phase': self.noisesource = UnifDistrPhaseSource self.autocorrelation = 1.0 #autocorrelation of the noise always gives amplitude 1.0 in this case else: raise Exception( 'The wrong information for noise source is supplied') if timing: self._timing_ensemble_source_list = [ ] #a list. On the top level there will be a dictionary for each nonlinear iteration. Each frequency will be an entry in such a dictionary with a list as key. 
This list will contain the time each realization of the noise correlation took for that specific frequency. Be careful, if LU decomposition is used the first realization of each frequency will cost more!!! self._timing_ensemble_receiver_list = [] self._timing_total_list = [ ] #a list, contains the total computation for each nonlinear iteration (noise realization, cross-correlation and Hessian element estimation, but not the shot generation at source and receiver locations or connectiondict since those are just abstractions) #CHECK SOURCES for shot in shots: if type(shot.sources) != PointSource: raise TypeError( 'Stochastic Hessian only works with point sources so far') else: if shot.sources.approximation != "delta": raise TypeError( 'Stochastic Hessian requires spatial delta functions right now' ) #USE SAME AMOUNT OF REALIZATIONS FOR SOURCES AND RECEIVERS n_realizations_sources = self.n_realizations n_realizations_receivers = self.n_realizations #ASSUME THAT THE RECEIVERS DO NOT MOVE AND THAT THEY RECORD EACH SHOT. SO TO GET RECEIVER LOCATIONS WE JUST LOOP OVER RECEIVER LOCATIONS IN A SHOT #We only have to generate the random noise sources once, so might as well do it now. More efficient, since creating all the objects for this abstraction level is actually quite expensive print "Initializer: Creating supershots \n" self.shot_sourceloc = self._create_noise_supershots_sources( shots, n_realizations_sources) self.shot_receiverloc = self._create_noise_supershots_receivers( shots, n_realizations_receivers) print "Initializer: Creating connectiondict \n" self._create_connectiondict(hessiantype) self.hessiantype = [hessiantype[0], hessiantype[1]] #Use this as a check in _get_stoch_hessian_elements. If a shots object is supplied to this routine, check if it is the same #as the one given to the constructor. If that is the case, don't redo the work. self.shots = shots print "Initializer: Done...\n"