def run_inversion(
    m0,
    simulation,
    data,
    actind,
    mesh,
    maxIter=15,
    beta0_ratio=1e0,
    coolingFactor=5,
    coolingRate=2,
    upper=np.inf,
    lower=-np.inf,
    use_sensitivity_weight=True,
    alpha_s=1e-4,
    alpha_x=1.0,
    alpha_y=1.0,
    alpha_z=1.0,
):
    """
    Run a DC inversion.

    Parameters
    ----------
    m0 : numpy.ndarray
        Starting model (one value per active cell).
    simulation : SimPEG simulation
        Forward simulation used in the data misfit.
    data : SimPEG data object
        Observed data.
    actind : numpy.ndarray of bool
        Active-cell indicator for ``mesh``.
    mesh : discretize mesh
        Inversion mesh.
    maxIter, beta0_ratio, coolingFactor, coolingRate : numbers
        Optimization / beta-schedule controls.
    upper, lower : float or numpy.ndarray
        Model bounds for the projected Gauss-Newton solver.
    use_sensitivity_weight : bool
        If True, use a Sparse regularization with sensitivity weighting;
        otherwise use classic Tikhonov.
    alpha_s, alpha_x, alpha_y, alpha_z : float
        Regularization weights (identical for both regularization types).

    Returns
    -------
    tuple
        ``(mopt, dpred)`` — recovered model and the predicted data of the
        final inverse problem state.
    """
    dmisfit = data_misfit.L2DataMisfit(simulation=simulation, data=data)

    # Map for a regularization: identity over the active cells only
    regmap = maps.IdentityMap(nP=int(actind.sum()))

    # Choose the regularization type; the alpha weights are the same either
    # way, so set them once after the branch (the original duplicated them).
    if use_sensitivity_weight:
        reg = regularization.Sparse(mesh, indActive=actind, mapping=regmap)
    else:
        reg = regularization.Tikhonov(mesh, indActive=actind, mapping=regmap)
    reg.alpha_s = alpha_s
    reg.alpha_x = alpha_x
    reg.alpha_y = alpha_y
    reg.alpha_z = alpha_z

    opt = optimization.ProjectedGNCG(maxIter=maxIter, upper=upper, lower=lower)
    invProb = inverse_problem.BaseInvProblem(dmisfit, reg, opt)
    beta = directives.BetaSchedule(coolingFactor=coolingFactor, coolingRate=coolingRate)
    betaest = directives.BetaEstimate_ByEig(beta0_ratio=beta0_ratio)
    target = directives.TargetMisfit()

    # Need to have a basic saving function
    update_Jacobi = directives.UpdatePreconditioner()
    if use_sensitivity_weight:
        updateSensW = directives.UpdateSensitivityWeights()
        directiveList = [beta, target, updateSensW, update_Jacobi, betaest]
    else:
        directiveList = [beta, target, update_Jacobi, betaest]
    inv = inversion.BaseInversion(invProb, directiveList=directiveList)
    opt.LSshorten = 0.5
    opt.remember("xc")

    # Run inversion
    mopt = inv.run(m0)
    return mopt, invProb.dpred
def test_inv_mref_setting(self):
    """Check that running the inversion propagates the starting model into
    the ``mref`` of every term of a combo regularization."""
    # Two Tikhonov terms combined into a single combo regularization
    reg_first = regularization.Tikhonov(self.mesh)
    reg_second = regularization.Tikhonov(self.mesh)
    combo_reg = reg_first + reg_second

    solver = optimization.ProjectedGNCG(
        maxIter=10, lower=-10, upper=10, maxIterLS=20, maxIterCG=50, tolCG=1e-4
    )
    inv_prob = inverse_problem.BaseInvProblem(self.dmiscombo, combo_reg, solver)

    # Directives: scale the multiple misfits, estimate alphas and beta,
    # then cool beta on the default schedule.
    directive_list = [
        directives.ScalingMultipleDataMisfits_ByEig(chi0_ratio=[0.01, 1.0], verbose=True),
        directives.AlphasSmoothEstimate_ByEig(verbose=True),
        directives.BetaEstimate_ByEig(beta0_ratio=1e-2),
        directives.BetaSchedule(),
    ]
    inv = inversion.BaseInversion(inv_prob, directiveList=directive_list)

    # Constant starting model at the mean of the true model
    m0 = self.model.mean() * np.ones_like(self.model)
    mrec = inv.run(m0)

    # Both regularization terms must have picked up m0 as their reference
    self.assertTrue(np.all(reg_first.mref == m0))
    self.assertTrue(np.all(reg_second.mref == m0))
def setUp(self): mesh = discretize.TensorMesh([4, 4, 4]) # Magnetic inducing field parameter (A,I,D) B = [50000, 90, 0] # Create a MAGsurvey rx = mag.Point(np.vstack([[0.25, 0.25, 0.25], [-0.25, -0.25, 0.25]])) srcField = mag.SourceField([rx], parameters=(B[0], B[1], B[2])) survey = mag.Survey(srcField) # Create the forward model operator sim = mag.Simulation3DIntegral(mesh, survey=survey, chiMap=maps.IdentityMap(mesh)) # Compute forward model some data m = np.random.rand(mesh.nC) data = sim.make_synthetic_data(m, add_noise=True) reg = regularization.Sparse(mesh) reg.mref = np.zeros(mesh.nC) reg.norms = np.c_[0, 1, 1, 1] reg.eps_p, reg.eps_q = 1e-3, 1e-3 # Data misfit function dmis = data_misfit.L2DataMisfit(data) dmis.W = 1.0 / data.relative_error # Add directives to the inversion opt = optimization.ProjectedGNCG(maxIter=2, lower=-10.0, upper=10.0, maxIterCG=2) invProb = inverse_problem.BaseInvProblem(dmis, reg, opt) self.mesh = mesh self.invProb = invProb self.sim = sim
# Define the regularization (model objective function) dc_regularization = regularization.Simple( mesh, indActive=ind_active, mref=starting_conductivity_model, alpha_s=0.01, alpha_x=1, alpha_y=1, ) # Define how the optimization problem is solved. Here we will use a projected # Gauss-Newton approach that employs the conjugate gradient solver. dc_optimization = optimization.ProjectedGNCG(maxIter=5, lower=-10.0, upper=2.0, maxIterLS=20, maxIterCG=10, tolCG=1e-3) # Here we define the inverse problem that is to be solved dc_inverse_problem = inverse_problem.BaseInvProblem(dc_data_misfit, dc_regularization, dc_optimization) ####################################################################### # Define DC Inversion Directives # ------------------------------ # # Here we define any directives that are carried out during the inversion. This # includes the cooling schedule for the trade-off parameter (beta), stopping # criteria for the inversion and saving inversion results at each iteration.
# (continues vector-model regularization: one Sparse term per component)
reg_s.mref = np.zeros(3 * nC)
reg_t = regularization.Sparse(mesh, indActive=actv, mapping=wires.t)
reg_t.mref = np.zeros(3 * nC)

# Combine the three component regularizations; the combo shares one mref
reg = reg_p + reg_s + reg_t
reg.mref = np.zeros(3 * nC)

# Data misfit function
dmis = data_misfit.L2DataMisfit(simulation=simulation, data=data_object)
dmis.W = 1.0 / data_object.standard_deviation

# Add directives to the inversion
opt = optimization.ProjectedGNCG(maxIter=10, lower=-10, upper=10.0, maxIterLS=20, maxIterCG=20, tolCG=1e-4)
invProb = inverse_problem.BaseInvProblem(dmis, reg, opt)

# A list of directives to control the inversion
betaest = directives.BetaEstimate_ByEig(beta0_ratio=1e1)

# Add sensitivity weights
sensitivity_weights = directives.UpdateSensitivityWeights()

# Here is where the norms are applied
# Use a threshold parameter empirically based on the distribution of
# model parameters
IRLS = directives.Update_IRLS(f_min_change=1e-3,
def run(plotIt=True):
    """Sparse-norm magnetics inversion example: build a tensor mesh with a
    buried susceptible block, simulate TMI data, and set up the inversion
    (IRLS). Plotting of the recovered models happens further below when
    ``plotIt`` is True."""
    # Define the inducing field parameter (amplitude, inclination, declination)
    H0 = (50000, 90, 0)

    # Create a mesh: padded tensor mesh, centered in x/y/z
    dx = 5.0
    hxind = [(dx, 5, -1.3), (dx, 10), (dx, 5, 1.3)]
    hyind = [(dx, 5, -1.3), (dx, 10), (dx, 5, 1.3)]
    hzind = [(dx, 5, -1.3), (dx, 10)]
    mesh = TensorMesh([hxind, hyind, hzind], "CCC")

    # Get index of the center
    midx = int(mesh.nCx / 2)
    midy = int(mesh.nCy / 2)

    # Lets create a simple Gaussian topo and set the active cells
    [xx, yy] = np.meshgrid(mesh.vectorNx, mesh.vectorNy)
    zz = -np.exp((xx**2 + yy**2) / 75**2) + mesh.vectorNz[-1]

    # We would usually load a topofile
    topo = np.c_[utils.mkvc(xx), utils.mkvc(yy), utils.mkvc(zz)]

    # Go from topo to array of indices of active cells
    actv = utils.surface2ind_topo(mesh, topo, "N")
    actv = np.where(actv)[0]
    nC = len(actv)

    # Create an array of observation points
    xr = np.linspace(-20.0, 20.0, 20)
    yr = np.linspace(-20.0, 20.0, 20)
    X, Y = np.meshgrid(xr, yr)

    # Move the observation points 5m above the topo
    Z = -np.exp((X**2 + Y**2) / 75**2) + mesh.vectorNz[-1] + 5.0

    # Create a MAGsurvey
    rxLoc = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)]
    rxLoc = magnetics.receivers.Point(rxLoc, components=["tmi"])
    srcField = magnetics.sources.SourceField(receiver_list=[rxLoc], parameters=H0)
    survey = magnetics.survey.Survey(srcField)

    # We can now create a susceptibility model and generate data
    # Here a simple block in half-space
    model = np.zeros((mesh.nCx, mesh.nCy, mesh.nCz))
    model[(midx - 2):(midx + 2), (midy - 2):(midy + 2), -6:-2] = 0.02
    model = utils.mkvc(model)
    model = model[actv]  # keep only the active (sub-surface) cells

    # Create active map to go from reduce set to full (-100 flags air cells)
    actvMap = maps.InjectActiveCells(mesh, actv, -100)

    # Create reduced identity map
    idenMap = maps.IdentityMap(nP=nC)

    # Create the forward model operator
    simulation = magnetics.simulation.Simulation3DIntegral(
        survey=survey,
        mesh=mesh,
        chiMap=idenMap,
        actInd=actv,
    )

    # Compute linear forward operator and compute some data
    d = simulation.dpred(model)

    # Add noise and uncertainties
    # We add some random Gaussian
    # noise (1nT)
    synthetic_data = d + np.random.randn(len(d))
    wd = np.ones(len(synthetic_data)) * 1.0  # Assign flat uncertainties
    data_object = data.Data(survey, dobs=synthetic_data, noise_floor=wd)

    # Create a regularization (sparse norms on model and all gradients)
    reg = regularization.Sparse(mesh, indActive=actv, mapping=idenMap)
    reg.mref = np.zeros(nC)
    reg.norms = np.c_[0, 0, 0, 0]
    # reg.eps_p, reg.eps_q = 1e-0, 1e-0

    # Create sensitivity weights from our linear forward operator
    rxLoc = survey.source_field.receiver_list[0].locations
    m0 = np.ones(nC) * 1e-4  # Starting model

    # Data misfit function
    dmis = data_misfit.L2DataMisfit(simulation=simulation, data=data_object)
    dmis.W = 1 / wd

    # Add directives to the inversion
    opt = optimization.ProjectedGNCG(maxIter=20, lower=0.0, upper=1.0, maxIterLS=20, maxIterCG=20, tolCG=1e-3)
    invProb = inverse_problem.BaseInvProblem(dmis, reg, opt)
    betaest = directives.BetaEstimate_ByEig(beta0_ratio=1e-1)

    # Here is where the norms are applied
    # Use pick a threshold parameter empirically based on the distribution of
    # model parameters
    IRLS = directives.Update_IRLS(f_min_change=1e-3, max_irls_iterations=40)
    saveDict = directives.SaveOutputEveryIteration(save_txt=False)
    update_Jacobi = directives.UpdatePreconditioner()

    # Add sensitivity weights
    sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False)

    inv = inversion.BaseInversion(
        invProb,
        directiveList=[
            sensitivity_weights, IRLS, betaest, update_Jacobi, saveDict
        ],
    )

    # Run the inversion
    mrec = inv.run(m0)

    if plotIt:
        # Here is the recovered susceptibility model.
        # Map reduced models back onto the full mesh; -100 marks inactive
        # (air) cells, which we blank out for plotting.
        ypanel = midx
        zpanel = -5
        m_l2 = actvMap * invProb.l2model
        m_l2[m_l2 == -100] = np.nan

        m_lp = actvMap * mrec
        m_lp[m_lp == -100] = np.nan

        m_true = actvMap * model
        m_true[m_true == -100] = np.nan

        # Plot the data
        utils.plot_utils.plot2Ddata(rxLoc, d)

        plt.figure()

        # Plot L2 model
        ax = plt.subplot(321)
        mesh.plotSlice(
            m_l2,
            ax=ax,
            normal="Z",
            ind=zpanel,
            grid=True,
            clim=(model.min(), model.max()),
        )
        plt.plot(
            ([mesh.vectorCCx[0], mesh.vectorCCx[-1]]),
            ([mesh.vectorCCy[ypanel], mesh.vectorCCy[ypanel]]),
            color="w",
        )
        plt.title("Plan l2-model.")
        plt.gca().set_aspect("equal")
        plt.ylabel("y")
        ax.xaxis.set_visible(False)
        plt.gca().set_aspect("equal", adjustable="box")

        # Vertica section
        ax = plt.subplot(322)
        mesh.plotSlice(
            m_l2,
            ax=ax,
            normal="Y",
            ind=midx,
            grid=True,
            clim=(model.min(), model.max()),
        )
        plt.plot(
            ([mesh.vectorCCx[0], mesh.vectorCCx[-1]]),
            ([mesh.vectorCCz[zpanel], mesh.vectorCCz[zpanel]]),
            color="w",
        )
        plt.title("E-W l2-model.")
        plt.gca().set_aspect("equal")
        ax.xaxis.set_visible(False)
        plt.ylabel("z")
        plt.gca().set_aspect("equal", adjustable="box")

        # Plot Lp model
        ax = plt.subplot(323)
        mesh.plotSlice(
            m_lp,
            ax=ax,
            normal="Z",
            ind=zpanel,
            grid=True,
            clim=(model.min(), model.max()),
        )
        plt.plot(
            ([mesh.vectorCCx[0], mesh.vectorCCx[-1]]),
            ([mesh.vectorCCy[ypanel], mesh.vectorCCy[ypanel]]),
            color="w",
        )
        plt.title("Plan lp-model.")
        plt.gca().set_aspect("equal")
        ax.xaxis.set_visible(False)
        plt.ylabel("y")
        plt.gca().set_aspect("equal", adjustable="box")

        # Vertical section
        ax = plt.subplot(324)
        mesh.plotSlice(
            m_lp,
            ax=ax,
            normal="Y",
            ind=midx,
            grid=True,
            clim=(model.min(), model.max()),
        )
        plt.plot(
            ([mesh.vectorCCx[0], mesh.vectorCCx[-1]]),
            ([mesh.vectorCCz[zpanel], mesh.vectorCCz[zpanel]]),
            color="w",
        )
        plt.title("E-W lp-model.")
        plt.gca().set_aspect("equal")
        ax.xaxis.set_visible(False)
        plt.ylabel("z")
        plt.gca().set_aspect("equal", adjustable="box")

        # Plot True model
        ax = plt.subplot(325)
        mesh.plotSlice(
            m_true,
            ax=ax,
            normal="Z",
            ind=zpanel,
            grid=True,
            clim=(model.min(), model.max()),
        )
        plt.plot(
            ([mesh.vectorCCx[0], mesh.vectorCCx[-1]]),
            ([mesh.vectorCCy[ypanel], mesh.vectorCCy[ypanel]]),
            color="w",
        )
        plt.title("Plan true model.")
        plt.gca().set_aspect("equal")
        plt.xlabel("x")
        plt.ylabel("y")
        plt.gca().set_aspect("equal", adjustable="box")

        # Vertical section
        ax = plt.subplot(326)
        mesh.plotSlice(
            m_true,
            ax=ax,
            normal="Y",
            ind=midx,
            grid=True,
            clim=(model.min(), model.max()),
        )
        plt.plot(
            ([mesh.vectorCCx[0], mesh.vectorCCx[-1]]),
            ([mesh.vectorCCz[zpanel], mesh.vectorCCz[zpanel]]),
            color="w",
        )
        plt.title("E-W true model.")
        plt.gca().set_aspect("equal")
        plt.xlabel("x")
        plt.ylabel("z")
        plt.gca().set_aspect("equal", adjustable="box")

        # Plot convergence curves: phi_d on the left axis, phi_m on a twin
        # axis; the vertical dotted line marks where IRLS iterations begin.
        fig, axs = plt.figure(), plt.subplot()
        axs.plot(saveDict.phi_d, "k", lw=2)
        axs.plot(
            np.r_[IRLS.iterStart, IRLS.iterStart],
            np.r_[0, np.max(saveDict.phi_d)],
            "k:",
        )
        twin = axs.twinx()
        twin.plot(saveDict.phi_m, "k--", lw=2)
        axs.text(
            IRLS.iterStart,
            0,
            "IRLS Steps",
            va="bottom",
            ha="center",
            rotation="vertical",
            size=12,
            bbox={"facecolor": "white"},
        )
        # NOTE(review): "\p" is an invalid escape sequence in a non-raw
        # string (DeprecationWarning); consider raw strings r"$\phi_d$".
        axs.set_ylabel("$\phi_d$", size=16, rotation=0)
        axs.set_xlabel("Iterations", size=14)
        twin.set_ylabel("$\phi_m$", size=16, rotation=0)
# NOTE(review): this assignment is immediately discarded by the
# reassignment of mtrue on the next line — presumably left over from an
# earlier version of the example; confirm before removing.
mtrue[indx] = -(((1 - t**2.0)**2.0)[indx])

# Piecewise-constant true model along x
mtrue = np.zeros(mesh.nC)
mtrue[mesh.cell_centers_x > 0.3] = 1.0
mtrue[mesh.cell_centers_x > 0.45] = -0.5
mtrue[mesh.cell_centers_x > 0.6] = 0

# SimPEG problem and survey
prob = simulation.LinearSimulation(mesh, G=G, model_map=maps.IdentityMap())
std = 0.01
survey = prob.make_synthetic_data(mtrue, relative_error=std, add_noise=True)

# Setup the inverse problem
reg = regularization.Tikhonov(mesh, alpha_s=1.0, alpha_x=1.0)
dmis = data_misfit.L2DataMisfit(data=survey, simulation=prob)
opt = optimization.ProjectedGNCG(maxIter=10, maxIterCG=50, tolCG=1e-4)
invProb = inverse_problem.BaseInvProblem(dmis, reg, opt)
directiveslist = [
    directives.BetaEstimate_ByEig(beta0_ratio=1e-5),
    directives.BetaSchedule(coolingFactor=10.0, coolingRate=2),
    directives.TargetMisfit(),
]

inv = inversion.BaseInversion(invProb, directiveList=directiveslist)
m0 = np.zeros_like(mtrue)

# Reference (Tikhonov) inversion result, used for comparison below
mnormal = inv.run(m0)

#########################################
# Petrophysically constrained inversion #
#########################################
def setup_and_run_std_inv(mesh, dc_survey, dc_data, std_dc, conductivity_map, ind_active, starting_conductivity_model): """Code to setup and run a standard inversion. Parameters ---------- mesh : TYPE DESCRIPTION. dc_survey : TYPE DESCRIPTION. dc_data : TYPE DESCRIPTION. std_dc : TYPE DESCRIPTION. conductivity_map : TYPE DESCRIPTION. ind_active : TYPE DESCRIPTION. starting_conductivity_model : TYPE DESCRIPTION. Returns ------- save_iteration : TYPE DESCRIPTION. save_dict_iteration : TYPE DESCRIPTION. """ # Add standard deviations to data object dc_data.standard_deviation = std_dc # Define the simulation (physics of the problem) dc_simulation = dc.simulation_2d.Simulation2DNodal( mesh, survey=dc_survey, sigmaMap=conductivity_map, Solver=Solver) # Define the data misfit. dc_data_misfit = data_misfit.L2DataMisfit(data=dc_data, simulation=dc_simulation) # Define the regularization (model objective function) dc_regularization = regularization.Simple(mesh, indActive=ind_active, mref=starting_conductivity_model, alpha_s=0.01, alpha_x=1, alpha_y=1) # Define how the optimization problem is solved. Here we will use a # projected. Gauss-Newton approach that employs the conjugate gradient # solver. dc_optimization = optimization.ProjectedGNCG(maxIter=15, lower=-np.inf, upper=np.inf, maxIterLS=20, maxIterCG=10, tolCG=1e-3) # Here we define the inverse problem that is to be solved dc_inverse_problem = inverse_problem.BaseInvProblem( dc_data_misfit, dc_regularization, dc_optimization) # Define inversion directives # Apply and update sensitivity weighting as the model updates update_sensitivity_weighting = directives.UpdateSensitivityWeights() # Defining a starting value for the trade-off parameter (beta) between the # data misfit and the regularization. starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e2) # Set the rate of reduction in trade-off parameter (beta) each time the # the inverse problem is solved. 
    # And set the number of Gauss-Newton
    # iterations for each trade-off parameter value.
    beta_schedule = directives.BetaSchedule(coolingFactor=10, coolingRate=1)

    # Options for outputting recovered models and predicted data for each beta.
    save_iteration = directives.SaveOutputEveryIteration(save_txt=False)

    # save results from each iteration in a dict
    save_dict_iteration = directives.SaveOutputDictEveryIteration(
        saveOnDisk=False)

    directives_list = [
        update_sensitivity_weighting,
        starting_beta,
        beta_schedule,
        save_iteration,
        save_dict_iteration,
    ]

    # Here we combine the inverse problem and the set of directives
    dc_inversion = inversion.BaseInversion(dc_inverse_problem,
                                           directiveList=directives_list)

    # Run inversion (the recovered model itself is not returned; the
    # per-iteration save directives carry all results of interest)
    _ = dc_inversion.run(starting_conductivity_model)

    return save_iteration, save_dict_iteration
def run_inversion(
    m0,
    survey,
    actind,
    mesh,
    wires,
    std,
    eps,
    maxIter=15,
    beta0_ratio=1e0,
    coolingFactor=2,
    coolingRate=2,
    maxIterLS=20,
    maxIterCG=10,
    LSshorten=0.5,
    eta_lower=1e-5,
    eta_upper=1,
    tau_lower=1e-6,
    tau_upper=10.0,
    c_lower=1e-2,
    c_upper=1.0,
    is_log_tau=True,
    is_log_c=True,
    is_log_eta=True,
    mref=None,
    alpha_s=1e-4,
    alpha_x=1e0,
    alpha_y=1e0,
    alpha_z=1e0,
):
    """
    Run spectral IP inversion.

    Parameters
    ----------
    m0 : numpy.ndarray
        Starting model ``[eta, tau, c]`` stacked for the active cells.
    survey : SimPEG survey
        Survey with observed data (``dobs``); also used to build the misfit.
    actind : numpy.ndarray of bool
        Active-cell indicator for ``mesh``.
    wires : maps.Wires
        Wire map exposing ``eta``, ``tau`` and ``c`` sub-models.
    std, eps : float
        Relative error and noise floor used to build data weights.
    *_lower, *_upper : float or numpy.ndarray
        Bounds per property; scalars are broadcast to all active cells.
    is_log_eta, is_log_tau, is_log_c : bool
        If True, the corresponding bounds are applied in log space.
    alpha_s, alpha_x, alpha_y, alpha_z : float
        Regularization weights (smallness is applied to eta only).

    Returns
    -------
    tuple
        ``(mopt, dpred)`` — recovered model and final predicted data.
    """
    # NOTE(review): positional ``survey`` as the misfit's first argument is
    # the legacy L2DataMisfit API — confirm against the SimPEG version used.
    dmisfit = data_misfit.L2DataMisfit(survey)
    uncert = abs(survey.dobs) * std + eps
    dmisfit.W = 1.0 / uncert

    # Map for a regularization
    # Related to inversion

    # Set Upper and Lower bounds: allow scalars or per-cell arrays
    e = np.ones(actind.sum())

    def _per_cell(bound):
        # Broadcast a scalar bound to one value per active cell.
        return e * bound if np.isscalar(bound) else bound

    eta_lower, eta_upper = _per_cell(eta_lower), _per_cell(eta_upper)
    tau_lower, tau_upper = _per_cell(tau_lower), _per_cell(tau_upper)
    c_lower, c_upper = _per_cell(c_lower), _per_cell(c_upper)

    # Bounds are applied in log space when requested
    if is_log_eta:
        eta_upper = np.log(eta_upper)
        eta_lower = np.log(eta_lower)
    if is_log_tau:
        tau_upper = np.log(tau_upper)
        tau_lower = np.log(tau_lower)
    if is_log_c:
        c_upper = np.log(c_upper)
        c_lower = np.log(c_lower)

    m_upper = np.r_[eta_upper, tau_upper, c_upper]
    m_lower = np.r_[eta_lower, tau_lower, c_lower]

    # Set up regularization: one Simple term per physical property
    reg_eta = regularization.Simple(mesh, mapping=wires.eta, indActive=actind)
    reg_tau = regularization.Simple(mesh, mapping=wires.tau, indActive=actind)
    reg_c = regularization.Simple(mesh, mapping=wires.c, indActive=actind)

    # Only eta carries a smallness term; tau and c are smoothness-only
    reg_eta.alpha_s = alpha_s
    reg_tau.alpha_s = 0.0
    reg_c.alpha_s = 0.0
    for reg_i in (reg_eta, reg_tau, reg_c):
        reg_i.alpha_x = alpha_x
        reg_i.alpha_y = alpha_y
        reg_i.alpha_z = alpha_z
    reg = reg_eta + reg_tau + reg_c

    # Use Projected Gauss Newton scheme
    opt = optimization.ProjectedGNCG(
        maxIter=maxIter,
        upper=m_upper,
        lower=m_lower,
        maxIterLS=maxIterLS,
        maxIterCG=maxIterCG,
        LSshorten=LSshorten,
    )
    invProb = inverse_problem.BaseInvProblem(dmisfit, reg, opt)
    beta = directives.BetaSchedule(coolingFactor=coolingFactor, coolingRate=coolingRate)
    betaest = directives.BetaEstimate_ByEig(beta0_ratio=beta0_ratio)
    target = directives.TargetMisfit()
    directiveList = [beta, betaest, target]
    inv = inversion.BaseInversion(invProb, directiveList=directiveList)
    # BUG FIX: the original re-assigned ``opt.LSshorten = 0.5`` here, which
    # silently overrode the LSshorten argument; the user value is now kept.
    opt.remember("xc")

    # Run inversion
    mopt = inv.run(m0)
    return mopt, invProb.dpred
# Define the regularization (model objective function) ip_regularization = regularization.Simple( mesh, indActive=ind_active, mapping=maps.IdentityMap(nP=nC), alpha_s=0.01, alpha_x=1, alpha_y=1, alpha_z=1, ) # Define how the optimization problem is solved. ip_optimization = optimization.ProjectedGNCG(maxIter=15, lower=0.0, upper=10, maxIterCG=30, tolCG=1e-2) # Here we define the inverse problem that is to be solved ip_inverse_problem = inverse_problem.BaseInvProblem(ip_data_misfit, ip_regularization, ip_optimization) ####################################################### # Define IP Inversion Directives # ------------------------------ # # Here we define the directives in the same manner as the DC inverse problem. #
def test_basic_inversion(self):
    """
    Test to see if inversion recovers model
    """
    h = [(2, 30)]
    meshObj = discretize.TensorMesh((h, h, [(2, 10)]), x0="CCN")

    # True model: a small block of elevated magnetic viscosity near the top
    mod = 0.00025 * np.ones(meshObj.nC)
    mod[(meshObj.gridCC[:, 0] > -4.0)
        & (meshObj.gridCC[:, 1] > -4.0)
        & (meshObj.gridCC[:, 0] < 4.0)
        & (meshObj.gridCC[:, 1] < 4.0)] = 0.001

    times = np.logspace(-4, -2, 5)
    waveObj = vrm.waveforms.SquarePulse(delt=0.02)

    # Receiver grid 0.5 m above the surface, measuring dB/dt (z)
    x, y = np.meshgrid(np.linspace(-17, 17, 16), np.linspace(-17, 17, 16))
    x, y, z = mkvc(x), mkvc(y), 0.5 * np.ones(np.size(x))
    receiver_list = [
        vrm.Rx.Point(np.c_[x, y, z], times=times, fieldType="dbdt", orientation="z")
    ]

    # Closed transmitter loop around the receiver grid
    txNodes = np.array([
        [-20, -20, 0.001],
        [20, -20, 0.001],
        [20, 20, 0.001],
        [-20, 20, 0.01],
        [-20, -20, 0.001],
    ])
    txList = [vrm.Src.LineCurrent(receiver_list, txNodes, 1.0, waveObj)]

    # PEP 8: locals renamed from `Survey`/`Problem` (capitalized names are
    # reserved for classes); behavior is unchanged.
    survey = vrm.Survey(txList)
    survey.t_active = np.zeros(survey.nD, dtype=bool)
    survey.set_active_interval(-1e6, 1e6)
    problem = vrm.Simulation3DLinear(meshObj, survey=survey, refinement_factor=2)

    dobs = problem.make_synthetic_data(mod)
    survey.noise_floor = 1e-11
    dmis = data_misfit.L2DataMisfit(data=dobs, simulation=problem)

    # Depth-weighting-like cell weights from the forward operator columns
    W = mkvc((np.sum(np.array(problem.A)**2, axis=0)))**0.25
    reg = regularization.Simple(meshObj, alpha_s=0.01, alpha_x=1.0,
                                alpha_y=1.0, alpha_z=1.0, cell_weights=W)
    opt = optimization.ProjectedGNCG(maxIter=20, lower=0.0, upper=1e-2,
                                     maxIterLS=20, tolCG=1e-4)
    invProb = inverse_problem.BaseInvProblem(dmis, reg, opt)

    # Renamed from `directives` — the original shadowed the SimPEG
    # `directives` module imported at file level.
    directive_list = [
        BetaSchedule(coolingFactor=2, coolingRate=1),
        TargetMisfit()
    ]
    inv = inversion.BaseInversion(invProb, directiveList=directive_list)

    m0 = 1e-6 * np.ones(len(mod))
    mrec = inv.run(m0)

    # Final misfit and two model-error metrics
    dmis_final = np.sum(
        (dmis.W.diagonal() * (mkvc(dobs) - problem.fields(mrec)))**2)
    mod_err_2 = np.sqrt(np.sum((mrec - mod)**2)) / np.size(mod)
    mod_err_inf = np.max(np.abs(mrec - mod))

    self.assertTrue(dmis_final < survey.nD and mod_err_2 < 5e-6
                    and mod_err_inf < np.max(mod))
    store_sensitivities="ram")

# Sensitivity-based weights: sqrt of diag(J^T J), normalized to max 1
wr = simulation.getJtJdiag(mstart)**0.5
wr = wr / np.max(np.abs(wr))

# Create a regularization function, in this case l2l2
reg = regularization.Sparse(mesh, indActive=surf,
                            mapping=maps.IdentityMap(nP=nC), alpha_z=0)
reg.mref = np.zeros(nC)

# Specify how the optimization will proceed, set susceptibility bounds to inf
opt = optimization.ProjectedGNCG(maxIter=20, lower=-np.inf, upper=np.inf,
                                 maxIterLS=20, maxIterCG=20, tolCG=1e-3)

# Define misfit function (obs-calc)
dmis = data_misfit.L2DataMisfit(simulation=simulation, data=data_object)

# Create the default L2 inverse problem from the above objects
invProb = inverse_problem.BaseInvProblem(dmis, reg, opt)

# Specify how the initial beta is found
betaest = directives.BetaEstimate_ByEig(beta0_ratio=2)

# Target misfit to stop the inversion,
# try to fit as much as possible of the signal, we don't want to lose anything
IRLS = directives.Update_IRLS(f_min_change=1e-3,
# NOTE(review): W is not used in this span — presumably consumed further
# below (or leftover); confirm before removing.
W = utils.sdiag(wr)

# PGI regularization tying the petrophysical GMM to the inversion model
reg_simple = utils.make_SimplePGIwithRelationships_regularization(
    mesh=mesh,
    gmmref=clfmapping,
    gmm=clfmapping,
    approx_gradient=True,
    alpha_x=1.0,
    wiresmap=wires,
    cell_weights_list=[wr1, wr2],
)

opt = optimization.ProjectedGNCG(
    maxIter=30,
    tolX=1e-6,
    maxIterCG=100,
    tolCG=1e-3,
    lower=-10,
    upper=10,
)

invProb = inverse_problem.BaseInvProblem(dmis, reg_simple, opt)

# directives
scales = directives.ScalingMultipleDataMisfits_ByEig(
    chi0_ratio=np.r_[1.0, 1.0], verbose=True, n_pw_iter=10)
scaling_schedule = directives.JointScalingSchedule(verbose=True)

# Initial alpha ratios: zero the smallness terms of the first objective,
# up-weight the second, unit weight on the third
alpha0_ratio = np.r_[
    np.zeros(len(reg_simple.objfcts[0].objfcts)),
    100.0 * np.ones(len(reg_simple.objfcts[1].objfcts)),
    1.0 * np.ones(len(reg_simple.objfcts[2].objfcts)),
]
def setUp(self):
    """Two-stage MVI (magnetic vector inversion) fixture: run a Cartesian
    vector inversion first, then set up (but do not run) a spherical-domain
    inversion started from its result; the test method runs ``self.inv``."""
    np.random.seed(0)
    H0 = (50000.0, 90.0, 0.0)

    # The magnetization is set along a different
    # direction (induced + remanence)
    M = np.array([45.0, 90.0])

    # Create grid of points for topography
    # Lets create a simple Gaussian topo
    # and set the active cells
    [xx, yy] = np.meshgrid(np.linspace(-200, 200, 50), np.linspace(-200, 200, 50))
    b = 100
    A = 50
    zz = A * np.exp(-0.5 * ((xx / b)**2.0 + (yy / b)**2.0))

    # We would usually load a topofile
    topo = np.c_[utils.mkvc(xx), utils.mkvc(yy), utils.mkvc(zz)]

    # Create and array of observation points
    xr = np.linspace(-100.0, 100.0, 20)
    yr = np.linspace(-100.0, 100.0, 20)
    X, Y = np.meshgrid(xr, yr)
    Z = A * np.exp(-0.5 * ((X / b)**2.0 + (Y / b)**2.0)) + 5

    # Create a MAGsurvey
    xyzLoc = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)]
    rxLoc = mag.Point(xyzLoc)
    srcField = mag.SourceField([rxLoc], parameters=H0)
    survey = mag.Survey(srcField)

    # Create a mesh (octree, refined along the topo surface)
    h = [5, 5, 5]
    padDist = np.ones((3, 2)) * 100
    mesh = mesh_builder_xyz(xyzLoc, h, padding_distance=padDist,
                            depth_core=100, mesh_type="tree")
    mesh = refine_tree_xyz(mesh, topo, method="surface",
                           octree_levels=[4, 4], finalize=True)
    self.mesh = mesh

    # Define an active cells from topo
    actv = utils.surface2ind_topo(mesh, topo)
    nC = int(actv.sum())

    model = np.zeros((mesh.nC, 3))

    # Convert the inclination declination to vector in Cartesian
    M_xyz = utils.mat_utils.dip_azimuth2cartesian(M[0], M[1])

    # Get the indicies of the magnetized block
    ind = utils.model_builder.getIndicesBlock(
        np.r_[-20, -20, -10],
        np.r_[20, 20, 25],
        mesh.gridCC,
    )[0]

    # Assign magnetization values
    model[ind, :] = np.kron(np.ones((ind.shape[0], 1)), M_xyz * 0.05)

    # Remove air cells
    self.model = model[actv, :]

    # Create active map to go from reduce set to full
    self.actvMap = maps.InjectActiveCells(mesh, actv, np.nan)

    # Creat reduced identity map (3 components per active cell)
    idenMap = maps.IdentityMap(nP=nC * 3)

    # Create the forward model operator
    sim = mag.Simulation3DIntegral(
        self.mesh,
        survey=survey,
        model_type="vector",
        chiMap=idenMap,
        actInd=actv,
        store_sensitivities="disk",
    )
    self.sim = sim

    # Compute some data and add some random noise
    data = sim.make_synthetic_data(utils.mkvc(self.model),
                                   relative_error=0.0,
                                   noise_floor=5.0,
                                   add_noise=True)

    # This Mapping connects the regularizations for the three-component
    # vector model
    wires = maps.Wires(("p", nC), ("s", nC), ("t", nC))

    # Create three regularization for the different components
    # of magnetization
    reg_p = regularization.Sparse(mesh, indActive=actv, mapping=wires.p)
    reg_p.mref = np.zeros(3 * nC)

    reg_s = regularization.Sparse(mesh, indActive=actv, mapping=wires.s)
    reg_s.mref = np.zeros(3 * nC)

    reg_t = regularization.Sparse(mesh, indActive=actv, mapping=wires.t)
    reg_t.mref = np.zeros(3 * nC)

    reg = reg_p + reg_s + reg_t
    reg.mref = np.zeros(3 * nC)

    # Data misfit function
    dmis = data_misfit.L2DataMisfit(simulation=sim, data=data)
    # dmis.W = 1./survey.std

    # Add directives to the inversion (small iteration counts — fixture only)
    opt = optimization.ProjectedGNCG(maxIter=10, lower=-10, upper=10.0,
                                     maxIterLS=5, maxIterCG=5, tolCG=1e-4)
    invProb = inverse_problem.BaseInvProblem(dmis, reg, opt)

    # A list of directives to control the inversion
    betaest = directives.BetaEstimate_ByEig(beta0_ratio=1e1)

    # Here is where the norms are applied
    # Use pick a treshold parameter empirically based on the distribution of
    # model parameters
    IRLS = directives.Update_IRLS(f_min_change=1e-3,
                                  max_irls_iterations=0,
                                  beta_tol=5e-1)

    # Pre-conditioner
    update_Jacobi = directives.UpdatePreconditioner()

    sensitivity_weights = directives.UpdateSensitivityWeights(
        everyIter=False)
    inv = inversion.BaseInversion(
        invProb,
        directiveList=[sensitivity_weights, IRLS, update_Jacobi, betaest])

    # Run the (Cartesian) inversion
    m0 = np.ones(3 * nC) * 1e-4  # Starting model
    mrec_MVIC = inv.run(m0)

    # Switch to the spherical parameterization, seeding it with the
    # Cartesian result and carrying over the final beta
    sim.chiMap = maps.SphericalSystem(nP=nC * 3)
    self.mstart = sim.chiMap.inverse(mrec_MVIC)
    dmis.simulation.model = self.mstart
    beta = invProb.beta

    # Create a block diagonal regularization
    wires = maps.Wires(("amp", nC), ("theta", nC), ("phi", nC))

    # Create a Combo Regularization
    # Regularize the amplitude of the vectors
    reg_a = regularization.Sparse(mesh, indActive=actv, mapping=wires.amp)
    reg_a.norms = np.c_[0.0, 0.0, 0.0, 0.0]  # Sparse on the model and its gradients
    reg_a.mref = np.zeros(3 * nC)

    # Regularize the vertical angle of the vectors
    reg_t = regularization.Sparse(mesh, indActive=actv, mapping=wires.theta)
    reg_t.alpha_s = 0.0  # No reference angle
    reg_t.space = "spherical"
    reg_t.norms = np.c_[2.0, 0.0, 0.0, 0.0]  # Only norm on gradients used

    # Regularize the horizontal angle of the vectors
    reg_p = regularization.Sparse(mesh, indActive=actv, mapping=wires.phi)
    reg_p.alpha_s = 0.0  # No reference angle
    reg_p.space = "spherical"
    reg_p.norms = np.c_[2.0, 0.0, 0.0, 0.0]  # Only norm on gradients used

    reg = reg_a + reg_t + reg_p
    reg.mref = np.zeros(3 * nC)

    # Bounds: amplitude in [0, 10], angles unbounded
    Lbound = np.kron(np.asarray([0, -np.inf, -np.inf]), np.ones(nC))
    Ubound = np.kron(np.asarray([10, np.inf, np.inf]), np.ones(nC))

    # Add directives to the inversion
    opt = optimization.ProjectedGNCG(
        maxIter=5,
        lower=Lbound,
        upper=Ubound,
        maxIterLS=5,
        maxIterCG=5,
        tolCG=1e-3,
        stepOffBoundsFact=1e-3,
    )
    opt.approxHinv = None

    invProb = inverse_problem.BaseInvProblem(dmis, reg, opt, beta=beta)

    # Here is where the norms are applied
    IRLS = directives.Update_IRLS(
        f_min_change=1e-4,
        max_irls_iterations=5,
        minGNiter=1,
        beta_tol=0.5,
        coolingRate=1,
        coolEps_q=True,
        sphericalDomain=True,
    )

    # Special directive specific to the mag amplitude problem. The sensitivity
    # weights are update between each iteration.
    ProjSpherical = directives.ProjectSphericalBounds()
    sensitivity_weights = directives.UpdateSensitivityWeights()
    update_Jacobi = directives.UpdatePreconditioner()

    self.inv = inversion.BaseInversion(
        invProb,
        directiveList=[
            ProjSpherical, IRLS, sensitivity_weights, update_Jacobi
        ],
    )
def run_inversion(
    self,
    maxIter=60,
    m0=0.0,
    mref=0.0,
    percentage=5,
    floor=0.1,
    chifact=1,
    beta0_ratio=1.0,
    coolingFactor=1,
    n_iter_per_beta=1,
    alpha_s=1.0,
    alpha_x=1.0,
    alpha_z=1.0,
    use_target=False,
    use_tikhonov=True,
    use_irls=False,
    p_s=2,
    p_x=2,
    p_y=2,
    p_z=2,
    beta_start=None,
):
    """
    Run the inversion for this app's simulation.

    Parameters
    ----------
    maxIter : int
        Maximum Gauss-Newton iterations.
    m0, mref : float
        Constant starting and reference model values (broadcast per cell).
    percentage, floor : float
        Relative (in percent) and absolute components of data uncertainty.
    chifact, beta0_ratio, coolingFactor, n_iter_per_beta : numbers
        Misfit target and beta-schedule controls.
    alpha_s, alpha_x, alpha_z : float
        Regularization weights.
    use_target, use_tikhonov, use_irls : bool
        Directive/regularization switches.
    p_s, p_x, p_y, p_z : float
        IRLS norms. NOTE(review): ``p_y`` is accepted but never used
        (``reg.norms`` only takes p_s, p_x, p_z) — confirm intent.
    beta_start : float or None
        If given, skip beta estimation and start from this beta.

    Returns
    -------
    tuple
        ``(model, pred, save)`` — list of iteration models (final appended),
        corresponding predicted data, and the save directive.
    """
    # percentage is given in percent, hence the 0.01 factor
    self.uncertainty = percentage * abs(self.data_prop.dobs) * 0.01 + floor

    m0 = np.ones(self.mesh_prop.nC) * m0
    mref = np.ones(self.mesh_prop.nC) * mref

    # BUG FIX: the original tested `if ~use_tikhonov:`. `~` is bitwise NOT,
    # so ~True == -2 and ~False == -1 are BOTH truthy — the Sparse branch
    # always ran and Tikhonov was unreachable. `not` is the boolean negation.
    if not use_tikhonov:
        reg = regularization.Sparse(
            self.mesh_prop,
            alpha_s=alpha_s,
            # NOTE(review): alpha_z feeds alpha_y — presumably a 2D (x, z)
            # mesh where "y" is the second axis; confirm.
            alpha_x=alpha_x,
            alpha_y=alpha_z,
            mref=mref,
            mapping=maps.IdentityMap(self.mesh_prop),
            cell_weights=self.mesh_prop.vol,
        )
    else:
        reg = regularization.Tikhonov(
            self.mesh_prop,
            alpha_s=alpha_s,
            alpha_x=alpha_x,
            alpha_y=alpha_z,
            mref=mref,
            mapping=maps.IdentityMap(self.mesh_prop),
        )

    dataObj = data.Data(self.survey_prop, dobs=self.dobs,
                        noise_floor=self.uncertainty)
    dmis = data_misfit.L2DataMisfit(simulation=self.simulation_prop,
                                    data=dataObj)
    dmis.W = 1.0 / self.uncertainty

    opt = optimization.ProjectedGNCG(maxIter=maxIter, maxIterCG=20)
    opt.lower = 0.0
    opt.remember("xc")
    opt.tolG = 1e-10
    opt.eps = 1e-10
    invProb = inverse_problem.BaseInvProblem(dmis, reg, opt)

    beta_schedule = directives.BetaSchedule(coolingFactor=coolingFactor,
                                            coolingRate=n_iter_per_beta)
    save = directives.SaveOutputEveryIteration()
    print(chifact)

    if use_irls:
        IRLS = directives.Update_IRLS(
            f_min_change=1e-4,
            minGNiter=1,
            silent=False,
            max_irls_iterations=40,
            beta_tol=5e-1,
            coolEpsFact=1.3,
            chifact_start=chifact,
        )
        if beta_start is None:
            directives_list = [
                directives.BetaEstimate_ByEig(beta0_ratio=beta0_ratio),
                IRLS,
                save,
            ]
        else:
            directives_list = [IRLS, save]
            invProb.beta = beta_start
        reg.norms = np.c_[p_s, p_x, p_z, 2]
    else:
        target = directives.TargetMisfit(chifact=chifact)
        directives_list = [
            directives.BetaEstimate_ByEig(beta0_ratio=beta0_ratio),
            beta_schedule,
            save,
        ]
        if use_target:
            directives_list.append(target)

    inv = inversion.BaseInversion(invProb, directiveList=directives_list)
    mopt = inv.run(m0)

    # Collect every iterate (remembered as "xc") plus the final model,
    # and forward-model each one for display.
    model = opt.recall("xc")
    model.append(mopt)
    pred = [self.simulation_prop.dpred(m) for m in model]
    return model, pred, save
def run(plotIt=True, cleanAfterRun=True):
    """
    Download the Chile gravity example, run an L2 then Lp (sparse)
    gravity inversion driven by the input file, and optionally plot
    slices of both recovered density models.

    Parameters
    ----------
    plotIt : bool
        Plot the observed data and model sections when True.
    cleanAfterRun : bool
        Remove the downloaded archive and extracted files when True.
    """
    # Start by downloading files from the remote repository
    # directory where the downloaded files are
    url = "https://storage.googleapis.com/simpeg/Chile_GRAV_4_Miller/Chile_GRAV_4_Miller.tar.gz"
    downloads = download(url, overwrite=True)
    basePath = downloads.split(".")[0]

    # unzip the tarfile
    tar = tarfile.open(downloads, "r")
    tar.extractall()
    tar.close()

    input_file = basePath + os.path.sep + "LdM_input_file.inp"
    # %% User input
    # Plotting parameters, max and min densities in g/cc
    vmin = -0.6
    vmax = 0.6

    # weight exponent for default weighting
    wgtexp = 3.0
    # %%
    # Read in the input file which included all parameters at once
    # (mesh, topo, model, survey, inv param, etc.)
    driver = GravityDriver_Inv(input_file)
    # %%
    # Now we need to create the survey and model information.

    # Access the mesh and survey information
    mesh = driver.mesh
    # survey = driver.survey
    data_object = driver.data
    # [survey, data_object] = driver.survey
    # BUG FIX: `survey = driver.survey` above was commented out, leaving
    # `survey` undefined where it is used below. The survey is attached
    # to the data object, so recover it from there.
    survey = data_object.survey

    # define gravity survey locations
    rxLoc = survey.source_field.receiver_list[0].locations

    # define gravity data and errors
    d = data_object.dobs

    # Get the active cells
    active = driver.activeCells
    nC = len(active)  # Number of active cells

    # Create active map to go from reduce set to full
    activeMap = maps.InjectActiveCells(mesh, active, -100)

    # Create static map
    static = driver.staticCells
    dynamic = driver.dynamicCells

    staticCells = maps.InjectActiveCells(None, dynamic, driver.m0[static], nC=nC)
    mstart = driver.m0[dynamic]

    # Get index of the center
    midx = int(mesh.nCx / 2)
    # %%
    # Now that we have a model and a survey we can build the linear system ...
    # Create the forward model operator
    simulation = gravity.simulation.Simulation3DIntegral(
        survey=survey, mesh=mesh, rhoMap=staticCells, actInd=active
    )

    # %% Create inversion objects
    reg = regularization.Sparse(
        mesh, indActive=active, mapping=staticCells, gradientType="total"
    )
    reg.mref = driver.mref[dynamic]
    reg.norms = np.c_[0.0, 1.0, 1.0, 1.0]
    # reg.norms = driver.lpnorms

    # Specify how the optimization will proceed
    opt = optimization.ProjectedGNCG(
        maxIter=20,
        lower=driver.bounds[0],
        upper=driver.bounds[1],
        maxIterLS=10,
        maxIterCG=20,
        tolCG=1e-4,
    )

    # Define misfit function (obs-calc)
    dmis = data_misfit.L2DataMisfit(data=data_object, simulation=simulation)

    # create the default L2 inverse problem from the above objects
    invProb = inverse_problem.BaseInvProblem(dmis, reg, opt)

    # Specify how the initial beta is found
    betaest = directives.BetaEstimate_ByEig(beta0_ratio=1e-2)

    # IRLS sets up the Lp inversion problem
    # Set the eps parameter in Line 11 of the
    # input file based on the distribution of model (DEFAULT = 95th %ile)
    IRLS = directives.Update_IRLS(
        f_min_change=1e-4, max_irls_iterations=40, coolEpsFact=1.5, beta_tol=5e-1
    )

    # Preconditioning refreshing for each IRLS iteration
    update_Jacobi = directives.UpdatePreconditioner()
    sensitivity_weights = directives.UpdateSensitivityWeights()

    # Create combined the L2 and Lp problem
    inv = inversion.BaseInversion(
        invProb, directiveList=[sensitivity_weights, IRLS, update_Jacobi, betaest]
    )

    # %%
    # Run L2 and Lp inversion
    mrec = inv.run(mstart)

    if cleanAfterRun:
        os.remove(downloads)
        shutil.rmtree(basePath)

    # %%
    if plotIt:
        # Plot observed data
        # The sign of the data is flipped here for the change of convention
        # between Cartesian coordinate system (internal SimPEG format that
        # expects "positive up" gravity signal) and traditional gravity data
        # conventions (positive down). For example a traditional negative
        # gravity anomaly is described as "positive up" in Cartesian
        # coordinates and hence the sign needs to be flipped for use in SimPEG.
        plot2Ddata(rxLoc, -d)

        # %%
        # Write output model and data files and print misfit stats.

        # reconstructing l2 model mesh with air cells and active dynamic cells
        L2out = activeMap * invProb.l2model

        # reconstructing lp model mesh with air cells and active dynamic cells
        Lpout = activeMap * mrec

        # %%
        # Plot out sections and histograms of the smooth l2 model.
        # The ind= parameter is the slice of the model from top down.
        yslice = midx + 1
        L2out[L2out == -100] = np.nan  # set "air" to nan

        plt.figure(figsize=(10, 7))
        plt.suptitle("Smooth Inversion: Depth weight = " + str(wgtexp))
        ax = plt.subplot(221)
        dat1 = mesh.plotSlice(
            L2out,
            ax=ax,
            normal="Z",
            ind=-16,
            clim=(vmin, vmax),
            pcolorOpts={"cmap": "bwr"},
        )
        plt.plot(
            np.array([mesh.vectorCCx[0], mesh.vectorCCx[-1]]),
            np.array([mesh.vectorCCy[yslice], mesh.vectorCCy[yslice]]),
            c="gray",
            linestyle="--",
        )
        plt.scatter(rxLoc[0:, 0], rxLoc[0:, 1], color="k", s=1)
        plt.title("Z: " + str(mesh.vectorCCz[-16]) + " m")
        plt.xlabel("Easting (m)")
        plt.ylabel("Northing (m)")
        plt.gca().set_aspect("equal", adjustable="box")
        cb = plt.colorbar(
            dat1[0], orientation="vertical", ticks=np.linspace(vmin, vmax, 4)
        )
        cb.set_label("Density (g/cc$^3$)")

        ax = plt.subplot(222)
        dat = mesh.plotSlice(
            L2out,
            ax=ax,
            normal="Z",
            ind=-27,
            clim=(vmin, vmax),
            pcolorOpts={"cmap": "bwr"},
        )
        plt.plot(
            np.array([mesh.vectorCCx[0], mesh.vectorCCx[-1]]),
            np.array([mesh.vectorCCy[yslice], mesh.vectorCCy[yslice]]),
            c="gray",
            linestyle="--",
        )
        plt.scatter(rxLoc[0:, 0], rxLoc[0:, 1], color="k", s=1)
        plt.title("Z: " + str(mesh.vectorCCz[-27]) + " m")
        plt.xlabel("Easting (m)")
        plt.ylabel("Northing (m)")
        plt.gca().set_aspect("equal", adjustable="box")
        # FIX: colorbar now references this subplot's own image (dat)
        # rather than the first one (dat1); visually identical since both
        # share the same clim and colormap.
        cb = plt.colorbar(
            dat[0], orientation="vertical", ticks=np.linspace(vmin, vmax, 4)
        )
        cb.set_label("Density (g/cc$^3$)")

        ax = plt.subplot(212)
        mesh.plotSlice(
            L2out,
            ax=ax,
            normal="Y",
            ind=yslice,
            clim=(vmin, vmax),
            pcolorOpts={"cmap": "bwr"},
        )
        plt.title("Cross Section")
        plt.xlabel("Easting(m)")
        plt.ylabel("Elevation")
        plt.gca().set_aspect("equal", adjustable="box")
        cb = plt.colorbar(
            dat1[0],
            orientation="vertical",
            ticks=np.linspace(vmin, vmax, 4),
            cmap="bwr",
        )
        cb.set_label("Density (g/cc$^3$)")

        # %%
        # Make plots of Lp model
        yslice = midx + 1
        Lpout[Lpout == -100] = np.nan  # set "air" to nan

        plt.figure(figsize=(10, 7))
        # raw strings so the LaTeX \epsilon is not treated as an escape
        plt.suptitle(
            "Compact Inversion: Depth weight = "
            + str(wgtexp)
            + r": $\epsilon_p$ = "
            + str(round(reg.eps_p, 1))
            + r": $\epsilon_q$ = "
            + str(round(reg.eps_q, 2))
        )
        ax = plt.subplot(221)
        dat = mesh.plotSlice(
            Lpout,
            ax=ax,
            normal="Z",
            ind=-16,
            clim=(vmin, vmax),
            pcolorOpts={"cmap": "bwr"},
        )
        plt.plot(
            np.array([mesh.vectorCCx[0], mesh.vectorCCx[-1]]),
            np.array([mesh.vectorCCy[yslice], mesh.vectorCCy[yslice]]),
            c="gray",
            linestyle="--",
        )
        plt.scatter(rxLoc[0:, 0], rxLoc[0:, 1], color="k", s=1)
        plt.title("Z: " + str(mesh.vectorCCz[-16]) + " m")
        plt.xlabel("Easting (m)")
        plt.ylabel("Northing (m)")
        plt.gca().set_aspect("equal", adjustable="box")
        cb = plt.colorbar(
            dat[0], orientation="vertical", ticks=np.linspace(vmin, vmax, 4)
        )
        cb.set_label("Density (g/cc$^3$)")

        ax = plt.subplot(222)
        dat = mesh.plotSlice(
            Lpout,
            ax=ax,
            normal="Z",
            ind=-27,
            clim=(vmin, vmax),
            pcolorOpts={"cmap": "bwr"},
        )
        plt.plot(
            np.array([mesh.vectorCCx[0], mesh.vectorCCx[-1]]),
            np.array([mesh.vectorCCy[yslice], mesh.vectorCCy[yslice]]),
            c="gray",
            linestyle="--",
        )
        plt.scatter(rxLoc[0:, 0], rxLoc[0:, 1], color="k", s=1)
        plt.title("Z: " + str(mesh.vectorCCz[-27]) + " m")
        plt.xlabel("Easting (m)")
        plt.ylabel("Northing (m)")
        plt.gca().set_aspect("equal", adjustable="box")
        cb = plt.colorbar(
            dat[0], orientation="vertical", ticks=np.linspace(vmin, vmax, 4)
        )
        cb.set_label("Density (g/cc$^3$)")

        ax = plt.subplot(212)
        dat = mesh.plotSlice(
            Lpout,
            ax=ax,
            normal="Y",
            ind=yslice,
            clim=(vmin, vmax),
            pcolorOpts={"cmap": "bwr"},
        )
        plt.title("Cross Section")
        plt.xlabel("Easting (m)")
        plt.ylabel("Elevation (m)")
        plt.gca().set_aspect("equal", adjustable="box")
        cb = plt.colorbar(
            dat[0], orientation="vertical", ticks=np.linspace(vmin, vmax, 4)
        )
        cb.set_label("Density (g/cc$^3$)")
def setUp(self):
    """
    Build the amplitude-inversion test fixture:

    1. forward-model TMI data over a magnetized block under Gaussian topo,
    2. invert an equivalent-source layer at the surface,
    3. forward bx/by/bz from that layer to get amplitude data,
    4. assemble a sparse amplitude inversion, stored on ``self.inv``.
    """
    # We will assume a vertical inducing field
    H0 = (50000.0, 90.0, 0.0)

    # The magnetization is set along a different direction (induced + remanence)
    M = np.array([45.0, 90.0])

    # Block with an effective susceptibility
    chi_e = 0.05

    # Create grid of points for topography
    # Lets create a simple Gaussian topo and set the active cells
    [xx, yy] = np.meshgrid(np.linspace(-200, 200, 50), np.linspace(-200, 200, 50))
    b = 100
    A = 50
    zz = A * np.exp(-0.5 * ((xx / b) ** 2.0 + (yy / b) ** 2.0))
    topo = np.c_[mkvc(xx), mkvc(yy), mkvc(zz)]

    # Create an array of observation points, 10 m above the topo surface
    xr = np.linspace(-100.0, 100.0, 20)
    yr = np.linspace(-100.0, 100.0, 20)
    X, Y = np.meshgrid(xr, yr)
    Z = A * np.exp(-0.5 * ((X / b) ** 2.0 + (Y / b) ** 2.0)) + 10

    # Create a MAGsurvey
    rxLoc = np.c_[mkvc(X.T), mkvc(Y.T), mkvc(Z.T)]
    rxList = magnetics.receivers.Point(rxLoc)
    srcField = magnetics.sources.SourceField(receiver_list=[rxList], parameters=H0)
    survey = magnetics.survey.Survey(srcField)

    ###############################################################################
    # Inversion Mesh
    # Create a tree mesh refined to the topographic surface
    h = [5, 5, 5]
    padDist = np.ones((3, 2)) * 100
    mesh = mesh_builder_xyz(
        rxLoc, h, padding_distance=padDist, depth_core=100, mesh_type="tree"
    )
    mesh = refine_tree_xyz(
        mesh, topo, method="surface", octree_levels=[4, 4], finalize=True
    )

    # Define an active cells from topo
    actv = utils.surface2ind_topo(mesh, topo)
    nC = int(actv.sum())

    # Convert the inclination declination to vector in Cartesian
    M_xyz = utils.mat_utils.dip_azimuth2cartesian(
        np.ones(nC) * M[0], np.ones(nC) * M[1]
    )

    # Get the indicies of the magnetized block
    ind = utils.model_builder.getIndicesBlock(
        np.r_[-20, -20, -10],
        np.r_[20, 20, 25],
        mesh.gridCC,
    )[0]

    # Assign magnetization value, inducing field strength will
    # be applied in by the :class:`SimPEG.PF.Magnetics` problem
    model = np.zeros(mesh.nC)
    model[ind] = chi_e

    # Remove air cells
    model = model[actv]

    # Creat reduced identity map
    idenMap = maps.IdentityMap(nP=nC)

    # Create the forward model operator
    simulation = magnetics.Simulation3DIntegral(
        survey=survey,
        mesh=mesh,
        chiMap=idenMap,
        actInd=actv,
        store_sensitivities="forward_only",
    )
    simulation.M = M_xyz

    # Compute some data and add some random noise
    synthetic_data = simulation.dpred(model)

    # Split the data in components
    nD = rxLoc.shape[0]

    std = 5  # nT
    synthetic_data += np.random.randn(nD) * std
    wd = np.ones(nD) * std

    # Assigne data and uncertainties to the survey
    data_object = data.Data(survey, dobs=synthetic_data, noise_floor=wd)

    ######################################################################
    # Equivalent Source

    # Get the active cells for equivalent source is the top only
    surf = utils.model_utils.surface_layer_index(mesh, topo)
    nC = np.count_nonzero(surf)  # Number of active cells
    mstart = np.ones(nC) * 1e-4

    # Create active map to go from reduce set to full
    surfMap = maps.InjectActiveCells(mesh, surf, np.nan)

    # Create identity map
    idenMap = maps.IdentityMap(nP=nC)

    # Create static map
    simulation = magnetics.simulation.Simulation3DIntegral(
        mesh=mesh,
        survey=survey,
        chiMap=idenMap,
        actInd=surf,
        store_sensitivities="ram",
    )
    simulation.model = mstart

    # Create a regularization function, in this case l2l2
    # alpha_z=0: no vertical smoothness needed for a single surface layer
    reg = regularization.Sparse(
        mesh, indActive=surf, mapping=maps.IdentityMap(nP=nC), alpha_z=0
    )
    reg.mref = np.zeros(nC)

    # Specify how the optimization will proceed, set susceptibility bounds to inf
    opt = optimization.ProjectedGNCG(
        maxIter=10,
        lower=-np.inf,
        upper=np.inf,
        maxIterLS=5,
        maxIterCG=5,
        tolCG=1e-3,
    )

    # Define misfit function (obs-calc)
    dmis = data_misfit.L2DataMisfit(simulation=simulation, data=data_object)

    # Create the default L2 inverse problem from the above objects
    invProb = inverse_problem.BaseInvProblem(dmis, reg, opt)

    # Specify how the initial beta is found
    betaest = directives.BetaEstimate_ByEig(beta0_ratio=2)

    # Target misfit to stop the inversion,
    # try to fit as much as possible of the signal, we don't want to lose anything
    IRLS = directives.Update_IRLS(
        f_min_change=1e-3, minGNiter=1, beta_tol=1e-1, max_irls_iterations=5
    )
    update_Jacobi = directives.UpdatePreconditioner()

    # Put all the parts together
    inv = inversion.BaseInversion(
        invProb, directiveList=[betaest, IRLS, update_Jacobi]
    )

    # Run the equivalent source inversion
    print("Solving for Equivalent Source")
    mrec = inv.run(mstart)

    ########################################################
    # Forward Amplitude Data
    # ----------------------
    #
    # Now that we have an equialent source layer, we can forward model alh three
    # components of the field and add them up:
    # :math:`|B| = \sqrt{( Bx^2 + Bx^2 + Bx^2 )}`
    #
    rxList = magnetics.receivers.Point(rxLoc, components=["bx", "by", "bz"])
    srcField = magnetics.sources.SourceField(receiver_list=[rxList], parameters=H0)
    surveyAmp = magnetics.survey.Survey(srcField)

    simulation = magnetics.simulation.Simulation3DIntegral(
        mesh=mesh,
        survey=surveyAmp,
        chiMap=idenMap,
        actInd=surf,
        is_amplitude_data=True,
        store_sensitivities="forward_only",
    )

    bAmp = simulation.fields(mrec)

    ######################################################################
    # Amplitude Inversion
    # -------------------
    #
    # Now that we have amplitude data, we can invert for an effective
    # susceptibility. This is a non-linear inversion.
    #
    # Create active map to go from reduce space to full
    actvMap = maps.InjectActiveCells(mesh, actv, -100)
    nC = int(actv.sum())

    # Create identity map
    idenMap = maps.IdentityMap(nP=nC)

    mstart = np.ones(nC) * 1e-4

    # Create the forward model operator
    simulation = magnetics.simulation.Simulation3DIntegral(
        survey=surveyAmp,
        mesh=mesh,
        chiMap=idenMap,
        actInd=actv,
        is_amplitude_data=True,
    )

    # NOTE(review): dobs=bAmp was produced with surveyAmp, but the data
    # object here wraps the original TMI `survey` — confirm this pairing
    # is intended (the misfit's simulation uses surveyAmp).
    data_obj = data.Data(survey, dobs=bAmp, noise_floor=wd)

    # Create a sparse regularization
    reg = regularization.Sparse(mesh, indActive=actv, mapping=idenMap)
    reg.norms = np.c_[1, 0, 0, 0]
    reg.mref = np.zeros(nC)

    # Data misfit function
    dmis = data_misfit.L2DataMisfit(simulation=simulation, data=data_obj)

    # Add directives to the inversion
    opt = optimization.ProjectedGNCG(
        maxIter=10, lower=0.0, upper=1.0, maxIterLS=5, maxIterCG=5, tolCG=1e-3
    )

    invProb = inverse_problem.BaseInvProblem(dmis, reg, opt)

    # Here is the list of directives
    betaest = directives.BetaEstimate_ByEig(beta0_ratio=1)

    # Specify the sparse norms
    IRLS = directives.Update_IRLS(
        max_irls_iterations=5,
        f_min_change=1e-3,
        minGNiter=1,
        coolingRate=1,
        beta_search=False,
    )

    # Special directive specific to the mag amplitude problem. The sensitivity
    # weights are update between each iteration.
    update_SensWeight = directives.UpdateSensitivityWeights()
    update_Jacobi = directives.UpdatePreconditioner()

    # Put all together
    self.inv = inversion.BaseInversion(
        invProb, directiveList=[update_SensWeight, betaest, IRLS, update_Jacobi]
    )

    self.mstart = mstart
    self.model = model
    self.sim = simulation
def setUp(self):
    """
    Build a sparse gravity-inversion fixture: a tensor mesh with Gaussian
    topo, a density block, synthetic data (generated under a dask
    process scheduler), and an IRLS inversion stored on ``self.inv``.
    """
    ndv = -100  # no-data value for inactive (air) cells

    # Create a self.mesh
    dx = 5.0
    hxind = [(dx, 5, -1.3), (dx, 5), (dx, 5, 1.3)]
    hyind = [(dx, 5, -1.3), (dx, 5), (dx, 5, 1.3)]
    hzind = [(dx, 5, -1.3), (dx, 6)]
    self.mesh = discretize.TensorMesh([hxind, hyind, hzind], "CCC")

    # Get index of the center
    midx = int(self.mesh.nCx / 2)
    midy = int(self.mesh.nCy / 2)

    # Lets create a simple Gaussian topo and set the active cells
    [xx, yy] = np.meshgrid(self.mesh.vectorNx, self.mesh.vectorNy)
    zz = -np.exp((xx**2 + yy**2) / 75**2) + self.mesh.vectorNz[-1]

    # Go from topo to actv cells
    topo = np.c_[utils.mkvc(xx), utils.mkvc(yy), utils.mkvc(zz)]
    actv = utils.surface2ind_topo(self.mesh, topo, "N")
    actv = np.where(actv)[0]

    # Create active map to go from reduce space to full
    self.actvMap = maps.InjectActiveCells(self.mesh, actv, -100)
    nC = len(actv)

    # Create and array of observation points
    xr = np.linspace(-20.0, 20.0, 20)
    yr = np.linspace(-20.0, 20.0, 20)
    X, Y = np.meshgrid(xr, yr)

    # Move the observation points 5m above the topo
    Z = -np.exp((X**2 + Y**2) / 75**2) + self.mesh.vectorNz[-1] + 5.0

    # Create a MAGsurvey
    locXYZ = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)]
    rxLoc = gravity.Point(locXYZ)
    srcField = gravity.SourceField([rxLoc])
    survey = gravity.Survey(srcField)

    # We can now create a density model and generate data
    # Here a simple block in half-space
    model = np.zeros((self.mesh.nCx, self.mesh.nCy, self.mesh.nCz))
    model[(midx - 2):(midx + 2), (midy - 2):(midy + 2), -6:-2] = 0.5
    model = utils.mkvc(model)
    self.model = model[actv]

    # Create active map to go from reduce set to full
    actvMap = maps.InjectActiveCells(self.mesh, actv, ndv)

    # Create reduced identity map
    idenMap = maps.IdentityMap(nP=nC)

    # Create the forward model operator
    sim = gravity.Simulation3DIntegral(
        self.mesh,
        survey=survey,
        rhoMap=idenMap,
        actInd=actv,
        store_sensitivities="ram",
    )

    # Compute linear forward operator and compute some data
    # computing sensitivities to ram is best using dask processes
    with dask.config.set(scheduler="processes"):
        data = sim.make_synthetic_data(
            self.model, relative_error=0.0, noise_floor=0.001, add_noise=True
        )

    print(sim.G)

    # Create a regularization
    reg = regularization.Sparse(self.mesh, indActive=actv, mapping=idenMap)
    reg.norms = np.c_[0, 0, 0, 0]
    reg.gradientType = "component"
    # reg.eps_p, reg.eps_q = 5e-2, 1e-2

    # Data misfit function
    dmis = data_misfit.L2DataMisfit(simulation=sim, data=data)

    # Add directives to the inversion
    opt = optimization.ProjectedGNCG(
        maxIter=100, lower=-1.0, upper=1.0, maxIterLS=20, maxIterCG=10, tolCG=1e-3
    )
    invProb = inverse_problem.BaseInvProblem(dmis, reg, opt, beta=1e8)

    # Here is where the norms are applied
    IRLS = directives.Update_IRLS(f_min_change=1e-4, minGNiter=1)
    update_Jacobi = directives.UpdatePreconditioner()
    sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False)
    self.inv = inversion.BaseInversion(
        invProb, directiveList=[IRLS, sensitivity_weights, update_Jacobi]
    )
    self.sim = sim
def setUp(self):
    """
    Build a sparse magnetics-inversion fixture on a tree mesh: vertical
    inducing field, Gaussian topo, a susceptible block, synthetic TMI
    data, and an IRLS inversion stored on ``self.inv``.
    """
    np.random.seed(0)  # deterministic noise for a repeatable test

    # First we need to define the direction of the inducing field
    # As a simple case, we pick a vertical inducing field of magnitude
    # 50,000nT.
    # From old convention, field orientation is given as an
    # azimuth from North (positive clockwise)
    # and dip from the horizontal (positive downward).
    H0 = (50000.0, 90.0, 0.0)

    # Create a mesh
    h = [5, 5, 5]
    padDist = np.ones((3, 2)) * 100
    nCpad = [2, 4, 2]

    # Create grid of points for topography
    # Lets create a simple Gaussian topo and set the active cells
    [xx, yy] = np.meshgrid(
        np.linspace(-200.0, 200.0, 50), np.linspace(-200.0, 200.0, 50)
    )
    b = 100
    A = 50
    zz = A * np.exp(-0.5 * ((xx / b) ** 2.0 + (yy / b) ** 2.0))

    # We would usually load a topofile
    topo = np.c_[utils.mkvc(xx), utils.mkvc(yy), utils.mkvc(zz)]

    # Create and array of observation points, 5 m above the topo surface
    xr = np.linspace(-100.0, 100.0, 20)
    yr = np.linspace(-100.0, 100.0, 20)
    X, Y = np.meshgrid(xr, yr)
    Z = A * np.exp(-0.5 * ((X / b) ** 2.0 + (Y / b) ** 2.0)) + 5

    # Create a MAGsurvey
    xyzLoc = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)]
    rxLoc = mag.Point(xyzLoc)
    srcField = mag.SourceField([rxLoc], parameters=H0)
    survey = mag.Survey(srcField)

    # self.mesh.finalize()
    self.mesh = meshutils.mesh_builder_xyz(
        xyzLoc,
        h,
        padding_distance=padDist,
        mesh_type="TREE",
    )

    self.mesh = meshutils.refine_tree_xyz(
        self.mesh,
        topo,
        method="surface",
        octree_levels=nCpad,
        octree_levels_padding=nCpad,
        finalize=True,
    )

    # Define an active cells from topo
    actv = utils.surface2ind_topo(self.mesh, topo)
    nC = int(actv.sum())

    # We can now create a susceptibility model and generate data
    # Lets start with a simple block in half-space
    self.model = utils.model_builder.addBlock(
        self.mesh.gridCC,
        np.zeros(self.mesh.nC),
        np.r_[-20, -20, -15],
        np.r_[20, 20, 20],
        0.05,
    )[actv]

    # Create active map to go from reduce set to full
    self.actvMap = maps.InjectActiveCells(self.mesh, actv, np.nan)

    # Creat reduced identity map
    idenMap = maps.IdentityMap(nP=nC)

    # Create the forward model operator
    sim = mag.Simulation3DIntegral(
        self.mesh,
        survey=survey,
        chiMap=idenMap,
        actInd=actv,
        store_sensitivities="ram",
    )
    self.sim = sim

    data = sim.make_synthetic_data(
        self.model, relative_error=0.0, noise_floor=1.0, add_noise=True
    )

    # Create a regularization
    reg = regularization.Sparse(self.mesh, indActive=actv, mapping=idenMap)
    reg.norms = np.c_[0, 0, 0, 0]
    reg.mref = np.zeros(nC)

    # Data misfit function
    dmis = data_misfit.L2DataMisfit(simulation=sim, data=data)

    # Add directives to the inversion
    opt = optimization.ProjectedGNCG(
        maxIter=10,
        lower=0.0,
        upper=10.0,
        maxIterLS=5,
        maxIterCG=5,
        tolCG=1e-4,
        stepOffBoundsFact=1e-4,
    )

    invProb = inverse_problem.BaseInvProblem(dmis, reg, opt, beta=1e6)

    # Here is where the norms are applied
    # Use pick a treshold parameter empirically based on the distribution of
    # model parameters
    IRLS = directives.Update_IRLS(
        f_min_change=1e-3, max_irls_iterations=20, beta_tol=1e-1, beta_search=False
    )
    update_Jacobi = directives.UpdatePreconditioner()
    sensitivity_weights = directives.UpdateSensitivityWeights()
    self.inv = inversion.BaseInversion(
        invProb, directiveList=[IRLS, sensitivity_weights, update_Jacobi]
    )
# Wrap the observed VRM data with its survey and uncertainty model.
data_vrm = data.Data(
    dobs=dobs, survey=survey_vrm, relative_error=rel_err, noise_floor=eps
)

# Setup and run inversion
dmis = data_misfit.L2DataMisfit(simulation=problem_inv, data=data_vrm)

# Sensitivity-based cell weights: column 2-norms of A, normalized to [0, 1].
# (The redundant `w = w` self-assignment from the original was removed.)
w = utils.mkvc((np.sum(np.array(problem_inv.A) ** 2, axis=0))) ** 0.5
w = w / np.max(w)

reg = regularization.SimpleSmall(mesh=mesh, indActive=actCells, cell_weights=w)
opt = optimization.ProjectedGNCG(
    maxIter=20, lower=0.0, upper=1e-2, maxIterLS=20, tolCG=1e-4
)
invProb = inverse_problem.BaseInvProblem(dmis, reg, opt)

# BUG FIX: this list was previously assigned to the name `directives`,
# shadowing the SimPEG `directives` module for the rest of the script.
directive_list = [
    directives.BetaSchedule(coolingFactor=2, coolingRate=1),
    directives.TargetMisfit(),
]
inv = inversion.BaseInversion(invProb, directiveList=directive_list)

xi_0 = 1e-3 * np.ones(actCells.sum())
xi_rec = inv.run(xi_0)

# Predict VRM response at all times for recovered model
survey_vrm.set_active_interval(0.0, 1.0)
fields_pre = problem_inv.dpred(xi_rec)
# normalized by the data's standard deviation. dmis = data_misfit.L2DataMisfit(simulation=simulation, data=data_object) # Define the regularization (model objective function). Here, 'p' defines the # the norm of the smallness term and 'q' defines the norm of the smoothness # term. reg = regularization.Sparse(mesh, mapping=model_map) reg.mref = starting_model p = 0 q = 0 reg.norms = np.c_[p, q] # Define how the optimization problem is solved. Here we will use an inexact # Gauss-Newton approach that employs the conjugate gradient solver. opt = optimization.ProjectedGNCG(maxIter=100, maxIterLS=20, maxIterCG=20, tolCG=1e-3) # Define the inverse problem inv_prob = inverse_problem.BaseInvProblem(dmis, reg, opt) ####################################################################### # Define Inversion Directives # --------------------------- # # Here we define any directives that are carried out during the inversion. This # includes the cooling schedule for the trade-off parameter (beta), stopping # criteria for the inversion and saving inversion results at each iteration. # # Apply and update sensitivity weighting as the model updates
# Define the data misfit. Here the data misfit is the L2 norm of the weighted # residual between the observed data and the data predicted for a given model. # Within the data misfit, the residual between predicted and observed data are # normalized by the data's standard deviation. dmis = data_misfit.L2DataMisfit(data=data_object, simulation=simulation) dmis.W = utils.sdiag(1 / uncertainties) # Define the regularization (model objective function). reg = regularization.Sparse(mesh, indActive=ind_active, mapping=model_map) reg.norms = np.c_[0, 2, 2, 2] # Define how the optimization problem is solved. Here we will use a projected # Gauss-Newton approach that employs the conjugate gradient solver. opt = optimization.ProjectedGNCG(maxIter=100, lower=-1.0, upper=1.0, maxIterLS=20, maxIterCG=10, tolCG=1e-3) # Here we define the inverse problem that is to be solved inv_prob = inverse_problem.BaseInvProblem(dmis, reg, opt) ####################################################################### # Define Inversion Directives # --------------------------- # # Here we define any directiveas that are carried out during the inversion. This # includes the cooling schedule for the trade-off parameter (beta), stopping # criteria for the inversion and saving inversion results at each iteration. #
def run(plotIt=True):
    """
    Straight-ray tomography example comparing an inversion with and
    without a known-volume misfit term, on a 2D unit-square mesh with a
    porosity block mapped to slowness via a self-consistent effective
    medium transform.
    """
    nC = 40
    de = 1.0
    h = np.ones(nC) * de / nC
    M = discretize.TensorMesh([h, h])

    # NOTE(review): mixes vectorCCy[0] with vectorCCx[-1]; valid here only
    # because the mesh is square (hx == hy) — confirm if the mesh changes.
    y = np.linspace(M.vectorCCy[0], M.vectorCCx[-1], int(np.floor(nC / 4)))
    rlocs = np.c_[0 * y + M.vectorCCx[-1], y]
    rx = tomo.Rx(rlocs)

    # one source per receiver elevation, on the opposite (left) edge
    source_list = [
        tomo.Src(location=np.r_[M.vectorCCx[0], yi], receiver_list=[rx]) for yi in y
    ]

    # phi model: porosity block phi1 inside, phi0 background
    phi0 = 0
    phi1 = 0.65
    phitrue = utils.model_builder.defineBlock(
        M.gridCC, [0.4, 0.6], [0.6, 0.4], [phi1, phi0]
    )

    knownVolume = np.sum(phitrue * M.vol)
    print("True Volume: {}".format(knownVolume))

    # Set up true conductivity model and plot the model transform
    sigma0 = np.exp(1)
    sigma1 = 1e4

    if plotIt:
        fig, ax = plt.subplots(1, 1)
        sigmaMapTest = maps.SelfConsistentEffectiveMedium(
            nP=1000, sigma0=sigma0, sigma1=sigma1, rel_tol=1e-1, maxIter=150
        )
        testphis = np.linspace(0.0, 1.0, 1000)
        sigetest = sigmaMapTest * testphis
        ax.semilogy(testphis, sigetest)
        ax.set_title("Model Transform")
        ax.set_xlabel("$\\varphi$")
        ax.set_ylabel("$\sigma$")

    sigmaMap = maps.SelfConsistentEffectiveMedium(M, sigma0=sigma0, sigma1=sigma1)

    # scale the slowness so it is on a ~linear scale
    slownessMap = maps.LogMap(M) * sigmaMap

    # set up the true sig model and log model dobs
    sigtrue = sigmaMap * phitrue
    # modt = Model.BaseModel(M);
    slownesstrue = slownessMap * phitrue  # true model (m = log(sigma))

    # set up the problem and survey
    survey = tomo.Survey(source_list)
    problem = tomo.Simulation(M, survey=survey, slownessMap=slownessMap)

    if plotIt:
        fig, ax = plt.subplots(1, 1)
        cb = plt.colorbar(M.plotImage(phitrue, ax=ax)[0], ax=ax)
        survey.plot(ax=ax)
        cb.set_label("$\\varphi$")

    # get observed data
    data = problem.make_synthetic_data(phitrue, relative_error=0.03, add_noise=True)
    dpred = problem.dpred(np.zeros(M.nC))

    # objective function pieces
    reg = regularization.Tikhonov(M)
    dmis = data_misfit.L2DataMisfit(simulation=problem, data=data)
    dmisVol = Volume(mesh=M, knownVolume=knownVolume)
    beta = 0.25
    maxIter = 15

    # without the volume regularization
    opt = optimization.ProjectedGNCG(maxIter=maxIter, lower=0.0, upper=1.0)
    opt.remember("xc")
    invProb = inverse_problem.BaseInvProblem(dmis, reg, opt, beta=beta)
    inv = inversion.BaseInversion(invProb)

    # start slightly off zero to keep the effective-medium map well-defined
    mopt1 = inv.run(np.zeros(M.nC) + 1e-16)
    print(
        "\nTotal recovered volume (no vol misfit term in inversion): "
        "{}".format(dmisVol(mopt1))
    )

    # with the volume regularization
    vol_multiplier = 9e4
    # NOTE(review): reg2 aliases reg (same object shared by both problems);
    # harmless here since Tikhonov is stateless across runs — confirm.
    reg2 = reg
    dmis2 = dmis + vol_multiplier * dmisVol
    opt2 = optimization.ProjectedGNCG(maxIter=maxIter, lower=0.0, upper=1.0)
    opt2.remember("xc")
    invProb2 = inverse_problem.BaseInvProblem(dmis2, reg2, opt2, beta=beta)
    inv2 = inversion.BaseInversion(invProb2)

    mopt2 = inv2.run(np.zeros(M.nC) + 1e-16)
    print("\nTotal volume (vol misfit term in inversion): {}".format(dmisVol(mopt2)))

    # plot results
    if plotIt:
        fig, ax = plt.subplots(1, 1)
        ax.plot(data.dobs)
        ax.plot(dpred)
        ax.plot(problem.dpred(mopt1), "o")
        ax.plot(problem.dpred(mopt2), "s")
        ax.legend(["dobs", "dpred0", "dpred w/o Vol", "dpred with Vol"])

        fig, ax = plt.subplots(1, 3, figsize=(16, 4))
        im0 = M.plotImage(phitrue, ax=ax[0])[0]
        im1 = M.plotImage(mopt1, ax=ax[1])[0]
        im2 = M.plotImage(mopt2, ax=ax[2])[0]

        for im in [im0, im1, im2]:
            im.set_clim([0.0, phi1])

        plt.colorbar(im0, ax=ax[0])
        plt.colorbar(im1, ax=ax[1])
        plt.colorbar(im2, ax=ax[2])

        ax[0].set_title("true, vol: {:1.3e}".format(knownVolume))
        ax[1].set_title(
            "recovered(no Volume term), vol: {:1.3e} ".format(dmisVol(mopt1))
        )
        ax[2].set_title(
            "recovered(with Volume term), vol: {:1.3e} ".format(dmisVol(mopt2))
        )

        plt.tight_layout()
# pre-conditioner update_Jacobi = directives.UpdatePreconditioner() # iteratively balance the scaling of the data misfits scaling_init = directives.ScalingMultipleDataMisfits_ByEig( chi0_ratio=[1.0, 100.0]) scale_schedule = directives.JointScalingSchedule(verbose=True) # Create inverse problem # Optimization # set lower and upper bounds lowerbound = np.r_[-2.0 * np.ones(actvMap.nP), 0.0 * np.ones(actvMap.nP)] upperbound = np.r_[0.0 * np.ones(actvMap.nP), 1e-1 * np.ones(actvMap.nP)] opt = optimization.ProjectedGNCG( maxIter=30, lower=lowerbound, upper=upperbound, maxIterLS=20, maxIterCG=100, tolCG=1e-4, ) # create inverse problem invProb = inverse_problem.BaseInvProblem(dmis, reg, opt) inv = inversion.BaseInversion( invProb, # directives: evaluate alphas (and data misfits scales) before beta directiveList=[ Alphas, scaling_init, beta, update_smallness, targets, scale_schedule,
def run(plotIt=True):
    """Run a joint homogeneous/heterogeneous magnetic susceptibility inversion.

    Builds a synthetic 3D magnetics problem on a tensor mesh, then inverts
    with a SumMap model: one part of the model is a 2-unit homogeneous
    (SurjectUnits) parameterization, the other is a voxel (heterogeneous)
    model, combined additively.

    Parameters
    ----------
    plotIt : bool, optional
        If True, open interactive 3D slicer plots of the true and recovered
        models (requires an interactive matplotlib backend).
    """
    # Inducing field: (amplitude [nT], inclination [deg], declination [deg])
    H0 = (50000.0, 90.0, 0.0)

    # Create a mesh
    dx = 5.0
    hxind = [(dx, 5, -1.3), (dx, 10), (dx, 5, 1.3)]
    hyind = [(dx, 5, -1.3), (dx, 10), (dx, 5, 1.3)]
    hzind = [(dx, 5, -1.3), (dx, 10)]
    mesh = discretize.TensorMesh([hxind, hyind, hzind], "CCC")

    # Create a simple Gaussian topography and set the active cells
    [xx, yy] = np.meshgrid(mesh.vectorNx, mesh.vectorNy)
    zz = -np.exp((xx ** 2 + yy ** 2) / 75 ** 2) + mesh.vectorNz[-1]

    # We would usually load a topofile
    topo = np.c_[utils.mkvc(xx), utils.mkvc(yy), utils.mkvc(zz)]

    # Go from topo to array of indices of active cells
    actv = utils.surface2ind_topo(mesh, topo, "N")
    actv = np.where(actv)[0]

    # Create an array of observation points
    xr = np.linspace(-20.0, 20.0, 20)
    yr = np.linspace(-20.0, 20.0, 20)
    X, Y = np.meshgrid(xr, yr)

    # Move the observation points 5m above the topo
    Z = -np.exp((X ** 2 + Y ** 2) / 75 ** 2) + mesh.vectorNz[-1] + 5.0

    # Create a MAG survey
    rxLoc = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)]
    rxLoc = magnetics.Point(rxLoc)
    srcField = magnetics.SourceField([rxLoc], parameters=H0)
    survey = magnetics.Survey(srcField)

    # We can now create a susceptibility model and generate data
    model = np.zeros(mesh.nC)

    # Change values in half the domain
    model[mesh.gridCC[:, 0] < 0] = 0.01

    # Add a block in half-space
    model = utils.model_builder.addBlock(
        mesh.gridCC, model, np.r_[-10, -10, 20], np.r_[10, 10, 40], 0.05
    )

    model = utils.mkvc(model)
    model = model[actv]

    # Create active map to go from reduced set to full
    actvMap = maps.InjectActiveCells(mesh, actv, np.nan)

    # Create reduced identity map
    idenMap = maps.IdentityMap(nP=len(actv))

    # Create the forward model operator (forward_only: sensitivities are not
    # stored, we only need synthetic data from this simulation)
    prob = magnetics.Simulation3DIntegral(
        mesh,
        survey=survey,
        chiMap=idenMap,
        actInd=actv,
        store_sensitivities="forward_only",
    )

    # Compute linear forward operator and compute some data
    data = prob.make_synthetic_data(
        model, relative_error=0.0, noise_floor=1, add_noise=True
    )

    # Create a homogeneous map for the two domains (x < 0 and x >= 0)
    domains = [mesh.gridCC[actv, 0] < 0, mesh.gridCC[actv, 0] >= 0]
    homogMap = maps.SurjectUnits(domains)

    # Create a wire map for a second model space, voxel based
    wires = maps.Wires(("homo", len(domains)), ("hetero", len(actv)))

    # Create Sum map: total susceptibility = homogeneous part + voxel part
    sumMap = maps.SumMap([homogMap * wires.homo, wires.hetero])

    # Create the forward model operator used for the inversion
    prob = magnetics.Simulation3DIntegral(
        mesh, survey=survey, chiMap=sumMap, actInd=actv, store_sensitivities="ram"
    )

    # Make depth weighting
    wr = np.zeros(sumMap.shape[1])
    print(prob.nC)
    # print(prob.M.shape) # why does this reset nC

    # Accessing G triggers computation/storage of the sensitivity matrix
    G = prob.G

    # Take the cell number out of the scaling.
    # Want to keep high sensitivity for large volumes
    scale = utils.sdiag(
        np.r_[utils.mkvc(1.0 / homogMap.P.sum(axis=0)), np.ones_like(actv)]
    )

    # Sensitivity-based weighting, one row of G at a time
    for ii in range(survey.nD):
        wr += (
            (prob.G[ii, :] * prob.chiMap.deriv(np.ones(sumMap.shape[1]) * 1e-4) * scale)
            / data.standard_deviation[ii]
        ) ** 2.0

    # Scale the model spaces independently
    wr[wires.homo.index] /= np.max(wires.homo * wr)
    wr[wires.hetero.index] /= np.max(wires.hetero * wr)
    wr = wr ** 0.5

    # Create a regularization
    # For the homogeneous model
    regMesh = discretize.TensorMesh([len(domains)])

    reg_m1 = regularization.Sparse(regMesh, mapping=wires.homo)
    reg_m1.cell_weights = wires.homo * wr
    reg_m1.norms = np.c_[0, 2, 2, 2]
    reg_m1.mref = np.zeros(sumMap.shape[1])

    # Regularization for the voxel model
    reg_m2 = regularization.Sparse(mesh, indActive=actv, mapping=wires.hetero)
    reg_m2.cell_weights = wires.hetero * wr
    reg_m2.norms = np.c_[0, 1, 1, 1]
    reg_m2.mref = np.zeros(sumMap.shape[1])

    reg = reg_m1 + reg_m2

    # Data misfit function
    dmis = data_misfit.L2DataMisfit(simulation=prob, data=data)

    # Add directives to the inversion
    opt = optimization.ProjectedGNCG(
        maxIter=100,
        lower=0.0,
        upper=1.0,
        maxIterLS=20,
        maxIterCG=10,
        tolCG=1e-3,
        tolG=1e-3,
        eps=1e-6,
    )
    invProb = inverse_problem.BaseInvProblem(dmis, reg, opt)
    betaest = directives.BetaEstimate_ByEig()

    # Here is where the norms are applied.
    # Pick a threshold parameter empirically based on the distribution of
    # model parameters
    IRLS = directives.Update_IRLS(f_min_change=1e-3, minGNiter=1)
    update_Jacobi = directives.UpdatePreconditioner()
    inv = inversion.BaseInversion(
        invProb, directiveList=[IRLS, betaest, update_Jacobi]
    )

    # Run the inversion
    m0 = np.ones(sumMap.shape[1]) * 1e-4  # Starting model
    prob.model = m0
    mrecSum = inv.run(m0)

    if plotIt:
        # True model
        mesh.plot_3d_slicer(
            actvMap * model,
            aspect="equal",
            zslice=30,
            pcolorOpts={"cmap": "inferno_r"},
            transparent="slider",
        )

        # Recovered model (mapped back through the SumMap)
        mesh.plot_3d_slicer(
            actvMap * sumMap * mrecSum,
            aspect="equal",
            zslice=30,
            pcolorOpts={"cmap": "inferno_r"},
            transparent="slider",
        )