def setUp(self):
    mesh = Mesh.TensorMesh([np.ones(n) * 5 for n in [10, 11, 12]], [0, 0, -30])
    x = np.linspace(5, 10, 3)
    XYZ = Utils.ndgrid(x, x, np.r_[0.])
    srcLoc = np.r_[0, 0, 0.]
    rxList0 = Survey.BaseRx(XYZ, 'exi')
    Src0 = Survey.BaseSrc([rxList0], loc=srcLoc)
    rxList1 = Survey.BaseRx(XYZ, 'bxi')
    Src1 = Survey.BaseSrc([rxList1], loc=srcLoc)
    rxList2 = Survey.BaseRx(XYZ, 'bxi')
    Src2 = Survey.BaseSrc([rxList2], loc=srcLoc)
    rxList3 = Survey.BaseRx(XYZ, 'bxi')
    Src3 = Survey.BaseSrc([rxList3], loc=srcLoc)
    Src4 = Survey.BaseSrc([rxList0, rxList1, rxList2, rxList3], loc=srcLoc)
    srcList = [Src0, Src1, Src2, Src3, Src4]
    survey = Survey.BaseSurvey(srcList=srcList)
    prob = Problem.BaseTimeProblem(mesh, timeSteps=[(10., 3), (20., 2)])
    survey.pair(prob)

    def alias(b, srcInd, timeInd):
        return self.F.mesh.edgeCurl.T * b + timeInd

    self.F = Problem.TimeFields(
        mesh, survey,
        knownFields={'b': 'F'},
        aliasFields={'e': ['b', 'E', alias]}
    )
    self.Src0 = Src0
    self.Src1 = Src1
    self.mesh = mesh
    self.XYZ = XYZ
def test_aliasFunction(self):
    # single source: the alias receives a one-element source-index list
    def alias(e, ind):
        self.assertTrue(ind[0] is self.Src0)
        return self.F.mesh.edgeCurl * e

    F = Problem.Fields(self.F.mesh, self.F.survey,
                       knownFields={'e': 'E'},
                       aliasFields={'b': ['e', 'F', alias]})
    e = np.random.rand(F.mesh.nE, 1)
    F[self.Src0, 'e'] = e
    F[self.Src0, 'b']

    # list of sources: the alias receives both sources, in order
    def alias(e, ind):
        self.assertTrue(type(ind) is list)
        self.assertTrue(ind[0] is self.Src0)
        self.assertTrue(ind[1] is self.Src1)
        return self.F.mesh.edgeCurl * e

    F = Problem.Fields(self.F.mesh, self.F.survey,
                       knownFields={'e': 'E'},
                       aliasFields={'b': ['e', 'F', alias]})
    e = np.random.rand(F.mesh.nE, 2)
    F[[self.Src0, self.Src1], 'e'] = e
    F[[self.Src0, self.Src1], 'b']
def setUp(self):
    mesh = Mesh.TensorMesh([np.ones(n) * 5 for n in [10, 11, 12]], [0, 0, -30])
    x = np.linspace(5, 10, 3)
    XYZ = Utils.ndgrid(x, x, np.r_[0.])
    srcLoc = np.r_[0., 0., 0.]
    rxList0 = Survey.BaseRx(XYZ, 'exi')
    Src0 = Survey.BaseSrc([rxList0], loc=srcLoc)
    rxList1 = Survey.BaseRx(XYZ, 'bxi')
    Src1 = Survey.BaseSrc([rxList1], loc=srcLoc)
    rxList2 = Survey.BaseRx(XYZ, 'bxi')
    Src2 = Survey.BaseSrc([rxList2], loc=srcLoc)
    rxList3 = Survey.BaseRx(XYZ, 'bxi')
    Src3 = Survey.BaseSrc([rxList3], loc=srcLoc)
    Src4 = Survey.BaseSrc([rxList0, rxList1, rxList2, rxList3], loc=srcLoc)
    srcList = [Src0, Src1, Src2, Src3, Src4]
    survey = Survey.BaseSurvey(srcList=srcList)
    self.D = Survey.Data(survey)
    self.F = Problem.Fields(
        mesh, survey,
        knownFields={'phi': 'CC', 'e': 'E', 'b': 'F'},
        dtype={'phi': float, 'e': complex, 'b': complex}
    )
    self.Src0 = Src0
    self.Src1 = Src1
    self.mesh = mesh
    self.XYZ = XYZ
def setUp(self):
    mesh = Mesh.TensorMesh([np.ones(n) * 5 for n in [10, 11, 12]], [0, 0, -30])
    x = np.linspace(5, 10, 3)
    XYZ = Utils.ndgrid(x, x, np.r_[0.])
    srcLoc = np.r_[0, 0, 0.]
    rxList0 = Survey.BaseRx(XYZ, 'exi')
    Src0 = Survey.BaseSrc([rxList0], loc=srcLoc)
    rxList1 = Survey.BaseRx(XYZ, 'bxi')
    Src1 = Survey.BaseSrc([rxList1], loc=srcLoc)
    rxList2 = Survey.BaseRx(XYZ, 'bxi')
    Src2 = Survey.BaseSrc([rxList2], loc=srcLoc)
    rxList3 = Survey.BaseRx(XYZ, 'bxi')
    Src3 = Survey.BaseSrc([rxList3], loc=srcLoc)
    Src4 = Survey.BaseSrc([rxList0, rxList1, rxList2, rxList3], loc=srcLoc)
    srcList = [Src0, Src1, Src2, Src3, Src4]
    survey = Survey.BaseSurvey(srcList=srcList)
    self.F = Problem.Fields(
        mesh, survey,
        knownFields={'e': 'E'},
        aliasFields={
            'b': ['e', 'F', (lambda e, ind: self.F.mesh.edgeCurl * e)]
        }
    )
    self.Src0 = Src0
    self.Src1 = Src1
    self.mesh = mesh
    self.XYZ = XYZ
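# A hedged sketch (not part of the original suite; the method name is hypothetical)
# of a test exercising the lambda alias defined in the setUp above: storing the
# known 'e' field and indexing 'b' evaluates mesh.edgeCurl * e through the alias,
# mirroring the pattern used in test_aliasFunction.
def test_lambda_alias_sketch(self):
    e = np.random.rand(self.F.mesh.nE, 1)
    self.F[self.Src0, 'e'] = e      # store the known field
    b = self.F[self.Src0, 'b']      # evaluated through the alias function
    # flatten both sides before comparing to avoid shape ambiguity
    self.assertTrue(
        np.allclose(Utils.mkvc(b), Utils.mkvc(self.mesh.edgeCurl * e))
    )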
def test_aliasFunction(self):
    nT = self.F.survey.prob.nT + 1
    count = [0]

    def alias(e, srcInd, timeInd):
        count[0] += 1
        self.assertTrue(srcInd[0] is self.Src0)
        return self.F.mesh.edgeCurl * e

    F = Problem.TimeFields(self.F.mesh, self.F.survey,
                           knownFields={'e': 'E'},
                           aliasFields={'b': ['e', 'F', alias]})
    e = np.random.rand(F.mesh.nE, 1, nT)
    F[self.Src0, 'e', :] = e
    F[self.Src0, 'b', :]
    # ensure that this is called for every time separately.
    self.assertTrue(count[0] == nT)

    e = np.random.rand(F.mesh.nE, 1, 1)
    F[self.Src0, 'e', 1] = e
    count[0] = 0
    F[self.Src0, 'b', 1]
    self.assertTrue(count[0] == 1)  # ensure that this is called only once.

    def alias(e, srcInd, timeInd):
        count[0] += 1
        self.assertTrue(type(srcInd) is list)
        self.assertTrue(srcInd[0] is self.Src0)
        self.assertTrue(srcInd[1] is self.Src1)
        return self.F.mesh.edgeCurl * e

    F = Problem.TimeFields(self.F.mesh, self.F.survey,
                           knownFields={'e': 'E'},
                           aliasFields={'b': ['e', 'F', alias]})
    e = np.random.rand(F.mesh.nE, 2, nT)
    F[[self.Src0, self.Src1], 'e', :] = e
    count[0] = 0
    F[[self.Src0, self.Src1], 'b', :]
    # ensure that this is called for every time separately.
    self.assertTrue(count[0] == nT)

    e = np.random.rand(F.mesh.nE, 2, 1)
    F[[self.Src0, self.Src1], 'e', 1] = e
    count[0] = 0
    F[[self.Src0, self.Src1], 'b', 1]
    self.assertTrue(count[0] == 1)  # ensure that this is called only once.
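# The setUp/test methods above target the pre-0.14 SimPEG namespace and assume
# module-level scaffolding roughly like the following (the class name here is a
# placeholder, not the original test class):
import unittest
import numpy as np
from SimPEG import Mesh, Survey, Problem, Utils


class FieldsTestSketch(unittest.TestCase):
    # the setUp / test_aliasFunction bodies above would live inside a
    # unittest.TestCase subclass such as this one
    pass


if __name__ == '__main__':
    unittest.main()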
def run(N=100, plotIt=True):
    np.random.seed(1)
    mesh = Mesh.TensorMesh([N])

    # kernel functions of the linear forward operator: decaying cosines
    nk = 20
    jk = np.linspace(1., 60., nk)
    p = -0.25
    q = 0.25

    def g(k):
        return (np.exp(p * jk[k] * mesh.vectorCCx) *
                np.cos(np.pi * q * jk[k] * mesh.vectorCCx))

    G = np.empty((nk, mesh.nC))
    for i in range(nk):
        G[i, :] = g(i)

    # blocky true model
    mtrue = np.zeros(mesh.nC)
    mtrue[mesh.vectorCCx > 0.3] = 1.
    mtrue[mesh.vectorCCx > 0.45] = -0.5
    mtrue[mesh.vectorCCx > 0.6] = 0

    prob = Problem.LinearProblem(mesh, G=G)
    survey = Survey.LinearSurvey()
    survey.pair(prob)
    survey.makeSyntheticData(mtrue, std=0.01)

    M = prob.mesh

    reg = Regularization.Tikhonov(mesh, alpha_s=1., alpha_x=1.)
    dmis = DataMisfit.l2_DataMisfit(survey)
    opt = Optimization.InexactGaussNewton(maxIter=60)
    invProb = InvProblem.BaseInvProblem(dmis, reg, opt)
    directives = [
        Directives.BetaEstimate_ByEig(beta0_ratio=1e-2),
        Directives.TargetMisfit()
    ]
    inv = Inversion.BaseInversion(invProb, directiveList=directives)
    m0 = np.zeros_like(survey.mtrue)

    mrec = inv.run(m0)

    if plotIt:
        fig, axes = plt.subplots(1, 2, figsize=(12 * 1.2, 4 * 1.2))
        for i in range(prob.G.shape[0]):
            axes[0].plot(prob.G[i, :])
        axes[0].set_title('Columns of matrix G')
        axes[1].plot(M.vectorCCx, survey.mtrue, 'b-')
        axes[1].plot(M.vectorCCx, mrec, 'r-')
        axes[1].legend(('True Model', 'Recovered Model'))
        axes[1].set_ylim([-2, 2])

    return prob, survey, mesh, mrec
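# This example function relies on module-level imports from the old SimPEG
# namespace; a hedged sketch of those imports and the __main__ guard the SimPEG
# example gallery typically uses to drive it:
import numpy as np
import matplotlib.pyplot as plt
from SimPEG import (
    Mesh, Problem, Survey, Regularization, DataMisfit,
    Optimization, InvProblem, Directives, Inversion
)

if __name__ == '__main__':
    run()
    plt.show()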
def get_problem_survey(self):
    prob = Problem.LinearProblem(self.mesh, G=self.G)
    survey = Survey.LinearSurvey()
    survey.pair(prob)
    return survey, prob
def setUp(self):
    mesh = Mesh.TensorMesh([10, 10])
    self.prob = Problem.BaseTimeProblem(mesh)
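# A hedged sketch (an assumption, not in the original suite) of how this fixture
# might be exercised: BaseTimeProblem accepts timeSteps as (dt, count) tuples, as
# in the TimeFields setUp above, and nT counts the expanded steps.
def test_timeSteps_sketch(self):
    self.prob.timeSteps = [(10., 3), (20., 2)]
    # three steps of 10 followed by two steps of 20 -> five steps in total
    self.assertEqual(self.prob.nT, 5)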
def run(N=100, plotIt=True):
    np.random.seed(1)

    std_noise = 1e-2

    mesh = Mesh.TensorMesh([N])

    m0 = np.ones(mesh.nC) * 1e-4
    mref = np.zeros(mesh.nC)

    nk = 20
    jk = np.linspace(1., 60., nk)
    p = -0.25
    q = 0.25

    def g(k):
        return (np.exp(p * jk[k] * mesh.vectorCCx) *
                np.cos(np.pi * q * jk[k] * mesh.vectorCCx))

    G = np.empty((nk, mesh.nC))
    for i in range(nk):
        G[i, :] = g(i)

    mtrue = np.zeros(mesh.nC)
    mtrue[mesh.vectorCCx > 0.3] = 1.
    mtrue[mesh.vectorCCx > 0.45] = -0.5
    mtrue[mesh.vectorCCx > 0.6] = 0

    prob = Problem.LinearProblem(mesh, G=G)
    survey = Survey.LinearSurvey()
    survey.pair(prob)
    survey.dobs = prob.fields(mtrue) + std_noise * np.random.randn(nk)

    wd = np.ones(nk) * std_noise

    # Distance weighting
    wr = np.sum(prob.G**2., axis=0)**0.5
    wr = wr / np.max(wr)

    dmis = DataMisfit.l2_DataMisfit(survey)
    dmis.Wd = 1. / wd

    betaest = Directives.BetaEstimate_ByEig(beta0_ratio=1e-2)

    reg = Regularization.Sparse(mesh)
    reg.mref = mref
    reg.cell_weights = wr
    reg.mref = np.zeros(mesh.nC)

    opt = Optimization.ProjectedGNCG(maxIter=100, lower=-2., upper=2.,
                                     maxIterLS=20, maxIterCG=10, tolCG=1e-3)
    invProb = InvProblem.BaseInvProblem(dmis, reg, opt)
    update_Jacobi = Directives.Update_lin_PreCond()

    # Set the IRLS directive, penalize the lowest 25 percentile of model values
    # Start with an l2-l2, then switch to lp-norms
    norms = [0., 0., 2., 2.]
    IRLS = Directives.Update_IRLS(norms=norms, prctile=25, maxIRLSiter=15,
                                  minGNiter=3)

    inv = Inversion.BaseInversion(invProb,
                                  directiveList=[IRLS, betaest, update_Jacobi])

    # Run inversion
    mrec = inv.run(m0)
    print("Final misfit:" + str(invProb.dmisfit.eval(mrec)))

    if plotIt:
        fig, axes = plt.subplots(1, 2, figsize=(12 * 1.2, 4 * 1.2))
        for i in range(prob.G.shape[0]):
            axes[0].plot(prob.G[i, :])
        axes[0].set_title('Columns of matrix G')

        axes[1].plot(mesh.vectorCCx, mtrue, 'b-')
        axes[1].plot(mesh.vectorCCx, reg.l2model, 'r-')
        # axes[1].legend(('True Model', 'Recovered Model'))
        axes[1].set_ylim(-1.0, 1.25)

        axes[1].plot(mesh.vectorCCx, mrec, 'k-', lw=2)
        axes[1].legend(('True Model', 'Smooth l2-l2',
                        'Sparse lp: {0}, lqx: {1}'.format(*reg.norms)),
                       fontsize=12)

    return prob, survey, mesh, mrec
def run(N=100, plotIt=True):
    np.random.seed(1)

    std_noise = 1e-2

    mesh = Mesh.TensorMesh([N])

    m0 = np.ones(mesh.nC) * 1e-4
    mref = np.zeros(mesh.nC)

    nk = 20
    jk = np.linspace(1., 60., nk)
    p = -0.25
    q = 0.25

    def g(k):
        return (
            np.exp(p*jk[k]*mesh.vectorCCx) *
            np.cos(np.pi*q*jk[k]*mesh.vectorCCx)
        )

    G = np.empty((nk, mesh.nC))
    for i in range(nk):
        G[i, :] = g(i)

    mtrue = np.zeros(mesh.nC)
    mtrue[mesh.vectorCCx > 0.3] = 1.
    mtrue[mesh.vectorCCx > 0.45] = -0.5
    mtrue[mesh.vectorCCx > 0.6] = 0

    prob = Problem.LinearProblem(mesh, G=G)
    survey = Survey.LinearSurvey()
    survey.pair(prob)
    survey.dobs = prob.fields(mtrue) + std_noise * np.random.randn(nk)

    wd = np.ones(nk) * std_noise

    # Distance weighting
    wr = np.sum(prob.getJ(m0)**2., axis=0)**0.5
    wr = wr/np.max(wr)

    dmis = DataMisfit.l2_DataMisfit(survey)
    dmis.W = 1./wd

    betaest = Directives.BetaEstimate_ByEig(beta0_ratio=1e0)

    # Create a reduced identity map
    idenMap = Maps.IdentityMap(nP=mesh.nC)

    reg = Regularization.Sparse(mesh, mapping=idenMap)
    reg.mref = mref
    reg.cell_weights = wr
    reg.norms = np.c_[0., 0., 2., 2.]
    reg.mref = np.zeros(mesh.nC)

    opt = Optimization.ProjectedGNCG(
        maxIter=100, lower=-2., upper=2.,
        maxIterLS=20, maxIterCG=10, tolCG=1e-3
    )
    invProb = InvProblem.BaseInvProblem(dmis, reg, opt)
    update_Jacobi = Directives.UpdatePreconditioner()

    # Set the IRLS directive, penalize the lowest 25 percentile of model values
    # Start with an l2-l2, then switch to lp-norms
    IRLS = Directives.Update_IRLS(
        maxIRLSiter=40, minGNiter=1, f_min_change=1e-4
    )
    saveDict = Directives.SaveOutputEveryIteration(save_txt=False)

    inv = Inversion.BaseInversion(
        invProb, directiveList=[IRLS, betaest, update_Jacobi, saveDict]
    )

    # Run inversion
    mrec = inv.run(m0)
    print("Final misfit:" + str(invProb.dmisfit(mrec)))

    if plotIt:
        fig, axes = plt.subplots(2, 2, figsize=(12*1.2, 8*1.2))

        for i in range(prob.G.shape[0]):
            axes[0, 0].plot(prob.G[i, :])
        axes[0, 0].set_title('Columns of matrix G')

        axes[0, 1].plot(mesh.vectorCCx, mtrue, 'b-')
        axes[0, 1].plot(mesh.vectorCCx, invProb.l2model, 'r-')
        # axes[0, 1].legend(('True Model', 'Recovered Model'))
        axes[0, 1].set_ylim(-1.0, 1.25)

        axes[0, 1].plot(mesh.vectorCCx, mrec, 'k-', lw=2)
        axes[0, 1].legend(
            (
                'True Model',
                'Smooth l2-l2',
                'Sparse norms: {0}'.format(*reg.norms)
            ),
            fontsize=12
        )

        axes[1, 1].plot(saveDict.phi_d, 'k', lw=2)

        twin = axes[1, 1].twinx()
        twin.plot(saveDict.phi_m, 'k--', lw=2)
        axes[1, 1].plot(
            np.r_[IRLS.iterStart, IRLS.iterStart],
            np.r_[0, np.max(saveDict.phi_d)], 'k:'
        )
        axes[1, 1].text(
            IRLS.iterStart, 0., 'IRLS Start', va='bottom', ha='center',
            rotation='vertical', size=12, bbox={'facecolor': 'white'}
        )

        axes[1, 1].set_ylabel(r'$\phi_d$', size=16, rotation=0)
        axes[1, 1].set_xlabel('Iterations', size=14)
        axes[1, 0].axis('off')
        twin.set_ylabel(r'$\phi_m$', size=16, rotation=0)

    return prob, survey, mesh, mrec
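# Like the earlier example, this function assumes the SimPEG modules (including
# Maps) and matplotlib are imported at module level; a sketch of driving it and
# keeping the returned objects for further inspection:
if __name__ == '__main__':
    prob, survey, mesh, mrec = run()
    plt.show()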