def init_model(model_dir, toml_file):

    # Switch to the working directory
    os.chdir(model_dir)
    assert (model_dir / toml_file).exists()

    # Get the relevant filenames from test case
    params = config.ConfigParser(toml_file, model_dir)
    dd = params.io.input_dir
    data_file = params.io.data_file

    # Read the input data & mesh
    indata = inout.InputData(params)
    inmesh = fice.mesh.get_mesh(params)

    # Create the model object
    return model.model(inmesh, indata, params,
                       init_fields=False, init_vel_obs=False)
def run_momsolve(config_file):

    # Read run config file
    params = ConfigParser(config_file)
    log = inout.setup_logging(params)
    input_data = inout.InputData(params)

    # Get model mesh
    mesh = fice_mesh.get_mesh(params)

    # Initialize model
    mdl = model.model(mesh, input_data, params, init_vel_obs=False)

    # Get alpha from file
    mdl.alpha_from_data()

    try:
        Bglen = mdl.input_data.interpolate("Bglen", mdl.M)
        mdl.init_beta(mdl.bglen_to_beta(Bglen), False)
    except (AttributeError, KeyError) as e:
        log.warning('Using default bglen (constant)')

    # Forward Solve
    slvr = solver.ssa_solver(mdl)
    slvr.def_mom_eq()
    slvr.solve_mom_eq()

    # Output model variables in ParaView+Fenics friendly format
    outdir = params.io.output_dir

    h5file = HDF5File(mesh.mpi_comm(), str(Path(outdir)/'U.h5'), 'w')
    h5file.write(slvr.U, 'U')
    h5file.write(mesh, 'mesh')
    h5file.attributes('mesh')['periodic'] = params.mesh.periodic_bc

    inout.write_variable(slvr.U, params)
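# A minimal command-line driver sketch for a run function such as run_momsolve()
# above, assuming the script is invoked as `python run_momsolve.py <config.toml>`.
# The argument handling here is illustrative only; the project's real entry
# points may differ.
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Run the momentum solve phase")
    parser.add_argument("config_file", help="Path to the run's TOML configuration file")
    args = parser.parse_args()

    run_momsolve(args.config_file)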
def run_inv(config_file):
    """Run the inversion part of the simulation"""

    # Read run config file
    params = ConfigParser(config_file)
    inout.setup_logging(params)
    inout.log_preamble("inverse", params)

    # Load the static model data (geometry, smb, etc)
    input_data = inout.InputData(params)

    # Get the model mesh
    mesh = fice_mesh.get_mesh(params)
    mdl = model.model(mesh, input_data, params)

    # pts_lengthscale = params.obs.pts_len

    mdl.gen_alpha()

    # Add random noise to Beta field iff we're inverting for it
    mdl.bglen_from_data()
    mdl.init_beta(mdl.bglen_to_beta(mdl.bglen), pert=False)

    # Next line will output the initial guess for alpha fed into the inversion
    # File(os.path.join(outdir,'alpha_initguess.pvd')) << mdl.alpha

    #####################
    # Run the Inversion #
    #####################

    slvr = solver.ssa_solver(mdl)
    slvr.inversion()

    ##############################################
    #  Write out variables in outdir and         #
    #  diagnostics folder                        #
    ##############################################

    phase_name = params.inversion.phase_name
    phase_suffix = params.inversion.phase_suffix
    outdir = Path(params.io.output_dir) / phase_name / phase_suffix
    diag_dir = Path(params.io.diagnostics_dir)

    # Required for next phase (HDF5):

    invout_file = params.io.inversion_file

    phase_suffix = params.inversion.phase_suffix
    if len(phase_suffix) > 0:
        invout_file = params.io.run_name + phase_suffix + '_invout.h5'

    invout = HDF5File(mesh.mpi_comm(), str(outdir / invout_file), 'w')

    invout.parameters.add("gamma_alpha", slvr.gamma_alpha)
    invout.parameters.add("delta_alpha", slvr.delta_alpha)
    invout.parameters.add("gamma_beta", slvr.gamma_beta)
    invout.parameters.add("delta_beta", slvr.delta_beta)
    invout.parameters.add("delta_beta_gnd", slvr.delta_beta_gnd)
    invout.parameters.add("timestamp", str(datetime.datetime.now()))

    invout.write(mdl.alpha, 'alpha')
    invout.write(mdl.beta, 'beta')

    # For visualisation (XML & VTK):
    if params.io.write_diagnostics:

        inout.write_variable(slvr.U, params, outdir=diag_dir,
                             phase_name=phase_name, phase_suffix=phase_suffix)
        inout.write_variable(mdl.beta, params, outdir=diag_dir,
                             phase_name=phase_name, phase_suffix=phase_suffix)

        mdl.beta_bgd.rename("beta_bgd", "")
        inout.write_variable(mdl.beta_bgd, params, outdir=diag_dir,
                             phase_name=phase_name, phase_suffix=phase_suffix)

        inout.write_variable(mdl.bed, params, outdir=diag_dir,
                             phase_name=phase_name, phase_suffix=phase_suffix)

        H = project(mdl.H, mdl.M)
        H.rename("thick", "")
        inout.write_variable(H, params, outdir=diag_dir,
                             phase_name=phase_name, phase_suffix=phase_suffix)

        fl_ex = project(slvr.float_conditional(H), mdl.M)
        inout.write_variable(fl_ex, params, name='float', outdir=diag_dir,
                             phase_name=phase_name, phase_suffix=phase_suffix)

        inout.write_variable(mdl.mask_vel_M, params, name="mask_vel", outdir=diag_dir,
                             phase_name=phase_name, phase_suffix=phase_suffix)

        inout.write_variable(mdl.u_obs_Q, params, outdir=diag_dir,
                             phase_name=phase_name, phase_suffix=phase_suffix)
        inout.write_variable(mdl.v_obs_Q, params, outdir=diag_dir,
                             phase_name=phase_name, phase_suffix=phase_suffix)
        inout.write_variable(mdl.u_std_Q, params, outdir=diag_dir,
                             phase_name=phase_name, phase_suffix=phase_suffix)
        inout.write_variable(mdl.v_std_Q, params, outdir=diag_dir,
                             phase_name=phase_name, phase_suffix=phase_suffix)

        U_obs = project((mdl.v_obs_Q**2 + mdl.u_obs_Q**2)**(1.0 / 2.0), mdl.M)
        U_obs.rename("uv_obs", "")
        inout.write_variable(U_obs, params, name="uv_obs", outdir=diag_dir,
                             phase_name=phase_name, phase_suffix=phase_suffix)

        inout.write_variable(mdl.alpha, params, outdir=diag_dir,
                             phase_name=phase_name, phase_suffix=phase_suffix)

        Bglen = project(slvr.beta_to_bglen(slvr.beta), mdl.M)
        Bglen.rename("Bglen", "")
        inout.write_variable(Bglen, params, outdir=diag_dir,
                             phase_name=phase_name, phase_suffix=phase_suffix)

        inout.write_variable(slvr.bmelt, params, name="bmelt", outdir=diag_dir,
                             phase_name=phase_name, phase_suffix=phase_suffix)
        inout.write_variable(slvr.smb, params, name="smb", outdir=diag_dir,
                             phase_name=phase_name, phase_suffix=phase_suffix)
        inout.write_variable(mdl.surf, params, name="surf", outdir=diag_dir,
                             phase_name=phase_name, phase_suffix=phase_suffix)

    return mdl
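# Hedged sketch of how a later phase could read back the inversion output
# written above. In practice mdl.alpha_from_inversion() / mdl.beta_from_inversion()
# encapsulate this; the sketch assumes the same dolfin/pathlib imports as the
# functions above and a function space `Qp` matching the one used for alpha/beta.
def read_inversion_output(mesh, params, Qp):
    phase_name = params.inversion.phase_name
    phase_suffix = params.inversion.phase_suffix
    outdir = Path(params.io.output_dir) / phase_name / phase_suffix

    invout_file = params.io.inversion_file
    if len(phase_suffix) > 0:
        invout_file = params.io.run_name + phase_suffix + '_invout.h5'

    alpha, beta = Function(Qp), Function(Qp)
    with HDF5File(mesh.mpi_comm(), str(outdir / invout_file), 'r') as invout:
        invout.read(alpha, 'alpha')
        invout.read(beta, 'beta')
    return alpha, beta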
def run_invsigma(config_file):
    """Compute control sigma values from eigendecomposition"""

    comm = MPI.comm_world
    rank = comm.rank

    # Read run config file
    params = ConfigParser(config_file)

    # Setup logging
    log = inout.setup_logging(params)
    inout.log_preamble("inv sigma", params)

    outdir = params.io.output_dir
    diags_dir = params.io.diagnostics_dir

    # Load the static model data (geometry, smb, etc)
    input_data = inout.InputData(params)

    # Eigen decomposition params
    phase_suffix_e = params.eigendec.phase_suffix
    eigendir = Path(outdir)/params.eigendec.phase_name/phase_suffix_e
    lamfile = params.io.eigenvalue_file
    vecfile = params.io.eigenvecs_file
    threshlam = params.eigendec.eigenvalue_thresh

    if len(phase_suffix_e) > 0:
        lamfile = params.io.run_name + phase_suffix_e + '_eigvals.p'
        vecfile = params.io.run_name + phase_suffix_e + '_vr.h5'

    # Get model mesh
    mesh = fice_mesh.get_mesh(params)

    # Define the model (only need alpha & beta though)
    mdl = model.model(mesh, input_data, params, init_fields=True)

    # Load alpha/beta fields
    mdl.alpha_from_inversion()
    mdl.beta_from_inversion()
    mdl.bglen_from_data(mask_only=True)

    # Setup our solver object
    slvr = solver.ssa_solver(mdl, mixed_space=params.inversion.dual)

    cntrl = slvr.get_control()[0]
    space = slvr.get_control_space()

    # sigma_old, sigma_prior_old = [Function(space) for i in range(3)]
    x, y, z = [Function(space) for i in range(3)]

    # Regularization operator using inversion delta/gamma values
    Prior = mdl.get_prior()
    reg_op = Prior(slvr, space)

    # Load the eigenvalues
    with open(os.path.join(eigendir, lamfile), 'rb') as ff:
        eigendata = pickle.load(ff)
        lam = eigendata[0].real.astype(np.float64)
        nlam = len(lam)

    # Check if eigendecomposition successfully produced num_eig
    # or if some are NaN
    if np.any(np.isnan(lam)):
        nlam = np.argwhere(np.isnan(lam))[0][0]
        lam = lam[:nlam]

    # Read in the eigenvectors and check they are normalised
    # w.r.t. the prior (i.e. the B matrix in our GHEP)
    eps = params.constants.float_eps
    W = []
    with HDF5File(comm, os.path.join(eigendir, vecfile), 'r') as hdf5data:
        for i in range(nlam):
            w = Function(space)
            hdf5data.read(w, f'v/vector_{i}')

            print(f"Getting eigenvector {i} of {nlam}")

            # # Test norm in prior == 1.0
            # reg_op.action(w.vector(), y.vector())
            # norm_in_prior = w.vector().inner(y.vector())
            # assert (abs(norm_in_prior - 1.0) < eps)

            W.append(w)

    # Which eigenvalues are larger than our threshold?
    pind = np.flatnonzero(lam > threshlam)
    lam = lam[pind]
    W = [W[i] for i in pind]

    # this is a diagonal matrix but we only ever address it element-wise
    # bit of a waste of space.
    D = np.diag(lam / (lam + 1))

    # TODO make this a model method
    cntrl_names = []
    if params.inversion.alpha_active:
        cntrl_names.append("alpha")
    if params.inversion.beta_active:
        cntrl_names.append("beta")
    dual = params.inversion.dual

    ############################################
    # Isaac Eq. 20
    # P2 = prior
    # P1 = WDW
    # Note - don't think we're considering the cross terms
    # in the posterior covariance.

    # Generate patches of cells for computing invsigma
    clust_fun, npatches = patch_fun(mesh, params)

    # Create standard & mixed DG spaces
    dg_space = FunctionSpace(mesh, 'DG', 0)
    if(dual):
        dg_el = FiniteElement("DG", mesh.ufl_cell(), 0)
        mixedEl = dg_el * dg_el
        dg_out_space = FunctionSpace(mesh, mixedEl)
    else:
        dg_out_space = dg_space

    sigmas = [Function(dg_space) for i in range(len(cntrl_names))]
    sigma_priors = [Function(dg_space) for i in range(len(cntrl_names))]

    indic_1 = Function(dg_space)
    indic = Function(dg_out_space)

    test = TestFunction(space)

    neg_flag = 0
    for i in range(npatches):

        print(f"Working on patch {i+1} of {npatches}")

        # Create DG indicator function for patch i
        indic_1.vector()[:] = (clust_fun.vector()[:] == i).astype(int)
        indic_1.vector().apply("insert")

        # Loop alpha & beta as appropriate
        for j in range(len(cntrl_names)):

            if(dual):
                indic.vector()[:] = 0.0
                indic.vector().apply("insert")
                assign(indic.sub(j), indic_1)
            else:
                assign(indic, indic_1)

            clust_lump = assemble(inner(indic, test)*dx)
            patch_area = clust_lump.sum()  # Duplicate work here...

            clust_lump /= patch_area

            # Prior variance
            reg_op.inv_action(clust_lump, x.vector())
            cov_prior = x.vector().inner(clust_lump)

            # P_i^T W D W^T P_i
            # P_i is clust_lump
            # P_i^T has dims [1 x M], W has dims [M x N]
            # where N is num eigs & M is size of ev function space
            PiW = np.asarray([clust_lump.inner(w.vector()) for w in W])

            # PiW & PiWD are [1 x N]
            PiWD = PiW * D.diagonal()
            # PiWDWPi, [1 x N] * [N x 1]
            PiWDWPi = np.inner(PiWD, PiW)  # np.inner OK here because already parallel reduced

            cov_reduction = PiWDWPi
            cov_post = cov_prior - cov_reduction

            if cov_post < 0:
                log.warning(f'WARNING: Negative Sigma: {cov_post}')
                log.warning('Setting as Zero and Continuing.')
                neg_flag = 1
                continue

            # NB: "+=" here but each DOF will only be contributed to *once*
            # Essentially we are constructing the sigmas functions from
            # non-overlapping patches.
            sigmas[j].vector()[:] += indic_1.vector()[:] * np.sqrt(cov_post)
            sigmas[j].vector().apply("insert")

            sigma_priors[j].vector()[:] += indic_1.vector()[:] * np.sqrt(cov_prior)
            sigma_priors[j].vector().apply("insert")

    if neg_flag:
        log.warning('Negative value(s) of sigma encountered. '
                    'Examine the range of eigenvalues and check if '
                    'the threshlam parameter is set appropriately.')

    # # Previous approach for comparison
    # #####################################

    # # Isaac Eq. 20
    # # P2 = prior
    # # P1 = WDW
    # # Note - don't think we're considering the cross terms
    # # in the posterior covariance.

    # # TODO - this isn't particularly well parallelised - can it be improved?
    # neg_flag = 0
    # for j in range(space.dim()):

    #     # Who owns this DOF?
    #     own_idx = y.vector().owns_index(j)
    #     ownership = np.where(comm.allgather(own_idx))[0]
    #     assert len(ownership) == 1
    #     idx_root = ownership[0]

    #     # Prior (P2)
    #     y.vector().zero()
    #     y.vector().vec().setValue(j, 1.0)
    #     y.vector().apply('insert')
    #     reg_op.inv_action(y.vector(), x.vector())
    #     P2 = x

    #     # WDW (P1) ~ lam * V_r**2
    #     tmp2 = np.asarray([D[i, i] * w.vector().vec().getValue(j) for i, w in enumerate(W)])
    #     tmp2 = comm.bcast(tmp2, root=idx_root)
    #     P1 = Function(space)
    #     for tmp, w in zip(tmp2, W):
    #         P1.vector().axpy(tmp, w.vector())

    #     P_vec = P2.vector() - P1.vector()

    #     # Extract jth component & save
    #     # TODO why does this need to be communicated here? surely owning proc
    #     # just inserts?
    #     dprod = comm.bcast(P_vec.vec().getValue(j), root=idx_root)
    #     dprod_prior = comm.bcast(P2.vector().vec().getValue(j), root=idx_root)

    #     if dprod < 0:
    #         log.warning(f'WARNING: Negative Sigma: {dprod}')
    #         log.warning('Setting as Zero and Continuing.')
    #         neg_flag = 1
    #         continue

    #     sigma_old.vector().vec().setValue(j, np.sqrt(dprod))
    #     sigma_prior_old.vector().vec().setValue(j, np.sqrt(dprod_prior))

    # sigma_old.vector().apply("insert")
    # sigma_prior_old.vector().apply("insert")

    # For testing - whole thing at once:
    # wdw = (np.matrix(W) * np.matrix(D) * np.matrix(W).T)
    # wdw[:,0] == P1 for j = 0

    # if neg_flag:
    #     log.warning('Negative value(s) of sigma encountered.'
    #                 'Examine the range of eigenvalues and check if '
    #                 'the threshlam paramater is set appropriately.')

    # Write sigma & sigma_prior to files
    # sigma_var_name = "_".join((cntrl.name(), "sigma"))
    # sigma_prior_var_name = "_".join((cntrl.name(), "sigma_prior"))

    # sigma_old.rename(sigma_var_name, "")
    # sigma_prior_old.rename(sigma_prior_var_name, "")

    # inout.write_variable(sigma_old, params,
    #                      name=sigma_var_name+"_old")
    # inout.write_variable(sigma_prior_old, params,
    #                      name=sigma_prior_var_name+"_old")

    for i, name in enumerate(cntrl_names):
        sigmas[i].rename("sigma_"+name, "")
        sigma_priors[i].rename("sigma_prior_"+name, "")

        phase_suffix_sigma = params.inv_sigma.phase_suffix
        inout.write_variable(sigmas[i], params, outdir=outdir,
                             phase_name=params.inv_sigma.phase_name,
                             phase_suffix=phase_suffix_sigma)
        inout.write_variable(sigma_priors[i], params, outdir=outdir,
                             phase_name=params.inv_sigma.phase_name,
                             phase_suffix=phase_suffix_sigma)

    mdl.cntrl_sigma = sigmas
    mdl.cntrl_sigma_prior = sigma_priors

    return mdl
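# Pure-NumPy sketch of the patch variance calculation above (Isaac Eq. 20):
# for a normalised patch indicator p, the posterior variance is
#     sigma^2 = p^T C_prior p - (p^T W) D (W^T p),   D = diag(lam / (lam + 1)),
# i.e. the prior variance minus a low-rank reduction from the retained
# eigenpairs. All data below is synthetic and purely illustrative.
import numpy as np

rng = np.random.default_rng(0)
m, n = 50, 10                                       # DOFs, retained eigenvectors
C_prior = np.eye(m)                                 # stand-in for reg_op.inv_action
W = np.linalg.qr(rng.standard_normal((m, n)))[0]    # prior-orthonormal modes (toy)
lam = np.sort(rng.uniform(1.0, 100.0, n))[::-1]     # eigenvalues above threshold
D = lam / (lam + 1.0)

p = np.zeros(m)
p[:5] = 1.0 / 5.0                                   # normalised indicator of one patch

cov_prior = p @ C_prior @ p
PiW = W.T @ p                                       # P_i^T W, shape [n]
cov_post = cov_prior - PiW @ (D * PiW)              # prior minus low-rank reduction
assert 0.0 <= cov_post <= cov_prior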
def run_forward(config_file):

    # Read run config file
    params = ConfigParser(config_file)
    log = inout.setup_logging(params)
    inout.log_preamble("forward", params)

    outdir = params.io.output_dir
    diag_dir = params.io.diagnostics_dir
    phase_name = params.time.phase_name

    # Load the static model data (geometry, smb, etc)
    input_data = inout.InputData(params)

    # Get model mesh
    mesh = fice_mesh.get_mesh(params)

    # Define the model
    mdl = model.model(mesh, input_data, params)

    mdl.alpha_from_inversion()
    mdl.beta_from_inversion()

    # Solve
    slvr = solver.ssa_solver(mdl, mixed_space=params.inversion.dual)
    slvr.save_ts_zero()

    cntrl = slvr.get_control()
    qoi_func = slvr.get_qoi_func()

    # TODO here - cntrl now returns a list - so compute_gradient returns a list of tuples

    # Run the forward model
    Q = slvr.timestep(adjoint_flag=1, qoi_func=qoi_func)
    # Run the adjoint model, computing gradient of Qoi w.r.t cntrl
    dQ_ts = compute_gradient(Q, cntrl)  # Isaac 27

    # Output model variables in ParaView+Fenics friendly format

    # Output QOI & DQOI (needed for next steps)
    inout.write_qval(slvr.Qval_ts, params)
    inout.write_dqval(dQ_ts, [var.name() for var in cntrl], params)

    # Output final velocity, surface & thickness (visualisation)
    inout.write_variable(slvr.U, params, name="U_fwd",
                         outdir=diag_dir, phase_name=phase_name,
                         phase_suffix=params.time.phase_suffix)
    inout.write_variable(mdl.surf, params, name="surf_fwd",
                         outdir=diag_dir, phase_name=phase_name,
                         phase_suffix=params.time.phase_suffix)

    H = project(mdl.H, mdl.Q)
    inout.write_variable(H, params, name="H_fwd",
                         outdir=diag_dir, phase_name=phase_name,
                         phase_suffix=params.time.phase_suffix)

    return mdl
def run_errorprop(config_file):

    # Read run config file
    params = ConfigParser(config_file)
    log = inout.setup_logging(params)
    inout.log_preamble("errorprop", params)

    outdir = params.io.output_dir

    # Load the static model data (geometry, smb, etc)
    input_data = inout.InputData(params)

    lamfile = params.io.eigenvalue_file
    vecfile = params.io.eigenvecs_file
    threshlam = params.eigendec.eigenvalue_thresh
    dqoi_h5file = params.io.dqoi_h5file

    # Get model mesh
    mesh = fice_mesh.get_mesh(params)

    # Define the model
    mdl = model.model(mesh, input_data, params)

    # Load alpha/beta fields
    mdl.alpha_from_inversion()
    mdl.beta_from_inversion()

    # Regularization operator using inversion delta/gamma values
    # TODO - this won't handle dual inversion case
    if params.inversion.alpha_active:
        delta = params.inversion.delta_alpha
        gamma = params.inversion.gamma_alpha
        cntrl = mdl.alpha
    elif params.inversion.beta_active:
        delta = params.inversion.delta_beta
        gamma = params.inversion.gamma_beta
        cntrl = mdl.beta

    if params.inversion.alpha_active and params.inversion.beta_active:
        log.warning("Dual inversion but error propagation isn't implemented yet! "
                    "Doing alpha only!")

    reg_op = prior.laplacian(delta, gamma, cntrl.function_space())

    space = cntrl.function_space()
    x, y, z = [Function(space) for i in range(3)]

    # Loads eigenvalues from file
    with open(os.path.join(outdir, lamfile), 'rb') as ff:
        eigendata = pickle.load(ff)
        lam = eigendata[0].real.astype(np.float64)
        nlam = len(lam)

    # and eigenvectors from .h5 file
    eps = params.constants.float_eps
    W = []
    with HDF5File(MPI.comm_world, os.path.join(outdir, vecfile), 'r') as hdf5data:
        for i in range(nlam):
            w = Function(space)
            hdf5data.read(w, f'v/vector_{i}')

            # Test norm in prior == 1.0
            reg_op.action(w.vector(), y.vector())
            norm_in_prior = w.vector().inner(y.vector())
            assert (abs(norm_in_prior - 1.0) < eps)

            W.append(w)

    # take only the largest eigenvalues
    pind = np.flatnonzero(lam > threshlam)
    lam = lam[pind]
    W = [W[i] for i in pind]

    D = np.diag(lam / (lam + 1))  # D_r Isaac 20

    # File containing dQoi_dCntrl (i.e. Jacobian of parameter to observable (Qoi))
    hdf5data = HDF5File(MPI.comm_world, os.path.join(outdir, dqoi_h5file), 'r')

    dQ_cntrl = Function(space)

    run_length = params.time.run_length
    num_sens = params.time.num_sens
    t_sens = np.flip(np.linspace(run_length, 0, num_sens))

    sigma = np.zeros(num_sens)
    sigma_prior = np.zeros(num_sens)

    for j in range(num_sens):
        hdf5data.read(dQ_cntrl, f'dQd{cntrl.name()}/vector_{j}')

        # TODO - is a mass matrix operation required here?
        # qd_cntrl - should be gradients
        tmp1 = np.asarray([w.vector().inner(dQ_cntrl.vector()) for w in W])
        tmp2 = np.dot(D, tmp1)

        P1 = Function(space)
        for tmp, w in zip(tmp2, W):
            P1.vector().axpy(tmp, w.vector())

        reg_op.inv_action(dQ_cntrl.vector(), x.vector())
        P2 = x  # .vector().get_local()

        P_vec = P2.vector() - P1.vector()

        variance = P_vec.inner(dQ_cntrl.vector())
        sigma[j] = np.sqrt(variance)

        # Prior only
        variance_prior = P2.vector().inner(dQ_cntrl.vector())
        sigma_prior[j] = np.sqrt(variance_prior)

    # Test that eigenvectors are prior inverse orthogonal
    # y.vector().set_local(W[:,398])
    # y.vector().apply('insert')
    # reg_op.action(y.vector(), x.vector())
    # #mass.mult(x.vector(),z.vector())
    # q = np.dot(y.vector().get_local(),x.vector().get_local())

    # Output model variables in ParaView+Fenics friendly format
    sigma_file = params.io.sigma_file
    sigma_prior_file = params.io.sigma_prior_file
    pickle.dump([sigma, t_sens], open(os.path.join(outdir, sigma_file), "wb"))
    pickle.dump([sigma_prior, t_sens], open(os.path.join(outdir, sigma_prior_file), "wb"))

    # This simplifies testing - is it OK? Should we hold all data in the solver object?
    mdl.Q_sigma = sigma
    mdl.Q_sigma_prior = sigma_prior
    mdl.t_sens = t_sens

    return mdl
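# Hedged example of consuming the pickled output written above: each file holds
# [sigma, t_sens], so the prior/posterior QoI uncertainty can be plotted against
# time. File locations follow params.io as used in run_errorprop; adapt as needed.
import os
import pickle
import matplotlib.pyplot as plt

def plot_qoi_sigma(params):
    outdir = params.io.output_dir
    with open(os.path.join(outdir, params.io.sigma_file), 'rb') as f:
        sigma, t_sens = pickle.load(f)
    with open(os.path.join(outdir, params.io.sigma_prior_file), 'rb') as f:
        sigma_prior, _ = pickle.load(f)

    plt.plot(t_sens, sigma, label='posterior')
    plt.plot(t_sens, sigma_prior, label='prior')
    plt.xlabel('Time')
    plt.ylabel('sigma QoI')
    plt.legend()
    plt.savefig(os.path.join(outdir, 'sigma_qoi.pdf'))
    plt.close()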
def run_invsigma(config_file):
    """Compute control sigma values from eigendecomposition"""

    comm = MPI.comm_world
    rank = comm.rank

    # Read run config file
    params = ConfigParser(config_file)

    # Setup logging
    log = inout.setup_logging(params)
    inout.log_preamble("inv sigma", params)

    outdir = params.io.output_dir

    # Load the static model data (geometry, smb, etc)
    input_data = inout.InputData(params)

    eigendir = outdir
    lamfile = params.io.eigenvalue_file
    vecfile = params.io.eigenvecs_file
    threshlam = params.eigendec.eigenvalue_thresh

    # Get model mesh
    mesh = fice_mesh.get_mesh(params)

    # Define the model (only need alpha & beta though)
    mdl = model.model(mesh, input_data, params, init_fields=False)

    # Load alpha/beta fields
    mdl.alpha_from_inversion()
    mdl.beta_from_inversion()

    # Regularization operator using inversion delta/gamma values
    # TODO - this won't handle dual inversion case
    if params.inversion.alpha_active:
        delta = params.inversion.delta_alpha
        gamma = params.inversion.gamma_alpha
        cntrl = mdl.alpha
    elif params.inversion.beta_active:
        delta = params.inversion.delta_beta
        gamma = params.inversion.gamma_beta
        cntrl = mdl.beta

    space = cntrl.function_space()

    sigma, sigma_prior, x, y, z = [Function(space) for i in range(5)]

    reg_op = prior.laplacian(delta, gamma, space)

    # Load the eigenvalues
    with open(os.path.join(eigendir, lamfile), 'rb') as ff:
        eigendata = pickle.load(ff)
        lam = eigendata[0].real.astype(np.float64)
        nlam = len(lam)

    # Read in the eigenvectors and check they are normalised
    # w.r.t. the prior (i.e. the B matrix in our GHEP)
    eps = params.constants.float_eps
    W = []
    with HDF5File(comm, os.path.join(eigendir, vecfile), 'r') as hdf5data:
        for i in range(nlam):
            w = Function(space)
            hdf5data.read(w, f'v/vector_{i}')

            # Test norm in prior == 1.0
            reg_op.action(w.vector(), y.vector())
            norm_in_prior = w.vector().inner(y.vector())
            assert (abs(norm_in_prior - 1.0) < eps)

            W.append(w)

    # Which eigenvalues are larger than our threshold?
    pind = np.flatnonzero(lam > threshlam)
    lam = lam[pind]
    W = [W[i] for i in pind]

    D = np.diag(lam / (lam + 1))

    neg_flag = 0

    # Isaac Eq. 20
    # P2 = prior
    # P1 = WDW
    # Note - don't think we're considering the cross terms
    # in the posterior covariance.

    # TODO - this isn't particularly well parallelised - can it be improved?
    for j in range(space.dim()):

        # Who owns this index?
        own_idx = y.vector().owns_index(j)
        ownership = np.where(comm.allgather(own_idx))[0]
        assert len(ownership) == 1
        idx_root = ownership[0]

        y.vector().zero()
        y.vector().vec().setValue(j, 1.0)
        y.vector().apply('insert')

        tmp2 = np.asarray([D[i, i] * w.vector().vec().getValue(j)
                           for i, w in enumerate(W)])
        tmp2 = comm.bcast(tmp2, root=idx_root)

        P1 = Function(space)
        for tmp, w in zip(tmp2, W):
            P1.vector().axpy(tmp, w.vector())

        reg_op.inv_action(y.vector(), x.vector())
        P2 = x

        P_vec = P2.vector() - P1.vector()

        dprod = comm.bcast(P_vec.vec().getValue(j), root=idx_root)
        dprod_prior = comm.bcast(P2.vector().vec().getValue(j), root=idx_root)

        if dprod < 0:
            log.warning(f'WARNING: Negative Sigma: {dprod}')
            log.warning('Setting as Zero and Continuing.')
            neg_flag = 1
            continue

        sigma.vector().vec().setValue(j, np.sqrt(dprod))
        sigma_prior.vector().vec().setValue(j, np.sqrt(dprod_prior))

    sigma.vector().apply("insert")
    sigma_prior.vector().apply("insert")

    # For testing - whole thing at once:
    # wdw = (np.matrix(W) * np.matrix(D) * np.matrix(W).T)
    # wdw[:,0] == P1 for j = 0

    if neg_flag:
        log.warning('Negative value(s) of sigma encountered. '
                    'Examine the range of eigenvalues and check if '
                    'the threshlam parameter is set appropriately.')

    # Write sigma & sigma_prior to files
    sigma_var_name = "_".join((cntrl.name(), "sigma"))
    sigma_prior_var_name = "_".join((cntrl.name(), "sigma_prior"))

    sigma.rename(sigma_var_name, "")
    sigma_prior.rename(sigma_prior_var_name, "")

    inout.write_variable(sigma, params, name=sigma_var_name)
    inout.write_variable(sigma_prior, params, name=sigma_prior_var_name)

    mdl.cntrl_sigma = sigma
    mdl.cntrl_sigma_prior = sigma_prior

    return mdl
def run_errorprop(config_file):

    # Read run config file
    params = ConfigParser(config_file)
    log = inout.setup_logging(params)
    inout.log_preamble("errorprop", params)

    outdir = params.io.output_dir

    # Load the static model data (geometry, smb, etc)
    input_data = inout.InputData(params)

    # Eigenvalue params
    phase_eigen = params.eigendec.phase_name
    phase_suffix_e = params.eigendec.phase_suffix
    lamfile = params.io.eigenvalue_file
    vecfile = params.io.eigenvecs_file
    threshlam = params.eigendec.eigenvalue_thresh

    # Qoi forward params
    phase_time = params.time.phase_name
    phase_suffix_qoi = params.time.phase_suffix
    dqoi_h5file = params.io.dqoi_h5file

    if len(phase_suffix_e) > 0:
        lamfile = params.io.run_name + phase_suffix_e + '_eigvals.p'
        vecfile = params.io.run_name + phase_suffix_e + '_vr.h5'

    if len(phase_suffix_qoi) > 0:
        dqoi_h5file = params.io.run_name + phase_suffix_qoi + '_dQ_ts.h5'

    # Get model mesh
    mesh = fice_mesh.get_mesh(params)

    # Define the model
    mdl = model.model(mesh, input_data, params)

    # Load alpha/beta fields
    mdl.alpha_from_inversion()
    mdl.beta_from_inversion()
    mdl.bglen_from_data(mask_only=True)

    # Setup our solver object
    slvr = solver.ssa_solver(mdl, mixed_space=params.inversion.dual)

    cntrl = slvr.get_control()[0]
    space = slvr.get_control_space()

    # Regularization operator using inversion delta/gamma values
    Prior = mdl.get_prior()
    reg_op = Prior(slvr, space)

    x, y, z = [Function(space) for i in range(3)]

    # Loads eigenvalues from file
    outdir_e = Path(outdir) / phase_eigen / phase_suffix_e
    with open(outdir_e / lamfile, 'rb') as ff:
        eigendata = pickle.load(ff)
        lam = eigendata[0].real.astype(np.float64)
        nlam = len(lam)

    # Check if eigendecomposition successfully produced num_eig
    # or if some are NaN
    if np.any(np.isnan(lam)):
        nlam = np.argwhere(np.isnan(lam))[0][0]
        lam = lam[:nlam]

    # and eigenvectors from .h5 file
    eps = params.constants.float_eps
    W = []
    with HDF5File(MPI.comm_world, str(outdir_e / vecfile), 'r') as hdf5data:
        for i in range(nlam):
            w = Function(space)
            hdf5data.read(w, f'v/vector_{i}')

            # Test norm in prior == 1.0
            reg_op.action(w.vector(), y.vector())
            norm_in_prior = w.vector().inner(y.vector())
            assert (abs(norm_in_prior - 1.0) < eps)

            W.append(w)

    # take only the largest eigenvalues
    pind = np.flatnonzero(lam > threshlam)
    lam = lam[pind]
    nlam = len(lam)
    W = [W[i] for i in pind]

    D = np.diag(lam / (lam + 1))  # D_r Isaac 20

    # File containing dQoi_dCntrl (i.e. Jacobian of parameter to observable (Qoi))
    outdir_qoi = Path(outdir) / phase_time / phase_suffix_qoi
    hdf5data = HDF5File(MPI.comm_world, str(outdir_qoi / dqoi_h5file), 'r')

    dQ_cntrl = Function(space)

    run_length = params.time.run_length
    num_sens = params.time.num_sens
    t_sens = np.flip(np.linspace(run_length, 0, num_sens))

    sigma = np.zeros(num_sens)
    sigma_prior = np.zeros(num_sens)

    for j in range(num_sens):
        hdf5data.read(dQ_cntrl, f'dQd{cntrl.name()}/vector_{j}')

        # TODO - is a mass matrix operation required here?
        # qd_cntrl - should be gradients
        tmp1 = np.asarray([w.vector().inner(dQ_cntrl.vector()) for w in W])
        tmp2 = np.dot(D, tmp1)

        P1 = Function(space)
        for tmp, w in zip(tmp2, W):
            P1.vector().axpy(tmp, w.vector())

        reg_op.inv_action(dQ_cntrl.vector(), x.vector())
        P2 = x  # .vector().get_local()

        P_vec = P2.vector() - P1.vector()

        variance = P_vec.inner(dQ_cntrl.vector())
        sigma[j] = np.sqrt(variance)

        # Prior only
        variance_prior = P2.vector().inner(dQ_cntrl.vector())
        sigma_prior[j] = np.sqrt(variance_prior)

    # Look at the last sampled time and check how sigma QoI converges
    # with addition of more eigenvectors
    sigma_conv = []
    sigma_steps = []
    P1 = Function(space)

    # How many steps?
    conv_res = 100
    conv_int = int(np.ceil(nlam / conv_res))

    for i in range(0, nlam, conv_int):
        # Reuse tmp1/tmp2 from above because it's the last sens
        for j in range(i, min(i + conv_int, nlam)):
            P1.vector().axpy(tmp2[j], W[j].vector())

        P_vec = P2.vector() - P1.vector()
        variance = P_vec.inner(dQ_cntrl.vector())
        sigma_conv.append(np.sqrt(variance))
        sigma_steps.append(min(i + conv_int, nlam))

    # Save plots in diagnostics
    phase_err = params.error_prop.phase_name
    phase_suffix_err = params.error_prop.phase_suffix
    diag_dir = Path(params.io.diagnostics_dir) / phase_err / phase_suffix_err
    outdir_err = Path(params.io.output_dir) / phase_err / phase_suffix_err

    # if(MPI.comm_world.rank == 0):
    plt.semilogy(sigma_steps, sigma_conv)
    plt.title("Convergence of sigmaQoI")
    plt.ylabel("sigma QoI")
    plt.xlabel("Num eig")
    plt.savefig(
        os.path.join(
            str(diag_dir),
            "_".join((params.io.run_name, phase_suffix_err + "sigmaQoI_conv.pdf"))))
    plt.close()

    sigmaqoi_file = os.path.join(
        str(outdir_err),
        "_".join((params.io.run_name, phase_suffix_err + "sigma_qoi_convergence.p")))

    with open(sigmaqoi_file, 'wb') as pfile:
        pickle.dump([sigma_steps, sigma_conv], pfile)

    # Test that eigenvectors are prior inverse orthogonal
    # y.vector().set_local(W[:,398])
    # y.vector().apply('insert')
    # reg_op.action(y.vector(), x.vector())
    # #mass.mult(x.vector(),z.vector())
    # q = np.dot(y.vector().get_local(),x.vector().get_local())

    # Output model variables in ParaView+Fenics friendly format
    sigma_file = params.io.sigma_file
    sigma_prior_file = params.io.sigma_prior_file

    if len(phase_suffix_err) > 0:
        sigma_file = params.io.run_name + phase_suffix_err + '_sigma.p'
        sigma_prior_file = params.io.run_name + phase_suffix_err + '_sigma_prior.p'

    with open(os.path.join(outdir_err, sigma_file), "wb") as sigfile:
        pickle.dump([sigma, t_sens], sigfile)
    with open(os.path.join(outdir_err, sigma_prior_file), "wb") as sigpfile:
        pickle.dump([sigma_prior, t_sens], sigpfile)

    # This simplifies testing - is it OK? Should we hold all data in the solver object?
    mdl.Q_sigma = sigma
    mdl.Q_sigma_prior = sigma_prior
    mdl.t_sens = t_sens

    return mdl
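# Small numeric illustration of why the sigma-QoI convergence check above
# flattens out: each retained eigenpair reduces variance by a factor
# lam / (lam + 1), so modes with lam << 1 contribute essentially nothing
# (which is also why thresholding on threshlam is reasonable). Values are
# illustrative only.
import numpy as np

lam = np.array([1.0e4, 1.0e2, 1.0, 1.0e-2, 1.0e-4])
print(lam / (lam + 1.0))   # approx [1.0, 0.99, 0.5, 0.0099, 0.0001]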
def run_forward(config_file):

    # Read run config file
    params = ConfigParser(config_file)
    log = inout.setup_logging(params)
    inout.log_preamble("forward", params)

    outdir = params.io.output_dir

    # Load the static model data (geometry, smb, etc)
    input_data = inout.InputData(params)

    # Get model mesh
    mesh = fice_mesh.get_mesh(params)

    # Define the model
    mdl = model.model(mesh, input_data, params)

    mdl.alpha_from_inversion()
    mdl.beta_from_inversion()

    # Solve
    slvr = solver.ssa_solver(mdl)
    slvr.save_ts_zero()

    cntrl = slvr.get_control()
    qoi_func = slvr.get_qoi_func()

    # TODO here - cntrl now returns a list - so compute_gradient returns a list of tuples

    # Run the forward model
    Q = slvr.timestep(adjoint_flag=1, qoi_func=qoi_func)
    # Run the adjoint model, computing gradient of Qoi w.r.t cntrl
    dQ_ts = compute_gradient(Q, cntrl)  # Isaac 27

    # Uncomment for Taylor Verification, Comment above two lines
    # param['num_sens'] = 1
    # J = slvr.timestep(adjoint_flag=1, cst_func=slvr.comp_Q_vaf)
    # dJ = compute_gradient(J, slvr.alpha)
    #
    #
    # def forward_ts(alpha_val=None):
    #     slvr.reset_ts_zero()
    #     if alpha_val:
    #         slvr.alpha = alpha_val
    #     return slvr.timestep(adjoint_flag=1, cst_func=slvr.comp_Q_vaf)
    #
    #
    # min_order = taylor_test(lambda alpha : forward_ts(alpha_val = alpha), slvr.alpha,
    #                         J_val = J.value(), dJ = dJ, seed = 1e-2, size = 6)
    # sys.exit(os.EX_OK)

    # Output model variables in ParaView+Fenics friendly format
    outdir = params.io.output_dir

    # Output QOI & DQOI (needed for next steps)
    inout.write_qval(slvr.Qval_ts, params)
    inout.write_dqval(dQ_ts, [var.name() for var in cntrl], params)

    # Output final velocity, surface & thickness (visualisation)
    inout.write_variable(slvr.U, params, name="U_fwd")
    inout.write_variable(mdl.surf, params, name="surf_fwd")

    H = project(mdl.H, mdl.Q)
    inout.write_variable(H, params, name="H_fwd")

    return mdl
def run_eigendec(config_file):
    """
    Run the eigendecomposition phase of the model.

    1. Define the model domain & fields
    2. Runs the forward model w/ alpha/beta from run_inv
    3. Computes the Hessian of the *misfit* cost functional (J)
    4. Performs the generalized eigendecomposition with
       A = H_mis, B = prior_action
    """
    # Read run config file
    params = ConfigParser(config_file)
    log = inout.setup_logging(params)
    inout.log_preamble("eigendecomp", params)

    dd = params.io.input_dir
    outdir = params.io.output_dir

    # Load the static model data (geometry, smb, etc)
    input_data = inout.InputData(params)

    # Get model mesh
    mesh = fice_mesh.get_mesh(params)

    # Define the model
    mdl = model.model(mesh, input_data, params)

    # Load alpha/beta fields
    mdl.alpha_from_inversion()
    mdl.beta_from_inversion()

    # Setup our solver object
    slvr = solver.ssa_solver(mdl)

    # TODO generalise - get_control returns a list
    cntrl = slvr.get_control()[0]
    space = cntrl.function_space()

    msft_flag = params.eigendec.misfit_only
    if msft_flag:
        slvr.zero_inv_params()

    # Hessian Action
    slvr.set_hessian_action(cntrl)

    # Mass matrix solver
    xg, xb = Function(space), Function(space)

    # test, trial = TestFunction(space), TrialFunction(space)
    # mass = assemble(inner(test, trial) * slvr.dx)
    # mass_solver = KrylovSolver("cg", "sor")
    # mass_solver.parameters.update({"absolute_tolerance": 1.0e-32,
    #                                "relative_tolerance": 1.0e-14})
    # mass_solver.set_operator(mass)

    # Regularization operator using inversion delta/gamma values
    # TODO - this won't handle dual inversion case
    if params.inversion.alpha_active:
        delta = params.inversion.delta_alpha
        gamma = params.inversion.gamma_alpha
    elif params.inversion.beta_active:
        delta = params.inversion.delta_beta
        gamma = params.inversion.gamma_beta

    reg_op = prior.laplacian(delta, gamma, space)

    # Uncomment to get low-level SLEPc/PETSc output
    # set_log_level(10)

    @count_calls()
    # @timer
    def ghep_action(x):
        """Hessian action w/o preconditioning"""
        _, _, ddJ_val = slvr.ddJ.action(cntrl, x)
        # reg_op.inv_action(ddJ_val.vector(), xg.vector()) <- gnhep_prior
        return function_get_values(ddJ_val)

    @count_calls()
    def prior_action(x):
        """Define the action of the B matrix (prior)"""
        reg_op.action(x.vector(), xg.vector())
        return function_get_values(xg)

    def prior_approx_action(x):
        """Only used for checking B' orthonormality"""
        reg_op.approx_action(x.vector(), xg.vector())
        return function_get_values(xg)

    def slepc_config_callback(config):
        log.info("Got to the callback")

        # KSP corresponds to B-matrix inversion
        # Set it to precondition only because we
        # supply the inverse in LaplacianPC
        ksp = config.getST().getKSP()
        ksp.setType(PETSc.KSP.Type.PREONLY)

        pc = ksp.getPC()
        pc.setType(PETSc.PC.Type.PYTHON)
        pc.setPythonContext(prior.LaplacianPC(reg_op))

        # A_matrix already defined so just grab it
        A_matrix, _ = config.getOperators()

        (n, N), (n_col, N_col) = A_matrix.getSizes()
        assert n == n_col
        assert N == N_col
        del n_col, N_col

        comm = A_matrix.getComm()

        B_matrix = PETSc.Mat().createPython(((n, N), (n, N)),
                                            PythonMatrix(prior_action, space),
                                            comm=comm)
        B_matrix.setUp()

        config.view()  # TODO - should this go to log?

        config.setOperators(A_matrix, B_matrix)

    # opts = {'prior': gnhep_prior_action, 'mass': gnhep_mass_action}
    # gnhep_func = opts[params.eigendec.precondition_by]

    num_eig = params.eigendec.num_eig
    n_iter = params.eigendec.power_iter  # <- not used yet

    # Hessian eigendecomposition using SLEPc
    eig_algo = params.eigendec.eig_algo
    if eig_algo == "slepc":

        # Eigendecomposition
        lam, vr = eigendecompose(space,
                                 ghep_action,
                                 tolerance=1.0e-10,
                                 N_eigenvalues=num_eig,
                                 problem_type=SLEPc.EPS.ProblemType.GHEP,
                                 # solver_type=SLEPc.EPS.Type.ARNOLDI,
                                 configure=slepc_config_callback)

        # Check orthonormality of EVs
        if num_eig is not None and num_eig < 100:

            # Check for B (not B') orthogonality & normalisation
            for i in range(num_eig):
                reg_op.action(vr[i].vector(), xg.vector())
                norm = xg.vector().inner(Vector(vr[i].vector()))**0.5
                print("EV %s norm %s" % (i, norm))

            for i in range(num_eig):
                reg_op.action(vr[i].vector(), xg.vector())
                for j in range(i + 1, num_eig):
                    inn = xg.vector().inner(Vector(vr[j].vector()))
                    print("EV %s %s inner %s" % (i, j, inn))

        # Uses extreme amounts of disk space; suitable for ismipc only
        # #Save eigenfunctions
        # vtkfile = File(os.path.join(outdir,'vr.pvd'))
        # for v in vr:
        #     v.rename('v', v.label())
        #     vtkfile << v
        #
        # vtkfile = File(os.path.join(outdir,'vi.pvd'))
        # for v in vi:
        #     v.rename('v', v.label())
        #     vtkfile << v

        ev_file = params.io.eigenvecs_file
        with HDF5File(slvr.mesh.mpi_comm(), os.path.join(outdir, ev_file), 'w') as hdf5file:
            for i, v in enumerate(vr):
                hdf5file.write(v, 'v', i)

            hdf5file.parameters.add("num_eig", num_eig)
            hdf5file.parameters.add("eig_algo", eig_algo)
            hdf5file.parameters.add("timestamp", str(datetime.datetime.now()))

    else:
        raise NotImplementedError

    slvr.eigenvals = lam
    slvr.eigenfuncs = vr

    # Save eigenvals and some associated info - TODO HDF5File?
    fileout = params.io.eigenvalue_file
    pfile = open(os.path.join(outdir, fileout), "wb")
    pickle.dump([lam, num_eig, n_iter, eig_algo, msft_flag, outdir, dd], pfile)
    pfile.close()

    # Plot of eigenvals
    lamr = lam.real
    lpos = np.argwhere(lamr > 0)
    lneg = np.argwhere(lamr < 0)
    lind = np.arange(0, len(lamr))
    plt.semilogy(lind[lpos], lamr[lpos], '.')
    plt.semilogy(lind[lneg], np.abs(lamr[lneg]), '.')
    plt.savefig(os.path.join(outdir, 'lambda.pdf'))

    # Note - for now this does nothing, but eventually if the whole series
    # of runs were done without re-initializing solver, it'd be important to
    # put the inversion params back
    if msft_flag:
        slvr.set_inv_params()

    return mdl
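# Dense-algebra sketch of the generalized Hermitian eigenproblem (GHEP) that the
# SLEPc call above solves, A v = lam B v with A = misfit Hessian and B = prior
# action, followed by the B-orthonormality check performed above
# (v_i^T B v_j = delta_ij). The matrices here are random SPD stand-ins.
import numpy as np
from scipy.linalg import eigh

rng = np.random.default_rng(2)
m = 20
X = rng.standard_normal((m, m))
A = X @ X.T + np.eye(m)          # stand-in for the misfit Hessian
Y = rng.standard_normal((m, m))
B = Y @ Y.T + np.eye(m)          # stand-in for the prior (regularisation) operator

lam, V = eigh(A, B)              # generalized eigendecomposition
gram = V.T @ B @ V               # should be (close to) the identity
assert np.allclose(gram, np.eye(m), atol=1e-6)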
def main(config_file):

    print("===WARNING=== - this code has not been fully adapted to new config format")
    print("Consult TODOs in run_balancemeltrates.py")

    init_yr = 5  # TODO - where in the config?

    # Read run config file
    params = ConfigParser(config_file)
    log = inout.setup_logging(params)
    inout.log_preamble("balance meltrates", params)

    dd = params.io.input_dir
    outdir = params.io.output_dir
    run_length = params.time.run_length
    n_steps = params.time.total_steps

    assert init_yr < run_length

    # #Load Data
    # param = pickle.load( open( os.path.join(dd,'param.p'), "rb" ) )
    # param['outdir'] = outdir
    # param['sliding_law'] = sl
    # param['picard_params'] = {"nonlinear_solver":"newton",
    #                           "newton_solver":{"linear_solver":"umfpack",
    #                                            "maximum_iterations":25,
    #                                            "absolute_tolerance":1.0e-3,
    #                                            "relative_tolerance":5.0e-2,
    #                                            "convergence_criterion":"incremental",
    #                                            "error_on_nonconvergence":False,
    #                                            "lu_solver":{"same_nonzero_pattern":False,
    #                                                         "symmetric":False,
    #                                                         "reuse_factorization":False}}}

    # Load Data
    mesh = Mesh(os.path.join(outdir, 'mesh.xml'))

    M = FunctionSpace(mesh, 'DG', 0)
    # TODO - what's the logic here?:
    Q = FunctionSpace(mesh, 'Lagrange', 1)  # if os.path.isfile(os.path.join(dd,'param.p')) else M

    mask = Function(M, os.path.join(outdir, 'mask.xml'))

    if os.path.isfile(os.path.join(outdir, 'data_mesh.xml')):
        data_mesh = Mesh(os.path.join(outdir, 'data_mesh.xml'))
        Mdata = FunctionSpace(data_mesh, 'DG', 0)
        data_mask = Function(Mdata, os.path.join(outdir, 'data_mask.xml'))
    else:
        data_mesh = mesh
        data_mask = mask

    if not params.mesh.periodic_bc:
        Qp = Q
        V = VectorFunctionSpace(mesh, 'Lagrange', 1, dim=2)
    else:
        Qp = fice_mesh.get_periodic_space(params, mesh, dim=1)
        V = fice_mesh.get_periodic_space(params, mesh, dim=2)

    # Load fields
    U = Function(V, os.path.join(outdir, 'U.xml'))

    alpha = Function(Qp, os.path.join(outdir, 'alpha.xml'))
    beta = Function(Qp, os.path.join(outdir, 'beta.xml'))
    bed = Function(Q, os.path.join(outdir, 'bed.xml'))

    smb = Function(M, os.path.join(outdir, 'smb.xml'))
    thick = Function(M, os.path.join(outdir, 'thick.xml'))
    mask_vel = Function(M, os.path.join(outdir, 'mask_vel.xml'))
    u_obs = Function(M, os.path.join(outdir, 'u_obs.xml'))
    v_obs = Function(M, os.path.join(outdir, 'v_obs.xml'))
    u_std = Function(M, os.path.join(outdir, 'u_std.xml'))
    v_std = Function(M, os.path.join(outdir, 'v_std.xml'))
    uv_obs = Function(M, os.path.join(outdir, 'uv_obs.xml'))

    mdl = model.model(mesh, data_mask, params, init_fields=False)

    # TODO initialization
    mdl.init_bed(bed)
    mdl.init_thick(thick)
    mdl.gen_surf()
    mdl.init_mask(mask)
    mdl.init_vel_obs(u_obs, v_obs, mask_vel, u_std, v_std)
    mdl.init_lat_dirichletbc()
    mdl.init_bmelt(Constant(0.0))
    mdl.init_alpha(alpha)
    mdl.init_beta(beta, False)
    mdl.init_smb(smb)
    mdl.label_domain()

    # Solve
    slvr = solver.ssa_solver(mdl)
    slvr.save_ts_zero()
    slvr.timestep(save=1, adjoint_flag=0)

    # Balance melt rates

    # Load time series of ice thicknesses
    hdf = HDF5File(slvr.mesh.mpi_comm(), os.path.join(outdir, 'H_ts.h5'), "r")

    attr = hdf.attributes("H")
    nsteps = attr['count']

    # model time step
    dt = params.time.dt

    # Model iterations to difference between
    iter_s = np.ceil(init_yr/dt)   # Iteration closest to 5yr
    iter_f = nsteps - 1            # Final iteration
    dT = dt*(iter_f - iter_s)      # Time diff in years between iterations

    # Read iteration data
    HS = Function(slvr.M)
    HF = Function(slvr.M)
    hdf.read(HS, "H/vector_{0}".format(int(iter_s)))
    hdf.read(HF, "H/vector_{0}".format(int(iter_f)))

    # Mask out grounded region
    rhow = params.constants.rhow
    rhoi = params.constants.rhoi
    H_s = -rhow/rhoi * bed
    fl_ex = conditional(slvr.H_init <= H_s, Constant(1.0), Constant(0.0))

    # Calculate bmelt
    bmelt = project(ufl.Max(fl_ex*(HF - HS)/dT, Constant(0.0)), slvr.M)

    # Output model variables in ParaView+Fenics friendly format
    pickle.dump(mdl.param, open(os.path.join(outdir, 'bmeltrate_param.p'), "wb"))

    # File(os.path.join(outdir,'mesh.xml')) << mdl.mesh

    vtkfile = File(os.path.join(outdir, 'bmelt.pvd'))
    xmlfile = File(os.path.join(outdir, 'bmelt.xml'))
    vtkfile << bmelt
    xmlfile << bmelt

    return mdl
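# NumPy sketch of the balance melt rate computed above. The forward run was
# started with init_bmelt(Constant(0.0)), so (presumably) any thickening of
# floating ice (fl_ex == 1) between the two sampled thickness snapshots is the
# melt rate needed to hold the shelf in balance, floored at zero. Arrays are
# illustrative stand-ins for the cell-wise (DG0) fields.
import numpy as np

HS = np.array([500.0, 300.0, 120.0, 80.0])    # thickness near init_yr
HF = np.array([480.0, 310.0, 140.0, 75.0])    # thickness at the final step
fl_ex = np.array([0.0, 0.0, 1.0, 1.0])        # 1 where ice is floating
dT = 25.0                                     # years between the two snapshots

bmelt = np.maximum(fl_ex * (HF - HS) / dT, 0.0)
print(bmelt)   # [0. 0. 0.8 0.]: only floating, thickening cells get melt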
def run_eigendec(config_file):
    """
    Run the eigendecomposition phase of the model.

    1. Define the model domain & fields
    2. Runs the forward model w/ alpha/beta from run_inv
    3. Computes the Hessian of the *misfit* cost functional (J)
    4. Performs the generalized eigendecomposition with
       A = H_mis, B = prior_action
    """
    # Read run config file
    params = ConfigParser(config_file)
    log = inout.setup_logging(params)
    inout.log_preamble("eigendecomp", params)

    # Load the static model data (geometry, smb, etc)
    input_data = inout.InputData(params)

    # Get mesh & define model
    mesh = fice_mesh.get_mesh(params)
    mdl = model.model(mesh, input_data, params)

    # Load alpha/beta fields
    mdl.alpha_from_inversion()
    mdl.beta_from_inversion()
    mdl.bglen_from_data(mask_only=True)

    # Setup our solver object
    slvr = solver.ssa_solver(mdl, mixed_space=params.inversion.dual)

    cntrl = slvr.get_control()[0]
    space = slvr.get_control_space()

    # Regularization operator using inversion delta/gamma values
    Prior = mdl.get_prior()
    reg_op = Prior(slvr, space)

    msft_flag = params.eigendec.misfit_only
    if msft_flag:
        slvr.zero_inv_params()

    # Hessian Action
    slvr.set_hessian_action(cntrl)

    # Mass matrix solver
    xg, xb = Function(space), Function(space)

    # test, trial = TestFunction(space), TrialFunction(space)
    # mass = assemble(inner(test, trial) * slvr.dx)
    # mass_solver = KrylovSolver("cg", "sor")
    # mass_solver.parameters.update({"absolute_tolerance": 1.0e-32,
    #                                "relative_tolerance": 1.0e-14})
    # mass_solver.set_operator(mass)

    # Uncomment to get low-level SLEPc/PETSc output
    # set_log_level(10)

    @count_calls()
    # @timer
    def ghep_action(x):
        """Hessian action w/o preconditioning"""
        _, _, ddJ_val = slvr.ddJ.action(cntrl, x)
        # reg_op.inv_action(ddJ_val.vector(), xg.vector()) <- gnhep_prior
        return function_get_values(ddJ_val)

    @count_calls()
    def prior_action(x):
        """Define the action of the B matrix (prior)"""
        reg_op.action(x.vector(), xg.vector())
        return function_get_values(xg)

    # opts = {'prior': gnhep_prior_action, 'mass': gnhep_mass_action}
    # gnhep_func = opts[params.eigendec.precondition_by]

    num_eig = params.eigendec.num_eig
    n_iter = params.eigendec.power_iter  # <- not used yet

    # Hessian eigendecomposition using SLEPc
    eig_algo = params.eigendec.eig_algo
    if eig_algo == "slepc":

        # Create this empty dict & pass it to slepc_monitor_callback to fill
        results = {}

        # Eigendecomposition
        import slepc4py.SLEPc as SLEPc
        esolver = eigendecompose(space,
                                 ghep_action,
                                 tolerance=params.eigendec.tol,
                                 max_it=params.eigendec.max_iter,
                                 N_eigenvalues=num_eig,
                                 problem_type=SLEPc.EPS.ProblemType.GHEP,
                                 solver_type=SLEPc.EPS.Type.KRYLOVSCHUR,
                                 configure=slepc_config_callback(reg_op, prior_action, space),
                                 monitor=slepc_monitor_callback(params, space, results))

        log.info("Finished eigendecomposition")

        vr = results['vr']
        lam = results['lam']

        # Check the eigenvectors & eigenvalues
        if (params.eigendec.test_ed):
            ED.test_eigendecomposition(esolver, results, space, params)

            if num_eig > 100:
                log.warning("Requesting inner product of more than 100 EVs, "
                            "this is expensive!")

            # Check for B (not B') orthogonality & normalisation
            for i in range(num_eig):
                reg_op.action(vr[i].vector(), xg.vector())
                norm = xg.vector().inner(Vector(vr[i].vector()))**0.5
                if (abs(1.0 - norm) > params.eigendec.tol):
                    raise Exception(f"Eigenvector norm is {norm}")

            for i in range(num_eig):
                reg_op.action(vr[i].vector(), xg.vector())
                for j in range(i + 1, num_eig):
                    inn = xg.vector().inner(Vector(vr[j].vector()))
                    if (abs(inn) > params.eigendec.tol):
                        raise Exception(
                            f"Eigenvectors {i} & {j} inner product nonzero: {inn}")

        # Uses extreme amounts of disk space; suitable for ismipc only
        # #Save eigenfunctions
        # vtkfile = File(os.path.join(outdir,'vr.pvd'))
        # for v in vr:
        #     v.rename('v', v.label())
        #     vtkfile << v
        #
        # vtkfile = File(os.path.join(outdir,'vi.pvd'))
        # for v in vi:
        #     v.rename('v', v.label())
        #     vtkfile << v

    else:
        raise NotImplementedError

    slvr.eigenvals = lam
    slvr.eigenfuncs = vr

    # Plot of eigenvals
    lpos = np.argwhere(lam > 0)
    lneg = np.argwhere(lam < 0)
    lind = np.arange(0, len(lam))
    plt.semilogy(lind[lpos], lam[lpos], '.')
    plt.semilogy(lind[lneg], np.abs(lam[lneg]), '.')

    diag_dir = Path(params.io.diagnostics_dir) / \
        params.eigendec.phase_name / params.eigendec.phase_suffix
    plt.savefig(diag_dir / 'lambda.pdf')
    plt.close()

    # Note - for now this does nothing, but eventually if the whole series
    # of runs were done without re-initializing solver, it'd be important to
    # put the inversion params back
    if msft_flag:
        slvr.set_inv_params()

    return mdl