def setup_logging(level, logfile=None, logfile_level=None, verbatim_filename=False):
    """Configure application logging and quiet the FEniCS loggers.

    Sets up the root logger via basicConfig, caps DOLFIN/FFC/UFL output at
    WARNING, and optionally appends to a log file derived from *logfile*.
    """
    fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    logging.basicConfig(level=level, format=fmt)

    # FEniCS machinery is chatty; keep it at warnings only.
    dolfin.set_log_active(True)
    dolfin.set_log_level(logging.WARNING)
    for noisy in ("FFC", "UFL"):
        logging.getLogger(noisy).setLevel(logging.WARNING)

    # Application loggers.
    logging.getLogger("spuq").setLevel(level)
    logging.getLogger("spuq.application.egsz.multi_operator").setLevel(logging.WARNING)

    if not logfile:
        return

    # Derive "<name>.log" from a ".conf"/".py" input unless told to keep it verbatim.
    if not verbatim_filename:
        for suffix in (".conf", ".py"):
            logfile = basename(logfile, suffix)
        logfile = logfile + ".log"

    # Append-mode file handler attached to the root logger.
    file_handler = logging.FileHandler(logfile, mode="a")
    file_handler.setLevel(logfile_level if logfile_level else level)
    file_handler.setFormatter(logging.Formatter(fmt))
    logging.getLogger().addHandler(file_handler)
def save_results(problem, solver, num_dofs, mesh_size, time_step, functional, error): 'Save results to file.' # Print summary if MPI.process_number() == 0: print '' print 'Problem |', problem print 'Solver |', solver print 'Unknowns |', num_dofs print 'Mesh size |', mesh_size print 'Time step |', time_step print 'Functional |', functional print 'Error |', error # Print DOLFIN summary set_log_active(True) list_timings() # Append to file, let each dx, dt have its own log file results_dir = problem.options['results_dir'] dx = problem.options['refinement_level'] dt = problem.options['dt_division'] name = '%s_%s_results_dx%d_dt%d.log' % (str(problem), str(solver), dx, dt) filename = os.path.join(results_dir, name) # create the dir for results if needed if not os.path.exists(os.path.dirname(filename)): os.makedirs(os.path.dirname(filename)) with open(filename, 'a') as f: f.write('%s, %s, %s, %d, %.15g, %.15g, %.15g, %s\n' % (time.asctime(), problem, solver, num_dofs, mesh_size, time_step, functional, str(error)))
def save_results(problem, solver, num_dofs, mesh_size, time_step, functional, error): 'Save results to file.' # Print summary if MPI.process_number() == 0 : print '' print 'Problem |', problem print 'Solver |', solver print 'Unknowns |', num_dofs print 'Mesh size |', mesh_size print 'Time step |', time_step print 'Functional |', functional print 'Error |', error # Print DOLFIN summary set_log_active(True) list_timings() # Append to file, let each dx, dt have its own log file results_dir = problem.options['results_dir'] dx = problem.options['refinement_level'] dt = problem.options['dt_division'] name = '%s_%s_results_dx%d_dt%d.log' % (str(problem), str(solver), dx, dt) filename = os.path.join(results_dir, name) # create the dir for results if needed if not os.path.exists(os.path.dirname(filename)): os.makedirs(os.path.dirname(filename)) with open(filename, 'a') as f: f.write('%s, %s, %s, %d, %.15g, %.15g, %.15g, %s\n' % (time.asctime(), problem, solver, num_dofs, mesh_size, time_step, functional, str(error)))
def setup_logging(level):
    """Set up file-backed root logging plus console output; return the module logger.

    The log file is named after this script ("foo.py" -> "foo.log").
    FEniCS (DOLFIN/FFC/UFL) output is capped at WARNING.
    """
    fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    logging.basicConfig(filename=__file__[:-2] + 'log', level=level, format=fmt)

    # FEniCS logging: warnings only.
    from dolfin import (set_log_level, set_log_active, INFO, DEBUG, WARNING)
    set_log_active(True)
    set_log_level(WARNING)
    for name in ("FFC", "UFL"):
        logging.getLogger(name).setLevel(logging.WARNING)

    # Silence a particularly noisy application logger entirely.
    logging.getLogger("spuq.application.egsz.multi_operator").disabled = True

    # Console handler shared by this module's logger and the "spuq" tree.
    console = logging.StreamHandler()
    console.setLevel(level)
    console.setFormatter(logging.Formatter(fmt))

    logger = logging.getLogger(__name__)
    logger.addHandler(console)
    logging.getLogger("spuq").addHandler(console)
    return logger
def make_logger(name, level=logging.INFO):
    """Create a console logger that emits only on MPI rank 0.

    Also deactivates DOLFIN's own logging and caps it at WARNING.

    Parameters:
        name: logger name passed to logging.getLogger.
        level: threshold for the returned logger (default INFO).
    Returns:
        The configured logging.Logger.
    """
    import logging
    import dolfin

    # Minimal filter object: logging only requires a callable `.filter` attribute.
    mpi_filt = lambda: None

    def log_if_proc0(record):
        # Pass records only on the root process of the world communicator.
        if dolfin.mpi_comm_world().rank == 0:
            return 1
        else:
            return 0

    mpi_filt.filter = log_if_proc0

    logger = logging.getLogger(name)
    # BUG FIX: was `logger.setLevel(log_level)` — `log_level` is undefined
    # (NameError); the parameter is named `level`.
    logger.setLevel(level)

    ch = logging.StreamHandler()
    ch.setLevel(0)  # handler passes everything; the logger level filters
    ch.setFormatter(logging.Formatter("%(message)s"))
    logger.addHandler(ch)
    logger.addFilter(mpi_filt)

    # Mute DOLFIN's native logging.
    dolfin.set_log_active(False)
    dolfin.set_log_level(dolfin.WARNING)
    return logger
def solve_nonlinear(self, inputs, outputs):
    """Apply the density filter by solving its residual form with one Newton step.

    Reads 'density_unfiltered' from *inputs*, uses the current 'density' in
    *outputs* as the initial guess, and writes the filtered field back to
    outputs['density'].
    """
    self.unfiltered_density.vector().set_local(inputs['density_unfiltered'])
    self.filtered_density.vector().set_local(outputs['density'])

    # Residual of the filter PDE and its Jacobian w.r.t. the filtered field.
    residual_form = self.options['residual'](self.unfiltered_density,
                                             self.filtered_density,
                                             C=self.filter_strength)
    J = df.derivative(residual_form, self.filtered_density)

    # FIX: removed a redundant df.set_log_active(False) that was immediately
    # overridden by the True call below (dead statement).
    df.set_log_active(True)

    # Single Newton iteration; non-convergence is tolerated deliberately.
    df.solve(residual_form == 0, self.filtered_density, J=J,
             solver_parameters={"newton_solver": {
                 "maximum_iterations": 1,
                 "error_on_nonconvergence": False}})
    outputs['density'] = self.filtered_density.vector().get_local()
def solve(solver_name, problem_name, options): 'Solve the problem by solver with options.' # Set cpp_compiler options parameters["form_compiler"]["cpp_optimize"] = True # Set debug level set_log_active(options['debug']) # Set refinement level options['N'] = mesh_sizes[options['refinement_level']] # Create problem and solver problem = Problem(problem_name, options) solver = Solver(solver_name, options) time_step = solver.get_timestep(problem)[0] if MPI.process_number() == 0 and options['verbose']: print 'Problem: ' + str(problem) print 'Solver: ' + str(solver) # Solve problem with solver wct = time.time() u, p = solver.solve(problem) # Compute elapsed time wct = time.time() - wct # Compute number of degrees of freedom num_dofs = u.vector().size() + p.vector().size() # Get the mesh size mesh_size = u.function_space().mesh().hmin() # Get functional value and error functional, error = solver.eval() # Save results cpu_time = solver.cputime() save_results(problem, solver, num_dofs, mesh_size, time_step, functional, error) return 0
def make_logger(name, level=logging.INFO):
    """Return a console logger that is silent on every MPI rank except 0.

    DOLFIN's own logging is deactivated and capped at WARNING as a side effect.
    """
    import logging

    # Bare object used as a filter: logging only needs a `.filter` callable.
    mpi_filt = lambda: None

    def log_if_proc0(record):
        # 1 (pass) on the world communicator's root rank, 0 (drop) elsewhere.
        return 1 if dolfin.MPI.rank(dolfin.mpi_comm_world()) == 0 else 0

    mpi_filt.filter = log_if_proc0

    logger = logging.getLogger(name)
    logger.setLevel(level)

    handler = logging.StreamHandler()
    handler.setLevel(0)  # defer filtering to the logger level
    handler.setFormatter(logging.Formatter("%(message)s"))
    logger.addHandler(handler)
    logger.addFilter(mpi_filt)

    # Silence DOLFIN's native output.
    dolfin.set_log_active(False)
    dolfin.set_log_level(dolfin.WARNING)

    return logger
def make_logger(name, level=logging.INFO):
    """Create a timestamped console logger that only emits on MPI rank 0.

    Also silences DOLFIN and attaches the same rank-0 filter to the FFC and
    UFL loggers so compile chatter is suppressed on non-root ranks.
    """
    def log_if_process0(record):
        # Pass records only on the root process of the world communicator.
        if dolfin.MPI.rank(dolfin.mpi_comm_world()) == 0:
            return 1
        else:
            return 0

    # `Object` is presumably a trivial attribute-container defined elsewhere
    # in this project — TODO confirm; logging only needs `.filter` to be callable.
    mpi_filt = Object()
    mpi_filt.filter = log_if_process0

    logger = logging.getLogger(name)
    logger.setLevel(level)

    ch = logging.StreamHandler()
    ch.setLevel(0)  # handler passes everything; logger level filters
    # formatter = logging.Formatter('%(message)s')
    formatter = logging.Formatter(('%(asctime)s - '
                                   '%(name)s - '
                                   '%(levelname)s - '
                                   '%(message)s'))
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    logger.addFilter(mpi_filt)

    # Mute DOLFIN's own logging.
    dolfin.set_log_active(False)
    dolfin.set_log_level(dolfin.WARNING)

    # Cap FFC/UFL output at WARNING and apply the same rank-0 filter.
    ffc_logger = logging.getLogger('FFC')
    ffc_logger.setLevel(dolfin.WARNING)
    ffc_logger.addFilter(mpi_filt)
    ufl_logger = logging.getLogger('UFL')
    ufl_logger.setLevel(dolfin.WARNING)
    ufl_logger.addFilter(mpi_filt)

    return logger
def verboseness(self, v):
    """Store the verbosity level and toggle DOLFIN logging to match."""
    self._verbose = v
    # DOLFIN output is active only at or above the configured threshold.
    dolfin_active = self._verbose >= self._VERBOSE_DOLFIN
    df.set_log_active(dolfin_active)
def problem3():
    """Run benchmark problem 3: inflate and activate an LV test geometry.

    Ramps cavity pressure and active contraction together in N steps, writing
    the displacement field to "benchmark_3.xdmf" after each sub-step.
    """
    patient = LVTestPatient("benchmark")
    setup_general_parameters()
    params = setup_adjoint_contraction_parameters()
    # BUG FIX: was `params["phase"] == "all"` — a comparison whose result was
    # discarded; assignment was clearly intended.
    params["phase"] = "all"
    active_model = "active_stress"
    params["active_model"] = active_model
    params["T_ref"] = 60.0
    params["base_bc"] = "fixed"

    # material_model = "guccione"
    material_model = "holzapfel_ogden"
    # material_model = "neo_hookean"

    solver_parameters, pressure, paramvec = make_solver_params(params, patient)
    V_real = df.FunctionSpace(solver_parameters["mesh"], "R", 0)
    gamma = df.Function(V_real, name="gamma")
    target_gamma = df.Function(V_real, name="target gamma")

    matparams = setup_material_parameters(material_model)
    if material_model == "guccione":
        matparams["C"] = 2.0
        matparams["bf"] = 8.0
        matparams["bt"] = 2.0
        matparams["bfs"] = 4.0

    args = (patient.fiber, gamma, matparams, active_model,
            patient.sheet, patient.sheet_normal, params["T_ref"])

    if material_model == "guccione":
        material = Guccione(*args)
    else:
        material = HolzapfelOgden(*args)

    solver_parameters["material"] = material
    solver = LVSolver(solver_parameters)
    solver.parameters["solve"]["snes_solver"]["report"] = True
    solver.solve()

    p_end = 15.0  # target cavity pressure
    g_end = 1.0   # target activation
    N = 5         # number of ramp steps

    df.set_log_active(True)
    df.set_log_level(df.INFO)

    f = df.XDMFFile(df.mpi_comm_world(), "benchmark_3.xdmf")
    u, p = solver.get_state().split(deepcopy=True)
    U = df.Function(u.function_space(), name="displacement")

    # Ramp pressure and activation together; save displacement after each stage.
    for (plv, g) in zip(np.linspace(0, p_end, N), np.linspace(0, g_end, N)):
        t = df.Timer("Test Material Model")
        t.start()

        iterate_pressure(solver, plv, pressure)
        u, p = solver.get_state().split(deepcopy=True)
        U.assign(u)
        f.write(U)

        target_gamma.assign(df.Constant(g))
        iterate_gamma(solver, target_gamma, gamma)
        u, p = solver.get_state().split(deepcopy=True)
        U.assign(u)
        f.write(U)
def solve_nonlinear(self, inputs, outputs):
    """Solve the (possibly nonlinear) state PDE and write the result to *outputs*.

    Supports three modes via options['problem_type']: 'linear_problem',
    'nonlinear_problem', and 'nonlinear_problem_load_stepping' (load applied
    in num_steps increments). Optionally dumps fields to .pvd files.
    """
    pde_problem = self.options['pde_problem']
    state_name = self.options['state_name']
    problem_type = self.options['problem_type']
    visualization = self.options['visualization']
    state_function = pde_problem.states_dict[state_name]['function']

    # NOTE(review): this loop keeps only the LAST argument function as the
    # density — presumably there is exactly one; confirm.
    for argument_name, argument_function in iteritems(
            self.argument_functions_dict):
        density_func = argument_function

    # Mark the traction boundary (id 6) and build its surface measure.
    mesh = state_function.function_space().mesh()
    sub_domains = df.MeshFunction('size_t', mesh, mesh.topology().dim() - 1)
    upper_edge = TractionBoundary()
    upper_edge.mark(sub_domains, 6)
    dss = df.Measure('ds')(subdomain_data=sub_domains)
    tractionBC = dss(6)

    self.itr = self.itr + 1
    state_function = pde_problem.states_dict[state_name]['function']
    residual_form = get_residual_form(
        state_function,
        df.TestFunction(state_function.function_space()),
        density_func,
        density_func.function_space(),
        tractionBC,
        # df.Constant((0.0, -9.e-1))
        df.Constant((0.0, -9.e-1)),
        int(self.itr))

    self._set_values(inputs, outputs)
    self.derivative_form = df.derivative(residual_form, state_function)
    df.set_log_level(df.LogLevel.ERROR)
    df.set_log_active(True)
    # df.solve(residual_form==0, state_function, bcs=pde_problem.bcs_list, J=self.derivative_form)
    if problem_type == 'linear_problem':
        # NOTE(review): despite the name, this branch still uses a Newton
        # solver (capped at 60 iterations, non-convergence tolerated).
        df.solve(residual_form == 0,
                 state_function,
                 bcs=pde_problem.bcs_list,
                 J=self.derivative_form,
                 solver_parameters={
                     "newton_solver": {
                         "maximum_iterations": 60,
                         "error_on_nonconvergence": False
                     }
                 })
    elif problem_type == 'nonlinear_problem':
        # Full SNES solve with MUMPS as the linear solver.
        problem = df.NonlinearVariationalProblem(residual_form,
                                                 state_function,
                                                 pde_problem.bcs_list,
                                                 self.derivative_form)
        solver = df.NonlinearVariationalSolver(problem)
        solver.parameters['nonlinear_solver'] = 'snes'
        solver.parameters["snes_solver"]["line_search"] = 'bt'
        solver.parameters["snes_solver"][
            "linear_solver"] = 'mumps'  # "cg" "gmres"
        solver.parameters["snes_solver"]["maximum_iterations"] = 500
        solver.parameters["snes_solver"]["relative_tolerance"] = 5e-13
        solver.parameters["snes_solver"]["absolute_tolerance"] = 5e-13
        # solver.parameters["snes_solver"]["linear_solver_"]["maximum_iterations"]=1000
        solver.parameters["snes_solver"]["error_on_nonconvergence"] = False
        solver.solve()
    elif problem_type == 'nonlinear_problem_load_stepping':
        # Apply the load incrementally from a zero initial state.
        num_steps = 3
        state_function.vector().set_local(
            np.zeros((state_function.function_space().dim())))
        for i in range(num_steps):
            v = df.TestFunction(state_function.function_space())
            if i < (num_steps - 1):
                residual_form = get_residual_form(
                    state_function, v, density_func,
                    density_func.function_space(), tractionBC,
                    # df.Constant((0.0, -9.e-1))
                    df.Constant((0.0, -9.e-1 / num_steps * (i + 1))),
                    int(self.itr))
            else:
                residual_form = get_residual_form(
                    state_function, v, density_func,
                    density_func.function_space(), tractionBC,
                    # df.Constant((0.0, -9.e-1))
                    df.Constant((0.0, -9.e-1 / num_steps * (i + 1))),
                    int(self.itr))
            problem = df.NonlinearVariationalProblem(
                residual_form, state_function, pde_problem.bcs_list,
                self.derivative_form)
            solver = df.NonlinearVariationalSolver(problem)
            solver.parameters['nonlinear_solver'] = 'snes'
            solver.parameters["snes_solver"]["line_search"] = 'bt'
            solver.parameters["snes_solver"][
                "linear_solver"] = 'mumps'  # "cg" "gmres"
            solver.parameters["snes_solver"]["maximum_iterations"] = 500
            solver.parameters["snes_solver"]["relative_tolerance"] = 1e-15
            solver.parameters["snes_solver"]["absolute_tolerance"] = 1e-15
            # solver.parameters["snes_solver"]["linear_solver_"]["maximum_iterations"]=1000
            solver.parameters["snes_solver"][
                "error_on_nonconvergence"] = False
            solver.solve()

    # option to store the visualization results
    if visualization == 'True':
        for argument_name, argument_function in iteritems(
                self.argument_functions_dict):
            df.File('solutions_iterations_3d/{}_{}.pvd'.format(
                argument_name, self.itr)) << argument_function

    self.L = -residual_form
    self.itr = self.itr + 1
    outputs[state_name] = state_function.vector().get_local()
import sys
src_directory = '../../../'
sys.path.append(src_directory)

import src.model
import src.solvers
import src.physical_constants
import src.helper
from pylab import sin, cos, exp, deg2rad
from dolfin import Expression, File, set_log_active

set_log_active(True)

# Geometry parameters: slope angle (radians, from -3 degrees), domain length,
# thickness, bump amplitude and width.
theta = deg2rad(-3.0)
L = 100000.
H = 1000.0
a0 = 100
sigma = 10000


class Surface(Expression):
    """Inclined-plane surface elevation: z = tan(theta) * x."""

    def __init__(self):
        pass

    def eval(self, values, x):
        values[0] = sin(theta) / cos(theta) * x[0]


class Bed(Expression):
    """Inclined bed with a Gaussian bump of amplitude a0 centred at (L/2, L/2)."""

    def __init__(self):
        pass

    def eval(self, values, x):
        # Bump-perturbed depth below the surface.
        y_0 = -H + a0 * (exp(-((x[0]-L/2.)**2 + (x[1]-L/2.)**2) / sigma**2))
        # Rotate into the inclined coordinate frame.
        values[0] = sin(theta)/cos(theta) * (x[0] + sin(theta)*y_0) + cos(theta)*y_0
def main():
    """Solve a 3D time-harmonic E-field problem with a point source and plot |E|.

    NOTE: Python 2 syntax (print statements, StandardError); relies on
    module-level globals (domain_size, max_edge_len, source_point, freq, c0,
    eps_r, mu_r, order, solver, field_pts, lam, Z0, analytical_result, ...).
    """
    # Define mesh
    domain_subdivisions = N.array(
        N.ceil(N.sqrt(2) * domain_size / max_edge_len), N.uint)
    print 'Numer of domain subdomain_subdivisions: ', domain_subdivisions
    mesh = dolfin.UnitCube(*domain_subdivisions)
    # Transform mesh to correct dimensions
    mesh.coordinates()[:] *= domain_size
    # Centred around [0,0,0]
    mesh.coordinates()[:] -= domain_size / 2

    ## Translate mesh slightly so that source coordinate lies at
    ## centroid of an element
    io = mesh.intersection_operator()
    source_elnos = io.all_intersected_entities(source_point)
    closest_elno = source_elnos[(N.argmin([
        source_point.distance(dolfin.Cell(mesh, i).midpoint())
        for i in source_elnos
    ]))]
    centre_pt = dolfin.Cell(mesh, closest_elno).midpoint()
    centre_coord = N.array([centre_pt.x(), centre_pt.y(), centre_pt.z()])
    # There seems to be an issue with the intersect operator if the
    # mesh coordinates are changed after calling it for the first
    # time. Since we called it to find the centroid, we should init a
    # new mesh
    mesh_coords = mesh.coordinates().copy()
    mesh = dolfin.UnitCube(*domain_subdivisions)
    mesh.coordinates()[:] = mesh_coords
    mesh.coordinates()[:] -= centre_coord
    ##

    # Define function space
    V = dolfin.FunctionSpace(mesh, "Nedelec 1st kind H(curl)", order)
    k_0 = 2 * N.pi * freq / c0  # Freespace wave number

    # Definite test- and trial functions
    u = dolfin.TrialFunction(V)
    v = dolfin.TestFunction(V)

    # Define the bilinear forms
    m = eps_r * inner(v, u) * dx                       # Mass form
    s = (1 / mu_r) * dot(curl(v), curl(u)) * dx        # Stiffness form
    n = V.cell().n                                     # Get the surface normal
    s_0 = inner(cross(n, v), cross(n, u)) * ds         # ABC boundary condition form

    # Assemble forms using uBLASS matrices so that we can easily export to scipy
    print 'Assembling forms'
    M = dolfin.uBLASSparseMatrix()
    S = dolfin.uBLASSparseMatrix()
    S_0 = dolfin.uBLASSparseMatrix()
    dolfin.assemble(m, tensor=M, mesh=mesh)
    dolfin.assemble(s, tensor=S, mesh=mesh)
    dolfin.assemble(s_0, tensor=S_0, mesh=mesh)
    print 'Number of degrees of freedom: ', M.size(0)

    # Set up RHS: complex point-source contribution scaled by j*k_0*Z0.
    b = N.zeros(M.size(0), dtype=N.complex128)
    dofnos, rhs_contrib = calc_pointsource_contrib(V, source_coord,
                                                   source_value)
    rhs_contrib = 1j * k_0 * Z0 * rhs_contrib
    b[dofnos] += rhs_contrib

    Msp = dolfin_ublassparse_to_scipy_csr(M)
    Ssp = dolfin_ublassparse_to_scipy_csr(S)
    S_0sp = dolfin_ublassparse_to_scipy_csr(S_0)

    # A is the system matrix that must be solved
    A = Ssp - k_0**2 * Msp + 1j * k_0 * S_0sp

    # NOTE(review): `solved` is assigned but never read afterwards.
    solved = False
    if solver == 'iterative':
        # solve using scipy bicgstab
        print 'solve using scipy bicgstab'
        x = solve_sparse_system(A, b)
    elif solver == 'direct':
        import scipy.sparse.linalg
        A_lu = scipy.sparse.linalg.factorized(A.T)
        x = A_lu(b)
    else:
        raise ValueError("solver must have value 'iterative' or 'direct'")

    dolfin.set_log_active(False)  # evaluation seems to make a lot of noise

    # Split the complex solution into real/imaginary DOLFIN functions.
    u_re = dolfin.Function(V)
    u_im = dolfin.Function(V)
    # N.require is important, since dolfin seems to expect a contiguous array
    u_re.vector()[:] = N.require(N.real(x), requirements='C')
    u_im.vector()[:] = N.require(N.imag(x), requirements='C')

    # Evaluate E at the requested field points; NaN where evaluation fails
    # (e.g. point outside the mesh).
    E_field = N.zeros((len(field_pts), 3), dtype=N.complex128)
    for i, fp in enumerate(field_pts):
        try:
            E_field[i, :] = u_re(fp) + 1j * u_im(fp)
        except (RuntimeError, StandardError):
            E_field[i, :] = N.nan + 1j * N.nan

    # Plot numerical vs analytical field magnitudes along x (in wavelengths).
    import pylab as P
    r1 = field_pts[:] / lam
    x1 = r1[:, 0]
    E_ana = N.abs(analytical_result)
    E_num = E_field
    P.figure()
    P.plot(x1, N.abs(E_num[:, 0]), '-g', label='x_num')
    P.plot(x1, N.abs(E_num[:, 1]), '-b', label='y_num')
    P.plot(x1, N.abs(E_num[:, 2]), '-r', label='z_num')
    P.plot(analytical_pts, E_ana, '--r', label='z_ana')
    P.ylabel('E-field Magnitude')
    P.xlabel('Distance (wavelengths)')
    P.legend(loc='best')
    P.grid(True)
    P.show()
import dolfin as df
from punc import *
import numpy as np
from matplotlib import pyplot as plt

# Silence DOLFIN output for the whole run.
df.set_log_active(False)

#==============================================================================
# INITIALIZING FENICS
#------------------------------------------------------------------------------
n_dims = 2                              # Number of dimensions
Ld = 6.28 * np.ones(n_dims)             # Length of domain
Nr = 32 * np.ones(n_dims, dtype=int)    # Number of 'rectangles' in mesh
periodic = np.ones(n_dims, dtype=bool)  # Periodicity flag per dimension

# Get the mesh:
# mesh, facet_func = load_mesh("../mesh/2D/nothing_in_square")
# mesh, facet_func = load_mesh("../mesh/2D/nonuniform_in_square")
mesh, facet_func = simple_mesh(Ld, Nr)  # simple_mesh etc. come from punc
ext_bnd_id, int_bnd_ids = get_mesh_ids(facet_func)

Ld = get_mesh_size(mesh)  # Get the size of the simulation domain

ext_bnd = ExteriorBoundaries(facet_func, ext_bnd_id)

# CG1 space constrained by the periodic boundary map.
V = df.FunctionSpace(mesh, 'CG', 1,
                     constrained_domain=PeriodicBoundary(Ld, periodic))
from domains import build_domain_dict, build_mesh
from transforms import build_transform_dict, transform_mesh
from solver import refine_mesh, Solver  # , USE_EIGEN
from boundary import build_bc_dict, bcApplyChoices, marked_boundary, \
    mark_conditions
from longcalc import LongCalculation, pickle_mesh, pickle_solutions
from tools import tooltips, openDialog, addContextMenu
from solutiontab import SolutionTab
from dolfin import set_log_active, parameters

# Allow evaluating functions slightly outside the mesh.
parameters['allow_extrapolation'] = True
# solver will have an option to switch to EIGEN
# if USE_EIGEN:
#     parameters['linear_algebra_backend'] = 'Eigen'
#     set_log_level(50)
set_log_active(False)

# fix for missing qt plugins in app
if not QApplication.libraryPaths():
    QApplication.addLibraryPath(str(os.environ['RESOURCEPATH'] + '/qt_plugins'))


class MainWindow(QMainWindow, Ui_MainWindow):
    """ Main application window. """

    def __init__(self):
        """ Initialize main window. """
        QMainWindow.__init__(self)
        # mesh: current dolfin mesh (None until one is built/loaded)
        self.mesh = None
        # dim: spatial dimension of the problem (default 2D)
        self.dim = 2
def solve_linear(self, d_outputs, d_residuals, mode):
    """Solve the linearized system dR/du for derivative propagation.

    options['option'] selects the backend:
      1: petsc4py GMRES+ILU (supports 'fwd' and 'rev' via solveTranspose)
      2: DOLFIN direct solve on the transposed matrix (reverse mode only)
      3: scipy SuperLU on the transposed matrix (reverse mode only)
      4: DOLFIN KrylovSolver gmres/ilu on the transposed matrix
    """
    option = self.options['option']
    dR_du_sparse = self.dR_du_sparse

    if option == 1:
        ksp = PETSc.KSP().create()
        ksp.setType(PETSc.KSP.Type.GMRES)
        ksp.setTolerances(rtol=5e-14)
        ksp.setOperators(dR_du_sparse)
        ksp.setFromOptions()
        pc = ksp.getPC()
        pc.setType("ilu")

        size = len(self.fea.VC.dofmap().dofs())

        # Sequential PETSc vectors holding residual and output seeds.
        dR = PETSc.Vec().create()
        dR.setSizes(size)
        dR.setType('seq')
        dR.setValues(range(size), d_residuals['density'])
        dR.setUp()

        du = PETSc.Vec().create()
        du.setSizes(size)
        du.setType('seq')
        du.setValues(range(size), d_outputs['density'])
        du.setUp()

        if mode == 'fwd':
            ksp.solve(dR, du)
            d_outputs['density'] = du.getValues(range(size))
        else:
            # Reverse mode: transpose solve.
            ksp.solveTranspose(du, dR)
            d_residuals['density'] = dR.getValues(range(size))
            print('d_residual[density]', d_residuals['density'])

    elif option == 2:
        # print('option 2')
        rhs_ = df.Function(self.function_space)
        dR = df.Function(self.function_space)
        rhs_.vector().set_local(d_outputs['density'])
        A = self.A
        Am = df.as_backend_type(A).mat()
        ATm = Am.transpose()
        AT = df.PETScMatrix(ATm)
        df.solve(AT, dR.vector(), rhs_.vector())
        # cannot directly use fea.u here, the update for the solution is not compatible
        d_residuals['density'] = dR.vector().get_local()

    elif option == 3:
        A = self.A
        Am = df.as_backend_type(A).mat()
        ATm = Am.transpose()
        # Rebuild the transposed matrix as scipy CSR and factorize once.
        ATm_csr = csr_matrix(ATm.getValuesCSR()[::-1], shape=Am.size)
        lu = splu(ATm_csr.tocsc())
        d_residuals['density'] = lu.solve(d_outputs['density'], trans='T')

    elif option == 4:
        rhs_ = df.Function(self.function_space)
        dR = df.Function(self.function_space)
        rhs_.vector().set_local(d_outputs['density'])
        A = self.A
        Am = df.as_backend_type(A).mat()
        ATm = Am.transpose()
        AT = df.PETScMatrix(ATm)
        df.set_log_active(True)
        solver = df.KrylovSolver('gmres', 'ilu')
        prm = solver.parameters
        prm["maximum_iterations"] = 1000000
        prm["divergence_limit"] = 1e2
        # info(parameters,True)
        solver.solve(AT, dR.vector(), rhs_.vector())
        # NOTE(review): every other branch writes d_residuals['density'];
        # 'displacements' here looks like a copy-paste typo — verify against
        # the component's declared variable names before changing.
        d_residuals['displacements'] = dR.vector().get_local()
def solve_nonlinear(self, inputs, outputs):
    """Solve the state PDE named by options['state_name'] and store the result.

    Modes via options['problem_type']: 'linear_problem' (single damped Newton
    step), 'nonlinear_problem' (full SNES solve from zero), and
    'nonlinear_problem_load_stepping' (load applied over num_steps increments).
    Every 50th call optionally writes .pvd visualization files.
    """
    pde_problem = self.options['pde_problem']
    state_name = self.options['state_name']
    problem_type = self.options['problem_type']
    visualization = self.options['visualization']
    state_function = pde_problem.states_dict[state_name]['function']

    # NOTE(review): keeps only the LAST argument function as density_func —
    # presumably there is exactly one argument; confirm.
    for argument_name, argument_function in iteritems(
            self.argument_functions_dict):
        density_func = argument_function

    self.itr = self.itr + 1
    state_function = pde_problem.states_dict[state_name]['function']
    residual_form = pde_problem.states_dict[state_name]['residual_form']

    self._set_values(inputs, outputs)
    self.derivative_form = df.derivative(residual_form, state_function)
    df.set_log_active(True)

    if problem_type == 'linear_problem':
        if state_name == 'density':
            # Density filter has no Dirichlet BCs attached.
            print('this is a variational density filter')
            df.solve(residual_form == 0,
                     state_function,
                     J=self.derivative_form,
                     solver_parameters={
                         "newton_solver": {
                             "maximum_iterations": 1,
                             "error_on_nonconvergence": False
                         }
                     })
        else:
            df.solve(residual_form == 0,
                     state_function,
                     bcs=pde_problem.bcs_list,
                     J=self.derivative_form,
                     solver_parameters={
                         "newton_solver": {
                             "maximum_iterations": 1,
                             "error_on_nonconvergence": False
                         }
                     })
    elif problem_type == 'nonlinear_problem':
        # Restart from a zero state, then run SNES with MUMPS.
        state_function.vector().set_local(
            np.zeros((state_function.function_space().dim())))
        problem = df.NonlinearVariationalProblem(residual_form,
                                                 state_function,
                                                 pde_problem.bcs_list,
                                                 self.derivative_form)
        solver = df.NonlinearVariationalSolver(problem)
        solver.parameters['nonlinear_solver'] = 'snes'
        solver.parameters["snes_solver"]["relative_tolerance"] = 5e-100
        solver.parameters["snes_solver"]["absolute_tolerance"] = 5e-50
        solver.parameters["snes_solver"]["line_search"] = 'bt'
        solver.parameters["snes_solver"][
            "linear_solver"] = 'mumps'  # "cg" "gmres"
        solver.parameters["snes_solver"]["maximum_iterations"] = 500
        # NOTE(review): the two tolerance settings below repeat the ones above.
        solver.parameters["snes_solver"]["relative_tolerance"] = 5e-100
        solver.parameters["snes_solver"]["absolute_tolerance"] = 5e-50
        # solver.parameters["snes_solver"]["linear_solver_"]["maximum_iterations"]=1000
        solver.parameters["snes_solver"]["error_on_nonconvergence"] = False
        solver.solve()
    elif problem_type == 'nonlinear_problem_load_stepping':
        # NOTE(review): this branch references `get_residual_form` and
        # `tractionBC`, which are not defined in this method — they must come
        # from module scope; verify before using this mode.
        num_steps = 4
        state_function.vector().set_local(
            np.zeros((state_function.function_space().dim())))
        for i in range(num_steps):
            v = df.TestFunction(state_function.function_space())
            if i < (num_steps - 1):
                # NOTE(review): `density_func.function_space` is passed
                # without calling it (no parentheses) — likely a bug.
                residual_form = get_residual_form(
                    state_function, v, density_func,
                    density_func.function_space, tractionBC,
                    # df.Constant((0.0, -9.e-1))
                    df.Constant((0.0, -9.e-1 / num_steps * (i + 1))),
                    'False')
            else:
                residual_form = get_residual_form(
                    state_function, v, density_func,
                    density_func.function_space, tractionBC,
                    # df.Constant((0.0, -9.e-1))
                    df.Constant((0.0, -9.e-1 / num_steps * (i + 1))),
                    'vol')
            problem = df.NonlinearVariationalProblem(
                residual_form, state_function, pde_problem.bcs_list,
                self.derivative_form)
            solver = df.NonlinearVariationalSolver(problem)
            solver.parameters['nonlinear_solver'] = 'snes'
            solver.parameters["snes_solver"]["line_search"] = 'bt'
            solver.parameters["snes_solver"][
                "linear_solver"] = 'mumps'  # "cg" "gmres"
            solver.parameters["snes_solver"]["maximum_iterations"] = 500
            solver.parameters["snes_solver"]["relative_tolerance"] = 1e-15
            solver.parameters["snes_solver"]["absolute_tolerance"] = 1e-15
            # solver.parameters["snes_solver"]["linear_solver_"]["maximum_iterations"]=1000
            solver.parameters["snes_solver"][
                "error_on_nonconvergence"] = False
            solver.solve()

    # option to store the visualization results (every 50th iteration)
    if (visualization == 'True') and (self.itr % 50 == 0):
        for argument_name, argument_function in iteritems(
                self.argument_functions_dict):
            if argument_name == 'density':
                # Project the RAMP-penalized density before writing.
                df.File('solutions_iterations_40ramp/{}_{}.pvd'.format(
                    argument_name, self.itr)) << df.project(
                        argument_function /
                        (1 + 8. * (1. - argument_function)),
                        argument_function.function_space())
            else:
                df.File('solutions_iterations_40ramp/{}_{}.pvd'.format(
                    argument_name, self.itr)) << argument_function
        df.File('solutions_iterations_40ramp/{}_{}.pvd'.format(
            state_name, self.itr)) << state_function

    self.L = -residual_form
    outputs[state_name] = state_function.vector().get_local()
    self._set_values(inputs, outputs)
def run(self, species="Oxygen", increment=0):
    """Solve a steady diffusion problem for 'Oxygen' or 'Factor' on self.mesh.

    Vessel/cell sources from self.sources are applied as point sources, the
    system is solved with CG/ILU, the solution is sampled at the cell source
    locations, written via self.write_output, and returned as a list.
    """
    # Specify the test/trial function space
    df.set_log_active(False)
    V = df.FunctionSpace(self.mesh, "Lagrange", 1)

    # Specify the boundary conditions, Dirichlet on all domain faces
    def u0_boundary(x, on_boundary):
        return on_boundary

    # Define the problem
    u = df.TrialFunction(V)
    v = df.TestFunction(V)
    if species == "Oxygen":
        # Oxygen: pure diffusion, boundary value 1.
        bc = df.DirichletBC(V, df.Constant(1), u0_boundary)
        f = df.Constant(0.0)
        a = self.Dc * df.inner(df.nabla_grad(u), df.nabla_grad(v)) * df.dx
        L = f * v * df.dx
    elif species == "Factor":
        # Factor: diffusion plus first-order decay, boundary value 0.
        bc = df.DirichletBC(V, df.Constant(0), u0_boundary)
        f = df.Constant(0)
        a = (self.decayRate * u * v +
             self.Dv * df.inner(df.nabla_grad(u), df.nabla_grad(v))) * df.dx
        L = f * v * df.dx

    # Assemble the system
    A, b = df.assemble_system(a, L, bc)

    if species == "Oxygen":
        # Add vessel source terms (self.sources[1]); points are shifted by
        # half the grid spacing. 2D vs 3D is chosen via self.extents[2].
        vesselSources = self.sources[1]
        for eachSource in vesselSources:
            location = [
                point - self.spacing / 2.0 for point in eachSource[0]
            ]
            if self.extents[2] > 1:
                delta = df.PointSource(
                    V, df.Point(location[0], location[1], location[2]),
                    self.permeability * eachSource[1])
            else:
                delta = df.PointSource(V,
                                       df.Point(location[0], location[1]),
                                       self.permeability * eachSource[1])
            # NOTE(review): bare except silently drops sources that fall
            # outside the mesh (or any other failure) — confirm intended.
            try:
                delta.apply(b)
            except:
                pass

        # Add cell sink terms (negative point sources)
        cellSources = self.sources[0]
        for eachSource in cellSources:
            location = [
                point - self.spacing / 2.0 for point in eachSource[0]
            ]
            if self.extents[2] > 1:
                delta = df.PointSource(
                    V, df.Point(location[0], location[1], location[2]),
                    -self.consumptionRate * eachSource[1])
            else:
                delta = df.PointSource(
                    V, df.Point(location[0], location[1]),
                    -self.consumptionRate * eachSource[1])
            try:
                delta.apply(b)
            except:
                pass
    elif species == "Factor":
        # Add cell source terms
        cellSources = self.sources[0]
        for eachSource in cellSources:
            location = [
                point - self.spacing / 2.0 for point in eachSource[0]
            ]
            if self.extents[2] > 1:
                delta = df.PointSource(
                    V, df.Point(location[0], location[1], location[2]),
                    self.factorSensitvity * eachSource[1])
            else:
                delta = df.PointSource(
                    V, df.Point(location[0], location[1]),
                    self.factorSensitvity * eachSource[1])
            try:
                delta.apply(b)
            except:
                pass

    # Set up solution vector
    u = df.Function(V)
    U = u.vector()

    # Set up and run solver
    solver = df.KrylovSolver("cg", "ilu")
    solver.solve(A, U, b)

    # Sample the solution at each cell-source location; fall back to the
    # Dirichlet boundary value if evaluation fails (e.g. point off-mesh).
    self.result = []
    for eachEntry in self.sources[0]:
        location = [point - self.spacing / 2.0 for point in eachEntry[0]]
        if self.extents[2] <= 1:
            location = location[:2]
        try:
            result = u(location)
        except:
            if species == "Oxygen":
                result = 1.0
            else:
                result = 0.0
        self.result.append(result)

    self.write_output(species, increment)
    return self.result
# # mshr is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with mshr. If not, see <http://www.gnu.org/licenses/>. # import dolfin import pygmsh # from mshr import Sphere, Cylinder, CSGCGALDomain3D, generate_mesh # dolfin.set_log_level(dolfin.TRACE) dolfin.set_log_active(True) dolfin.set_log_level(4) # Define 3D geometry # sphere = Sphere(dolfin.Point(0, 0, 0), 0.5) # cone = Cylinder(dolfin.Point(0, 0, 0), dolfin.Point(0, 0, -1), .35, .1) # geometry = cone + sphere geom = pygmsh.opencascade.Geometry(characteristic_length_min=0.1, characteristic_length_max=0.1) sphere_a = geom.add_ball([0., 0., 0.], 0.5) cone = geom.add_cylinder([0., 0., 0.], [0., 0., 1.], .1) figure = geom.boolean_union([sphere_a, cone]) mesh = pygmsh.generate_mesh(geom, verbose=True)
import sys
src_directory = '../../../'
sys.path.append(src_directory)

from src.utilities import DataInput, DataOutput
from data.data_factory import DataFactory
from meshes.mesh_factory import MeshFactory
from src.physics import VelocityBalance_2
from dolfin import Mesh, set_log_active

set_log_active(True)

# Minimum ice thickness applied to both datasets.
thklim = 50.0

bedmap1 = DataFactory.get_bedmap1(thklim=thklim)
bedmap2 = DataFactory.get_bedmap2(thklim=thklim)

# load a mesh :
mesh = Mesh("meshes/2dmesh.xml")
db1 = DataInput(None, bedmap1, mesh=mesh)
db2 = DataInput(None, bedmap2, mesh=mesh)

# Project surface (h), thickness (H) and accumulation (adot) onto the mesh.
h = db2.get_projection("h_n")
H = db2.get_projection("H_n")
adot = db1.get_projection("adot")

prb = VelocityBalance_2(mesh, H, h, adot, 12.0)
prb.solve_forward()

# File output
def strain(v): return dl.sym(dl.nabla_grad(v)) F = ( (2./Re)*dl.inner(strain(v),strain(v_test))+ dl.inner (dl.nabla_grad(v)*v, v_test) - (q * dl.div(v_test)) + ( dl.div(v) * q_test) ) * dl.dx dl.solve(F == 0, vq, bcs, solver_parameters={"newton_solver": {"relative_tolerance":1e-4, "maximum_iterations":100, "linear_solver":"default"}}) return v if __name__ == "__main__": dl.set_log_active(False) np.random.seed(1) sep = "\n"+"#"*80+"\n" mesh = dl.refine( dl.Mesh("ad_20.xml") ) rank = dl.MPI.rank(mesh.mpi_comm()) nproc = dl.MPI.size(mesh.mpi_comm()) if rank == 0: print( sep, "Set up the mesh and finite element spaces.\n","Compute wind velocity", sep ) Vh = dl.FunctionSpace(mesh, "Lagrange", 2) ndofs = Vh.dim() if rank == 0: print( "Number of dofs: {0}".format( ndofs ) ) if rank == 0:
"facet_function": ffun, "facet_normal": N, "state_space": "P_2:P_1", "compressibility": { "type": "incompressible", "lambda": 0.0 }, "material": material, "bc": { "dirichlet": make_dirichlet_bcs, "neumann": [[T, 1]] } } df.parameters["adjoint"]["stop_annotating"] = True df.set_log_active(True) df.set_log_level(df.INFO) solver = LVSolver(params) solver.solve() uh, ph = solver.get_state().split(deepcopy=True) F = df.grad(uh) + df.Identity(2) J = df.project(df.det(F), DG1) err_u.append(df.errornorm(u_exact, uh, "H1", mesh=mesh)) err_p.append(df.errornorm(p_exact, ph, "L2", mesh=mesh)) err_J.append(df.errornorm(df.Expression("1.0"), J, "L2", mesh=mesh)) if 0: u = df.interpolate(u_exact, P2) p = df.interpolate(p_exact, P1)
def closed_loop(parameters, advanced_parameters, CL_parameters):
    """Run a closed-loop cardiac cycle simulation.

    Inflates the ventricular geometry to the end-diastolic volume, then
    steps a Windkessel circulation model coupled to the mechanics solver
    through one cycle, writing displacement (XDMF) and pressure/volume
    data ('pv_data_plot.txt') as it goes.

    Parameters
    ----------
    parameters : dict
        Patient data, mesh info, BC type, and regional gamma values.
    advanced_parameters : dict
        Material/active model selections and material parameters.
    CL_parameters : dict
        Cycle length, time step, ED volume, and Windkessel constants.
    """
    ####################
    ### General setup ##
    ####################
    setup_general_parameters()

    # Use a direct (MUMPS) solve; icntl(7)=6 selects the ordering strategy.
    df.PETScOptions.set('ksp_type', 'preonly')
    df.PETScOptions.set('pc_factor_mat_solver_package', 'mumps')
    df.PETScOptions.set("mat_mumps_icntl_7", 6)

    ############
    ### MESH ###
    ############
    patient = parameters['patient']
    meshname = parameters['mesh_name']
    mesh = parameters['mesh']
    # NOTE(review): the line above is dead -- it is immediately overwritten
    # by patient.mesh. Confirm which mesh is intended.
    mesh = patient.mesh
    X = df.SpatialCoordinate(mesh)
    N = df.FacetNormal(mesh)

    # Cycle length
    BCL = CL_parameters['BCL']
    t = CL_parameters['t']
    # Time increment
    dt = CL_parameters['dt']

    # End-diastolic volume
    ED_vol = CL_parameters['ED_vol']

    #####################################
    # Parameters for Windkessel model ###
    #####################################
    # Aorta compliance (reduce)
    Cao = CL_parameters['Cao']
    # Venous compliance
    Cven = CL_parameters['Cven']
    # Dead volume
    Vart0 = CL_parameters['Vart0']
    Vven0 = CL_parameters['Vven0']
    # Aortic resistance
    Rao = CL_parameters['Rao']
    Rven = CL_parameters['Rven']
    # Peripheral resistance (increase)
    Rper = CL_parameters['Rper']

    # Initial arterial and venous volumes.
    V_ven = CL_parameters['V_ven']
    V_art = CL_parameters['V_art']

    # scale geometry to match hemodynamics parameters
    mesh.coordinates()[:] *= CL_parameters['mesh_scaler']

    ######################
    ### Material model ###
    ######################
    material_model = advanced_parameters['material_model']

    ####################
    ### Active model ###
    ####################
    active_model = advanced_parameters['active_model']
    T_ref = advanced_parameters['T_ref']

    # These can be used to adjust the contractility (regional AHA segments).
    gamma_base = parameters['gamma']['gamma_base']
    gamma_mid = parameters['gamma']['gamma_mid']
    gamma_apical = parameters['gamma']['gamma_apical']
    gamma_apex = parameters['gamma']['gamma_apex']
    gamma_arr = np.array(gamma_base + gamma_mid + gamma_apical + gamma_apex)
    # gamma_arr = np.ones(17)

    ##############
    ### OUTPUT ###
    ##############
    dir_results = "results"
    if not os.path.exists(dir_results):
        os.makedirs(dir_results)

    disp_file = df.XDMFFile(df.mpi_comm_world(), "{}/displacement.xdmf".format(dir_results))
    pv_data = {"pressure":[], "volume":[]}
    output = "/".join([dir_results, "output_{}_ed{}.h5".format(meshname, ED_vol)])

    # Regional activation parameter, written once for inspection.
    G = RegionalParameter(patient.sfun)
    G.vector()[:] = gamma_arr
    G_ = df.project(G.get_function(), G.get_ind_space())
    f_gamma = df.XDMFFile(df.mpi_comm_world(), "{}/activation.xdmf".format(dir_results))
    f_gamma.write(G_)

    ########################
    ### Setup Parameters ###
    ########################
    params = setup_application_parameters(material_model)
    # Replace the default material parameters with the caller-supplied ones.
    params.remove("Material_parameters")
    matparams = df.Parameters("Material_parameters")
    for k, v in advanced_parameters['mat_params'].iteritems():
        matparams.add(k,v)
    params.add(matparams)
    params["base_bc"] = parameters['BC_type']
    params["base_spring_k"] = advanced_parameters['spring_constant']
    # params["base_bc"] = "fixed"
    params["active_model"] = active_model
    params["T_ref"] = T_ref
    params["gamma_space"] = "regional"

    ######################
    ### Initialization ###
    ######################

    # Solver parameters
    check_patient_attributes(patient)
    solver_parameters, _, _ = make_solver_params(params, patient)

    # Cavity volume target, controlled via the 'vol' attribute below.
    V0 = df.Expression("vol",vol = 0, name = "Vtarget", degree=1)
    solver_parameters["volume"] = V0

    # Solver
    solver = LVSolver3Field(solver_parameters, use_snes=True)
    df.set_log_active(True)
    solver.parameters["solve"]["snes_solver"]["report"] =True
    solver.parameters["solve"]["snes_solver"]['maximum_iterations'] = 50

    # Surface measure restricted to the endocardium for volume computation.
    ds = df.Measure("exterior_facet", domain = solver.parameters["mesh"], subdomain_data = solver.parameters["facet_function"])
    dsendo = ds(solver.parameters["markers"]["ENDO"][0])

    # Set cavity volume to the current (unloaded) geometry's cavity volume.
    V0.vol = df.assemble(solver._V_u*dsendo)
    print V0.vol

    # Initial solve
    solver.solve()

    # Save initial state
    w = solver.get_state()
    u, p, pinn = w.split(deepcopy=True)
    U_save = df.Function(u.function_space(), name = "displacement")
    U_save.assign(u)
    disp_file.write(U_save)

    # Append to an existing PV trace if present, otherwise start fresh.
    file_format = "a" if os.path.isfile('pv_data_plot.txt') else "w"
    pv_data_plot = open('pv_data_plot.txt', file_format)
    # Pressures are recorded in kPa (pinn presumably in Pa -- TODO confirm).
    pv_data_plot.write('{},'.format(float(pinn)/1000.0))
    pv_data_plot.write('{}\n'.format(V0.vol))
    pv_data["pressure"].append(float(pinn)/1000.0)
    pv_data["volume"].append(V0.vol)

    # Active contraction
    from force import ca_transient
    # NOTE(review): V_real appears unused below -- candidate for removal.
    V_real = df.FunctionSpace(mesh, "R", 0)
    gamma = solver.parameters["material"].get_gamma()

    # times = np.linspace(0,200,200)
    # target_gamma = ca_transient(times)
    # plt.plot(target_gamma)
    # plt.show()
    # exit()

    #######################
    ### Inflation Phase ###
    #######################
    inflate = True
    # Check if an inflation result already exists in the output file.
    if os.path.isfile(output):
        with df.HDF5File(df.mpi_comm_world(), output, "r") as h5file:
            if h5file.has_dataset("inflation"):
                h5file.read(solver.get_state(), "inflation")
                print ("\nInflation phase fetched from output file.")
                inflate = False

    if inflate:
        print ("\nInflate geometry to volume : {}\n".format(ED_vol))
        # Roughly one continuation step per 10 units of volume change.
        initial_step = int((ED_vol - V0.vol) / 10.0) +1
        control_values, prev_states = iterate("expression", solver, V0, "vol", ED_vol, continuation=False, initial_number_of_steps=initial_step, log_level=10)

        # Store output for every intermediate inflation state.
        for i, wi in enumerate(prev_states):
            ui, pi, pinni = wi.split(deepcopy=True)
            U_save.assign(ui)
            disp_file.write(U_save)

            print ("V = {}".format(control_values[i]))
            print ("P = {}".format(float(pinni)/1000.0))

            pv_data_plot.write('{},'.format(float(pinni)/1000.0))
            pv_data_plot.write('{}\n'.format(control_values[i]))
            pv_data["pressure"].append(float(pinni)/1000.0)
            pv_data["volume"].append(control_values[i])

        # Cache the inflated state so future runs can skip this phase.
        with df.HDF5File(df.mpi_comm_world(), output, "w") as h5file:
            h5file.write(solver.get_state(), "inflation")

    # Store ED solution
    w = solver.get_state()
    u, p, pinn = w.split(deepcopy=True)
    U_save.assign(u)
    disp_file.write(U_save)

    pv_data_plot.write('{},'.format(float(pinn)/1000.0))
    pv_data_plot.write('{}\n'.format(ED_vol))
    pv_data["pressure"].append(float(pinn)/1000.0)
    pv_data["volume"].append(ED_vol)

    print ("\nInflation succeded! Current pressure: {} kPa\n\n".format(float(pinn)/1000.0))

    pv_data_plot.close()

    #########################
    ### Closed loop cycle ###
    #########################
    while (t < BCL):
        w = solver.get_state()
        u, p, pinn = w.split(deepcopy=True)

        p_cav = float(pinn)
        V_cav = df.assemble(solver._V_u*dsendo)
        # Clamp the final step so the cycle ends exactly at t == BCL.
        if t + dt > BCL:
            dt = BCL - t
        t = t + dt

        # Calcium-transient-driven activation level at this time.
        target_gamma = ca_transient(t)

        # Update windkessel model
        Part = 1.0/Cao*(V_art - Vart0);
        Pven = 1.0/Cven*(V_ven - Vven0);
        PLV = float(p_cav);

        print ("PLV = {}".format(PLV))
        print ("Part = {}".format(Part))
        # Flux through aortic valve (open only when LV pressure exceeds aortic)
        if(PLV <= Part):
            Qao = 0.0;
        else:
            Qao = 1.0/Rao*(PLV - Part);
        # Flux through mitral valve (open only when venous pressure exceeds LV)
        if(PLV >= Pven):
            Qmv = 0.0;
        else:
            Qmv = 1.0/Rven*(Pven - PLV);

        Qper = 1.0/Rper*(Part - Pven);

        # Explicit Euler update of the compartment volumes.
        V_cav = V_cav + dt*(Qmv - Qao);
        V_art = V_art + dt*(Qao - Qper);
        V_ven = V_ven + dt*(Qper - Qmv);

        # Update cavity volume
        V0.vol = V_cav

        # Iterate active contraction (only during the first 150 time units).
        if t <= 150:
            target_gamma_ = target_gamma * gamma_arr
            _, states = iterate("gamma", solver, target_gamma_, gamma, initial_number_of_steps = 1)
        else:
            solver.solve()

        # Adapt time step based on how many continuation steps were needed.
        # NOTE(review): 'states' is only assigned in the t <= 150 branch;
        # if the else-branch runs first this raises NameError, and later
        # else-iterations reuse a stale value -- confirm intended behavior.
        if len(states) == 1:
            dt *= 1.7
        else:
            dt *= 0.5
        dt = min(dt, 10)

        # Store data
        ui, pi, pinni = solver.get_state().split(deepcopy=True)
        U_save.assign(ui)
        disp_file.write(U_save)

        Pcav = float(pinni)/1000.0
        # Re-open per step so data survives a crash mid-cycle.
        pv_data_plot = open('pv_data_plot.txt', 'a')
        pv_data_plot.write('{},'.format(Pcav))
        pv_data_plot.write('{}\n'.format(V_cav))
        pv_data_plot.close()
        pv_data["pressure"].append(Pcav)
        pv_data["volume"].append(V_cav)

        # Progress summary for this time step.
        msg = ("\n\nTime:\t{}".format(t) + \
               "\ndt:\t{}".format(dt) +\
               "\ngamma:\t{}".format(target_gamma) +\
               "\nV_cav:\t{}".format(V_cav) + \
               "\nV_art:\t{}".format(V_art) + \
               "\nV_ven:\t{}".format(V_ven) + \
               "\nPart:\t{}".format(Part) + \
               "\nPven:\t{}".format(Pven) + \
               "\nPLV:\t{}".format(Pcav) + \
               "\nQper:\t{}".format(Qper) + \
               "\nQao:\t{}".format(Qao) + \
               "\nQmv:\t{}\n\n".format(Qmv))
        print ((msg))

    #==============================================================================
    #     fig = plt.figure()
    #     ax = fig.gca()
    #     ax.plot(pv_data["volume"], pv_data["pressure"])
    #     ax.set_ylabel("Pressure (kPa)")
    #     ax.set_xlabel("Volume (ml)")
    #
    #
    #     fig.savefig("/".join([dir_results, "pv_loop.png"]))
    #     plt.show()
    #==============================================================================
    return

#import threading
#thread1 = threading.Thread(target = closed_loop)
#thread1.start()
# Experiment setup: labels, logging, and a 3D box mesh for a well/aquifer
# model (continuation of this script is not visible here).
import numpy as np
import dolfin as dl

######################
expr_label = 'expr_7'
expr_dir = expr_label
#dl.parameters['num_threads'] =1
max_num_threads = 1
# Log level 20 == INFO, but logging is disabled entirely on the next line.
dl.set_log_level(20)
dl.set_log_active(active=False)
# write out the param file to the expr dir

###################### Geometry and Mesh ################
dim = 3
# Domain extents; presumably in meters -- TODO confirm units.
xl = 10000 #Width
yl = 10000 #Length
zl = 500 #Depth
# Layer depths/thicknesses (cl_d, aq_d, uz_d) -- semantics not visible here.
cl_d = 0.0
aq_d = 400
uz_d = 100
offset = 10.0
#screen interval
screen = 237.7
# Well placed at the horizontal center of the domain.
well_center = dl.Point(xl / 2.0, yl / 2.0)
# Subdivision counts per axis (coarse 10x10x3 initial mesh).
nx = 10 #int(xl/1000)
ny = 10 #int(yl/1000)
nz = 3 #int(zl/100)
mesh = dl.BoxMesh(dl.Point(0.0, 0.0, 0.0), dl.Point(xl, yl, zl), nx, ny, nz) ## initial mesh
#dl.plot(mesh, interactive = False)
#if proc_id == 0:
from domains import build_domain_dict, build_mesh from transforms import build_transform_dict, transform_mesh from solver import refine_mesh, Solver # , USE_EIGEN from boundary import build_bc_dict, bcApplyChoices, marked_boundary, \ mark_conditions from longcalc import LongCalculation, pickle_mesh, pickle_solutions from tools import tooltips, openDialog, addContextMenu from solutiontab import SolutionTab from dolfin import set_log_active, parameters parameters['allow_extrapolation'] = True # solver will have an option to switch to EIGEN # if USE_EIGEN: # parameters['linear_algebra_backend'] = 'Eigen' # set_log_level(50) set_log_active(False) # fix for missing qt plugins in app if not QApplication.libraryPaths(): QApplication.addLibraryPath(str(os.environ['RESOURCEPATH'] + '/qt_plugins')) class MainWindow(QMainWindow, Ui_MainWindow): """ Main application window. """ def __init__(self): """ Initialize main window. """ QMainWindow.__init__(self) self.mesh = None self.dim = 2 self.solver = None
# Ice-sheet model setup: project imports, experiment constants, and simple
# surface/bed geometry expressions (script continues beyond this chunk).
import sys

# Make the project root importable for the project-local packages below.
src_directory = '../../../'
sys.path.append(src_directory)

import src.model
import src.helper
import src.solvers
import src.physical_constants
import pylab
import dolfin
import pickle

from meshes.mesh_factory import MeshFactory

dolfin.set_log_active(True)

# Experiment constants; presumably EISMINT-style parameters (domain length,
# surface elevation, mass-balance and temperature gradients) -- TODO confirm
# meanings/units against the model that consumes them.
L = 750000.0
S_0 = 10.0
S_b = 1e-5
R_el = 450000.0
M_max = 0.5
T_min = 223.15
S_T = 1.67e-5

class Surface(dolfin.Expression):
    # Flat initial surface at elevation S_0 everywhere.
    def eval(self,values,x):
        values[0] = S_0

class Bed(dolfin.Expression):
    # Flat bed at elevation zero everywhere.
    def eval(self,values,x):
        values[0] = 0.0
def main():
    """Solve a 3D electromagnetic scattering problem with Nedelec elements.

    Builds a cube mesh centred on the source point, assembles mass,
    stiffness and ABC boundary forms, solves the complex linear system
    (iteratively or directly, per the module-level `solver` flag), and
    plots the E-field magnitude against the analytical result.

    Relies on module-level configuration not visible here: domain_size,
    max_edge_len, order, freq, c0, eps_r, mu_r, source_point, source_coord,
    source_value, Z0, field_pts, lam, analytical_result, analytical_pts.
    """
    # Define mesh: subdivisions chosen so edge lengths stay under max_edge_len.
    domain_subdivisions = N.array(N.ceil(N.sqrt(2)*domain_size/max_edge_len), N.uint)
    print 'Numer of domain subdomain_subdivisions: ', domain_subdivisions

    mesh = dolfin.UnitCube(*domain_subdivisions)
    # Transform mesh to correct dimensions
    mesh.coordinates()[:] *= domain_size
    # Centred around [0,0,0]
    mesh.coordinates()[:] -= domain_size/2
    ## Translate mesh slightly so that source coordinate lies at
    ## centroid of an element
    source_elnos = mesh.all_intersected_entities(source_point)
    # Of all cells intersecting the source, pick the one whose midpoint is closest.
    closest_elno = source_elnos[(N.argmin([source_point.distance(dolfin.Cell(mesh, i).midpoint()) for i in source_elnos]))]
    centre_pt = dolfin.Cell(mesh, closest_elno).midpoint()
    centre_coord = N.array([centre_pt.x(), centre_pt.y(), centre_pt.z()])
    # There seems to be an issue with the intersect operator if the
    # mesh coordinates are changed after calling it for the first
    # time. Since we called it to find the centroid, we should init a
    # new mesh
    mesh_coords = mesh.coordinates().copy()
    mesh = dolfin.UnitCube(*domain_subdivisions)
    mesh.coordinates()[:] = mesh_coords
    mesh.coordinates()[:] -= centre_coord
    ##

    # Define function space (curl-conforming Nedelec elements).
    V = dolfin.FunctionSpace(mesh, "Nedelec 1st kind H(curl)", order)

    k_0 = 2*N.pi*freq/c0  # Freespace wave number

    # Define test- and trial functions
    u = dolfin.TrialFunction(V)
    v = dolfin.TestFunction(V)

    # Define the bilinear forms
    m = eps_r*inner(v, u)*dx                 # Mass form
    s = (1/mu_r)*dot(curl(v), curl(u))*dx    # Stiffness form

    n = V.cell().n                           # Get the surface normal
    s_0 = inner(cross(n, v), cross(n, u))*ds # ABC boundary condition form

    # Assemble forms using uBLAS matrices so that we can easily export to scipy
    print 'Assembling forms'
    M = dolfin.uBLASSparseMatrix()
    S = dolfin.uBLASSparseMatrix()
    S_0 = dolfin.uBLASSparseMatrix()
    dolfin.assemble(m, tensor=M, mesh=mesh)
    dolfin.assemble(s, tensor=S, mesh=mesh)
    dolfin.assemble(s_0, tensor=S_0, mesh=mesh)
    print 'Number of degrees of freedom: ', M.size(0)

    # Set up RHS: complex point-source excitation scaled by jk0*Z0.
    b = N.zeros(M.size(0), dtype=N.complex128)
    dofnos, rhs_contrib = calc_pointsource_contrib(V, source_coord, source_value)
    rhs_contrib = 1j*k_0*Z0*rhs_contrib
    b[dofnos] += rhs_contrib

    # Convert assembled matrices to scipy CSR for the complex solve.
    Msp = dolfin_ublassparse_to_scipy_csr(M)
    Ssp = dolfin_ublassparse_to_scipy_csr(S)
    S_0sp = dolfin_ublassparse_to_scipy_csr(S_0)

    # A is the system matrix that must be solved
    A = Ssp - k_0**2*Msp + 1j*k_0*S_0sp

    # NOTE(review): 'solved' is assigned but never used below.
    solved = False;
    if solver == 'iterative':
        # solve using scipy bicgstab
        print 'solve using scipy bicgstab'
        x = solve_sparse_system ( A, b )
    elif solver == 'direct':
        import scipy.sparse.linalg
        # NOTE(review): A.T is factorized, not A -- presumably relying on
        # symmetry of the system; confirm this is intentional.
        A_lu = scipy.sparse.linalg.factorized(A.T)
        x = A_lu(b)
    else:
        raise ValueError("solver must have value 'iterative' or 'direct'")

    dolfin.set_log_active(False)  # evaluation seems to make a lot of noise
    # Split the complex solution into real/imaginary FE functions.
    u_re = dolfin.Function(V)
    u_im = dolfin.Function(V)
    # N.require is important, since dolfin seems to expect a contiguous array
    u_re.vector()[:] = N.require(N.real(x), requirements='C')
    u_im.vector()[:] = N.require(N.imag(x), requirements='C')
    # Sample the field at the requested points; NaN where evaluation fails
    # (e.g. points outside the mesh).
    E_field = N.zeros((len(field_pts), 3), dtype=N.complex128)
    for i, fp in enumerate(field_pts):
        try:
            E_field[i,:] = u_re(fp) + 1j*u_im(fp)
        except (RuntimeError, StandardError):
            E_field[i,:] = N.nan + 1j*N.nan

    import pylab as P
    # Plot field magnitude vs distance (normalized by wavelength).
    r1 = field_pts[:]/lam
    x1 = r1[:,0]
    E_ana = N.abs(analytical_result)
    E_num = E_field
    P.figure()
    P.plot(x1, N.abs(E_num[:,0]), '-g', label='x_num')
    P.plot(x1, N.abs(E_num[:,1]), '-b', label='y_num')
    P.plot(x1, N.abs(E_num[:,2]), '-r', label='z_num')
    P.plot(analytical_pts, E_ana, '--r', label='z_ana')
    P.ylabel('E-field Magnitude')
    P.xlabel('Distance (wavelengths)')
    P.legend(loc='best')
    P.grid(True)
    P.show()
dl.solve(F == 0, vq, bcs, solver_parameters={ "newton_solver": { "relative_tolerance": 1e-4, "maximum_iterations": 100 } }) return v if __name__ == "__main__": dl.set_log_active(False) np.random.seed(1) sep = "\n" + "#" * 80 + "\n" print sep, "Set up the mesh and finite element spaces.\n", "Compute wind velocity", sep mesh = dl.refine(dl.Mesh("ad_20.xml")) wind_velocity = computeVelocityField(mesh) Vh = dl.FunctionSpace(mesh, "Lagrange", 2) print "Number of dofs: {0}".format(Vh.dim()) print sep, "Set up Prior Information and model", sep true_initial_condition = dl.interpolate( dl.Expression( 'min(0.5,exp(-100*(pow(x[0]-0.35,2) + pow(x[1]-0.7,2))))'), Vh).vector()
    # (continuation of the preceding test function, whose header is above
    # this chunk): assemble the weak form pieces on the basis.
    basis = FEniCSBasis(V)
    pde.assemble_rhs(basis)
    pde.assemble_lhs(basis)
    pde.assemble_operator(basis)

def test_navierlame_construct():
    """Smoke-test construction/assembly of the Navier-Lame weak form."""
    # Lame parameters as dolfin constants.
    lmbda = dolfin.Constant(2400)
    mu = dolfin.Constant(400)
    pde = fem.FEMNavierLame(lmbda, mu)
    N = 25
    mesh = UnitSquare(N, N)
    # Degree-1 function space from the PDE's weak form.
    V = pde.weak_form.function_space(mesh, 1)
    u = dolfin.TrialFunction(V)
    coeff = (lmbda, mu)
    basis = FEniCSBasis(V)
    # Assemble rhs/lhs/operator with the coefficient pair; construction
    # succeeding without error is the test.
    pde.assemble_rhs(basis, coeff)
    pde.assemble_lhs(basis, coeff)
    pde.assemble_operator(basis, coeff)

# Quiet the noisy FE tool-chain loggers before running the tests.
logging.getLogger("spuq").setLevel(logging.WARNING)
logging.getLogger("nose.config").setLevel(logging.WARNING)
logging.getLogger("FFC").setLevel(logging.WARNING)
logging.getLogger("UFL").setLevel(logging.WARNING)
dolfin.set_log_active(False)
test_main()