def test_interpolation_two_meshes(self):
    from sfepy import data_dir
    from sfepy.discrete import Variables
    from sfepy.discrete.fem import Mesh, FEDomain, Field

    m1 = Mesh.from_file(data_dir + '/meshes/3d/block.mesh')

    m2 = Mesh.from_file(data_dir + '/meshes/3d/cube_medium_tetra.mesh')
    m2.coors[:] *= 2.0

    bbox = m1.get_bounding_box()
    dd = bbox[1, :] - bbox[0, :]
    data = nm.sin(4.0 * nm.pi * m1.coors[:, 0:1] / dd[0]) \
           * nm.cos(4.0 * nm.pi * m1.coors[:, 1:2] / dd[1])

    variables1 = {
        'u': ('unknown field', 'scalar_tp', 0),
        'v': ('test field', 'scalar_tp', 'u'),
    }

    variables2 = {
        'u': ('unknown field', 'scalar_si', 0),
        'v': ('test field', 'scalar_si', 'u'),
    }

    d1 = FEDomain('d1', m1)
    omega1 = d1.create_region('Omega', 'all')
    field1 = Field.from_args('scalar_tp', nm.float64, (1, 1), omega1,
                             approx_order=1)
    ff1 = {field1.name: field1}

    d2 = FEDomain('d2', m2)
    omega2 = d2.create_region('Omega', 'all')
    field2 = Field.from_args('scalar_si', nm.float64, (1, 1), omega2,
                             approx_order=0)
    ff2 = {field2.name: field2}

    vv1 = Variables.from_conf(transform_variables(variables1), ff1)
    u1 = vv1['u']
    u1.set_from_mesh_vertices(data)

    vv2 = Variables.from_conf(transform_variables(variables2), ff2)
    u2 = vv2['u']

    # Performs interpolation, if other field differs from self.field
    # or, in particular, is defined on a different mesh.
    u2.set_from_other(u1, strategy='interpolation', close_limit=0.1)

    fname = in_dir(self.options.out_dir)
    u1.save_as_mesh(fname('test_mesh_interp_block_scalar.vtk'))
    u2.save_as_mesh(fname('test_mesh_interp_cube_scalar.vtk'))

    return True

def do_interpolation(m2, m1, data, field_name, force=False):
    """Interpolate data from m1 to m2."""
    from sfepy.discrete import Variables
    from sfepy.discrete.fem import FEDomain, Field

    fields = {
        'scalar_si': ((1, 1), 'Omega', 2),
        'vector_si': ((3, 1), 'Omega', 2),
        'scalar_tp': ((1, 1), 'Omega', 1),
        'vector_tp': ((3, 1), 'Omega', 1),
    }

    d1 = FEDomain('d1', m1)
    omega1 = d1.create_region('Omega', 'all')

    f = fields[field_name]

    field1 = Field.from_args('f', nm.float64, f[0], d1.regions[f[1]],
                             approx_order=f[2])
    ff = {field1.name: field1}

    vv = Variables.from_conf(transform_variables(variables), ff)
    u1 = vv['u']
    u1.set_from_mesh_vertices(data)

    d2 = FEDomain('d2', m2)
    omega2 = d2.create_region('Omega', 'all')

    field2 = Field.from_args('f', nm.float64, f[0], d2.regions[f[1]],
                             approx_order=f[2])
    ff2 = {field2.name: field2}

    vv2 = Variables.from_conf(transform_variables(variables), ff2)
    u2 = vv2['u']

    if not force:
        # Performs interpolation, if other field differs from self.field
        # or, in particular, is defined on a different mesh.
        u2.set_from_other(u1, strategy='interpolation', close_limit=0.5)

    else:
        coors = u2.field.get_coor()
        vals = u1.evaluate_at(coors, close_limit=0.5)
        u2.set_data(vals)

    return u1, u2

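# A minimal usage sketch of do_interpolation(), not taken from the original
# sources.  It assumes the module-level `variables` dict and the
# transform_variables() helper used inside do_interpolation() are in scope,
# `nm` is numpy, and the two data_dir mesh files exist (the chosen meshes and
# the nodal data are illustrative only).
def example_interpolate_block_to_cube():
    from sfepy import data_dir
    from sfepy.discrete.fem import Mesh

    m1 = Mesh.from_file(data_dir + '/meshes/3d/block.mesh')
    m2 = Mesh.from_file(data_dir + '/meshes/3d/cube_medium_tetra.mesh')

    # Nodal data defined on the source mesh vertices (a simple
    # coordinate-based scalar field, shape (n_vertex, 1)).
    data = nm.sin(nm.pi * m1.coors[:, 0:1])

    # Interpolate from m1 to m2; force=True would instead evaluate u1
    # directly at the DOF coordinates of u2 via evaluate_at().
    u1, u2 = do_interpolation(m2, m1, data, 'scalar_tp', force=False)

    return u1, u2
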
def standalone_setup(self):
    from sfepy.discrete import create_adof_conns, Variables

    conn_info = {'aux': self.get_conn_info()}
    adcs = create_adof_conns(conn_info, None)

    variables = Variables(self.get_variables())
    variables.set_adof_conns(adcs)

    materials = self.get_materials(join=True)

    for mat in materials:
        mat.time_update(None, [Struct(terms=[self])])

def from_conf(conf, options):
    from sfepy.discrete import FieldVariable, Variables, Problem
    from sfepy.discrete.fem import Mesh, FEDomain, Field

    mesh = Mesh.from_file(data_dir + '/meshes/2d/square_unit_tri.mesh')
    domain = FEDomain('domain', mesh)

    omega = domain.create_region('Omega', 'all')
    domain.create_region('Left', 'vertices in (x < -0.499)', 'facet')
    domain.create_region('LeftStrip',
                         'vertices in (x < -0.499)'
                         ' & (y > -0.199) & (y < 0.199)', 'facet')
    domain.create_region('LeftFix', 'r.Left -v r.LeftStrip', 'facet')
    domain.create_region('Right', 'vertices in (x > 0.499)', 'facet')
    domain.create_region('RightStrip',
                         'vertices in (x > 0.499)'
                         ' & (y > -0.199) & (y < 0.199)', 'facet')
    domain.create_region('RightFix', 'r.Right -v r.RightStrip', 'facet')

    fu = Field.from_args('fu', nm.float64, 'vector', omega, approx_order=2)
    u = FieldVariable('u', 'unknown', fu)

    fp = Field.from_args('fp', nm.float64, 'scalar', omega, approx_order=2)
    p = FieldVariable('p', 'unknown', fp)

    pb = Problem('test', domain=domain, fields=[fu, fp], auto_conf=False)

    test = Test(problem=pb, variables=Variables([u, p]),
                conf=conf, options=options)
    return test

def test_evaluate_at(self):
    from sfepy import data_dir
    from sfepy.discrete.fem import Mesh
    from sfepy.discrete import Variables
    from sfepy.discrete.fem import FEDomain, Field

    meshes = {
        'tp': Mesh.from_file(data_dir + '/meshes/3d/block.mesh'),
    }
    datas = gen_datas(meshes)

    fields = {
        'scalar_tp': ((1, 1), 'Omega', 1),
        'vector_tp': ((3, 1), 'Omega', 1),
    }

    ok = True
    for field_name in ['scalar_tp', 'vector_tp']:
        d = FEDomain('d', meshes['tp'])
        d.create_region('Omega', 'all')

        f = fields[field_name]
        field = Field.from_args('f', nm.complex128, f[0], d.regions[f[1]],
                                approx_order=f[2])
        ff = {field.name: field}

        vv = Variables.from_conf(transform_variables(variables), ff)
        u = vv['u']

        bbox = d.get_mesh_bounding_box()
        t = nm.expand_dims(nm.linspace(0, 1, 100), 1)
        coors = nm.expand_dims(bbox[1] - bbox[0], 0) * t + bbox[0]

        data_r = datas[field_name]
        data_i = 2. / (1 + datas[field_name])

        u.set_from_mesh_vertices(data_r)
        vals_r = u.evaluate_at(coors)
        u.set_from_mesh_vertices(data_i)
        vals_i = u.evaluate_at(coors)
        u.set_from_mesh_vertices(data_r + data_i * 1j)
        vals = u.evaluate_at(coors)

        _ok = nm.allclose(vals_r + vals_i * 1j, vals, rtol=0.0, atol=1e-12)
        _ok = _ok and nm.abs(vals).sum() > 1
        self.report('evaluating complex field %s: %s' % (field_name, _ok))

        ok = ok and _ok

    return ok

def test_pbc(self):
    from sfepy.discrete import Variables, Conditions

    problem = self.problem
    conf = self.conf

    ebcs = Conditions.from_conf(conf.ebcs, problem.domain.regions)
    epbcs = Conditions.from_conf(conf.epbcs, problem.domain.regions)

    variables = Variables.from_conf(conf.variables, problem.fields)
    variables.equation_mapping(ebcs, epbcs, None, problem.functions)
    state = variables.create_state_vector()
    variables.apply_ebc(state)

    return variables.has_ebc(state)

def test_consistency_d_dw(self):
    from sfepy.discrete import Variables

    ok = True
    pb = self.problem
    for aux in test_terms:
        term_template, (prefix, par_name, d_vars, dw_vars) = aux
        print term_template, prefix, par_name, d_vars, dw_vars

        term1 = term_template % ((prefix,) + d_vars)

        variables = Variables.from_conf(self.conf.variables, pb.fields)

        for var_name in d_vars:
            var = variables[var_name]
            n_dof = var.field.n_nod * var.field.shape[0]
            aux = nm.arange(n_dof, dtype=nm.float64)
            var.set_data(aux)

        if prefix == 'd':
            val1 = pb.evaluate(term1, var_dict=variables.as_dict())

        else:
            val1 = pb.evaluate(term1, call_mode='d_eval',
                               var_dict=variables.as_dict())

        self.report('%s: %s' % (term1, val1))

        term2 = term_template % (('dw',) + dw_vars[:2])

        vec, vv = pb.evaluate(term2, mode='weak',
                              var_dict=variables.as_dict(),
                              ret_variables=True)

        pvec = vv.get_state_part_view(vec, dw_vars[2])
        val2 = nm.dot(variables[par_name](), pvec)
        self.report('%s: %s' % (term2, val2))

        err = nm.abs(val1 - val2) / nm.abs(val1)
        _ok = err < 1e-12
        self.report('relative difference: %e -> %s' % (err, _ok))

        ok = ok and _ok

    return ok

def test_invariance_qp(self):
    from sfepy import data_dir
    from sfepy.discrete import Variables, Integral
    from sfepy.discrete.fem import Mesh, FEDomain, Field
    from sfepy.terms import Term
    from sfepy.discrete.common.mappings import get_physical_qps

    mesh = Mesh.from_file(data_dir + '/meshes/3d/block.mesh')

    bbox = mesh.get_bounding_box()
    dd = bbox[1, :] - bbox[0, :]
    data = nm.sin(4.0 * nm.pi * mesh.coors[:, 0:1] / dd[0]) \
           * nm.cos(4.0 * nm.pi * mesh.coors[:, 1:2] / dd[1])

    variables = {
        'u': ('unknown field', 'scalar_tp', 0),
        'v': ('test field', 'scalar_tp', 'u'),
    }

    domain = FEDomain('domain', mesh)
    omega = domain.create_region('Omega', 'all')

    field = Field.from_args('scalar_tp', nm.float64, 1, omega,
                            approx_order=1)
    ff = {field.name: field}

    vv = Variables.from_conf(transform_variables(variables), ff)
    u = vv['u']
    u.set_from_mesh_vertices(data)

    integral = Integral('i', order=2)
    term = Term.new('ev_volume_integrate(u)', integral, omega, u=u)
    term.setup()
    val1 = term.evaluate(mode='qp')
    val1 = val1.ravel()

    qps = get_physical_qps(omega, integral)
    coors = qps.values

    val2 = u.evaluate_at(coors).ravel()

    self.report('max. difference:', nm.abs(val1 - val2).max())
    ok = nm.allclose(val1, val2, rtol=0.0, atol=1e-12)
    self.report('invariance in qp: %s' % ok)

    return ok

def __init__(self, dim, approx_order, **kwargs):
    """
    Creates Struct object with all the data necessary to test terms.

    :param dim: dimension
    :param approx_order: approximation order
    :param kwargs: velo, diffusion or penalty for prepare_materials
    :return: term test scope
    """
    if dim == 1:
        (field, regions), mesh = prepare_dgfield_1D(approx_order)
    elif dim == 2:
        (field, regions), mesh = prepare_field_2D(approx_order)

    self.field = field
    self.regions = regions
    self.mesh = mesh

    self.n_cell = field.n_cell
    self.n_nod = field.n_nod
    self.n_el_nod = field.n_el_nod

    self.u, self.v = self.prepare_variables(field)
    self.u.data = [nm.zeros(self.n_nod)]
    self.variables = Variables([self.u, self.v])

    self.integral = Integral('i', order=approx_order * 2)

    self.a, self.D, self.Cw = self.prepare_materials(field, **kwargs)

    if dim == 1:
        velo = nm.array(1.0)
    elif dim == 2:
        velo = nm.array([1.0, 0])

    self.burg_velo = velo.T / nm.linalg.norm(velo)

    self.nonlin = Material('nonlin',
                           values={'.fun': self.burg_fun,
                                   '.dfun': self.burg_fun_d})

    self.out = nm.zeros((self.n_cell, 1, self.n_el_nod, 1))

def test_sensitivity(self):
    from sfepy.discrete import Variables
    from sfepy.mesh.splinebox import SplineBox

    tolerance = 1e-4
    ok = True

    pb = self.problem

    variables = Variables.from_conf(self.conf.variables, pb.fields)

    for var_name in variables.names:
        var = variables[var_name]
        n_dof = var.field.n_nod * var.field.shape[0]
        aux = nm.arange(n_dof, dtype=nm.float64)
        var.set_data(aux)

    mesh = pb.domain.mesh
    bbox = nm.array(mesh.get_bounding_box()).T
    spbox = SplineBox(bbox, mesh.coors)

    dvel_modes = [
        # expand inner cylinder, no volume change
        [([20, 21, 22, 23], (-1, -1, 0)),
         ([24, 25, 26, 27], (-1, 1, 0)),
         ([36, 37, 38, 39], (1, -1, 0)),
         ([40, 41, 42, 43], (1, 1, 0))],
        # volume change
        [(range(16, 32), (0.2, 0, 0)),
         (range(32, 48), (0.4, 0, 0)),
         (range(48, 52), (0.6, 0.2, 0.2)),
         (range(52, 56), (0.8, 0.2, 0.3)),
         (range(56, 60), (1.0, 0.2, 0.4)),
         (range(60, 64), (1.2, 0.2, 0.5))],
    ]

    r4 = range(4)
    cp_pos = {i*16 + j*4 + k: (i, j, k) for k in r4 for j in r4 for i in r4}

    # compute design velocities
    dvels = []
    for dv_mode in dvel_modes:
        dvel = 0
        for pts, dir in dv_mode:
            for pt in pts:
                dvel += spbox.evaluate_derivative(cp_pos[pt], dir)
        dvels.append(dvel)

    for tname_sa, tname, rname, mat, var1, var2 in test_terms:
        args = [] if mat is None else [mat]
        args += [var1] if var2 is None else [var1, var2]
        term = '%s.i.%s(%s)' % (tname, rname, ', '.join(args))
        term_sa = '%s.i.%s(%s)' % (tname_sa, rname, ', '.join(args + ['V']))

        val = pb.evaluate(term, var_dict=variables.as_dict())
        self.report('%s: %s' % (tname, val))

        dt = 1e-6
        for ii, dvel in enumerate(dvels):
            val = pb.evaluate(term, var_dict=variables.as_dict())
            variables['V'].set_data(dvel)
            val_sa = pb.evaluate(term_sa, var_dict=variables.as_dict())

            self.report('%s - mesh_velocity mode %d' % (tname_sa, ii))

            # mesh perturbation +
            new_coors = modify_mesh(dt/2., spbox, dvel_modes[ii], cp_pos)
            pb.set_mesh_coors(new_coors, update_fields=True)
            val1 = pb.evaluate(term, var_dict=variables.as_dict())

            # mesh perturbation -
            new_coors = modify_mesh(-dt/2., spbox, dvel_modes[ii], cp_pos)
            pb.set_mesh_coors(new_coors, update_fields=True)
            val2 = pb.evaluate(term, var_dict=variables.as_dict())

            val_fd = (val1 - val2) / dt

            err = nm.abs(val_sa - val_fd) / nm.linalg.norm(val_sa)
            self.report('term: %s' % val)
            self.report('sensitivity term: %s' % val_sa)
            self.report('finite differences: %s' % val_fd)
            self.report('relative error: %s' % err)

            _ok = err < tolerance
            ok = ok and _ok

    return ok

def save_basis_on_mesh(mesh, options, output_dir, lin,
                       permutations=None, suffix=''):
    if permutations is not None:
        mesh = mesh.copy()
        gel = GeometryElement(mesh.descs[0])
        perms = gel.get_conn_permutations()[permutations]
        conn = mesh.cmesh.get_cell_conn()
        n_el, n_ep = conn.num, gel.n_vertex
        offsets = nm.arange(n_el) * n_ep
        conn.indices[:] = conn.indices.take((perms + offsets[:, None]).ravel())

    domain = FEDomain('domain', mesh)

    omega = domain.create_region('Omega', 'all')

    field = Field.from_args('f', nm.float64, shape=1, region=omega,
                            approx_order=options.max_order,
                            poly_space_base=options.basis)
    var = FieldVariable('u', 'unknown', field)

    if options.plot_dofs:
        import sfepy.postprocess.plot_dofs as pd
        import sfepy.postprocess.plot_cmesh as pc
        ax = pc.plot_wireframe(None, mesh.cmesh)
        ax = pd.plot_global_dofs(ax, field.get_coor(), field.econn)
        ax = pd.plot_local_dofs(ax, field.get_coor(), field.econn)
        if options.dofs is not None:
            ax = pd.plot_nodes(ax, field.get_coor(), field.econn,
                               field.poly_space.nodes,
                               get_dofs(options.dofs, var.n_dof))
        pd.plt.show()

    output('dofs: %d' % var.n_dof)

    vec = nm.empty(var.n_dof, dtype=var.dtype)
    n_digit, _format = get_print_info(var.n_dof, fill='0')
    name_template = os.path.join(output_dir,
                                 'dof_%s%s.vtk' % (_format, suffix))
    for ip in get_dofs(options.dofs, var.n_dof):
        output('dof %d...' % ip)

        vec.fill(0.0)
        vec[ip] = 1.0

        var.set_data(vec)

        if options.derivative == 0:
            out = var.create_output(vec, linearization=lin)

        else:
            out = create_expression_output('ev_grad.ie.Elements(u)',
                                           'u', 'f', {'f': field}, None,
                                           Variables([var]),
                                           mode='qp', verbose=False,
                                           min_level=lin.min_level,
                                           max_level=lin.max_level,
                                           eps=lin.eps)

        name = name_template % ip
        ensure_path(name)
        out['u'].mesh.write(name, out=out)

        output('...done (%s)' % name)

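# A minimal sketch (not from the original sources) of calling
# save_basis_on_mesh().  The option and linearization attributes simply
# mirror what the function body above reads (max_order, basis, plot_dofs,
# dofs, derivative; min_level, max_level, eps); their concrete values, the
# 'lagrange' basis name and the behaviour of get_dofs(None, n_dof)
# (assumed to mean "all DOFs") are illustrative assumptions.
def example_save_basis(mesh, output_dir):
    from sfepy.base.base import Struct

    options = Struct(max_order=2, basis='lagrange', plot_dofs=False,
                     dofs=None, derivative=0)

    # Linearization settings; only used by create_output() /
    # create_expression_output() in the function above.
    lin = Struct(kind='adaptive', min_level=0, max_level=2, eps=1e-2)

    save_basis_on_mesh(mesh, options, output_dir, lin)
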
def create_evaluable(expression, fields, materials, variables, integrals,
                     regions=None, ebcs=None, epbcs=None, lcbcs=None,
                     ts=None, functions=None, auto_init=False,
                     mode='eval', extra_args=None, verbose=True,
                     kwargs=None):
    """
    Create evaluable object (equations and corresponding variables)
    from the `expression` string.

    Parameters
    ----------
    expression : str
        The expression to evaluate.
    fields : dict
        The dictionary of fields used in `variables`.
    materials : Materials instance
        The materials used in the expression.
    variables : Variables instance
        The variables used in the expression.
    integrals : Integrals instance
        The integrals to be used.
    regions : Region instance or list of Region instances
        The region(s) to be used. If not given, the regions defined
        within the fields domain are used.
    ebcs : Conditions instance, optional
        The essential (Dirichlet) boundary conditions for 'weak' mode.
    epbcs : Conditions instance, optional
        The periodic boundary conditions for 'weak' mode.
    lcbcs : Conditions instance, optional
        The linear combination boundary conditions for 'weak' mode.
    ts : TimeStepper instance, optional
        The time stepper.
    functions : Functions instance, optional
        The user functions for boundary conditions, materials etc.
    auto_init : bool
        Set values of all variables to all zeros.
    mode : one of 'eval', 'el_avg', 'qp', 'weak'
        The evaluation mode - 'weak' means the finite element assembling,
        'qp' requests the values in quadrature points, 'el_avg' element
        averages and 'eval' means integration over each term region.
    extra_args : dict, optional
        Extra arguments to be passed to terms in the expression.
    verbose : bool
        If False, reduce verbosity.
    kwargs : dict, optional
        The variables (dictionary of (variable name) : (Variable instance))
        to be used in the expression.

    Returns
    -------
    equations : Equations instance
        The equations that are ready to be evaluated.
    variables : Variables instance
        The variables used in the equations.
    """
    if kwargs is None:
        kwargs = {}

    if regions is not None:
        if isinstance(regions, Region):
            regions = [regions]

        regions = OneTypeList(Region, regions)

    else:
        regions = fields[fields.keys()[0]].domain.regions

    # Create temporary variables.
    aux_vars = Variables(variables)

    if extra_args is None:
        extra_args = kwargs

    else:
        extra_args = copy(extra_args)
        extra_args.update(kwargs)

    if ts is not None:
        extra_args.update({'ts': ts})

    equations = Equations.from_conf({'tmp': expression},
                                    aux_vars, regions, materials, integrals,
                                    user=extra_args, verbose=verbose)
    equations.collect_conn_info()

    # The true variables used in the expression.
    variables = equations.variables

    if auto_init:
        for var in variables:
            var.init_data(step=0)

    if mode == 'weak':
        equations.time_update(ts, ebcs, epbcs, lcbcs, functions,
                              verbose=verbose)

    else:
        setup_extra_data(equations.conn_info)

    return equations, variables

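# A minimal sketch (not from the original sources) of evaluating an
# expression with create_evaluable() and Equations.evaluate() (defined
# below).  It assumes `field` is a Field whose domain defines a region
# named 'Omega', that the Integrals container is importable from
# sfepy.discrete, and that the integral order 2 is sufficient; all names
# are illustrative only.
def example_evaluate_expression(field):
    from sfepy.discrete import (FieldVariable, Materials, Variables,
                                Integral, Integrals)

    u = FieldVariable('u', 'parameter', field,
                      primary_var_name='(set-to-None)')
    u.set_constant(1.0)

    equations, variables = create_evaluable(
        'ev_volume_integrate.i.Omega(u)',
        {field.name: field}, Materials([]), Variables([u]),
        Integrals([Integral('i', order=2)]), mode='eval')

    # The expression becomes a single equation named 'tmp'; with one
    # equation, evaluate() returns the bare result.
    return equations.evaluate(mode='eval')
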
class Equations(Container):

    @staticmethod
    def from_conf(conf, variables, regions, materials, integrals,
                  user=None, verbose=True):
        objs = OneTypeList(Equation)

        conf = copy(conf)

        ii = 0
        for name, desc in conf.iteritems():
            if verbose:
                output('equation "%s":' % name)
                output(desc)
            eq = Equation.from_desc(name, desc, variables, regions,
                                    materials, integrals, user=user)
            objs.append(eq)
            ii += 1

        obj = Equations(objs)

        return obj

    def __init__(self, equations):
        Container.__init__(self, equations)

        self.variables = Variables(self.collect_variables())
        self.materials = Materials(self.collect_materials())

        self.domain = self.get_domain()

        self.active_bcs = set()

        self.collect_conn_info()

    def create_subequations(self, var_names, known_var_names=None):
        """
        Create sub-equations containing only terms with the given virtual
        variables.

        Parameters
        ----------
        var_names : list
            The list of names of virtual variables.
        known_var_names : list
            The list of names of (already) known state variables.

        Returns
        -------
        subequations : Equations instance
            The sub-equations.
        """
        from sfepy.discrete import FieldVariable

        known_var_names = get_default(known_var_names, [])

        objs = []
        for iv, var_name in enumerate(var_names):
            terms = [term.copy(name=term.name)
                     for eq in self for term in eq.terms
                     if term.get_virtual_name() == var_name]

            # Make parameter variables from known state variables in terms
            # arguments.
            for known_name in known_var_names:
                for term in terms:
                    if known_name in term.arg_names:
                        ii = term.arg_names.index(known_name)
                        state = self.variables[known_name]
                        par = FieldVariable(known_name, 'parameter',
                                            state.field,
                                            primary_var_name='(set-to-None)')
                        term.args[ii] = par
                        term._kwargs[known_name] = par
                        par.set_data(state())

            new_terms = Terms(terms)
            objs.append(Equation('eq_%d' % iv, new_terms))

        subequations = Equations(objs)

        return subequations

    def get_domain(self):
        domain = None

        for eq in self:
            for term in eq.terms:
                if term.has_region:
                    domain = term.region.domain

        return domain

    def collect_materials(self):
        """
        Collect materials present in the terms of all equations.
        """
        materials = []
        for eq in self:
            materials.extend(eq.collect_materials())

        # Make the list items unique.
        materials = list(set(materials))

        return materials

    def reset_materials(self):
        """
        Clear material data so that next materials.time_update() is
        performed even for stationary materials.
        """
        self.materials.reset()

    def collect_variables(self):
        """
        Collect variables present in the terms of all equations.
        """
        variables = []
        for eq in self:
            variables.extend(eq.collect_variables())

        # Make the list items unique.
        variables = list(set(variables))

        return variables

    def get_variable(self, name):
        var = self.variables.get(name,
                                 msg_if_none='unknown variable! (%s)' % name)
        return var

    def collect_conn_info(self):
        """
        Collect connectivity information as defined by the equations.
        """
        self.conn_info = {}

        for eq in self:
            eq.collect_conn_info(self.conn_info)

        return self.conn_info

    def get_variable_names(self):
        """
        Return the list of names of all variables used in equations.
        """
        vns = set()
        for eq in self:
            for term in eq.terms:
                vns.update(term.get_variable_names())
        return list(vns)

    def get_variable_dependencies(self):
        """
        For each virtual variable get names of state/parameter variables
        that are present in terms with that virtual variable.

        The virtual variables define the actual equations and their
        dependencies define the variables needed to evaluate the equations.
        Returns
        -------
        deps : dict
            The dependencies as a dictionary with virtual variable names as
            keys and sets of state/parameter variables as values.
        """
        deps = {}
        for eq in self:
            for term in eq.terms:
                dep_list = deps.setdefault(term.get_virtual_name(), set())
                dep_list.update(term.get_state_names())

        return deps

    def invalidate_term_caches(self):
        """
        Invalidate evaluate caches of variables present in equations.
        """
        for var in self.variables:
            var.invalidate_evaluate_cache()

    def print_terms(self):
        """
        Print names of equations and their terms.
        """
        output('equations:')
        for eq in self:
            output(' %s:' % eq.name)
            for term in eq.terms:
                output(' %+.2e * %s.%d.%s(%s)'
                       % (term.sign, term.name, term.integral.order,
                          term.region.name, term.arg_str))

    def time_update(self, ts, ebcs=None, epbcs=None, lcbcs=None,
                    functions=None, problem=None, active_only=True,
                    verbose=True):
        """
        Update the equations for current time step.

        The update involves creating the mapping of active DOFs from/to all
        DOFs for all state variables, the setup of linear combination
        boundary conditions operators and the setup of active DOF
        connectivities.

        Parameters
        ----------
        ts : TimeStepper instance
            The time stepper.
        ebcs : Conditions instance, optional
            The essential (Dirichlet) boundary conditions.
        epbcs : Conditions instance, optional
            The periodic boundary conditions.
        lcbcs : Conditions instance, optional
            The linear combination boundary conditions.
        functions : Functions instance, optional
            The user functions for boundary conditions, materials, etc.
        problem : Problem instance, optional
            The problem that can be passed to user functions as a context.
        active_only : bool
            If True, the active DOF connectivities and matrix graph have
            reduced size and are created with the reduced (active DOFs only)
            numbering.
        verbose : bool
            If False, reduce verbosity.

        Returns
        -------
        graph_changed : bool
            The flag set to True if the current time step set of active
            boundary conditions differs from the set of the previous time
            step.
        """
        self.variables.time_update(ts, functions, verbose=verbose)

        active_bcs = self.variables.equation_mapping(
            ebcs, epbcs, ts, functions, problem=problem,
            active_only=active_only)
        graph_changed = active_only and (active_bcs != self.active_bcs)
        self.active_bcs = active_bcs

        if graph_changed or not self.variables.adof_conns:
            adcs = create_adof_conns(self.conn_info, self.variables.adi.indx,
                                     active_only=active_only)
            self.variables.set_adof_conns(adcs)

        self.variables.setup_lcbc_operators(lcbcs, ts, functions)

        for eq in self:
            for term in eq.terms:
                term.time_update(ts)

        return graph_changed

    def time_update_materials(self, ts, mode='normal', problem=None,
                              verbose=True):
        """
        Update data materials for current time and possibly also state.

        Parameters
        ----------
        ts : TimeStepper instance
            The time stepper.
        mode : 'normal', 'update' or 'force'
            The update mode, see
            :func:`sfepy.discrete.materials.Material.time_update()`.
        problem : Problem instance, optional
            The problem that can be passed to user functions as a context.
        verbose : bool
            If False, reduce verbosity.
        """
        self.materials.time_update(ts, self, mode=mode, problem=problem,
                                   verbose=verbose)

    def setup_initial_conditions(self, ics, functions=None):
        self.variables.setup_initial_conditions(ics, functions)

    def get_graph_conns(self, any_dof_conn=False, rdcs=None, cdcs=None,
                        active_only=True):
        """
        Get DOF connectivities needed for creating tangent matrix graph.

        Parameters
        ----------
        any_dof_conn : bool
            By default, only volume DOF connectivities are used, with the
            exception of trace surface DOF connectivities.
            If True, any kind of DOF connectivities is allowed.
        rdcs, cdcs : arrays, optional
            Additional row and column DOF connectivities, corresponding to
            the variables used in the equations.
        active_only : bool
            If True, the active DOF connectivities have reduced size and are
            created with the reduced (active DOFs only) numbering.

        Returns
        -------
        rdcs, cdcs : arrays
            The row and column DOF connectivities defining the matrix graph
            blocks.
        """
        if rdcs is None:
            rdcs = []
            cdcs = []

        elif cdcs is None:
            cdcs = copy(rdcs)

        else:
            assert_(len(rdcs) == len(cdcs))
            if rdcs is cdcs: # Make sure the lists are not the same object.
                rdcs = copy(rdcs)

        adcs = self.variables.adof_conns

        # Only volume dof connectivities are used, with the exception of
        # trace surface dof connectivities.
        shared = set()
        for key, ii, info in iter_dict_of_lists(self.conn_info,
                                                return_keys=True):
            rvar, cvar = info.virtual, info.state
            if (rvar is None) or (cvar is None):
                continue

            is_surface = rvar.is_surface or cvar.is_surface

            dct = info.dc_type.type
            if not (dct in ('volume', 'scalar', 'plate') or is_surface
                    or info.is_trace or any_dof_conn):
                continue

            rreg_name = info.get_region_name(can_trace=False)
            creg_name = info.get_region_name()

            rname = rvar.get_primary_name()
            rkey = (rname, rreg_name, dct, False)
            ckey = (cvar.name, creg_name, dct, info.is_trace)

            dc_key = (rkey, ckey)

            if not dc_key in shared:
                rdc = adcs[rkey]
                cdc = adcs[ckey]
                if not active_only:
                    ii = nm.where(rdc < 0)
                    rdc = rdc.copy()
                    rdc[ii] = -1 - rdc[ii]

                    ii = nm.where(cdc < 0)
                    cdc = cdc.copy()
                    cdc[ii] = -1 - cdc[ii]

                rdcs.append(rdc)
                cdcs.append(cdc)

                shared.add(dc_key)

        return rdcs, cdcs

    def create_matrix_graph(self, any_dof_conn=False, rdcs=None, cdcs=None,
                            shape=None, active_only=True, verbose=True):
        """
        Create tangent matrix graph, i.e. preallocate and initialize the
        sparse storage needed for the tangent matrix. Order of DOF
        connectivities is not important.

        Parameters
        ----------
        any_dof_conn : bool
            By default, only volume DOF connectivities are used, with the
            exception of trace surface DOF connectivities. If True, any kind
            of DOF connectivities is allowed.
        rdcs, cdcs : arrays, optional
            Additional row and column DOF connectivities, corresponding to
            the variables used in the equations.
        shape : tuple, optional
            The required shape, if it is different from the shape determined
            by the equations variables. This may be needed if additional row
            and column DOF connectivities are passed in.
        active_only : bool
            If True, the matrix graph has reduced size and is created with
            the reduced (active DOFs only) numbering.
        verbose : bool
            If False, reduce verbosity.

        Returns
        -------
        matrix : csr_matrix
            The matrix graph in the form of a CSR matrix with preallocated
            structure and zero data.
        """
        if not self.variables.has_virtuals():
            output('no matrix (no test variables)!')
            return None

        shape = get_default(shape, self.variables.get_matrix_shape())
        output('matrix shape:', shape, verbose=verbose)
        if nm.prod(shape) == 0:
            output('no matrix (zero size)!')
            return None

        rdcs, cdcs = self.get_graph_conns(any_dof_conn=any_dof_conn,
                                          rdcs=rdcs, cdcs=cdcs,
                                          active_only=active_only)

        if not len(rdcs):
            output('no matrix (empty dof connectivities)!')
            return None

        output('assembling matrix graph...', verbose=verbose)
        tt = time.clock()

        nnz, prow, icol = create_mesh_graph(shape[0], shape[1],
                                            len(rdcs), rdcs, cdcs)

        output('...done in %.2f s' % (time.clock() - tt), verbose=verbose)
        output('matrix structural nonzeros: %d (%.2e%% fill)'
               % (nnz, float(nnz) / nm.prod(shape)), verbose=verbose)

        data = nm.zeros((nnz,), dtype=self.variables.dtype)
        matrix = sp.csr_matrix((data, icol, prow), shape)

        return matrix

    def init_time(self, ts):
        pass

    def advance(self, ts):
        for eq in self:
            for term in eq.terms:
                term.advance(ts)

        self.variables.advance(ts)

    ##
    # Interface to self.variables.
    def create_state_vector(self):
        return self.variables.create_state_vector()

    def create_stripped_state_vector(self):
        return self.variables.create_stripped_state_vector()

    def strip_state_vector(self, vec, follow_epbc=False):
        """
        Strip a full vector by removing EBC dofs.

        Notes
        -----
        If 'follow_epbc' is True, values of EPBC master dofs are not simply
        thrown away, but added to the corresponding slave dofs, just like
        when assembling. For vectors with state (unknown) variables it
        should be set to False, for assembled vectors it should be set to
        True.
        """
        return self.variables.strip_state_vector(vec,
                                                 follow_epbc=follow_epbc)

    def make_full_vec(self, svec, force_value=None):
        """
        Make a full DOF vector satisfying E(P)BCs from a reduced DOF vector.
        """
        return self.variables.make_full_vec(svec, force_value)

    def set_variables_from_state(self, vec, step=0):
        """
        Set data (vectors of DOF values) of variables.

        Parameters
        ----------
        vec : array
            The state vector.
        step : int
            The time history step, 0 (default) = current.
        """
        self.variables.set_data(vec, step=step)

    def get_state_parts(self, vec=None):
        """
        Return parts of a state vector corresponding to individual state
        variables.

        Parameters
        ----------
        vec : array, optional
            The state vector. If not given, then the data stored in the
            variables are returned instead.

        Returns
        -------
        out : dict
            The dictionary of the state parts.
        """
        return self.variables.get_state_parts(vec)

    def set_data(self, data, step=0, ignore_unknown=False):
        """
        Set data (vectors of DOF values) of variables.

        Parameters
        ----------
        data : dict
            The dictionary of {variable_name : data vector}.
        step : int, optional
            The time history step, 0 (default) = current.
        ignore_unknown : bool, optional
            Ignore unknown variable names if `data` is a dict.
        """
        self.variables.set_data(data, step=step,
                                ignore_unknown=ignore_unknown)

    def apply_ebc(self, vec, force_values=None):
        """
        Apply essential (Dirichlet) boundary conditions to a state vector.
        """
        self.variables.apply_ebc(vec, force_values=force_values)

    def apply_ic(self, vec, force_values=None):
        """
        Apply initial conditions to a state vector.
        """
        self.variables.apply_ic(vec, force_values=force_values)

    def state_to_output(self, vec, fill_value=None, var_info=None,
                        extend=True):
        return self.variables.state_to_output(vec, fill_value=fill_value,
                                              var_info=var_info,
                                              extend=extend)

    def get_lcbc_operator(self):
        return self.variables.get_lcbc_operator()

    def evaluate(self, names=None, mode='eval', dw_mode='vector',
                 term_mode=None, asm_obj=None):
        """
        Evaluate the equations.

        Parameters
        ----------
        mode : one of 'eval', 'el_avg', 'qp', 'weak'
            The evaluation mode.
        names : str or sequence of str, optional
            Evaluate only equations of the given name(s).

        Returns
        -------
        out : dict or result
            The evaluation result. In 'weak' mode it is the `asm_obj`.
            Otherwise, it is a dict of results with equation names as keys
            or a single result for a single equation.
        """
        if names is None:
            eqs = self
            single = (len(eqs) == 1)

        else:
            single = isinstance(names, str)
            if single:
                names = [names]

            eqs = [self[eq] for eq in names]

        if mode == 'weak':
            for eq in eqs:
                eq.evaluate(mode=mode, dw_mode=dw_mode, term_mode=term_mode,
                            asm_obj=asm_obj)

            out = asm_obj

        else:
            out = {}
            for eq in eqs:
                eout = eq.evaluate(mode=mode, dw_mode=dw_mode,
                                   term_mode=term_mode)
                out[eq.name] = eout

            if single:
                out = out.popitem()[1]

        return out

    def eval_residuals(self, state, by_blocks=False, names=None):
        """
        Evaluate (assemble) residual vectors.

        Parameters
        ----------
        state : array
            The vector of DOF values. Note that it is needed only in
            nonlinear terms.
        by_blocks : bool
            If True, return the individual blocks composing the whole
            residual vector. Each equation should then correspond to one
            required block and should be named as `'block_name,
            test_variable_name, unknown_variable_name'`.
        names : list of str, optional
            Optionally, select only blocks with the given `names`, if
            `by_blocks` is True.

        Returns
        -------
        out : array or dict of array
            The assembled residual vector. If `by_blocks` is True, a
            dictionary is returned instead, with keys given by `block_name`
            part of the individual equation names.
        """
        self.set_variables_from_state(state)

        if by_blocks:
            names = get_default(names, self.names)

            out = {}

            get_indx = self.variables.get_indx
            for name in names:
                eq = self[name]
                key, rname, cname = [aux.strip() for aux in name.split(',')]

                ir = get_indx(rname, stripped=True, allow_dual=True)

                residual = self.create_stripped_state_vector()
                eq.evaluate(mode='weak', dw_mode='vector', asm_obj=residual)

                out[key] = residual[ir]

        else:
            out = self.create_stripped_state_vector()

            self.evaluate(mode='weak', dw_mode='vector', asm_obj=out)

        return out

    def eval_tangent_matrices(self, state, tangent_matrix, by_blocks=False,
                              names=None):
        """
        Evaluate (assemble) tangent matrices.

        Parameters
        ----------
        state : array
            The vector of DOF values. Note that it is needed only in
            nonlinear terms.
        tangent_matrix : csr_matrix
            The preallocated CSR matrix with zero data.
        by_blocks : bool
            If True, return the individual blocks composing the whole
            matrix. Each equation should then correspond to one required
            block and should be named as `'block_name, test_variable_name,
            unknown_variable_name'`.
        names : list of str, optional
            Optionally, select only blocks with the given `names`, if
            `by_blocks` is True.

        Returns
        -------
        out : csr_matrix or dict of csr_matrix
            The assembled matrix. If `by_blocks` is True, a dictionary is
            returned instead, with keys given by `block_name` part of the
            individual equation names.
        """
        self.set_variables_from_state(state)

        if by_blocks:
            names = get_default(names, self.names)

            out = {}

            get_indx = self.variables.get_indx
            for name in names:
                eq = self[name]
                key, rname, cname = [aux.strip()
                                     for aux in eq.name.split(',')]

                ir = get_indx(rname, stripped=True, allow_dual=True)
                ic = get_indx(cname, stripped=True, allow_dual=True)

                tangent_matrix.data[:] = 0.0
                eq.evaluate(mode='weak', dw_mode='matrix',
                            asm_obj=tangent_matrix)

                out[key] = tangent_matrix[ir, ic]

        else:
            tangent_matrix.data[:] = 0.0

            self.evaluate(mode='weak', dw_mode='matrix',
                          asm_obj=tangent_matrix)

            out = tangent_matrix

        return out

def make_term_args(arg_shapes, arg_kinds, arg_types, ats_mode, domain,
                   material_value=None, poly_space_base=None):
    from sfepy.base.base import basestr
    from sfepy.discrete import FieldVariable, Material, Variables, Materials
    from sfepy.discrete.fem import Field
    from sfepy.solvers.ts import TimeStepper
    from sfepy.mechanics.tensors import dim2sym

    omega = domain.regions['Omega']
    dim = domain.shape.dim
    sym = dim2sym(dim)

    def _parse_scalar_shape(sh):
        if isinstance(sh, basestr):
            if sh == 'D':
                return dim

            elif sh == 'S':
                return sym

            elif sh == 'N':  # General number ;)
                return 1

            else:
                return int(sh)

        else:
            return sh

    def _parse_tuple_shape(sh):
        if isinstance(sh, basestr):
            return [_parse_scalar_shape(ii.strip()) for ii in sh.split(',')]

        else:
            return (int(sh),)

    args = {}
    str_args = []
    materials = []
    variables = []
    for ii, arg_kind in enumerate(arg_kinds):
        if arg_kind != 'ts':
            if ats_mode is not None:
                extended_ats = arg_types[ii] + ('/%s' % ats_mode)

            else:
                extended_ats = arg_types[ii]

            try:
                sh = arg_shapes[arg_types[ii]]

            except KeyError:
                sh = arg_shapes[extended_ats]

        if arg_kind.endswith('variable'):
            shape = _parse_scalar_shape(sh[0] if isinstance(sh, tuple)
                                        else sh)
            field = Field.from_args('f%d' % ii, nm.float64, shape, omega,
                                    approx_order=1,
                                    poly_space_base=poly_space_base)

            if arg_kind == 'virtual_variable':
                if sh[1] is not None:
                    istate = arg_types.index(sh[1])

                else:
                    # Only virtual variable in arguments.
                    istate = -1
                    # -> Make fake variable.
                    var = FieldVariable('u-1', 'unknown', field)
                    var.set_constant(0.0)
                    variables.append(var)

                var = FieldVariable('v', 'test', field,
                                    primary_var_name='u%d' % istate)

            elif arg_kind == 'state_variable':
                var = FieldVariable('u%d' % ii, 'unknown', field)
                var.set_constant(0.0)

            elif arg_kind == 'parameter_variable':
                var = FieldVariable('p%d' % ii, 'parameter', field,
                                    primary_var_name='(set-to-None)')
                var.set_constant(0.0)

            variables.append(var)
            str_args.append(var.name)
            args[var.name] = var

        elif arg_kind.endswith('material'):
            if sh is None:
                # Switched-off opt_material.
                continue

            prefix = ''
            if isinstance(sh, basestr):
                aux = sh.split(':')
                if len(aux) == 2:
                    prefix, sh = aux

            if material_value is None:
                material_value = 1.0

            shape = _parse_tuple_shape(sh)
            if (len(shape) > 1) or (shape[0] > 1):
                if ((len(shape) == 2) and (shape[0] == shape[1])
                        and (material_value != 0.0)):
                    # Identity matrix.
                    val = nm.eye(shape[0], dtype=nm.float64)

                else:
                    # Array.
                    val = nm.empty(shape, dtype=nm.float64)
                    val.fill(material_value)

                values = {'%sc%d' % (prefix, ii): val}

            elif (len(shape) == 1) and (shape[0] == 1):
                # Single scalar as a special value.
                values = {'.c%d' % ii: material_value}

            else:
                raise ValueError('wrong material shape! (%s)' % shape)

            mat = Material('m%d' % ii, values=values)

            materials.append(mat)
            str_args.append(mat.name + '.' + 'c%d' % ii)
            args[mat.name] = mat

        elif arg_kind == 'ts':
            ts = TimeStepper(0.0, 1.0, 1.0, 5)
            str_args.append('ts')
            args['ts'] = ts

        else:
            str_args.append('user%d' % ii)
            args[str_args[-1]] = None

    materials = Materials(materials)
    variables = Variables(variables)

    return args, str_args, materials, variables

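# A minimal sketch (not from the original sources) of driving the Equations
# interface defined above for one assembling pass: build the matrix graph,
# then assemble the residual vector and the tangent matrix.  It assumes
# `equations` was built, e.g., by Equations.from_conf() or
# create_evaluable(..., mode='weak'), and that time_update() was already
# called with the boundary conditions.
def example_assemble(equations):
    # Full DOF vector with essential boundary condition values applied.
    state = equations.create_state_vector()
    equations.apply_ebc(state)

    # Preallocate the sparse (CSR) structure of the tangent matrix.
    matrix = equations.create_matrix_graph()

    # Assemble in 'weak' mode; eval_residuals() returns a reduced
    # (stripped) residual vector, eval_tangent_matrices() fills `matrix`.
    residual = equations.eval_residuals(state)
    tangent = equations.eval_tangent_matrices(state, matrix)

    return residual, tangent
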