def get_evaluate_cache(self, cache=None, share_geometry=False, verbose=False): """ Get the evaluate cache for :func:`Variable.evaluate_at() <sfepy.discrete.variables.Variable.evaluate_at()>`. Parameters ---------- cache : Struct instance, optional Optionally, use the provided instance to store the cache data. share_geometry : bool Set to True to indicate that all the evaluations will work on the same region. Certain data are then computed only for the first probe and cached. verbose : bool If False, reduce verbosity. Returns ------- cache : Struct instance The evaluate cache. """ import time try: from scipy.spatial import cKDTree as KDTree except ImportError: from scipy.spatial import KDTree from sfepy.discrete.fem.geometry_element import create_geometry_elements if cache is None: cache = Struct(name='evaluate_cache') tt = time.clock() if (cache.get('cmesh', None) is None) or not share_geometry: mesh = self.create_mesh(extra_nodes=False) cache.cmesh = cmesh = mesh.cmesh gels = create_geometry_elements() cmesh.set_local_entities(gels) cmesh.setup_entities() cache.centroids = cmesh.get_centroids(cmesh.tdim) if self.gel.name != '3_8': cache.normals0 = cmesh.get_facet_normals() cache.normals1 = None else: cache.normals0 = cmesh.get_facet_normals(0) cache.normals1 = cmesh.get_facet_normals(1) output('cmesh setup: %f s' % (time.clock()-tt), verbose=verbose) tt = time.clock() if (cache.get('kdtree', None) is None) or not share_geometry: cache.kdtree = KDTree(cmesh.coors) output('kdtree: %f s' % (time.clock()-tt), verbose=verbose) return cache
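# A minimal usage sketch for the cache above (not from the library sources;
# the names `field`, `u` and `points` are assumed to exist in the calling
# code, with `u` a FieldVariable defined on `field`). Reusing one cache
# avoids rebuilding the cmesh data and the kd-tree for repeated point
# evaluations on the same region.
def _example_evaluate_with_cache(field, u, points):
    # Build the cache once; share_geometry=True keeps the geometry data
    # between evaluations on the same region.
    cache = field.get_evaluate_cache(share_geometry=True)
    # Evaluate the variable at the given physical coordinates.
    return u.evaluate_at(points, cache=cache)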
def __init__(self, filename, approx, region_selects, mat_pars, options, evp_options, eigenmomenta_options, band_gaps_options, coefs_save_name='coefs', corrs_save_names=None, incwd=None, output_dir=None, **kwargs): Struct.__init__(self, approx=approx, region_selects=region_selects, mat_pars=mat_pars, options=options, evp_options=evp_options, eigenmomenta_options=eigenmomenta_options, band_gaps_options=band_gaps_options, **kwargs) self.incwd = get_default(incwd, lambda x: x) self.conf = Struct() self.conf.filename_mesh = self.incwd(filename) output_dir = get_default(output_dir, self.incwd('output')) default = {'evp': 'evp', 'corrs_rs': 'corrs_rs'} self.corrs_save_names = get_default(corrs_save_names, default) io = MeshIO.any_from_filename(self.conf.filename_mesh) self.bbox, self.dim = io.read_bounding_box(ret_dim=True) rpc_axes = nm.eye(self.dim, dtype=nm.float64) \ * (self.bbox[1] - self.bbox[0]) self.conf.options = options self.conf.options.update({ 'output_dir': output_dir, 'volume': { 'value': get_lattice_volume(rpc_axes), }, 'coefs': 'coefs', 'requirements': 'requirements', 'coefs_filename': coefs_save_name, }) self.conf.mat_pars = mat_pars self.conf.solvers = self.define_solvers() self.conf.regions = self.define_regions() self.conf.materials = self.define_materials() self.conf.fields = self.define_fields() self.conf.variables = self.define_variables() (self.conf.ebcs, self.conf.epbcs, self.conf.lcbcs, self.all_periodic) = self.define_bcs() self.conf.functions = self.define_functions() self.conf.integrals = self.define_integrals() self.equations, self.expr_coefs = self.define_equations() self.conf.coefs = self.define_coefs() self.conf.requirements = self.define_requirements()
def solve_pressure_eigenproblem(self, mtx, eig_problem=None, n_eigs=0, check=False): """G = B*AI*BT or B*AI*BT+D""" def get_slice(n_eigs, nn): if n_eigs > 0: ii = slice(0, n_eigs) elif n_eigs < 0: ii = slice(nn + n_eigs, nn) else: ii = slice(0, 0) return ii eig_problem = get_default(eig_problem, self.eig_problem) n_eigs = get_default(n_eigs, self.n_eigs) check = get_default(check, self.check) mtx_c, mtx_b, action_aibt = mtx['C'], mtx['B'], mtx['action_aibt'] mtx_g = mtx_b * action_aibt.to_array() # mtx_b must be sparse! if eig_problem == 'B*AI*BT+D': mtx_g += mtx['D'].toarray() mtx['G'] = mtx_g output(mtx_c.shape, mtx_g.shape) eigs, mtx_q = eig(mtx_c.toarray(), mtx_g, method='eig.sgscipy') if check: ee = nm.diag(sc.dot(mtx_q.T * mtx_c, mtx_q)).squeeze() oo = nm.diag(sc.dot(sc.dot(mtx_q.T, mtx_g), mtx_q)).squeeze() try: assert_(nm.allclose(ee, eigs)) assert_(nm.allclose(oo, nm.ones_like(eigs))) except ValueError: debug() nn = mtx_c.shape[0] if isinstance(n_eigs, tuple): output('required number of eigenvalues: (%d, %d)' % n_eigs) if sum(n_eigs) < nn: ii0 = get_slice(n_eigs[0], nn) ii1 = get_slice(-n_eigs[1], nn) eigs = nm.concatenate((eigs[ii0], eigs[ii1])) mtx_q = nm.concatenate((mtx_q[:, ii0], mtx_q[:, ii1]), 1) else: output('required number of eigenvalues: %d' % n_eigs) if (n_eigs != 0) and (abs(n_eigs) < nn): ii = get_slice(n_eigs, nn) eigs = eigs[ii] mtx_q = mtx_q[:, ii] ## from sfepy.base.plotutils import pylab, iplot ## pylab.semilogy(eigs) ## pylab.figure(2) ## iplot(eigs) ## pylab.show() ## debug() out = Struct(eigs=eigs, mtx_q=mtx_q) return out
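# Plain-Python illustration (no SfePy dependencies) of the get_slice()
# convention used above: a positive count selects the leading eigenvalues,
# a negative count the trailing ones and zero selects nothing.
def _example_get_slice_convention():
    nn = 5
    assert list(range(nn))[slice(0, 3)] == [0, 1, 2]     # n_eigs = 3
    assert list(range(nn))[slice(nn - 2, nn)] == [3, 4]  # n_eigs = -2
    assert list(range(nn))[slice(0, 0)] == []            # n_eigs = 0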
def classify_args(self): """ Classify types of the term arguments and find matching call signature. A state variable can be in place of a parameter variable and vice versa. """ self.names = Struct(name='arg_names', material=[], variable=[], user=[], state=[], virtual=[], parameter=[]) # Prepare for 'opt_material' - just prepend a None argument if needed. if isinstance(self.arg_types[0], tuple): arg_types = self.arg_types[0] else: arg_types = self.arg_types if len(arg_types) == (len(self.args) + 1): self.args.insert(0, (None, None)) self.arg_names.insert(0, (None, None)) if isinstance(self.arg_types[0], tuple): assert_(len(self.modes) == len(self.arg_types)) # Find matching call signature using variable arguments - material # and user arguments are ignored! matched = [] for it, arg_types in enumerate(self.arg_types): arg_kinds = get_arg_kinds(arg_types) if self._check_variables(arg_kinds): matched.append((it, arg_kinds)) if len(matched) == 1: i_match, arg_kinds = matched[0] arg_types = self.arg_types[i_match] self.mode = self.modes[i_match] elif len(matched) == 0: msg = 'cannot match arguments! (%s)' % self.arg_names raise ValueError(msg) else: msg = 'ambiguous arguments! (%s)' % self.arg_names raise ValueError(msg) else: arg_types = self.arg_types arg_kinds = get_arg_kinds(self.arg_types) self.mode = Struct.get(self, 'mode', None) if not self._check_variables(arg_kinds): raise ValueError('cannot match variables! (%s)' % self.arg_names) # Set actual argument types. self.ats = list(arg_types) for ii, arg_kind in enumerate(arg_kinds): name = self.arg_names[ii] if arg_kind.endswith('variable'): names = self.names.variable if arg_kind == 'virtual_variable': self.names.virtual.append(name) elif arg_kind == 'state_variable': self.names.state.append(name) elif arg_kind == 'parameter_variable': self.names.parameter.append(name) elif arg_kind.endswith('material'): names = self.names.material else: names = self.names.user names.append(name) self.n_virtual = len(self.names.virtual) if self.n_virtual > 1: raise ValueError('at most one virtual variable is allowed! (%d)' % self.n_virtual) self.set_arg_types() self.setup_integration()
output_dir = incwd('output/band_gaps') # aluminium, in 1e+10 Pa D_m = get_pars(2, 5.898, 2.681) density_m = 0.2799 # in 1e4 kg/m3 # epoxy, in 1e+10 Pa D_c = get_pars(2, 0.1798, 0.148) density_c = 0.1142 # in 1e4 kg/m3 mat_pars = Coefficients(D_m=D_m, density_m=density_m, D_c=D_c, density_c=density_c) region_selects = Struct(matrix='cells of group 1', inclusion='cells of group 2') corrs_save_names = {'evp': 'evp', 'corrs_rs': 'corrs_rs'} options = { 'plot_transform_angle': None, 'plot_transform_wave': ('clip_sqrt', (0, 30)), 'plot_transform': ('normalize', (-2, 2)), 'fig_name': 'band_gaps', 'fig_name_angle': 'band_gaps_angle', 'fig_name_wave': 'band_gaps_wave', 'fig_suffix': '.pdf', 'coefs_filename': 'coefs.txt', 'incident_wave_dir': [1.0, 1.0], 'plot_options': { 'show': True,
def process_conf(conf, kwargs): """ Missing items are set to default values. Example configuration, all items:: solver_1 = { 'name' : 'oseen', 'kind' : 'nls.oseen', 'needs_problem_instance' : True, 'stabil_mat' : 'stabil', 'adimensionalize' : False, 'check_navier_stokes_rezidual' : False, 'i_max' : 10, 'eps_a' : 1e-8, 'eps_r' : 1.0, 'macheps' : 1e-16, 'lin_red' : 1e-2, # Linear system error < (eps_a * lin_red). 'is_plot' : False, 'log' : {'text' : 'oseen_log.txt', 'plot' : 'oseen_log.png'}, } """ get = make_get_conf(conf, kwargs) common = NonlinearSolver.process_conf(conf) # Compulsory. needs_problem_instance = get('needs_problem_instance', True) if not needs_problem_instance: msg = 'set solver option "needs_problem_instance" to True!' raise ValueError(msg) stabil_mat = get('stabil_mat', None, 'missing "stabil_mat" in options!') # With defaults. adimensionalize = get('adimensionalize', False) if adimensionalize: raise NotImplementedError check = get('check_navier_stokes_rezidual', False) log = get_logging_conf(conf) log = Struct(name='log_conf', **log) is_any_log = (log.text is not None) or (log.plot is not None) return Struct(needs_problem_instance=needs_problem_instance, stabil_mat=stabil_mat, adimensionalize=adimensionalize, check_navier_stokes_rezidual=check, i_max=get('i_max', 1), eps_a=get('eps_a', 1e-10), eps_r=get('eps_r', 1.0), macheps=get('macheps', nm.finfo(nm.float64).eps), lin_red=get('lin_red', 1.0), lin_precision=get('lin_precision', None), is_plot=get('is_plot', False), log=log, is_any_log=is_any_log) + common
filename = data_dir + '/meshes/2d/special/circle_in_square.mesh' output_dir = incwd('output/band_gaps') # aluminium, in 1e+10 Pa D_m = get_pars(2, 5.898, 2.681) density_m = 0.2799 # in 1e4 kg/m3 # epoxy, in 1e+10 Pa D_c = get_pars(2, 0.1798, 0.148) density_c = 0.1142 # in 1e4 kg/m3 mat_pars = Coefficients(D_m=D_m, density_m=density_m, D_c=D_c, density_c=density_c) region_selects = Struct(matrix=('elements of group 1', {}), inclusion=('elements of group 2', {})) corrs_save_names = {'evp' : 'evp', 'corrs_rs' : 'corrs_rs'} options = { 'plot_transform_angle' : None, 'plot_transform_wave' : ('clip_sqrt', (0, 30)), 'plot_transform' : ('normalize', (-2, 2)), 'fig_name' : 'band_gaps', 'fig_name_angle' : 'band_gaps_angle', 'fig_name_wave' : 'band_gaps_wave', 'fig_suffix' : '.pdf', 'coefs_filename' : 'coefs.txt',
def get_homog_coefs_linear(ts, coor, mode,
                           micro_filename=None, regenerate=False,
                           coefs_filename=None):

    oprefix = output.prefix
    output.prefix = 'micro:'

    required, other = get_standard_keywords()
    required.remove('equations')

    conf = ProblemConf.from_file(micro_filename, required, other,
                                 verbose=False)
    if coefs_filename is None:
        coefs_filename = conf.options.get('coefs_filename', 'coefs')
        coefs_filename = op.join(conf.options.get('output_dir', '.'),
                                 coefs_filename) + '.h5'

    if not regenerate:
        if op.exists(coefs_filename):
            if not pt.is_hdf5_file(coefs_filename):
                regenerate = True
        else:
            regenerate = True

    if regenerate:
        options = Struct(output_filename_trunk=None)

        app = HomogenizationApp(conf, options, 'micro:')
        coefs = app()
        if type(coefs) is tuple:
            coefs = coefs[0]

        coefs.to_file_hdf5(coefs_filename)
    else:
        coefs = Coefficients.from_file_hdf5(coefs_filename)

    out = {}
    if mode is None:
        for key, val in six.iteritems(coefs.__dict__):
            out[key] = val

    elif mode == 'qp':
        for key, val in six.iteritems(coefs.__dict__):
            if isinstance(val, (nm.ndarray, nm.float64)):
                out[key] = nm.tile(val, (coor.shape[0], 1, 1))
            elif isinstance(val, dict):
                for key2, val2 in six.iteritems(val):
                    if isinstance(val2, (nm.ndarray, nm.float64)):
                        out[key + '_' + key2] = \
                            nm.tile(val2, (coor.shape[0], 1, 1))

    else:
        out = None

    output.prefix = oprefix

    return out
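# Hedged sketch of how the helper above is typically hooked into a macro
# problem as a material function. The micro file name and the 'get_homog' /
# 'solid' keys below are illustrative assumptions, not taken from this code.
def _example_macro_material(ts, coors, mode=None, **kwargs):
    # Return the homogenized coefficients in the quadrature points of the
    # macroscopic problem; other evaluation modes are ignored.
    if mode == 'qp':
        return get_homog_coefs_linear(ts, coors, mode,
                                      micro_filename='micro_problem.py')

# functions = {'get_homog': (_example_macro_material,)}
# materials = {'solid': 'get_homog'}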
def get_homog_coefs_nonlinear(ts, coor, mode, mtx_f=None, term=None, problem=None, iteration=None, **kwargs): if not (mode == 'qp'): return oprefix = output.prefix output.prefix = 'micro:' if not hasattr(problem, 'homogen_app'): required, other = get_standard_keywords() required.remove('equations') micro_file = problem.conf.options.micro_filename conf = ProblemConf.from_file(micro_file, required, other, verbose=False) options = Struct(output_filename_trunk=None) problem.homogen_app = HomogenizationApp(conf, options, 'micro:', n_micro=coor.shape[0], update_micro_coors=True) app = problem.homogen_app def_grad = mtx_f(problem, term) if callable(mtx_f) else mtx_f if hasattr(problem, 'def_grad_prev'): rel_def_grad = la.dot_sequences(def_grad, nm.linalg.inv(problem.def_grad_prev), 'AB') else: rel_def_grad = def_grad.copy() problem.def_grad_prev = def_grad.copy() app.setup_macro_deformation(rel_def_grad) coefs, deps = app(ret_all=True, itime=ts.step, iiter=iteration) if type(coefs) is tuple: coefs = coefs[0] out = {} for key, val in six.iteritems(coefs.__dict__): if isinstance(val, list): out[key] = nm.array(val) elif isinstance(val, dict): for key2, val2 in six.iteritems(val): out[key + '_' + key2] = nm.array(val2) for key in six.iterkeys(out): shape = out[key].shape if len(shape) == 1: out[key] = out[key].reshape(shape + (1, 1)) elif len(shape) == 2: out[key] = out[key].reshape(shape + (1, )) output.prefix = oprefix return out
def recovery_micro(pb, corrs, macro): eps0 = macro['eps0'] mesh = pb.domain.mesh regions = pb.domain.regions dim = mesh.dim Ymc_map = regions['Ymc'].get_entities(0) Ym_map = regions['Ym'].get_entities(0) # deformation u1, phi = 0, 0 for ii in range(2): u1 += corrs['corrs_k%d' % ii]['u'] * macro['phi'][ii] phi += corrs['corrs_k%d' % ii]['r'] * macro['phi'][ii] for ii in range(dim): for jj in range(dim): kk = coor_to_sym(ii, jj, dim) phi += corrs['corrs_rs']['r_%d%d' % (ii, jj)]\ * nm.expand_dims(macro['strain'][Ym_map, kk], axis=1) u1 += corrs['corrs_rs']['u_%d%d' % (ii, jj)]\ * nm.expand_dims(macro['strain'][Ymc_map, kk], axis=1) u = macro['u'][Ymc_map, :] + eps0 * u1 mvar = pb.create_variables(['u', 'r', 'svar']) e_mac_Ymc = [None] * macro['strain'].shape[1] for ii in range(dim): for jj in range(dim): kk = coor_to_sym(ii, jj, dim) mvar['svar'].set_data(macro['strain'][:, kk]) mac_e_Ymc = pb.evaluate('ev_volume_integrate.i2.Ymc(svar)', mode='el_avg', var_dict={'svar': mvar['svar']}) e_mac_Ymc[kk] = mac_e_Ymc.squeeze() e_mac_Ymc = nm.vstack(e_mac_Ymc).T[:, nm.newaxis, :, nm.newaxis] mvar['r'].set_data(phi) E_mic = pb.evaluate('ev_grad.i2.Ym(r)', mode='el_avg', var_dict={'r': mvar['r']}) / eps0 mvar['u'].set_data(u1) e_mic = pb.evaluate('ev_cauchy_strain.i2.Ymc(u)', mode='el_avg', var_dict={'u': mvar['u']}) e_mic += e_mac_Ymc out = { 'u0': (macro['u'][Ymc_map, :], 'u', 'p'), 'u': (u, 'u', 'p'), 'u1': (u1, 'u', 'p'), 'e_mic': (e_mic, 'u', 'c'), 'phi': (phi, 'r', 'p'), 'E_mic': (E_mic, 'r', 'c'), } out_struct = {} for k, v in out.items(): out_struct[k] = Struct(name='output_data', mode='cell' if v[2] == 'c' else 'vertex', data=v[0], var_name=v[1], dofs=None) return out_struct
class Probe(Struct): """ Base class for all point probes. Enforces two points minimum. """ cache = Struct(name='probe_shared_cache', offsets=None, iconn=None, kdtree=None) is_cyclic = False def __init__(self, name, mesh, share_mesh=True, n_point=None, **kwargs): """ Parameters ---------- name : str The probe name, set automatically by the subclasses. mesh : Mesh instance The FE mesh where the variables to be probed are defined. share_mesh : bool Set to True to indicate that all the probes will work on the same mesh. Certain data are then computed only for the first probe and cached. n_point : int The (fixed) number of probe points, when positive. When non-positive, the number of points is adaptively increased starting from -n_point, until the neighboring point distance is less than the diameter of the elements enclosing the points. When None, it is set to -10. For additional parameters see the __init__() docstrings of the subclasses. Notes ----- If the mesh contains vertices that are not contained in any element, we shift coordinates of such vertices so that they never match in the nearest node search. """ Struct.__init__(self, name=name, mesh=mesh, **kwargs) self.set_n_point(n_point) self.options = Struct(close_limit=0.1, size_hint=None) self.is_refined = False tt = time.clock() if share_mesh: if Probe.cache.iconn is None: offsets, iconn = make_inverse_connectivity(mesh.conns, mesh.n_nod, ret_offsets=True) Probe.cache.iconn = iconn Probe.cache.offsets = offsets self.cache = Probe.cache else: offsets, iconn = make_inverse_connectivity(mesh.conns, mesh.n_nod, ret_offsets=True) self.cache = Struct(name='probe_cache', offsets=offsets, iconn=iconn, kdtree=None) output('iconn: %f s' % (time.clock() - tt)) i_bad = nm.where(nm.diff(self.cache.offsets) == 0)[0] if len(i_bad): bbox = mesh.get_bounding_box() mesh.coors[i_bad] = bbox[1] + bbox[1] - bbox[0] output('warning: some vertices are not in any element!') output('warning: vertex-based results will be wrong!') tt = time.clock() if share_mesh: if Probe.cache.kdtree is None: self.cache.kdtree = KDTree(mesh.coors) else: self.cache.kdtree = KDTree(mesh.coors) output('kdtree: %f s' % (time.clock() - tt)) def set_n_point(self, n_point): """ Set the number of probe points. Parameters ---------- n_point : int The (fixed) number of probe points, when positive. When non-positive, the number of points is adaptively increased starting from -n_point, until the neighboring point distance is less than the diameter of the elements enclosing the points. When None, it is set to -10. """ if n_point is None: n_point = -10 if n_point <= 0: n_point = max(-n_point, 2) self.n_point_required = -1 else: n_point = max(n_point, 2) self.n_point_required = n_point self.n_point0 = self.n_point = n_point def set_options(self, close_limit=None, size_hint=None): """ Set the probe options. Parameters ---------- close_limit : float The maximum limit distance of a point from the closest element allowed for extrapolation. size_hint : float Element size hint for the refinement of probe parametrization. """ if close_limit is not None: self.options.close_limit = close_limit if size_hint is not None: self.options.size_hint = size_hint def report(self): """Report the probe parameters.""" out = [self.__class__.__name__] if self.n_point_required == -1: aux = 'adaptive' else: aux = 'fixed' out.append('number of points: %s (%s)' % (self.n_point, aux)) return out def __call__(self, variable): """ Probe the given variable. 
The actual implementation is in self.probe(), so that it can be overridden in subclasses. Parameters ---------- variable : Variable instance The variable to be sampled along the probe. """ return self.probe(variable) def probe(self, variable): """ Probe the given variable. Parameters ---------- variable : Variable instance The variable to be sampled along the probe. """ refine_flag = None ev = variable.evaluate_at self.reset_refinement() while True: pars, points = self.get_points(refine_flag) vals, cells, status = ev(points, strategy='kdtree', close_limit=self.options.close_limit, cache=self.cache, ret_status=True) ii = nm.where(status > 1)[0] vals[ii] = nm.nan if self.is_refined: break else: refine_flag = self.refine_points(variable, points, cells) if (refine_flag == False).all(): break self.is_refined = True return pars, vals def reset_refinement(self): """ Reset the probe refinement state. """ self.is_refined = False self.n_point = self.n_point0 def refine_points(self, variable, points, cells): """ Mark intervals between points for a refinement, based on element sizes at those points. Assumes the points to be ordered. Returns ------- refine_flag : bool array True at places corresponding to intervals between subsequent points that need to be refined. """ if self.n_point_required == self.n_point: refine_flag = nm.array([False]) else: if self.options.size_hint is None: ed = variable.get_element_diameters(cells, 0) pd = 0.5 * (ed[1:] + ed[:-1]) else: pd = self.options.size_hint dist = norm_l2_along_axis(points[1:] - points[:-1]) refine_flag = dist > pd if self.is_cyclic: pd1 = 0.5 * (ed[0] + ed[-1]) dist1 = nla.norm(points[0] - points[-1]) refine_flag = nm.r_[refine_flag, dist1 > pd1] return refine_flag @staticmethod def refine_pars(pars, refine_flag, cyclic_val=None): """ Refine the probe parametrization based on the refine_flag. """ ii = nm.where(refine_flag)[0] ip = ii + 1 if cyclic_val is not None: cpars = nm.r_[pars, cyclic_val] pp = 0.5 * (cpars[ip] + cpars[ii]) else: pp = 0.5 * (pars[ip] + pars[ii]) pars = nm.insert(pars, ip, pp) return pars
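# Hedged usage sketch for the probing API above: `probe` is assumed to be an
# already constructed Probe subclass instance (e.g. a line or circle probe)
# and `u` a FieldVariable defined on the probed mesh.
def _example_probe_variable(probe, u):
    # Allow mild extrapolation outside the elements, then sample the
    # variable; __call__() delegates to probe().
    probe.set_options(close_limit=0.05)
    pars, vals = probe(u)
    return pars, vals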
def main(): parser = ArgumentParser(description=__doc__, formatter_class=RawDescriptionHelpFormatter) parser.add_argument('--version', action='version', version='%(prog)s ' + sfepy.__version__) parser.add_argument('--debug', action='store_true', dest='debug', default=False, help=help['debug']) parser.add_argument('-o', metavar='filename', action='store', dest='output_filename_trunk', default=None, help=help['filename']) parser.add_argument('-d', '--dump', action='store_true', dest='dump', default=False, help=help['dump']) parser.add_argument('--same-dir', action='store_true', dest='same_dir', default=False, help=help['same_dir']) parser.add_argument('-l', '--linearization', metavar='options', action='store', dest='linearization', default=None, help=help['linearization']) parser.add_argument('--times', action='store_true', dest='times', default=False, help=help['times']) parser.add_argument('-f', '--from', type=int, metavar='ii', action='store', dest='step_from', default=0, help=help['from']) parser.add_argument('-t', '--to', type=int, metavar='ii', action='store', dest='step_to', default=None, help=help['to']) parser.add_argument('-s', '--step', type=int, metavar='ii', action='store', dest='step_by', default=1, help=help['step']) parser.add_argument('-e', '--extract', metavar='list', action='store', dest='extract', default=None, help=help['extract']) parser.add_argument('-a', '--average', action='store_true', dest='average', default=False, help=help['average']) parser.add_argument('input_file', nargs='?', default=None) parser.add_argument('results_file') options = parser.parse_args() if options.debug: from sfepy.base.base import debug_on_error debug_on_error() filename_in = options.input_file filename_results = options.results_file if filename_in is None: linearize = False else: linearize = True options.dump = True if options.times: steps, times, nts, dts = th.extract_times(filename_results) for ii, time in enumerate(times): step = steps[ii] print('%d %e %e %e' % (step, time, nts[ii], dts[ii])) if options.dump: trunk = get_default(options.output_filename_trunk, get_trunk(filename_results)) if options.same_dir: trunk = os.path.join(os.path.dirname(filename_results), os.path.basename(trunk)) args = {} if linearize: problem = create_problem(filename_in) linearization = Struct(kind='adaptive', min_level=0, max_level=2, eps=1e-2) aux = problem.conf.options.get('linearization', None) linearization.update(aux) if options.linearization is not None: aux = parse_linearization(options.linearization) linearization.update(aux) args.update({ 'fields': problem.fields, 'linearization': linearization }) if options.step_to is None: args.update({'step0': options.step_from}) else: args.update({ 'steps': nm.arange(options.step_from, options.step_to + 1, options.step_by, dtype=nm.int) }) th.dump_to_vtk(filename_results, output_filename_trunk=trunk, **args) if options.extract: ths, ts = th.extract_time_history(filename_results, options.extract) if options.average: ths = th.average_vertex_var_in_cells(ths) if options.output_filename_trunk: th.save_time_history(ths, ts, options.output_filename_trunk + '.h5') else: print(dict_to_struct(ths, flag=(1, 1, 1)).str_all())
def create_mapping(self, region, integral, integration, return_mapping=True): """ Create a new reference mapping. Compute jacobians, element volumes and base function derivatives for Volume-type geometries (volume mappings), and jacobians, normals and base function derivatives for Surface-type geometries (surface mappings). Notes ----- - surface mappings are defined on the surface region - surface mappings require field order to be > 0 """ domain = self.domain coors = domain.get_mesh_coors(actual=True) dconn = domain.get_conn() if integration == 'volume': qp = self.get_qp('v', integral) iels = region.get_cells() geo_ps = self.gel.poly_space ps = self.poly_space bf = self.get_base('v', 0, integral, iels=iels) conn = nm.take(dconn, iels.astype(nm.int32), axis=0) mapping = VolumeMapping(coors, conn, poly_space=geo_ps) vg = mapping.get_mapping(qp.vals, qp.weights, poly_space=ps, ori=self.ori, transform=self.basis_transform) out = vg elif (integration == 'surface') or (integration == 'surface_extra'): assert_(self.approx_order > 0) if self.ori is not None: msg = 'surface integrals do not work yet with the' \ ' hierarchical basis!' raise ValueError(msg) sd = domain.surface_groups[region.name] esd = self.surface_data[region.name] geo_ps = self.gel.poly_space ps = self.poly_space conn = sd.get_connectivity() mapping = SurfaceMapping(coors, conn, poly_space=geo_ps) if not self.is_surface: self.create_bqp(region.name, integral) qp = self.qp_coors[(integral.order, esd.bkey)] abf = ps.eval_base(qp.vals[0], transform=self.basis_transform) bf = abf[..., self.efaces[0]] indx = self.gel.get_surface_entities()[0] # Fix geometry element's 1st facet orientation for gradients. indx = nm.roll(indx, -1)[::-1] mapping.set_basis_indices(indx) sg = mapping.get_mapping(qp.vals[0], qp.weights, poly_space=Struct(n_nod=bf.shape[-1]), mode=integration) if integration == 'surface_extra': sg.alloc_extra_data(self.econn.shape[1]) bf_bg = geo_ps.eval_base(qp.vals, diff=True) ebf_bg = self.get_base(esd.bkey, 1, integral) sg.evaluate_bfbgm(bf_bg, ebf_bg, coors, sd.fis, dconn) else: # Do not use BQP for surface fields. qp = self.get_qp(sd.face_type, integral) bf = ps.eval_base(qp.vals, transform=self.basis_transform) sg = mapping.get_mapping(qp.vals, qp.weights, poly_space=Struct(n_nod=bf.shape[-1]), mode=integration) out = sg elif integration == 'point': out = mapping = None elif integration == 'custom': raise ValueError('cannot create custom mapping!') else: raise ValueError('unknown integration geometry type: %s' % integration) if out is not None: # Store the integral used. out.integral = integral out.qp = qp out.ps = ps # Update base. out.bf[:] = bf if return_mapping: out = (out, mapping) return out
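# Hedged sketch of obtaining a volume reference mapping for numerical
# integration; the names `field` and `omega` are assumed to come from the
# calling code and attribute names of the returned mapping data may differ
# between SfePy versions.
def _example_volume_mapping(field, omega):
    from sfepy.discrete import Integral

    integral = Integral('i', order=2 * field.approx_order)
    vg, mapping = field.create_mapping(omega, integral, 'volume',
                                       return_mapping=True)
    return vg, mapping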
def detect_band_gaps(mass, freq_info, opts, gap_kind='normal', mtx_b=None):
    """
    Detect band gaps given solution to eigenproblem (eigs, eig_vectors).
    Only valid resonance frequencies (i.e. those for which corresponding
    eigenmomenta are above a given threshold) are taken into account.

    Notes
    -----
    - make freq_eps relative to ]f0, f1[ size?
    """
    output('eigensolver:', opts.eigensolver)

    fm = freq_info.freq_range_margins
    min_freq, max_freq = fm[0], fm[-1]
    output('freq. range with margins: [%8.3f, %8.3f]'
           % (min_freq, max_freq))

    df = opts.freq_step * (max_freq - min_freq)

    fz_callback = get_callback(mass.evaluate, opts.eigensolver,
                               mtx_b=mtx_b, mode='find_zero')
    trace_callback = get_callback(mass.evaluate, opts.eigensolver,
                                  mtx_b=mtx_b, mode='trace')

    n_col = 1 + (mtx_b is not None)
    logs = [[] for ii in range(n_col + 1)]
    gaps = []

    for ii in range(freq_info.freq_range.shape[0] + 1):

        f0, f1 = fm[[ii, ii + 1]]
        output('interval: ]%.8f, %.8f[...' % (f0, f1))

        log_freqs = get_log_freqs(f0, f1, df, opts.freq_eps, 100, 1000)

        output('n_logged: %d' % log_freqs.shape[0])

        log_mevp = [[] for ii in range(n_col)]
        for f in log_freqs:
            for ii, data in enumerate(trace_callback(f)):
                log_mevp[ii].append(data)

        # Get log for the first and last f in log_freqs.
        lf0 = log_freqs[0]
        lf1 = log_freqs[-1]

        log0, log1 = log_mevp[0][0], log_mevp[0][-1]
        min_eig0 = log0[0]
        max_eig1 = log1[-1]

        if gap_kind == 'liquid':
            mevp = nm.array(log_mevp, dtype=nm.float64).squeeze()
            si = nm.where(mevp[:, 0] < 0.0)[0]
            li = nm.where(mevp[:, -1] < 0.0)[0]
            wi = nm.setdiff1d(si, li)

            if si.shape[0] == 0:  # No gaps.
                gap = ([2, lf0, log0[0]], [2, lf0, log0[-1]])
                gaps.append(gap)

            elif li.shape[0] == mevp.shape[0]:  # Full interval strong gap.
                gap = ([1, lf1, log1[0]], [1, lf1, log1[-1]])
                gaps.append(gap)

            else:
                subgaps = []
                for chunk in split_chunks(li):  # Strong gaps.
                    i0, i1 = chunk[0], chunk[-1]
                    fmin, fmax = log_freqs[i0], log_freqs[i1]
                    gap = ([1, fmin, mevp[i0, -1]], [1, fmax, mevp[i1, -1]])
                    subgaps.append(gap)

                for chunk in split_chunks(wi):  # Weak gaps.
                    i0, i1 = chunk[0], chunk[-1]
                    fmin, fmax = log_freqs[i0], log_freqs[i1]
                    gap = ([0, fmin, mevp[i0, -1]], [2, fmax, mevp[i1, -1]])
                    subgaps.append(gap)

                gaps.append(subgaps)

        else:
            if min_eig0 > 0.0:  # No gaps.
                gap = ([2, lf0, log0[0]], [2, lf0, log0[-1]])

            elif max_eig1 < 0.0:  # Full interval strong gap.
                gap = ([1, lf1, log1[0]], [1, lf1, log1[-1]])

            else:
                llog_freqs = list(log_freqs)

                # Insert fmin, fmax into log.
                output('finding zero of the largest eig...')
                smax, fmax, vmax = find_zero(lf0, lf1, fz_callback,
                                             opts.freq_eps, opts.zero_eps, 1)
                im = nm.searchsorted(log_freqs, fmax)
                llog_freqs.insert(im, fmax)
                for ii, data in enumerate(trace_callback(fmax)):
                    log_mevp[ii].insert(im, data)

                output('...done')
                if smax in [0, 2]:
                    output('finding zero of the smallest eig...')
                    # having fmax instead of f0 does not work if freq_eps is
                    # large.
                    smin, fmin, vmin = find_zero(lf0, lf1, fz_callback,
                                                 opts.freq_eps,
                                                 opts.zero_eps, 0)
                    im = nm.searchsorted(log_freqs, fmin)
                    # +1 due to fmax already inserted before.
                    llog_freqs.insert(im + 1, fmin)
                    for ii, data in enumerate(trace_callback(fmin)):
                        log_mevp[ii].insert(im + 1, data)
                    output('...done')

                elif smax == 1:
                    smin = 1  # both are negative everywhere.
fmin, vmin = fmax, vmax gap = ([smin, fmin, vmin], [smax, fmax, vmax]) log_freqs = nm.array(llog_freqs) output(gap[0]) output(gap[1]) gaps.append(gap) logs[0].append(log_freqs) for ii, data in enumerate(log_mevp): logs[ii + 1].append(nm.array(data, dtype=nm.float64)) output('...done') kinds = describe_gaps(gaps) slogs = Struct(freqs=logs[0], eigs=logs[1]) if n_col == 2: slogs.eig_vectors = logs[2] return slogs, gaps, kinds
def _gen_common_data(orders, gels, report): import sfepy from sfepy.base.base import Struct from sfepy.linalg import combine from sfepy.discrete import FieldVariable, Integral from sfepy.discrete.fem import Mesh, FEDomain, Field from sfepy.discrete.common.global_interp import get_ref_coors bases = ([ii for ii in combine([['2_4', '3_8'], ['lagrange', 'serendipity', 'bernstein', 'lobatto']])] + [ii for ii in combine([['2_3', '3_4'], ['lagrange', 'bernstein']])]) for geom, poly_space_base in bases: order = orders[geom] if (geom == '3_8') and (poly_space_base == 'serendipity'): order = 2 report('geometry: %s, base: %s, order: %d' % (geom, poly_space_base, order)) integral = Integral('i', order=order) aux = '' if geom in ['2_4', '3_8'] else 'z' mesh0 = Mesh.from_file('meshes/elements/%s_2%s.mesh' % (geom, aux), prefix_dir=sfepy.data_dir) if (geom == '3_8'): meshes = _permute_quad_face(mesh0) else: meshes = [mesh0] gel = gels[geom] perms = gel.get_conn_permutations() qps, qp_weights = integral.get_qp(gel.surface_facet.name) zz = nm.zeros_like(qps[:, :1]) qps = nm.hstack(([qps] + [zz])) shift = shifts[geom] rcoors = nm.ascontiguousarray(qps + shift[:1, :] - shift[1:, :]) ccoors = nm.ascontiguousarray(qps + shift[:1, :] + shift[1:, :]) all_oris = _get_possible_oris(geom) oris = set() for (ir, pr), (ic, pc), (im, mesh0) in product( enumerate(perms), enumerate(perms), enumerate(meshes), ): report('im: %d, ir: %d, ic: %d' % (im, ir, ic)) report('pr: %s, pc: %s' % (pr, pc)) mesh = mesh0.copy() conn = mesh.cmesh.get_conn(mesh0.cmesh.tdim, 0).indices conn = conn.reshape((mesh0.n_el, -1)) conn[0, :] = conn[0, pr] conn[1, :] = conn[1, pc] conn2 = mesh.get_conn(gel.name) assert_((conn == conn2).all()) cache = Struct(mesh=mesh) domain = FEDomain('domain', mesh) omega = domain.create_region('Omega', 'all') region = domain.create_region('Facet', rsels[geom], 'facet') field = Field.from_args('f', nm.float64, shape=1, region=omega, approx_order=order, poly_space_base=poly_space_base) fis = region.get_facet_indices() conn = mesh.cmesh.get_conn_as_graph(region.dim, region.dim - 1) _oris = mesh.cmesh.facet_oris[conn.indptr[fis[:, 0]] + fis[:, 1]] oris |= set(_oris) if oris == all_oris: break var = FieldVariable('u', 'unknown', field) report('# dofs: %d' % var.n_dof) vec = nm.empty(var.n_dof, dtype=var.dtype) ps = field.poly_space dofs = field.get_dofs_in_region(region, merge=False) edofs, fdofs = nm.unique(dofs[1]), nm.unique(dofs[2]) rrc, rcells, rstatus = get_ref_coors(field, rcoors, cache=cache) crc, ccells, cstatus = get_ref_coors(field, ccoors, cache=cache) assert_((rstatus == 0).all() and (cstatus == 0).all()) yield (geom, poly_space_base, qp_weights, mesh, im, ir, ic, field, ps, rrc, rcells[0], crc, ccells[0], vec, edofs, fdofs)
def test_solution(self):
    from sfepy.base.base import Struct
    from sfepy.base.conf import ProblemConf, get_standard_keywords
    from sfepy.applications import solve_pde, assign_standard_hooks
    import numpy as nm
    import os.path as op

    solutions = {}
    ok = True

    for hp, pb_filename in input_names.items():

        required, other = get_standard_keywords()
        input_name = op.join(op.dirname(__file__), pb_filename)
        test_conf = ProblemConf.from_file(input_name, required, other)

        name = output_name_trunk + hp
        solver_options = Struct(output_filename_trunk=name,
                                output_format='vtk',
                                save_ebc=False, save_ebc_nodes=False,
                                save_regions=False,
                                save_regions_as_groups=False,
                                save_field_meshes=False,
                                solve_not=False)

        assign_standard_hooks(self, test_conf.options.get_default_attr,
                              test_conf)

        self.report('hyperelastic formulation: %s' % (hp, ))

        status = NLSStatus(conditions=[])

        pb, state = solve_pde(
            test_conf, solver_options, nls_status=status,
            output_dir=self.options.out_dir,
            step_hook=self.step_hook,
            post_process_hook=self.post_process_hook,
            post_process_hook_final=self.post_process_hook_final)

        converged = status.condition == 0
        ok = ok and converged

        solutions[hp] = state.get_parts()['u']
        self.report('%s solved' % input_name)

    rerr = 1.0e-3
    aerr = nm.linalg.norm(solutions['TL'], ord=None) * rerr

    self.report('allowed error: rel = %e, abs = %e' % (rerr, aerr))
    ok = ok and self.compare_vectors(solutions['TL'], solutions['UL'],
                                     label1='TLF', label2='ULF',
                                     allowed_error=rerr)

    ok = ok and self.compare_vectors(solutions['UL'], solutions['ULM'],
                                     label1='ULF', label2='ULF_mixed',
                                     allowed_error=rerr)

    return ok
def main(): parser = OptionParser(usage=usage, version='%prog') parser.add_option('-b', '--basis', metavar='name', action='store', dest='basis', default='lagrange', help=help['basis']) parser.add_option('-d', '--derivative', metavar='d', type=int, action='store', dest='derivative', default=0, help=help['derivative']) parser.add_option('-n', '--max-order', metavar='order', type=int, action='store', dest='max_order', default=2, help=help['max_order']) parser.add_option('-g', '--geometry', metavar='name', action='store', dest='geometry', default='2_4', help=help['geometry']) parser.add_option('-m', '--mesh', metavar='mesh', action='store', dest='mesh', default=None, help=help['mesh']) parser.add_option('', '--permutations', metavar='permutations', action='store', dest='permutations', default=None, help=help['permutations']) parser.add_option('', '--dofs', metavar='dofs', action='store', dest='dofs', default=None, help=help['dofs']) parser.add_option('-l', '--lin-options', metavar='options', action='store', dest='lin_options', default='min_level=2,max_level=5,eps=1e-3', help=help['lin_options']) parser.add_option('', '--plot-dofs', action='store_true', dest='plot_dofs', default=False, help=help['plot_dofs']) options, args = parser.parse_args() if len(args) == 1: output_dir = args[0] else: parser.print_help(), return output('polynomial space:', options.basis) output('max. order:', options.max_order) lin = Struct(kind='adaptive', min_level=2, max_level=5, eps=1e-3) for opt in options.lin_options.split(','): key, val = opt.split('=') setattr(lin, key, eval(val)) if options.mesh is None: dim, n_ep = int(options.geometry[0]), int(options.geometry[2]) output('reference element geometry:') output(' dimension: %d, vertices: %d' % (dim, n_ep)) gel = GeometryElement(options.geometry) gps = PolySpace.any_from_args(None, gel, 1, base=options.basis) ps = PolySpace.any_from_args(None, gel, options.max_order, base=options.basis) n_digit, _format = get_print_info(ps.n_nod, fill='0') name_template = os.path.join(output_dir, 'bf_%s.vtk' % _format) for ip in get_dofs(options.dofs, ps.n_nod): output('shape function %d...' % ip) def eval_dofs(iels, rx): if options.derivative == 0: bf = ps.eval_base(rx).squeeze() rvals = bf[None, :, ip:ip + 1] else: bfg = ps.eval_base(rx, diff=True) rvals = bfg[None, ..., ip] return rvals def eval_coors(iels, rx): bf = gps.eval_base(rx).squeeze() coors = nm.dot(bf, gel.coors)[None, ...] 
                return coors

            (level, coors, conn,
             vdofs, mat_ids) = create_output(eval_dofs, eval_coors,
                                             1, ps,
                                             min_level=lin.min_level,
                                             max_level=lin.max_level,
                                             eps=lin.eps)
            out = {
                'bf': Struct(name='output_data',
                             mode='vertex', data=vdofs,
                             var_name='bf', dofs=None)
            }

            mesh = Mesh.from_data('bf_mesh', coors, None, [conn],
                                  [mat_ids], [options.geometry])
            name = name_template % ip
            ensure_path(name)
            mesh.write(name, out=out)

            output('...done (%s)' % name)

    else:
        mesh = Mesh.from_file(options.mesh)
        output('mesh geometry:')
        output(' dimension: %d, vertices: %d, elements: %d'
               % (mesh.dim, mesh.n_nod, mesh.n_el))

        if options.permutations:
            if options.permutations == 'all':
                from sfepy.linalg import cycle
                gel = GeometryElement(mesh.descs[0])
                n_perms = gel.get_conn_permutations().shape[0]
                all_permutations = [ii for ii in cycle(mesh.n_el * [n_perms])]
            else:
                all_permutations = [int(ii)
                                    for ii in options.permutations.split(',')]
                all_permutations = nm.array(all_permutations)
                np = len(all_permutations)
                all_permutations.shape = (np // mesh.n_el, mesh.n_el)
            output('using connectivity permutations:\n', all_permutations)
        else:
            all_permutations = [None]

        for ip, permutations in enumerate(all_permutations):
            aux = mesh.copy()
            save_basis_on_mesh(aux, options, output_dir, lin, permutations,
                               '_'.join('%d' % ii for ii in permutations))
def call(self, ret_all=False):
    problem = self.problem
    opts = self.app_options

    # Some coefficients can require other coefficients - resolve their
    # order here.
    req_info = getattr(self.conf, opts.requirements, {})
    coef_info = getattr(self.conf, opts.coefs, {})

    is_store_filenames = coef_info.pop('filenames', None) is not None

    sorted_names = self.get_sorted_dependencies(req_info, coef_info,
                                                opts.compute_only)

    use_multiprocessing = _use_multiprocessing\
        and getattr(self.conf.options, 'multiprocessing', True)\
        and len(sorted_names) > 2

    coefs = Struct()
    if use_multiprocessing:
        manager = multiprocessing.Manager()
        dependencies = manager.dict()
        sd_names = manager.dict()
        numdeps = manager.list()
        remaining = manager.Value('i', len(sorted_names))
        tasks = multiprocessing.Queue()
        lock = multiprocessing.Lock()

        # calculate number of dependencies and inverse map
        inverse_deps = {}
        for ii, name in enumerate(sorted_names):
            if name.startswith('c.'):
                reqs = coef_info[name[2:]].get('requires', [])
            else:
                reqs = req_info[name].get('requires', [])
            numdeps.append(len(reqs))
            if len(reqs) > 0:
                for req in reqs:
                    if req in inverse_deps:
                        inverse_deps[req].append((ii, name))
                    else:
                        inverse_deps[req] = [(ii, name)]

        for ii, name in enumerate(sorted_names):
            if numdeps[ii] == 0:
                tasks.put(name)

        num_workers = multiprocessing.cpu_count()
        workers = []
        for ii in range(num_workers):
            args = (tasks, lock, remaining, numdeps, inverse_deps,
                    problem, opts, self.volume, self.post_process_hook,
                    req_info, coef_info, sd_names, dependencies)
            w = multiprocessing.Process(target=self.calculate_req_multi,
                                        args=args)
            w.start()
            workers.append(w)

        # block until all workers are terminated
        for w in workers:
            w.join()

    else:  # no multiprocessing
        dependencies = {}
        sd_names = {}

        for name in sorted_names:
            val = self.calculate_req(problem, opts, self.volume,
                                     self.post_process_hook, name,
                                     req_info, coef_info, sd_names,
                                     dependencies)
            dependencies[name] = val

    coefs = Struct()
    deps = {}
    for name in dependencies.keys():
        data = dependencies[name]
        if name.startswith('c.'):
            coef_name = name[2:]
            cstat = coef_info[coef_name].get('status', 'main')
            # remove "auxiliary" coefs
            if not cstat == 'auxiliary':
                setattr(coefs, coef_name, data)
        else:
            deps[name] = data

    # Store filenames of all requirements as a "coefficient".
    if is_store_filenames:
        save_names = {}
        dump_names = {}
        for name in sd_names.keys():
            val = sd_names[name]
            if name.startswith('s.'):
                save_names[name[2:]] = val
            elif name.startswith('d.'):
                dump_names[name[2:]] = val

        coefs.save_names = save_names
        coefs.dump_names = dump_names

    if opts.coefs_info is not None:
        coefs.info = opts.coefs_info

    if ret_all:
        return coefs, deps
    else:
        return coefs
def main(): parser = ArgumentParser(description=__doc__, formatter_class=RawDescriptionHelpFormatter) parser.add_argument('--version', action='version', version='%(prog)s') parser.add_argument('-d', '--dims', metavar='dims', action='store', dest='dims', default='[1.0, 1.0]', help=helps['dims']) parser.add_argument('-c', '--centre', metavar='centre', action='store', dest='centre', default='[0.0, 0.0]', help=helps['centre']) parser.add_argument('-s', '--shape', metavar='shape', action='store', dest='shape', default='[11, 11]', help=helps['shape']) parser.add_argument('-b', '--bc-kind', metavar='kind', action='store', dest='bc_kind', choices=['free', 'cantilever', 'fixed'], default='free', help=helps['bc_kind']) parser.add_argument('-a', '--axis', metavar='0, ..., dim, or -1', type=int, action='store', dest='axis', default=-1, help=helps['axis']) parser.add_argument('--young', metavar='float', type=float, action='store', dest='young', default=6.80e+10, help=helps['young']) parser.add_argument('--poisson', metavar='float', type=float, action='store', dest='poisson', default=0.36, help=helps['poisson']) parser.add_argument('--density', metavar='float', type=float, action='store', dest='density', default=2700.0, help=helps['density']) parser.add_argument('--order', metavar='int', type=int, action='store', dest='order', default=1, help=helps['order']) parser.add_argument('-n', '--n-eigs', metavar='int', type=int, action='store', dest='n_eigs', default=6, help=helps['n_eigs']) parser.add_argument('-i', '--ignore', metavar='int', type=int, action='store', dest='ignore', default=None, help=helps['ignore']) parser.add_argument('--solver', metavar='solver', action='store', dest='solver', default= \ "eig.scipy,method:'eigh',tol:1e-5,maxiter:1000", help=helps['solver']) parser.add_argument('--show', action="store_true", dest='show', default=False, help=helps['show']) parser.add_argument('filename', nargs='?', default=None) options = parser.parse_args() aux = options.solver.split(',') kwargs = {} for option in aux[1:]: key, val = option.split(':') kwargs[key.strip()] = eval(val) eig_conf = Struct(name='evp', kind=aux[0], **kwargs) output('using values:') output(" Young's modulus:", options.young) output(" Poisson's ratio:", options.poisson) output(' density:', options.density) output('displacement field approximation order:', options.order) output('requested %d eigenvalues' % options.n_eigs) output('using eigenvalue problem solver:', eig_conf.kind) output.level += 1 for key, val in six.iteritems(kwargs): output('%s: %r' % (key, val)) output.level -= 1 assert_((0.0 < options.poisson < 0.5), "Poisson's ratio must be in ]0, 0.5[!") assert_((0 < options.order), 'displacement approximation order must be at least 1!') filename = options.filename if filename is not None: mesh = Mesh.from_file(filename) dim = mesh.dim dims = nm.diff(mesh.get_bounding_box(), axis=0) else: dims = nm.array(eval(options.dims), dtype=nm.float64) dim = len(dims) centre = nm.array(eval(options.centre), dtype=nm.float64)[:dim] shape = nm.array(eval(options.shape), dtype=nm.int32)[:dim] output('dimensions:', dims) output('centre: ', centre) output('shape: ', shape) mesh = gen_block_mesh(dims, shape, centre, name='mesh') output('axis: ', options.axis) assert_((-dim <= options.axis < dim), 'invalid axis value!') eig_solver = Solver.any_from_conf(eig_conf) # Build the problem definition. 
domain = FEDomain('domain', mesh) bbox = domain.get_mesh_bounding_box() min_coor, max_coor = bbox[:, options.axis] eps = 1e-8 * (max_coor - min_coor) ax = 'xyz'[:dim][options.axis] omega = domain.create_region('Omega', 'all') bottom = domain.create_region('Bottom', 'vertices in (%s < %.10f)' % (ax, min_coor + eps), 'facet') bottom_top = domain.create_region('BottomTop', 'r.Bottom +v vertices in (%s > %.10f)' % (ax, max_coor - eps), 'facet') field = Field.from_args('fu', nm.float64, 'vector', omega, approx_order=options.order) u = FieldVariable('u', 'unknown', field) v = FieldVariable('v', 'test', field, primary_var_name='u') mtx_d = stiffness_from_youngpoisson(dim, options.young, options.poisson) m = Material('m', D=mtx_d, rho=options.density) integral = Integral('i', order=2*options.order) t1 = Term.new('dw_lin_elastic(m.D, v, u)', integral, omega, m=m, v=v, u=u) t2 = Term.new('dw_volume_dot(m.rho, v, u)', integral, omega, m=m, v=v, u=u) eq1 = Equation('stiffness', t1) eq2 = Equation('mass', t2) lhs_eqs = Equations([eq1, eq2]) pb = Problem('modal', equations=lhs_eqs) if options.bc_kind == 'free': pb.time_update() n_rbm = dim * (dim + 1) // 2 elif options.bc_kind == 'cantilever': fixed = EssentialBC('Fixed', bottom, {'u.all' : 0.0}) pb.time_update(ebcs=Conditions([fixed])) n_rbm = 0 elif options.bc_kind == 'fixed': fixed = EssentialBC('Fixed', bottom_top, {'u.all' : 0.0}) pb.time_update(ebcs=Conditions([fixed])) n_rbm = 0 else: raise ValueError('unsupported BC kind! (%s)' % options.bc_kind) if options.ignore is not None: n_rbm = options.ignore pb.update_materials() # Assemble stiffness and mass matrices. mtx_k = eq1.evaluate(mode='weak', dw_mode='matrix', asm_obj=pb.mtx_a) mtx_m = mtx_k.copy() mtx_m.data[:] = 0.0 mtx_m = eq2.evaluate(mode='weak', dw_mode='matrix', asm_obj=mtx_m) try: eigs, svecs = eig_solver(mtx_k, mtx_m, options.n_eigs + n_rbm, eigenvectors=True) except sla.ArpackNoConvergence as ee: eigs = ee.eigenvalues svecs = ee.eigenvectors output('only %d eigenvalues converged!' % len(eigs)) output('%d eigenvalues converged (%d ignored as rigid body modes)' % (len(eigs), n_rbm)) eigs = eigs[n_rbm:] svecs = svecs[:, n_rbm:] omegas = nm.sqrt(eigs) freqs = omegas / (2 * nm.pi) output('number | eigenvalue | angular frequency ' '| frequency') for ii, eig in enumerate(eigs): output('%6d | %17.12e | %17.12e | %17.12e' % (ii + 1, eig, omegas[ii], freqs[ii])) # Make full eigenvectors (add DOFs fixed by boundary conditions). variables = pb.get_variables() vecs = nm.empty((variables.di.ptr[-1], svecs.shape[1]), dtype=nm.float64) for ii in range(svecs.shape[1]): vecs[:, ii] = variables.make_full_vec(svecs[:, ii]) # Save the eigenvectors. out = {} state = pb.create_state() for ii in range(eigs.shape[0]): state.set_full(vecs[:, ii]) aux = state.create_output_dict() strain = pb.evaluate('ev_cauchy_strain.i.Omega(u)', integrals=Integrals([integral]), mode='el_avg', verbose=False) out['u%03d' % ii] = aux.popitem()[1] out['strain%03d' % ii] = Struct(mode='cell', data=strain) pb.save_state('eigenshapes.vtk', out=out) pb.save_regions_as_groups('regions') if len(eigs) and options.show: # Show the solution. If the approximation order is greater than 1, the # extra DOFs are simply thrown away. 
from sfepy.postprocess.viewer import Viewer from sfepy.postprocess.domain_specific import DomainSpecificPlot scaling = 0.05 * dims.max() / nm.abs(vecs).max() ds = {} for ii in range(eigs.shape[0]): pd = DomainSpecificPlot('plot_displacements', ['rel_scaling=%s' % scaling, 'color_kind="tensors"', 'color_name="strain%03d"' % ii]) ds['u%03d' % ii] = pd view = Viewer('eigenshapes.vtk') view(domain_specific=ds, only_names=sorted(ds.keys()), is_scalar_bar=False, is_wireframe=True)
def generate_images(images_dir, examples_dir): """ Generate images from results of running examples found in `examples_dir` directory. The generated images are stored to `images_dir`, """ from sfepy.applications import solve_pde from sfepy.postprocess.viewer import Viewer from sfepy.postprocess.utils import mlab prefix = output.prefix output_dir = tempfile.mkdtemp() trunk = os.path.join(output_dir, 'result') options = Struct(output_filename_trunk=trunk, output_format='vtk', save_ebc=False, save_ebc_nodes=False, save_regions=False, save_field_meshes=False, save_regions_as_groups=False, solve_not=False) default_views = {'' : {}} ensure_path(images_dir + os.path.sep) view = Viewer('', offscreen=False) for ex_filename in locate_files('*.py', examples_dir): if _omit(ex_filename): continue output.level = 0 output.prefix = prefix ebase = ex_filename.replace(examples_dir, '')[1:] output('trying "%s"...' % ebase) try: problem, state = solve_pde(ex_filename, options=options) except KeyboardInterrupt: raise except: problem = None output('***** failed! *****') if problem is not None: if ebase in custom: views = custom[ebase] else: views = default_views tsolver = problem.get_time_solver() if tsolver.ts is None: suffix = None else: suffix = tsolver.ts.suffix % (tsolver.ts.n_step - 1) filename = problem.get_output_name(suffix=suffix) for suffix, kwargs in six.iteritems(views): fig_filename = _get_fig_filename(ebase, images_dir, suffix) fname = edit_filename(filename, suffix=suffix) output('displaying results from "%s"' % fname) disp_name = fig_filename.replace(sfepy.data_dir, '') output('to "%s"...' % disp_name.lstrip(os.path.sep)) view.filename = fname view(scene=view.scene, show=False, is_scalar_bar=True, **kwargs) view.save_image(fig_filename) mlab.clf() output('...done') remove_files(output_dir) output('...done')
def __call__(self, volume=None, problem=None, data=None): problem = get_default(problem, self.problem) opts = self.app_options evp, ema, mass = [data[ii] for ii in self.requires[:3]] if len(self.requires) == 4: mtx_b = data[self.requires[3]] else: mtx_b = None eigs = evp.eigs self.fix_eig_range(eigs.shape[0]) if opts.fixed_freq_range is not None: (freq_range_initial, opts.eig_range) = get_ranges(opts.fixed_freq_range, eigs) else: opts.eig_range = slice(*opts.eig_range) freq_range_initial = nm.sqrt(eigs[opts.eig_range]) output('initial freq. range : [%8.3f, %8.3f]' % tuple(freq_range_initial[[0, -1]])) aux = cut_freq_range(freq_range_initial, eigs, ema.valid, opts.freq_margins, opts.eig_range, opts.fixed_freq_range, opts.freq_eps) freq_range, freq_range_margins = aux if len(freq_range): output('freq. range : [%8.3f, %8.3f]' % tuple(freq_range[[0, -1]])) else: # All masked. output('freq. range : all masked!') freq_info = Struct(name='freq_info', freq_range_initial=freq_range_initial, freq_range=freq_range, freq_range_margins=freq_range_margins) logs, gaps, kinds = opts.detect_fun(mass, freq_info, opts, mtx_b=mtx_b) gap_ranges = get_gap_ranges(freq_range_margins, gaps, kinds) bg = Struct(name='band_gaps', logs=logs, gaps=gaps, kinds=kinds, gap_ranges=gap_ranges, valid=ema.valid, eig_range=opts.eig_range, n_eigs=eigs.shape[0], n_zeroed=ema.n_zeroed, freq_range_initial=freq_info.freq_range_initial, freq_range=freq_info.freq_range, freq_range_margins=freq_info.freq_range_margins, opts=opts, to_file_txt=self.to_file_txt, log_save_name=opts.log_save_name, raw_log_save_name=opts.raw_log_save_name, save_log=self.save_log) return bg
def solve_problem(mesh_filename, options, comm): order = options.order rank, size = comm.Get_rank(), comm.Get_size() output('rank', rank, 'of', size) mesh = Mesh.from_file(mesh_filename) if rank == 0: cell_tasks = pl.partition_mesh(mesh, size, use_metis=options.metis, verbose=True) else: cell_tasks = None output('creating global domain and field...') tt = time.clock() domain = FEDomain('domain', mesh) omega = domain.create_region('Omega', 'all') field = Field.from_args('fu', nm.float64, 1, omega, approx_order=order) output('...done in', time.clock() - tt) output('distributing field %s...' % field.name) tt = time.clock() distribute = pl.distribute_fields_dofs lfds, gfds = distribute([field], cell_tasks, is_overlap=True, save_inter_regions=options.save_inter_regions, output_dir=options.output_dir, comm=comm, verbose=True) lfd = lfds[0] output('...done in', time.clock() - tt) if rank == 0: dof_maps = gfds[0].dof_maps id_map = gfds[0].id_map if options.verify: verify_save_dof_maps(field, cell_tasks, dof_maps, id_map, options, verbose=True) if options.plot: ppd.plot_partitioning([None, None], field, cell_tasks, gfds[0], options.output_dir, size) output('creating local problem...') tt = time.clock() omega_gi = Region.from_cells(lfd.cells, field.domain) omega_gi.finalize() omega_gi.update_shape() pb = create_local_problem(omega_gi, order) output('...done in', time.clock() - tt) variables = pb.get_variables() eqs = pb.equations u_i = variables['u_i'] field_i = u_i.field if options.plot: ppd.plot_local_dofs([None, None], field, field_i, omega_gi, options.output_dir, rank) output('allocating global system...') tt = time.clock() sizes, drange = pl.get_sizes(lfd.petsc_dofs_range, field.n_nod, 1) output('sizes:', sizes) output('drange:', drange) pdofs = pl.get_local_ordering(field_i, lfd.petsc_dofs_conn) output('pdofs:', pdofs) pmtx, psol, prhs = pl.create_petsc_system(pb.mtx_a, sizes, pdofs, drange, is_overlap=True, comm=comm, verbose=True) output('...done in', time.clock() - tt) output('evaluating local problem...') tt = time.clock() state = State(variables) state.fill(0.0) state.apply_ebc() rhs_i = eqs.eval_residuals(state()) # This must be after pl.create_petsc_system() call! mtx_i = eqs.eval_tangent_matrices(state(), pb.mtx_a) output('...done in', time.clock() - tt) output('assembling global system...') tt = time.clock() pl.apply_ebc_to_matrix(mtx_i, u_i.eq_map.eq_ebc) pl.assemble_rhs_to_petsc(prhs, rhs_i, pdofs, drange, is_overlap=True, comm=comm, verbose=True) pl.assemble_mtx_to_petsc(pmtx, mtx_i, pdofs, drange, is_overlap=True, comm=comm, verbose=True) output('...done in', time.clock() - tt) output('creating solver...') tt = time.clock() conf = Struct(method='cg', precond='gamg', sub_precond='none', i_max=10000, eps_a=1e-50, eps_r=1e-5, eps_d=1e4, verbose=True) status = {} ls = PETScKrylovSolver(conf, comm=comm, mtx=pmtx, status=status) output('...done in', time.clock() - tt) output('solving...') tt = time.clock() psol = ls(prhs, psol, conf) psol_i = pl.create_local_petsc_vector(pdofs) gather, scatter = pl.create_gather_scatter(pdofs, psol_i, psol, comm=comm) scatter(psol_i, psol) sol0_i = state() - psol_i[...] psol_i[...] 
= sol0_i gather(psol, psol_i) output('...done in', time.clock() - tt) output('saving solution...') tt = time.clock() u_i.set_data(sol0_i) out = u_i.create_output() filename = os.path.join(options.output_dir, 'sol_%02d.h5' % comm.rank) pb.domain.mesh.write(filename, io='auto', out=out) gather_to_zero = pl.create_gather_to_zero(psol) psol_full = gather_to_zero(psol) if comm.rank == 0: sol = psol_full[...].copy()[id_map] u = FieldVariable('u', 'parameter', field, primary_var_name='(set-to-None)') filename = os.path.join(options.output_dir, 'sol.h5') if (order == 1) or (options.linearization == 'strip'): out = u.create_output(sol) mesh.write(filename, io='auto', out=out) else: out = u.create_output(sol, linearization=Struct(kind='adaptive', min_level=0, max_level=order, eps=1e-3)) out['u'].mesh.write(filename, io='auto', out=out) output('...done in', time.clock() - tt) if options.show: plt.show()
def process_options(self): get = self.options.get return Struct(mode=get('mode', 'simple'), incident_wave_dir=get('incident_wave_dir', None))
# ---------------------------- # | Create initial condition | # ---------------------------- def ic_wrap(x, ic=None): return ghump(x - .5) ic_fun = Function('ic_fun', ic_wrap) ics = InitialCondition('ic', omega, {'u.0': ic_fun}) # ------------------ # | Create problem | # ------------------ pb = Problem(problem_name, equations=eqs, conf=Struct(options={"save_times": save_timestn}, ics={}, ebcs={}, epbcs={}, lcbcs={}, materials={}), active_only=False) pb.setup_output(output_dir=output_folder, output_format=output_format) pb.set_ics(Conditions([ics])) # ------------------ # | Create limiter | # ------------------ limiter = IdentityLimiter # --------------------------- # | Set time discretization | # --------------------------- t0 = 0 t1 = .1 dx = nm.min(mesh.cmesh.get_volumes(1))
def process_options(self): get = self.options.get return Struct(incident_wave_dir=get('incident_wave_dir', None))
def _button_make_snapshots_fired(self): view = mlab.view() roll = mlab.roll() make_animation(self.viewer.filename, view, roll, 'png', Struct(**self.viewer.options), self.viewer)
def process_options(self): get = self.options.get return Struct(eigensolver=get('eigensolver', 'eig.sgscipy'))
def get_dof_conn_type(self): return Struct(name='dof_conn_info', type=self.dof_conn_type, region_name=self.region.name)
def create_output(self, dofs, var_name, dof_names=None, key=None, extend=True, fill_value=None, linearization=None): """ Convert the DOFs corresponding to the field to a dictionary of output data usable by Mesh.write(). Parameters ---------- dofs : array, shape (n_nod, n_component) The array of DOFs reshaped so that each column corresponds to one component. var_name : str The variable name corresponding to `dofs`. dof_names : tuple of str The names of DOF components. key : str, optional The key to be used in the output dictionary instead of the variable name. extend : bool Extend the DOF values to cover the whole domain. fill_value : float or complex The value used to fill the missing DOF values if `extend` is True. linearization : Struct or None The linearization configuration for higher order approximations. Returns ------- out : dict The output dictionary. """ linearization = get_default(linearization, Struct(kind='strip')) out = {} if linearization.kind is None: out[key] = Struct(name='output_data', mode='full', data=dofs, var_name=var_name, dofs=dof_names, field_name=self.name) elif linearization.kind == 'strip': if extend: ext = self.extend_dofs(dofs, fill_value) else: ext = self.remove_extra_dofs(dofs) if ext is not None: approx_order = self.get_output_approx_order() if approx_order != 0: # Has vertex data. out[key] = Struct(name='output_data', mode='vertex', data=ext, var_name=var_name, dofs=dof_names) else: ext.shape = (ext.shape[0], 1, ext.shape[1], 1) out[key] = Struct(name='output_data', mode='cell', data=ext, var_name=var_name, dofs=dof_names) else: mesh, vdofs, levels = self.linearize(dofs, linearization.min_level, linearization.max_level, linearization.eps) out[key] = Struct(name='output_data', mode='vertex', data=vdofs, var_name=var_name, dofs=dof_names, mesh=mesh, levels=levels) out = convert_complex_output(out) return out
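# Hedged usage sketch for create_output(): convert a DOF array to output data
# and write it together with the field's mesh. The names `field` and `dofs`
# (an (n_nod, n_components) array) as well as the output file name are
# assumptions of this example.
def _example_write_dofs(field, dofs):
    from sfepy.base.base import Struct

    out = field.create_output(dofs, 'u', dof_names=('u0', 'u1'),
                              linearization=Struct(kind='strip'))
    field.domain.mesh.write('_example_output.vtk', io='auto', out=out)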