Example #1
    def __init__(self, name='mesh', filename=None,
                 prefix_dir=None, **kwargs):
        """Create a Mesh.

        Parameters
        ----------
        name : str
            Object name.
        filename : str
            Loads a mesh from the specified file, if not None.
        prefix_dir : str
            If not None, the filename is relative to that directory.
        """
        Struct.__init__(self, name=name, **kwargs)
        self.nodal_bcs = {}

        if filename is None:
            self.io = None
            self.setup_done = 0

        else:
            io = MeshIO.any_from_filename(filename, prefix_dir=prefix_dir)
            output('reading mesh (%s)...' % (io.filename))
            tt = time.clock()
            io.read(self)
            output('...done in %.2f s' % (time.clock() - tt))
            self._set_shape_info()
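
A minimal usage sketch for the constructor above, assuming the class is
importable as sfepy.fem.Mesh (the module path varies across sfepy versions;
the mesh file path is hypothetical):

from sfepy.fem import Mesh

# Deferred setup: without a filename, self.io stays None until data arrive.
empty = Mesh(name='empty')

# Immediate load: the file is read through MeshIO.any_from_filename().
mesh = Mesh(name='block', filename='meshes/3d/block.mesh')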
Example #2
def conv_test( conf, it, of, of0, ofg_norm = None ):
    """
    Returns
    -------
    flag : int
        * -1 ... continue
        *  0 ... small OF -> stop
        *  1 ... i_max reached -> stop
        *  2 ... small OFG -> stop
        *  3 ... small relative decrease of OF
    """

    status = -1
    if ofg_norm is None:
        output('opt: iter: %d, of: %e' % (it, of))
    else:
        output('opt: iter: %d, of: %e (||ofg||: %e)' % (it, of, ofg_norm))
#    print (of0 - of), (conf.eps_rd * of0)

    if (abs( of ) < conf.eps_of):
        status = 0
    elif ofg_norm and (ofg_norm < conf.eps_ofg):
        status = 2
    elif (it > 0) and (abs(of0 - of) < (conf.eps_rd * abs( of0 ))):
        status = 3
        
    if (status == -1) and (it >= conf.i_max):
        status = 1

    return status
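
A hedged usage sketch: conf only needs the four attributes read above
(eps_of, eps_ofg, eps_rd, i_max), mocked here with a namedtuple, and the
objective values are artificial:

from collections import namedtuple

Conf = namedtuple('Conf', ['eps_of', 'eps_ofg', 'eps_rd', 'i_max'])
conf = Conf(eps_of=1e-8, eps_ofg=1e-6, eps_rd=1e-5, i_max=100)

of0 = 1.0
for it in range(conf.i_max + 1):
    of = 0.5 * of0  # mock objective function value
    status = conv_test(conf, it, of, of0, ofg_norm=0.1 * of)
    if status >= 0:  # any non-negative flag means stop
        break
    of0 = of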
Example #3
def setup_dof_conns(conn_info, dof_conns=None,
                    make_virtual=False, verbose=True):
    """
    Dof connectivity key:
        (field.name, var.n_components, region.name, type, ig)
    """
    if verbose:
        output('setting up dof connectivities...')
        tt = time.clock()

    dof_conns = get_default(dof_conns, {})

    for key, ii, info in iter_dict_of_lists(conn_info, return_keys=True):

        if info.primary is not None:
            var = info.primary
            field = var.get_field()
            field.setup_extra_data(info.ps_tg, info, info.is_trace)
            field.setup_dof_conns(dof_conns, var.n_components,
                                  info.dc_type, info.get_region(),
                                  info.is_trace)

        if info.has_virtual and not info.is_trace:
            # This is needed regardless make_virtual.
            var = info.virtual
            field = var.get_field()
            field.setup_extra_data(info.v_tg, info, False)
            field.setup_dof_conns(dof_conns, var.n_components,
                                  info.dc_type,
                                  info.get_region(can_trace=False))

    if verbose:
        output('...done in %.2f s' % (time.clock() - tt))

    return dof_conns
Example #4
    def __call__(self, vec0=None, nls=None, init_fun=None, prestep_fun=None,
                 poststep_fun=None, status=None, **kwargs):
        """
        Solve elastodynamics problems by the Newmark method.
        """
        conf = self.conf
        nls = get_default(nls, self.nls)

        vec, unpack, pack = self.get_initial_vec(
            nls, vec0, init_fun, prestep_fun, poststep_fun)

        ts = self.ts
        for step, time in ts.iter_from(ts.step):
            output(self.format % (time, step + 1, ts.n_step),
                   verbose=self.verbose)
            dt = ts.dt

            prestep_fun(ts, vec)
            ut, vt, at = unpack(vec)

            nlst = self.create_nlst(nls, dt, conf.gamma, conf.beta, ut, vt, at)
            atp = nlst(at)
            vtp = nlst.v(atp)
            utp = nlst.u(atp)

            vect = pack(utp, vtp, atp)
            poststep_fun(ts, vect)

            vec = vect

        return vec
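
For reference, the classical Newmark update formulas that the nlst.u() and
nlst.v() helpers presumably implement, with beta and gamma taken from the
configuration (a_{n+1} is the acceleration solved for by nlst above):

u_{n+1} = u_n + \Delta t \, v_n
          + \frac{\Delta t^2}{2} \left[(1 - 2\beta)\, a_n + 2\beta\, a_{n+1}\right],
v_{n+1} = v_n + \Delta t \left[(1 - \gamma)\, a_n + \gamma\, a_{n+1}\right].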
Example #5
def make_explicit_step(ts, state0, problem, mass, nls_status=None):
    problem.time_update(ts)

    if ts.step == 0:
        state0.apply_ebc()
        state = state0.copy(deep=True)

        problem.init_time(ts)

        # Initialize variables with history.
        state0.init_history()

    ev = problem.get_evaluator()
    try:
        vec_r = ev.eval_residual(state0(), is_full=True)
    except ValueError:
        output('residual evaluation failed, giving up...')
        raise
    else:
        err = nm.linalg.norm(vec_r)
        output('residual: %e' % err)

    if ts.step > 0:
        variables = problem.get_variables()
        vec_rf = variables.make_full_vec(vec_r, force_value=0.0)

        rhs = -ts.dt * vec_rf + mass.action(state0())

        vec = mass.inverse_action(rhs)

        state = state0.copy(preserve_caches=True)
        state.set_full(vec)
        state.apply_ebc()

    return state
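
The branch for ts.step > 0 amounts to one forward Euler step with the mass
matrix M: given the full residual r(u_n), it forms

M u_{n+1} = M u_n - \Delta t \, r(u_n),

so mass.action() and mass.inverse_action() presumably stand for
multiplication by M and by M^{-1} (or a lumped/factorized equivalent).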
Example #6
def triangulate(mesh, verbose=False):
    """
    Triangulate a 2D or 3D tensor product mesh: quadrilaterals->triangles,
    hexahedrons->tetrahedrons.

    Parameters
    ----------
    mesh : Mesh
        The input mesh.

    Returns
    -------
    mesh : Mesh
        The triangulated mesh.
    """
    conns = None
    for k, new_desc in [('3_8', '3_4'), ('2_4', '2_3')]:
        if k in mesh.descs:
            conns = mesh.get_conn(k)
            break

    if conns is not None:
        nelo = conns.shape[0]
        output('initial mesh: %d elements' % nelo, verbose=verbose)

        new_conns = elems_q2t(conns)
        nn = new_conns.shape[0] // nelo
        new_cgroups = nm.repeat(mesh.cmesh.cell_groups, nn)

        output('new mesh: %d elements' % new_conns.shape[0], verbose=verbose)
        mesh = Mesh.from_data(mesh.name, mesh.coors,
                              mesh.cmesh.vertex_groups,
                              [new_conns], [new_cgroups], [new_desc])

    return mesh
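
A hedged usage sketch (hypothetical file names; the Mesh import path varies
across sfepy versions):

from sfepy.fem import Mesh

mesh = Mesh.from_file('meshes/3d/block.mesh')
tri = triangulate(mesh, verbose=True)  # hexahedra -> tetrahedra here
tri.write('block_tet.vtk', io='auto')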
Example #7
def get_conductivity(ts, coors, problem, equations = None, mode = None, **kwargs):
    """
    Calculates the conductivity with a constitutive k(psi) relation,
    where psi = -h.
    """
    if mode == 'qp':

        ## Get pressure values
        h_values = problem.evaluate('ev_volume_integrate.i.Omega(h)',
                                    mode = 'qp', verbose = False) * scaling
        psi_values = -h_values

        # van Genuchten
        val = vanGenuchten(ksat = silt_ksat, aVG = silt_aVG, nVG = silt_nVG,
                           mVG = silt_mVG, lVG = silt_lVG, psi = psi_values)

        # Brooks and Corey
        #val = brooksCorey(ksat = silt_ksat, aev = silt_aev, lBC = silt_lBC,
        #                  psi = psi_values)

        # Reshape the val vector to match SfePy expectations
        val.shape = (val.shape[0] * val.shape[1], 1, 1)

        # Check output
        output('h_values: min:', h_values.min(), 'max:', h_values.max())
        output('conductivity: min:', val.min(), 'max:', val.max())

        return {'val' : val}
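
For reference, vanGenuchten() presumably implements the standard van
Genuchten-Mualem relation matching the ksat/aVG/nVG/mVG/lVG parameters
above: with the effective saturation

S_e(\psi) = \left[1 + (\alpha \psi)^n\right]^{-m},

where commonly m = 1 - 1/n (here passed explicitly as silt_mVG), the
unsaturated conductivity for \psi > 0 is

K(\psi) = K_s \, S_e^{l} \left[1 - \left(1 - S_e^{1/m}\right)^{m}\right]^2,

and K = K_s in the saturated regime \psi \le 0.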
Example #8
    def igs(self):
        """
        Cell group indices according to region kind.
        """
        if self.parent is not None:
            self._igs = self.domain.regions[self.parent].igs

        elif self._igs is None:
            if 'vertex' in self.true_kind:
                self._igs = self.domain.cmesh.get_igs(self.vertices, 0)

            elif 'edge' in self.true_kind:
                self._igs = self.domain.cmesh.get_igs(self.edges, 1)

            elif 'face' in self.true_kind:
                self._igs = self.domain.cmesh.get_igs(self.faces, 2)

            elif 'cell' in self.true_kind:
                self._igs = self.domain.cmesh.get_igs(self.cells, self.tdim)

            if not len(self._igs):
                output('warning: region %s of %s kind has empty group indices!'
                       % (self.name, self.kind))

        return self._igs
Example #9
    def create_conn_graph(self, verbose=True):
        """
        Create a graph of mesh connectivity.

        Returns
        -------
        graph : csr_matrix
            The mesh connectivity graph as a SciPy CSR matrix.
        """
        from extmods.cmesh import create_mesh_graph

        shape = (self.n_nod, self.n_nod)
        output('graph shape:', shape, verbose=verbose)
        if nm.prod(shape) == 0:
            output('no graph (zero size)!', verbose=verbose)
            return None

        output('assembling mesh graph...', verbose=verbose)
        tt = time.clock()

        nnz, prow, icol = create_mesh_graph(shape[0], shape[1],
                                            len(self.conns),
                                            self.conns, self.conns)
        output('...done in %.2f s' % (time.clock() - tt), verbose=verbose)
        output('graph nonzeros: %d (%.2e%% fill)' \
               % (nnz, float(nnz) / nm.prod(shape)))

        data = nm.ones((nnz,), dtype=nm.bool)
        graph = sp.csr_matrix((data, icol, prow), shape)

        return graph
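
A hedged usage sketch: given a Mesh instance mesh, the boolean CSR graph
plugs directly into SciPy's graph algorithms, e.g. a bandwidth-reducing
node reordering:

from scipy.sparse.csgraph import reverse_cuthill_mckee

graph = mesh.create_conn_graph(verbose=True)
if graph is not None:
    perm = reverse_cuthill_mckee(graph)  # permutation of the mesh nodes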
Example #10
    def setup_facets(self, create_edges=True, create_faces=True,
                     verbose=False):
        """
        Setup the edges and faces (in 3D) of domain elements.
        """
        kinds = ['edges', 'faces']

        is_face = self.has_faces()
        create = [create_edges, create_faces and is_face]

        for ii, kind in enumerate(kinds):
            if create[ii]:
                if verbose:
                    output('setting up domain %s...' % kind)

                tt = time.clock()
                obj = Facets.from_domain(self, kind)
                obj.sort_and_orient()
                obj.setup_unique()
                obj.setup_neighbours()

                # 'ed' or 'fa'
                setattr(self, kind[:2], obj)

                if verbose:
                    output('...done in %.2f s' % (time.clock() - tt))

        if not is_face:
            self.fa = None
Example #11
    def __call__( self, problem = None, data = None, save_hook = None ):
        """data: corrs_pressure, evp, optionally vec_g"""
        problem = get_default( problem, self.problem )
        ts = problem.get_time_solver().ts

        corrs, evp = [data[ii] for ii in self.requires[:2]]
        if len(self.requires) == 3:
            vec_g = data[self.requires[2]]
        else:
            vec_g = None

        assert_( evp.ebcs == self.ebcs )
        assert_( evp.epbcs == self.epbcs )

        filename = self.get_dump_name()
        savename = self.get_save_name()

        self.setup_equations(self.equations)

        solve = self.compute_correctors
        solve(evp, 1.0, corrs.state, ts, filename, savename, vec_g=vec_g)

        if self.check:
            self.setup_equations(self.verify_equations)
            self.init_solvers(problem)

            output( 'verifying correctors %s...' % self.name )
            verify = self.verify_correctors
            ok = verify(1.0, corrs.state, filename)
            output( '...done, ok: %s' % ok )

        return Struct( name = self.name,
                       filename = filename )
Example #12
def main():
    from sfepy.base.base import output
    from sfepy.base.conf import ProblemConf, get_standard_keywords
    from sfepy.discrete import Problem

    output.prefix = "therel:"

    required, other = get_standard_keywords()
    conf = ProblemConf.from_file(__file__, required, other)

    problem = Problem.from_conf(conf, init_equations=False)

    # Setup output directory according to options above.
    problem.setup_default_output()

    # First solve the stationary electric conduction problem.
    problem.set_equations({"eq": conf.equations["1"]})
    problem.time_update()
    state_el = problem.solve()
    problem.save_state(problem.get_output_name(suffix="el"), state_el)

    # Then solve the evolutionary heat conduction problem, using state_el.
    problem.set_equations({"eq": conf.equations["2"]})
    phi_var = problem.get_variables()["phi_known"]
    phi_var.set_data(state_el())
    time_solver = problem.get_time_solver()
    time_solver()

    output("results saved in %s" % problem.get_output_name(suffix="*"))
Example #13
def refine_mesh(filename, level):
    """
    Uniformly refine `level`-times a mesh given by `filename`.

    The refined mesh is saved to a file with name constructed from base
    name of `filename` and `level`-times appended `'_r'` suffix.

    Parameters
    ----------
    filename : str
        The mesh file name.
    level : int
        The refinement level.
    """
    import os
    from sfepy.base.base import output
    from sfepy.fem import Mesh, Domain

    if level > 0:
        mesh = Mesh.from_file(filename)
        domain = Domain(mesh.name, mesh)
        for ii in range(level):
            output('refine %d...' % ii)
            domain = domain.refine()
            output('... %d nodes %d elements'
                   % (domain.shape.n_nod, domain.shape.n_el))

        suffix = os.path.splitext(filename)[1]
        filename = domain.name + suffix

        domain.mesh.write(filename, io='auto')

    return filename
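
A hedged usage sketch (hypothetical path): per the naming rule in the
docstring, two refinements write a mesh whose base name ends in '_r_r':

new_filename = refine_mesh('meshes/2d/square.mesh', 2)
# -> e.g. 'square_r_r.mesh', following the '_r' suffix rule above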
Example #14
def post_process(out, pb, state, extend=False):
    """
    Calculate :math:`\nabla t` and compute boundary fluxes.
    """
    dv = pb.evaluate('ev_diffusion_velocity.i.Omega(m.K, t)', mode='el_avg',
                     verbose=False)
    out['dv'] = Struct(name='output_data', mode='cell',
                       data=dv, dofs=None)

    totals = nm.zeros(3)
    for gamma in ['Gamma_N', 'Gamma_N0', 'Gamma_D']:

        flux = pb.evaluate('d_surface_flux.i.%s(m.K, t)' % gamma,
                           verbose=False)
        area = pb.evaluate('d_surface.i.%s(t)' % gamma, verbose=False)

        flux_data = (gamma, flux, area, flux / area)
        totals += flux_data[1:]

        output('%8s flux: % 8.3f length: % 8.3f flux/length: % 8.3f'
               % flux_data)

    totals[2] = totals[0] / totals[1]
    output('   total flux: % 8.3f length: % 8.3f flux/length: % 8.3f'
           % tuple(totals))

    return out
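
The d_surface_flux term above presumably evaluates, for each boundary
region \Gamma,

\Phi_\Gamma = \int_\Gamma \nu \cdot (K \nabla t) \, \mathrm{d}\Gamma,

and each output row lists the flux, the boundary measure (a length in 2D)
and their ratio; the final row accumulates the totals.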
Example #15
    def __call__(self, mtx_a, mtx_b=None, n_eigs=None,
                 eigenvectors=None, status=None, conf=None):
        from pysparse import jdsym, itsolvers, precon

        output("loading...")
        A = self._convert_mat(mtx_a)
        output("...done")
        M = None
        if mtx_b is not None:
            M = self._convert_mat(mtx_b)

        output("solving...")
        Atau = A.copy()
        # For a generalized problem (mtx_b given), shift by tau * M.
        Atau.shift(-conf.tau, M)
        K = precon.jacobi(Atau)
        A = A.to_sss()
        if mtx_b is not None:
            M = M.to_sss()

        method = getattr(itsolvers, conf.method)
        kconv, lmbd, Q, it, it_in = jdsym.jdsym(A, M, K, n_eigs, conf.tau,
                                                conf.eps_a, conf.i_max,
                                                method,
                                                clvl=conf.verbosity,
                                                strategy=conf.strategy)

        output("number of converged eigenvalues:", kconv)
        output("...done")

        if status is not None:
            status['q'] = Q
            status['it'] = it
            status['it_in'] = it_in

        return lmbd, Q
Example #16
    def get_actual_order(self, geometry):
        """
        Return the actual integration order for given geometry.

        Parameters
        ----------
        geometry : str
            The geometry key describing the integration domain,
            see the keys of `sfepy.fem.quadratures.quadrature_tables`.

        Returns
        -------
        order : int
            If `self.order` is in quadrature tables it is this
            value. Otherwise it is the closest higher order. If no
            higher order is available, a warning is printed and the
            highest available order is used.
        """
        table = quadrature_tables[geometry]
        if self.order in table:
            order = self.order

        else:
            orders = sorted(table.keys())
            ii = nm.searchsorted(orders, self.order)
            if ii >= len(orders):
                order = max(orders)
                output(self._msg1 % (self.order, geometry))
                output(self._msg2 % order)

            else:
                order = orders[ii]

        return order
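
A standalone sketch of the fallback rule, using a hypothetical set of
available orders (in the method itself, exact matches are caught by the
`self.order in table` branch first):

import numpy as nm

orders = [1, 2, 3, 5]  # hypothetical table keys, sorted
for requested in (4, 7):
    ii = nm.searchsorted(orders, requested)
    actual = orders[ii] if ii < len(orders) else max(orders)
    print(requested, '->', actual)  # prints: 4 -> 5, then 7 -> 5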
Example #17
    def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None,
                 i_max=None, mtx=None, status=None, **kwargs):

        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d

        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)

        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions() # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)

        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            ksp.setInitialGuessNonzero(True)
        prhs[...] = rhs
        ksp.solve(prhs, psol)
        sol = psol[...].copy()
        output('%s(%s) convergence: %s (%s)'
               % (self.conf.method, self.conf.precond,
                  ksp.reason, self.converged_reasons[ksp.reason]))

        return sol
Example #18
    def __call__(self, state0=None, save_results=True, step_hook=None,
                 post_process_hook=None, nls_status=None):
        """
        Solve the time-dependent problem.
        """
        problem = self.problem
        ts = self.ts

        if state0 is None:
            state0 = get_initial_state(problem)

        ii = 0
        for step, time in ts:
            output(self.format % (time, ts.dt, self.adt.wait,
                                  step + 1, ts.n_step))

            state = self.solve_step(ts, state0, nls_status=nls_status)
            state0 = state.copy(deep=True)

            if step_hook is not None:
                step_hook(problem, ts, state)

            if save_results:
                filename = problem.get_output_name(suffix=ts.suffix % ts.step)
                problem.save_state(filename, state,
                                   post_process_hook=post_process_hook,
                                   file_per_var=None,
                                   ts=ts)
                ii += 1

            problem.advance(ts)

        return state
Example #19
def make_implicit_step(ts, state0, problem, nls_status=None):
    """
    Make a step of an implicit time stepping solver.
    """
    if ts.step == 0:
        state0.apply_ebc()
        state = state0.copy(deep=True)

        if not ts.is_quasistatic:
            ev = problem.get_evaluator()
            try:
                vec_r = ev.eval_residual(state(), is_full=True)
            except ValueError:
                output('initial residual evaluation failed, giving up...')
                raise
            else:
                err = nm.linalg.norm(vec_r)
                output('initial residual: %e' % err)

        if problem.is_linear():
            mtx = prepare_matrix(problem, state)
            problem.try_presolve(mtx)

        # Initialize variables with history.
        state0.init_history()
        if ts.is_quasistatic:
            # Ordinary solve.
            state = problem.solve(state0=state0, nls_status=nls_status)

    else:
        problem.time_update(ts)
        state = problem.solve(state0=state0, nls_status=nls_status)

    return state
Example #20
def vary_omega1_size( problem ):
    r"""Vary the size of \Omega1. Also saves the regions into
    options['output_dir'].

    Input:
      problem: ProblemDefinition instance
    Return:
      a generator object:
      1. creates new (modified) problem
      2. yields the new (modified) problem and output container
      3. use the output container for some logging
      4. yields None (to signal next iteration to Application)
    """
    from sfepy.fem import ProblemDefinition
    from sfepy.solvers.ts import get_print_info
    
    output.prefix = 'vary_omega1_size:'

    diameters = nm.linspace( 0.1, 0.6, 7 ) + 0.001
    ofn_trunk, output_format = problem.ofn_trunk, problem.output_format
    output_dir = problem.output_dir
    join = os.path.join

    conf = problem.conf
    cf = conf.get_raw( 'functions' )
    n_digit, aux, d_format = get_print_info( len( diameters ) + 1 )
    for ii, diameter in enumerate( diameters ):
        output( 'iteration %d: diameter %3.2f' % (ii, diameter) )

        cf['select_circ'] = (lambda coors, domain=None: 
                             select_circ(coors[:,0], coors[:,1], 0, diameter),)
        conf.edit('functions', cf)
        problem = ProblemDefinition.from_conf( conf )

        problem.save_regions( join( output_dir, ('regions_' + d_format) % ii ),
                              ['Omega_1'] )
        region = problem.domain.regions['Omega_1']
        if not region.has_cells_if_can():
            print(region)
            raise ValueError( 'region %s has no cells!' % region.name )

        ofn_trunk = ofn_trunk + '_' + (d_format % ii)
        problem.setup_output(output_filename_trunk=ofn_trunk,
                             output_dir=output_dir,
                             output_format=output_format)

        out = []
        yield problem, out

        out_problem, state = out[-1]

        filename = join( output_dir,
                         ('log_%s.txt' % d_format) % ii )
        fd = open( filename, 'w' )
        log_item = '$r(\Omega_1)$: %f\n' % diameter
        fd.write( log_item )
        fd.write( 'solution:\n' )
        nm.savetxt(fd, state())
        fd.close()

        yield None
Example #21
def postprocess(filename_input, filename_results, options):
    """
    Postprocess probe data files - replot, integrate data.
    """
    from matplotlib import pyplot as plt

    header, results = read_results(filename_input,
                                   only_names=options.only_names)
    output(header)

    fig = plt.figure()
    for name, result in results.iteritems():
        pars, vals = result[:, 0], result[:, 1]

        ii = nm.where(nm.isfinite(vals))[0]
        # Nans only at the edges.
        assert_(nm.diff(ii).sum() == (len(ii)-1))

        val = integrate_along_line(pars[ii], vals[ii], options.radial)

        label = r'%s: $\int\ %s' % (name, name)
        if options.radial:
            label += ' (r)'
        label += '$ = %.5e'% val

        plt.plot(pars, vals, label=label, lw=0.2, marker='+', ms=1)
        plt.ylabel('probed data')
        plt.xlabel('probe coordinate')

        output(label)

    plt.legend()

    fig.savefig(filename_results)
Example #22
def get_homog_mat(ts, coors, mode, term=None, problem=None, **kwargs):
    if problem.update_materials_flag == 2 and mode == 'qp':
        out = hyperelastic_data['homog_mat']
        return {k: nm.array(v) for k, v in six.iteritems(out)}
    elif problem.update_materials_flag == 0 or not mode == 'qp':
        return

    output('get_homog_mat')
    dim = problem.domain.mesh.dim

    update_var = problem.conf.options.mesh_update_variables[0]
    state_u = problem.equations.variables[update_var]
    state_u.field.clear_mappings()
    family_data = problem.family_data(state_u, term.region,
                                      term.integral, term.integration)

    mtx_f = family_data.mtx_f.reshape((coors.shape[0],)
                                      + family_data.mtx_f.shape[-2:])
    out = get_homog_coefs_nonlinear(ts, coors, mode, mtx_f,
                                    term=term, problem=problem,
                                    iteration=problem.iiter, **kwargs)

    out['E'] = 0.5 * (la.dot_sequences(mtx_f, mtx_f, 'ATB') - nm.eye(dim))

    hyperelastic_data['time'] = ts.step
    hyperelastic_data['homog_mat_shape'] = family_data.det_f.shape[:2]
    hyperelastic_data['homog_mat'] = \
        {k: nm.array(v) for k, v in six.iteritems(out)}

    return out
Example #23
def get_std_wave_fun(pb, options):
    stiffness = pb.evaluate('ev_volume_integrate_mat.2.Omega(m.D, u)',
                            mode='el_avg', copy_materials=False, verbose=False)
    young, poisson = mc.youngpoisson_from_stiffness(stiffness,
                                                    plane=options.plane)
    density = pb.evaluate('ev_volume_integrate_mat.2.Omega(m.density, u)',
                          mode='el_avg', copy_materials=False, verbose=False)

    lam, mu = mc.lame_from_youngpoisson(young, poisson,
                                        plane=options.plane)
    alam = nm.average(lam)
    amu = nm.average(mu)
    adensity = nm.average(density)

    cp = nm.sqrt((alam + 2.0 * amu) / adensity)
    cs = nm.sqrt(amu / adensity)
    output('average p-wave speed:', cp)
    output('average shear wave speed:', cs)

    log_names = [r'$\omega_p$', r'$\omega_s$']
    log_plot_kwargs = [{'ls' : '--', 'color' : 'k'},
                       {'ls' : '--', 'color' : 'gray'}]

    if options.mode == 'omega':
        fun = lambda wmag, wdir: (cp * wmag, cs * wmag)

    else:
        fun = lambda wmag, wdir: (wmag / cp, wmag / cs)

    return fun, log_names, log_plot_kwargs
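
The printed speeds are the classical isotropic P- and S-wave velocities
computed from the averaged Lame parameters and density:

c_p = \sqrt{(\lambda + 2\mu) / \rho}, \qquad c_s = \sqrt{\mu / \rho}.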
Example #24
    def _check_region(self, region):
        """
        Check whether the `region` can be used for the
        field. Non-surface fields require the region to span whole
        element groups.

        Returns
        -------
        ok : bool
            True if the region is usable for the field.
        """
        ok = True
        domain = region.domain
        for ig in region.igs:
            if domain.groups[ig].gel.dim != domain.shape.tdim:
                output('cells with a bad topological dimension! (%d != %d)'
                       % (domain.groups[ig].gel.dim, domain.shape.tdim))
                ok = False
                break
            shape = domain.groups[ig].shape
            if region.shape[ig].n_vertex < shape.n_vertex:
                output('region does not span a whole element group!')
                ok = False
                break

        return ok
Example #25
def main():
    parser = ArgumentParser(description=__doc__)
    parser.add_argument('--version', action='version', version='%(prog)s')
    parser.add_argument('--eps', action='store', dest='eps',
                        default=1e-12, help=helps['eps'])
    parser.add_argument('-o', '--filename-out',
                        action='store', dest='filename_out',
                        default=None, help=helps['filename-out'])
    parser.add_argument('filename')
    options = parser.parse_args()

    filename = options.filename

    mesh = Mesh.from_file(filename)
    mesh_out = extract_edges(mesh, eps=float(options.eps))
    mesh_out = merge_lines(mesh_out)

    filename_out = options.filename_out
    if filename_out is None:
        filename_out = edit_filename(filename, prefix='edge_', new_ext='.vtk')

    output('Outline mesh - vertices: %d, edges: %d, output filename: %s'
           % (mesh_out[0].shape[0], mesh_out[2][0].shape[0], filename_out))

    # hack to write '3_2' elements - edges
    io = VTKMeshIO(None)
    aux_mesh = Struct()
    aux_mesh._get_io_data = lambda: mesh_out
    aux_mesh.n_el = mesh_out[2][0].shape[0]
    io.write(filename_out, aux_mesh)
Example #26
def create_petsc_system(mtx, sizes, pdofs, drange, is_overlap=True,
                        comm=None, verbose=False):
    """
    Create and pre-allocate (if `is_overlap` is True) a PETSc matrix and
    related solution and right-hand side vectors.
    """
    if comm is None:
        comm = PETSc.COMM_WORLD

    if is_overlap:
        mtx.data[:] = 1
        mtx_prealloc = create_prealloc_data(mtx, pdofs, drange,
                                            verbose=True)
        pmtx = create_petsc_matrix(sizes, mtx_prealloc, comm=comm)

    else:
        pmtx = create_petsc_matrix(sizes, comm=comm)

    own_range = pmtx.getOwnershipRange()
    output('pmtx ownership:', own_range, verbose=verbose)
    assert_(own_range == drange)

    psol, prhs = pmtx.getVecs()

    own_range = prhs.getOwnershipRange()
    output('prhs ownership:', own_range, verbose=verbose)
    assert_(own_range == drange)

    return pmtx, psol, prhs
Example #27
    def get_dofs_in_region(self, region, merge=False, clean=False, warn=False, igs=None):
        """
        Return indices of DOFs that belong to the given region.
        """
        if igs is None:
            igs = region.igs

        nods = []
        for ig in self.igs:
            if ig not in igs:
                nods.append(None)
                continue

            nn = self.get_dofs_in_region_group(region, ig)
            nods.append(nn)

        if merge:
            nods = [nn for nn in nods if nn is not None]
            nods = nm.unique(nm.hstack(nods))

        elif clean:
            for nn in nods[:]:
                if nn is None:
                    nods.remove(nn)
                    if warn:
                        output(warn + ("%s" % region.name))

        return nods
Example #28
    def __call__(self, state0=None, save_results=True, step_hook=None,
                 post_process_hook=None, nls_status=None):
        """
        Solve the time-dependent problem.
        """
        problem = self.problem
        ts = self.ts

        suffix, is_save = prepare_save_data(ts, problem.conf)

        if state0 is None:
            state0 = get_initial_state(problem)

        ii = 0
        for step, time in ts:
            output(self.format % (time, step + 1, ts.n_step))

            state = self.solve_step(ts, state0, nls_status=nls_status)
            state0 = state.copy(deep=True)

            if step_hook is not None:
                step_hook(problem, ts, state)

            if save_results and (is_save[ii] == ts.step):
                filename = problem.get_output_name(suffix=suffix % ts.step)
                problem.save_state(filename, state,
                                   post_process_hook=post_process_hook,
                                   file_per_var=None,
                                   ts=ts)
                ii += 1

            yield step, time, state

            problem.advance(ts)
Example #29
    def __init__(self, conf, **kwargs):
        LinearSolver.__init__(self, conf, solve=None, **kwargs)
        um = self.sls = None

        aux = try_imports(['import scipy.linsolve as sls',
                           'import scipy.splinalg.dsolve as sls',
                           'import scipy.sparse.linalg.dsolve as sls'],
                          'cannot import scipy sparse direct solvers!')
        self.sls = aux['sls']
        aux = try_imports(['import scipy.linsolve.umfpack as um',
                           'import scipy.splinalg.dsolve.umfpack as um',
                           'import scipy.sparse.linalg.dsolve.umfpack as um',
                           'import scikits.umfpack as um'])
        if 'um' in aux:
            um = aux['um']

        if um is not None:
            is_umfpack = hasattr(um, 'UMFPACK_OK')
        else:
            is_umfpack = False

        method = self.conf.method
        if method == 'superlu':
            self.sls.use_solver(useUmfpack=False)
        elif method == 'umfpack':
            if not is_umfpack and self.conf.warn:
                output('umfpack not available, using superlu!')
        elif method != 'auto':
            raise ValueError('unknown solution method! (%s)' % method)

        if method != 'superlu' and is_umfpack:
            self.sls.use_solver(useUmfpack=True,
                                assumeSortedIndices=True)
Example #30
def main():
    from sfepy.base.base import output
    from sfepy.base.conf import ProblemConf, get_standard_keywords
    from sfepy.fem import ProblemDefinition
    from sfepy.applications import solve_evolutionary

    output.prefix = 'therel:'

    required, other = get_standard_keywords()
    conf = ProblemConf.from_file(__file__, required, other)

    problem = ProblemDefinition.from_conf(conf, init_equations=False)

    # Setup output directory according to options above.
    problem.setup_default_output()

    # First solve the stationary electric conduction problem.
    problem.set_equations({'eq' : conf.equations['1']})
    problem.time_update()
    state_el = problem.solve()
    problem.save_state(problem.get_output_name(suffix = 'el'), state_el)

    # Then solve the evolutionary heat conduction problem, using state_el.
    problem.set_equations({'eq' : conf.equations['2']})
    phi_var = problem.get_variables()['phi_known']
    phi_var.data_from_any(state_el())
    solve_evolutionary(problem)

    output('results saved in %s' % problem.get_output_name(suffix = '*'))
Example #31
    def __call__(self,
                 rhs,
                 x0=None,
                 conf=None,
                 eps_a=None,
                 eps_r=None,
                 i_max=None,
                 mtx=None,
                 status=None,
                 **kwargs):
        import os, sys, shutil, tempfile
        from sfepy import base_dir
        from sfepy.base.ioutils import ensure_path

        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d

        petsc = self.petsc

        # There is no use in caching matrix in the solver - always set as new.
        pmtx, psol, prhs = self.set_matrix(mtx)

        ksp = self.ksp
        ksp.setOperators(pmtx)
        ksp.setFromOptions()  # PETSc.Options() not used yet...
        ksp.setTolerances(atol=eps_a, rtol=eps_r, divtol=eps_d, max_it=i_max)

        output_dir = tempfile.mkdtemp()

        # Set PETSc rhs, solve, get solution from PETSc solution.
        if x0 is not None:
            psol[...] = x0
            sol0_filename = os.path.join(output_dir, 'sol0.dat')

        else:
            sol0_filename = ''

        prhs[...] = rhs

        script_filename = os.path.join(base_dir, 'solvers/petsc_worker.py')

        mtx_filename = os.path.join(output_dir, 'mtx.dat')
        rhs_filename = os.path.join(output_dir, 'rhs.dat')
        sol_filename = os.path.join(output_dir, 'sol.dat')
        status_filename = os.path.join(output_dir, 'status.txt')

        log_filename = os.path.join(self.conf.log_dir, 'sol.log')
        ensure_path(log_filename)

        output('storing system to %s...' % output_dir)
        tt = time.clock()
        view_mtx = petsc.Viewer().createBinary(mtx_filename, mode='w')
        view_rhs = petsc.Viewer().createBinary(rhs_filename, mode='w')
        pmtx.view(view_mtx)
        prhs.view(view_rhs)
        if sol0_filename:
            view_sol0 = petsc.Viewer().createBinary(sol0_filename, mode='w')
            psol.view(view_sol0)
        output('...done in %.2f s' % (time.clock() - tt))

        command = [
            'mpiexec -n %d' % self.conf.n_proc,
            sys.executable,
            script_filename,
            '-mtx %s' % mtx_filename,
            '-rhs %s' % rhs_filename,
            '-sol0 %s' % sol0_filename,
            '-sol %s' % sol_filename,
            '-status %s' % status_filename,
            '-ksp_type %s' % self.conf.method,
            '-pc_type %s' % self.conf.precond,
            '-sub_pc_type %s' % self.conf.sub_precond,
            '-ksp_atol %.3e' % self.conf.eps_a,
            '-ksp_rtol %.3e' % self.conf.eps_r,
            '-ksp_max_it %d' % self.conf.i_max,
            '-ksp_monitor %s' % log_filename,
            '-ksp_view %s' % log_filename,
        ]
        if self.conf.precond_side is not None:
            command.append('-ksp_pc_side %s' % self.conf.precond_side)

        out = os.system(" ".join(command))
        assert_(out == 0)

        output('reading solution...')
        tt = time.clock()
        view_sol = self.petsc.Viewer().createBinary(sol_filename, mode='r')
        psol = petsc.Vec().load(view_sol)

        fd = open(status_filename, 'r')
        line = fd.readline().split()
        reason = int(line[0])
        elapsed = float(line[1])
        fd.close()
        output('...done in %.2f s' % (time.clock() - tt))

        sol = psol[...].copy()
        output('%s(%s, %s/proc) convergence: %s (%s)' %
               (self.conf.method, self.conf.precond, self.conf.sub_precond,
                reason, self.converged_reasons[reason]))
        output('elapsed: %.2f [s]' % elapsed)

        shutil.rmtree(output_dir)

        return sol
Example #32
def gen_cylinder_mesh(dims,
                      shape,
                      centre,
                      axis='x',
                      force_hollow=False,
                      is_open=False,
                      open_angle=0.0,
                      non_uniform=False,
                      name='cylinder',
                      verbose=True):
    """
    Generate a cylindrical mesh along an axis. Its cross-section can be
    ellipsoidal.

    Parameters
    ----------
    dims : array of 5 floats
        Dimensions of the cylinder: inner surface semi-axes a1, b1, outer
        surface semi-axes a2, b2, length.
    shape : array of 3 ints
        Shape (counts of nodes in radial, circumferential and longitudinal
        directions) of the cylinder mesh.
    centre : array of 3 floats
        Centre of the cylinder.
    axis : one of 'x', 'y', 'z'
        The axis of the cylinder.
    force_hollow : boolean
        Force hollow mesh even if inner radii a1 = b1 = 0.
    is_open : boolean
        Generate an open cylinder segment.
    open_angle : float
        Opening angle in radians.
    non_uniform : boolean
        If True, space the mesh nodes in radial direction so that the element
        volumes are (approximately) the same, making thus the elements towards
        the outer surface thinner.
    name : string
        Mesh name.
    verbose : bool
        If True, show progress of the mesh generation.

    Returns
    -------
    mesh : Mesh instance
    """
    dims = nm.asarray(dims, dtype=nm.float64)
    shape = nm.asarray(shape, dtype=nm.int32)
    centre = nm.asarray(centre, dtype=nm.float64)

    a1, b1, a2, b2, length = dims
    nr, nfi, nl = shape
    origin = centre - nm.array([0.5 * length, 0.0, 0.0])

    dfi = 2.0 * (nm.pi - open_angle) / nfi
    if is_open:
        nnfi = nfi + 1
    else:
        nnfi = nfi

    is_hollow = force_hollow or not (max(abs(a1), abs(b1)) < 1e-15)

    if is_hollow:
        mr = 0
    else:
        mr = (nnfi - 1) * nl

    grid = nm.zeros((nr, nnfi, nl), dtype=nm.int32)

    n_nod = nr * nnfi * nl - mr
    coors = nm.zeros((n_nod, 3), dtype=nm.float64)

    angles = nm.linspace(open_angle, open_angle + (nfi) * dfi, nfi + 1)
    xs = nm.linspace(0.0, length, nl)
    if non_uniform:
        ras = nm.zeros((nr, ), dtype=nm.float64)
        rbs = nm.zeros_like(ras)
        advol = (a2**2 - a1**2) / (nr - 1)
        bdvol = (b2**2 - b1**2) / (nr - 1)
        ras[0], rbs[0] = a1, b1
        for ii in range(1, nr):
            ras[ii] = nm.sqrt(advol + ras[ii - 1]**2)
            rbs[ii] = nm.sqrt(bdvol + rbs[ii - 1]**2)
    else:
        ras = nm.linspace(a1, a2, nr)
        rbs = nm.linspace(b1, b2, nr)

    # This is 3D only...
    output('generating %d vertices...' % n_nod, verbose=verbose)
    ii = 0
    for ix in range(nr):
        a, b = ras[ix], rbs[ix]
        for iy, fi in enumerate(angles[:nnfi]):
            for iz, x in enumerate(xs):
                grid[ix, iy, iz] = ii
                coors[ii] = origin + [x, a * nm.cos(fi), b * nm.sin(fi)]
                ii += 1

                if not is_hollow and (ix == 0):
                    if iy > 0:
                        grid[ix, iy, iz] = grid[ix, 0, iz]
                        ii -= 1
    assert_(ii == n_nod)
    output('...done', verbose=verbose)

    n_el = (nr - 1) * nnfi * (nl - 1)
    conn = nm.zeros((n_el, 8), dtype=nm.int32)

    output('generating %d cells...' % n_el, verbose=verbose)
    ii = 0
    for (ix, iy, iz) in cycle([nr - 1, nnfi, nl - 1]):
        if iy < (nnfi - 1):
            conn[ii, :] = [
                grid[ix, iy, iz], grid[ix + 1, iy, iz], grid[ix + 1, iy + 1,
                                                             iz],
                grid[ix, iy + 1, iz], grid[ix, iy, iz + 1], grid[ix + 1, iy,
                                                                 iz + 1],
                grid[ix + 1, iy + 1, iz + 1], grid[ix, iy + 1, iz + 1]
            ]
            ii += 1
        elif not is_open:
            conn[ii, :] = [
                grid[ix, iy, iz], grid[ix + 1, iy, iz], grid[ix + 1, 0, iz],
                grid[ix, 0, iz], grid[ix, iy, iz + 1],
                grid[ix + 1, iy, iz + 1], grid[ix + 1, 0, iz + 1], grid[ix, 0,
                                                                        iz + 1]
            ]
            ii += 1

    mat_id = nm.zeros((n_el, ), dtype=nm.int32)
    desc = '3_8'

    assert_(n_nod == (conn.max() + 1))
    output('...done', verbose=verbose)

    if axis == 'z':
        coors = coors[:, [1, 2, 0]]
    elif axis == 'y':
        coors = coors[:, [2, 0, 1]]

    mesh = Mesh.from_data(name, coors, None, [conn], [mat_id], [desc])
    return mesh
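
A hedged usage sketch: a hollow half-cylinder along the z axis (with
open_angle = pi/2, the covered angle is 2 (pi - open_angle) = pi):

import numpy as nm

mesh = gen_cylinder_mesh(dims=[0.5, 0.5, 1.0, 1.0, 2.0],  # a1, b1, a2, b2, length
                         shape=[5, 12, 11],                # nr, nfi, nl
                         centre=[0.0, 0.0, 0.0],
                         axis='z', is_open=True,
                         open_angle=0.5 * nm.pi,
                         name='half_cylinder')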
Example #33
    def __call__(self,
                 rhs,
                 x0=None,
                 conf=None,
                 eps_a=None,
                 eps_r=None,
                 i_max=None,
                 mtx=None,
                 status=None,
                 comm=None,
                 context=None,
                 **kwargs):
        eps_a = get_default(eps_a, self.conf.eps_a)
        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)
        eps_d = self.conf.eps_d

        if self.mtx_id == id(mtx):
            ksp = self.ksp
            pmtx = self.pmtx

        else:
            pmtx = self.create_petsc_matrix(mtx, comm=comm)

            ksp = self.create_ksp(comm=comm)
            ksp.setOperators(pmtx)
            ksp.setTolerances(atol=eps_a,
                              rtol=eps_r,
                              divtol=eps_d,
                              max_it=i_max)

            setup_precond = self.conf.setup_precond
            if setup_precond is not None:
                ksp.pc.setPythonContext(setup_precond(mtx, context))

            ksp.setFromOptions()
            self.mtx_id = id(mtx)
            self.ksp = ksp
            self.pmtx = pmtx

        if isinstance(rhs, self.petsc.Vec):
            prhs = rhs

        else:
            prhs = pmtx.getVecLeft()
            prhs[...] = rhs

        if x0 is not None:
            if isinstance(x0, self.petsc.Vec):
                psol = x0

            else:
                psol = pmtx.getVecRight()
                psol[...] = x0

            ksp.setInitialGuessNonzero(True)

        else:
            psol = pmtx.getVecRight()

            ksp.setInitialGuessNonzero(False)

        ksp.solve(prhs, psol)
        output('%s(%s, %s/proc) convergence: %s (%s, %d iterations)' %
               (ksp.getType(), ksp.getPC().getType(), self.conf.sub_precond,
                ksp.reason, self.converged_reasons[ksp.reason],
                ksp.getIterationNumber()),
               verbose=conf.verbose)

        if isinstance(rhs, self.petsc.Vec):
            sol = psol

        else:
            sol = psol[...].copy()

        return sol
Example #34
def distribute_field_dofs(field,
                          gfd,
                          use_expand_dofs=False,
                          comm=None,
                          verbose=False):
    """
    Distribute the owned cells and DOFs of the given field to all tasks.

    The DOFs use the PETSc ordering and are in form of a connectivity, so that
    each task can easily identify them with the DOFs of the original global
    ordering or local ordering.
    """
    if comm is None:
        comm = PETSc.COMM_WORLD

    size = comm.size
    mpi = comm.tompi4py()

    if comm.rank == 0:
        dof_maps = gfd.dof_maps
        id_map = gfd.id_map

        # Send subdomain data to other tasks.
        for it in range(1, size):
            # Send owned and overlap cells.
            cells = nm.union1d(gfd.cell_parts[it], gfd.overlap_cells[it])
            mpi.send(len(cells), it)
            mpi.Send([cells, MPI.INTEGER4], it)

            dof_map = dof_maps[it]

            # Send owned petsc_dofs range.
            mpi.send(gfd.coffsets[it], it)
            mpi.send(gfd.coffsets[it] + dof_map[3], it)

            # Send petsc_dofs of global_dofs.
            global_dofs = field.econn[cells]
            if use_expand_dofs:
                global_dofs = expand_dofs(global_dofs, field.n_components)
            petsc_dofs_conn = id_map[global_dofs]
            mpi.send(petsc_dofs_conn.shape[0], it)
            mpi.send(petsc_dofs_conn.shape[1], it)
            mpi.Send([petsc_dofs_conn, MPI.INTEGER4], it)

        cells = nm.union1d(gfd.cell_parts[0], gfd.overlap_cells[0])
        n_cell = len(cells)

        global_dofs = field.econn[cells]
        if use_expand_dofs:
            global_dofs = expand_dofs(global_dofs, field.n_components)

        dof_map = dof_maps[0]
        petsc_dofs_range = (gfd.coffsets[0], gfd.coffsets[0] + dof_map[3])
        petsc_dofs_conn = id_map[global_dofs]

    else:
        # Receive owned cells.
        n_cell = mpi.recv(source=0)
        cells = nm.empty(n_cell, dtype=nm.int32)
        mpi.Recv([cells, MPI.INTEGER4], source=0)

        # Receive owned petsc_dofs range.
        i0 = mpi.recv(source=0)
        i1 = mpi.recv(source=0)
        petsc_dofs_range = (i0, i1)

        # Receive petsc_dofs of global_dofs.
        n_cell = mpi.recv(source=0)
        n_cdof = mpi.recv(source=0)
        petsc_dofs_conn = nm.empty((n_cell, n_cdof), dtype=nm.int32)
        mpi.Recv([petsc_dofs_conn, MPI.INTEGER4], source=0)

        dof_maps = id_map = None

    if verbose:
        output('n_cell:', n_cell)
        output('cells:', cells)
        output('owned petsc DOF range:', petsc_dofs_range,
               petsc_dofs_range[1] - petsc_dofs_range[0])
        aux = nm.unique(petsc_dofs_conn)
        output('local petsc DOFs (owned + shared):', aux, len(aux))

    return cells, petsc_dofs_range, petsc_dofs_conn, dof_maps, id_map
Example #35
def recover_micro_hook_eps(micro_filename,
                           region,
                           eval_var,
                           nodal_values,
                           const_values,
                           eps0,
                           recovery_file_tag='',
                           define_args=None,
                           verbose=False,
                           use_cache=False):
    # Create a micro-problem instance.
    required, other = get_standard_keywords()
    required.remove('equations')
    conf = ProblemConf.from_file(micro_filename,
                                 required,
                                 other,
                                 verbose=False,
                                 define_args=define_args)

    coefs_filename = conf.options.get('coefs_filename', 'coefs')
    output_dir = conf.options.get('output_dir', '.')
    coefs_filename = op.join(output_dir, coefs_filename) + '.h5'

    if not use_cache:
        cc_cache.clear()

    # Coefficients and correctors
    if coefs_filename not in cc_cache:
        coefs = Coefficients.from_file_hdf5(coefs_filename)
        corrs = get_correctors_from_file_hdf5(dump_names=coefs.save_names)
        cc_cache[coefs_filename] = coefs, corrs
    else:
        coefs, corrs = cc_cache[coefs_filename]

    recovery_hook = conf.options.get('recovery_hook', None)

    if recovery_hook is not None:
        recovery_hook = conf.get_function(recovery_hook)
        pb = Problem.from_conf(conf, init_equations=False, init_solvers=False)

        # Get tiling of a given region
        rcoors = region.domain.mesh.coors[region.get_entities(0), :]
        rcmin = nm.min(rcoors, axis=0)
        rcmax = nm.max(rcoors, axis=0)
        nn = nm.round((rcmax - rcmin) / eps0)
        if nm.prod(nn) == 0:
            output('inconsistency in recovery region and microstructure size!')
            return

        cs = []
        for ii, n in enumerate(nn):
            cs.append(nm.arange(n) * eps0 + rcmin[ii])

        x0 = nm.empty((int(nm.prod(nn)), nn.shape[0]), dtype=nm.float64)
        for ii, icoor in enumerate(nm.meshgrid(*cs, indexing='ij')):
            x0[:, ii] = icoor.flatten()

        mesh = pb.domain.mesh

        coors, conn, outs, ndoffset = [], [], [], 0
        # Recover region
        mic_coors = (mesh.coors - mesh.get_bounding_box()[0, :]) * eps0
        evfield = eval_var.field

        output('recovering microstructures...')
        timer = Timer(start=True)
        output_fun = output.output_function
        output_level = output.level

        for ii, c0 in enumerate(x0):
            local_macro = {'eps0': eps0}
            local_coors = mic_coors + c0
            # Inside recovery region?
            v = nm.ones((evfield.region.entities[0].shape[0], 1))
            v[evfield.vertex_remap[region.entities[0]]] = 0
            no = nm.sum(v)
            aux = evfield.evaluate_at(local_coors, v)
            if no > 0 and (nm.sum(aux) / no) > 1e-3:
                continue

            output.level = output_level
            output('micro: %d / %d' % (ii, x0.shape[0]))

            for k, v in six.iteritems(nodal_values):
                local_macro[k] = evfield.evaluate_at(local_coors, v)
            for k, v in six.iteritems(const_values):
                local_macro[k] = v

            output.set_output(quiet=not (verbose))
            outs.append(recovery_hook(pb, corrs, local_macro))
            output.output_function = output_fun
            coors.append(local_coors)
            conn.append(mesh.get_conn(mesh.descs[0]) + ndoffset)
            ndoffset += mesh.n_nod

        output('...done in %.2f s' % timer.stop())

        # Collect output variables
        outvars = {}
        for k, v in six.iteritems(outs[0]):
            if v.var_name in outvars:
                outvars[v.var_name].append(k)
            else:
                outvars[v.var_name] = [k]

        # Split output by variables/regions
        pvs = pb.create_variables(outvars.keys())
        outregs = {k: pvs[k].field.region.get_entities(-1)
                   for k in outvars.keys()}
        nrve = len(coors)
        coors = nm.vstack(coors)
        ngroups = nm.tile(mesh.cmesh.vertex_groups.squeeze(), (nrve, ))
        conn = nm.vstack(conn)
        cgroups = nm.tile(mesh.cmesh.cell_groups.squeeze(), (nrve, ))

        # Get region mesh and data
        for k, cidxs in six.iteritems(outregs):
            gcidxs = nm.hstack([cidxs + mesh.n_el * ii for ii in range(nrve)])
            rconn = conn[gcidxs]
            remap = -nm.ones((coors.shape[0], ), dtype=nm.int32)
            remap[rconn] = 1
            vidxs = nm.where(remap > 0)[0]
            remap[vidxs] = nm.arange(len(vidxs))
            rconn = remap[rconn]
            rcoors = coors[vidxs, :]

            out = {}
            for ifield in outvars[k]:
                data = [outs[ii][ifield].data for ii in range(nrve)]
                out[ifield] = Struct(name='output_data',
                                     mode=outs[0][ifield].mode,
                                     dofs=None,
                                     var_name=k,
                                     data=nm.vstack(data))

            micro_name = pb.get_output_name(extra='recovered%s_%s' %
                                            (recovery_file_tag, k))
            filename = op.join(output_dir, op.basename(micro_name))
            mesh_out = Mesh.from_data('recovery_%s' % k, rcoors,
                                      ngroups[vidxs], [rconn],
                                      [cgroups[gcidxs]], [mesh.descs[0]])
            mesh_out.write(filename, io='auto', out=out)
Example #36
def recover_micro_hook(micro_filename,
                       region,
                       macro,
                       naming_scheme='step_iel',
                       recovery_file_tag='',
                       define_args=None,
                       verbose=False):
    # Create a micro-problem instance.
    required, other = get_standard_keywords()
    required.remove('equations')
    conf = ProblemConf.from_file(micro_filename,
                                 required,
                                 other,
                                 verbose=False,
                                 define_args=define_args)

    coefs_filename = conf.options.get('coefs_filename', 'coefs')
    output_dir = conf.options.get('output_dir', '.')
    coefs_filename = op.join(output_dir, coefs_filename) + '.h5'

    # Coefficients and correctors
    coefs = Coefficients.from_file_hdf5(coefs_filename)
    corrs = get_correctors_from_file_hdf5(dump_names=coefs.save_names)

    recovery_hook = conf.options.get('recovery_hook', None)

    if recovery_hook is not None:
        recovery_hook = conf.get_function(recovery_hook)
        pb = Problem.from_conf(conf, init_equations=False, init_solvers=False)

        format = get_print_info(pb.domain.mesh.n_el, fill='0')[1]

        output('recovering microstructures...')
        timer = Timer(start=True)
        output_fun = output.output_function
        output_level = output.level
        for ii, iel in enumerate(region.cells):
            output.level = output_level
            output('micro: %d (el=%d)' % (ii, iel))

            local_macro = {}
            for k, v in six.iteritems(macro):
                local_macro[k] = v[ii, 0]

            output.set_output(quiet=not (verbose))
            out = recovery_hook(pb, corrs, local_macro)
            output.output_function = output_fun

            if ii == 0:
                new_keys = []
                new_data = {}
                new_idxs = []
                for k in six.iterkeys(local_macro):
                    if k not in macro:
                        new_keys.append(k)
                        new_data[k] = []

            new_idxs.append(ii)
            for jj in new_keys:
                new_data[jj].append(local_macro[jj])

            # save data
            if out is not None:
                suffix = format % iel
                micro_name = pb.get_output_name(extra='recovered_' +
                                                recovery_file_tag + suffix)
                filename = op.join(output_dir, op.basename(micro_name))
                fpv = pb.conf.options.get('file_per_var', False)
                pb.save_state(filename, out=out, file_per_var=fpv)

        output('...done in %.2f s' % timer.stop())

        for jj in new_keys:
            lout = new_data[jj]
            macro[jj] = nm.zeros((nm.max(new_idxs) + 1, 1) + lout[0].shape,
                                 dtype=lout[0].dtype)
            out = macro[jj]
            for kk, ii in enumerate(new_idxs):
                out[ii, 0] = lout[kk]
Example #37
def assemble_mtx_to_petsc(pmtx,
                          mtx,
                          pdofs,
                          drange,
                          is_overlap=True,
                          comm=None,
                          verbose=False):
    """
    Assemble a local CSR matrix to a global PETSc matrix.

    WIP
    ---
    Try Mat.setValuesCSR() - no lgmap - filtering vectorized?
    """
    if comm is None:
        comm = PETSc.COMM_WORLD

    lgmap = PETSc.LGMap().create(pdofs, comm=comm)

    if is_overlap:
        pmtx.setLGMap(lgmap, lgmap)

        data, prows, cols = mtx.data, mtx.indptr, mtx.indices

        output('setting matrix values...', verbose=verbose)
        tt = time.clock()
        for ir, rdof in enumerate(pdofs):
            if (rdof < drange[0]) or (rdof >= drange[1]): continue

            for ic in range(prows[ir], prows[ir + 1]):
                # output(ir, rdof, cols[ic])
                pmtx.setValueLocal(ir, cols[ic], data[ic],
                                   PETSc.InsertMode.INSERT_VALUES)
        output('...done in', time.clock() - tt, verbose=verbose)

        output('assembling matrix...', verbose=verbose)
        tt = time.clock()
        pmtx.assemble()
        output('...done in', time.clock() - tt, verbose=verbose)

    else:
        pmtx.setLGMap(lgmap, lgmap)
        output('setting matrix values...', verbose=verbose)
        tt = time.clock()
        pmtx.setValuesLocalCSR(mtx.indptr, mtx.indices, mtx.data,
                               PETSc.InsertMode.ADD_VALUES)
        output('...done in', time.clock() - tt, verbose=verbose)

        output('assembling matrix...', verbose=verbose)
        tt = time.clock()
        pmtx.assemble()
        output('...done in', time.clock() - tt, verbose=verbose)
Example #38
def assemble_rhs_to_petsc(prhs,
                          rhs,
                          pdofs,
                          drange,
                          is_overlap=True,
                          comm=None,
                          verbose=False):
    """
    Assemble a local right-hand side vector to a global PETSc vector.
    """
    if comm is None:
        comm = PETSc.COMM_WORLD

    lgmap = PETSc.LGMap().create(pdofs, comm=comm)

    if is_overlap:
        prhs.setLGMap(lgmap)
        output('setting rhs values...', verbose=verbose)
        tt = time.clock()
        for ir, rdof in enumerate(pdofs):
            if (rdof < drange[0]) or (rdof >= drange[1]): continue
            prhs.setValueLocal(ir, rhs[ir], PETSc.InsertMode.INSERT_VALUES)
        output('...done in', time.clock() - tt, verbose=verbose)

        output('assembling rhs...', verbose=verbose)
        tt = time.clock()
        prhs.assemble()
        output('...done in', time.clock() - tt, verbose=verbose)

    else:
        prhs.setLGMap(lgmap)
        output('setting rhs values...', verbose=verbose)
        tt = time.clock()
        prhs.setValuesLocal(nm.arange(len(rhs), dtype=nm.int32), rhs,
                            PETSc.InsertMode.ADD_VALUES)
        output('...done in', time.clock() - tt, verbose=verbose)

        output('assembling rhs...', verbose=verbose)
        tt = time.clock()
        prhs.assemble()
        output('...done in', time.clock() - tt, verbose=verbose)
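
A matching sketch for the right-hand side (again with made-up data):

import numpy as nm
from petsc4py import PETSc

comm = PETSc.COMM_WORLD
pdofs = nm.arange(4, dtype=PETSc.IntType)  # hypothetical local-to-global DOF map
drange = (0, 4)                            # hypothetical owned DOF range

prhs = PETSc.Vec().create(comm=comm)
prhs.setSizes((len(pdofs), PETSc.DECIDE))
prhs.setUp()

rhs = nm.ones(len(pdofs), dtype=nm.float64)
assemble_rhs_to_petsc(prhs, rhs, pdofs, drange, is_overlap=True, comm=comm)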
Exemple #39
def setup_composite_dofs(lfds, fields, local_variables, verbose=False):
    """
    Set up composite DOFs built from the field blocks described by `lfds`,
    the local field distribution information.

    Returns the (local, total) sizes of a vector, the local equation range
    for a composite matrix, and the local ordering of composite PETSc DOFs
    corresponding to `local_variables` (which must be in the order of
    `fields`!).
    """
    for ii, variable in enumerate(local_variables.iter_state(ordered=True)):
        output('field %s:' % fields[ii].name, verbose=verbose)
        lfd = lfds[ii]
        output('PETSc DOFs range:', lfd.petsc_dofs_range, verbose=verbose)

        n_cdof = fields[ii].n_nod * fields[ii].n_components
        lfd.sizes, lfd.drange = get_sizes(lfd.petsc_dofs_range, n_cdof, 1)
        output('sizes, drange:', lfd.sizes, lfd.drange, verbose=verbose)

        lfd.petsc_dofs = get_local_ordering(variable.field,
                                            lfd.petsc_dofs_conn,
                                            use_expand_dofs=True)
        output('petsc dofs:', lfd.petsc_dofs, verbose=verbose)

    sizes, drange = get_composite_sizes(lfds)
    output('composite sizes:', sizes, verbose=verbose)
    output('composite drange:', drange, verbose=verbose)

    pdofs = nm.concatenate([ii.petsc_dofs for ii in lfds])
    output('composite pdofs:', pdofs, verbose=verbose)

    return sizes, drange, pdofs
Exemple #40
def distribute_fields_dofs(fields,
                           cell_tasks,
                           is_overlap=True,
                           use_expand_dofs=False,
                           comm=None,
                           verbose=False):
    """
    Distribute the owned cells and DOFs of the given field to all tasks.

    Uses interleaved PETSc numbering in each task, i.e., the PETSc DOFs of
    each task are consecutive and correspond to the first field DOFs block
    followed by the second one, etc.

    Expand DOFs to equations if `use_expand_dofs` is True.
    """
    if comm is None:
        comm = PETSc.COMM_WORLD

    size = comm.size

    if comm.rank == 0:
        gfds = []
        inter_facets = get_inter_facets(fields[0].domain, cell_tasks)
        for field in fields:
            aux = create_task_dof_maps(field,
                                       cell_tasks,
                                       inter_facets,
                                       is_overlap=is_overlap,
                                       use_expand_dofs=use_expand_dofs)
            cell_parts = aux[2]
            n_cell_parts = [len(ii) for ii in cell_parts]
            output('numbers of cells in tasks (without overlaps):',
                   n_cell_parts,
                   verbose=verbose)
            assert_(sum(n_cell_parts) == field.domain.mesh.n_el)
            assert_(nm.all(nm.array(n_cell_parts) > 0))

            gfd = Struct(name='global field %s distribution' % field.name,
                         dof_maps=aux[0],
                         id_map=aux[1],
                         cell_parts=aux[2],
                         overlap_cells=aux[3],
                         coffsets=nm.empty(size, dtype=nm.int32))
            gfds.append(gfd)

        # Initialize composite offsets of DOFs.
        if len(fields) > 1:
            # Renumber id_maps for field inter-leaving.
            offset = 0
            for ir in range(size):
                for ii, gfd in enumerate(gfds):
                    dof_map = gfd.dof_maps[ir]
                    n_owned = dof_map[3]
                    off = dof_map[4]

                    iown = nm.concatenate([dof_map[0]] + dof_map[1])
                    gfd.id_map[iown] += offset - off
                    gfd.coffsets[ir] = offset

                    offset += n_owned

        else:
            gfd = gfds[0]
            gfd.coffsets[:] = [gfd.dof_maps[ir][4] for ir in range(size)]

    else:
        gfds = [None] * len(fields)

    lfds = []
    for ii, field in enumerate(fields):
        aux = distribute_field_dofs(field,
                                    gfds[ii],
                                    use_expand_dofs=use_expand_dofs,
                                    comm=comm,
                                    verbose=verbose)
        lfd = Struct(name='local field %s distribution' % field.name,
                     cells=aux[0],
                     petsc_dofs_range=aux[1],
                     petsc_dofs_conn=aux[2])
        lfds.append(lfd)

    return lfds, gfds
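
The interleaved numbering can be illustrated without any PETSc machinery; a
toy sketch (hypothetical per-task owned-DOF counts) of how the composite
offsets advance task by task and, within a task, field by field:

# Two fields ('u', 'p') distributed over two tasks; the counts are made up.
n_owned = {'u': [3, 2], 'p': [2, 2]}  # field -> owned DOFs per task

offset = 0
coffsets = {'u': [], 'p': []}
for ir in range(2):             # loop over tasks
    for name in ('u', 'p'):     # loop over fields, in order
        coffsets[name].append(offset)
        offset += n_owned[name][ir]

print(coffsets)  # -> {'u': [0, 5], 'p': [3, 7]}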
Exemple #41
def main():
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('-s',
                        '--scale',
                        metavar='scale',
                        action='store',
                        dest='scale',
                        default=None,
                        help=help['scale'])
    parser.add_argument('-c',
                        '--center',
                        metavar='center',
                        action='store',
                        dest='center',
                        default=None,
                        help=help['center'])
    parser.add_argument('-r',
                        '--refine',
                        metavar='level',
                        action='store',
                        type=int,
                        dest='refine',
                        default=0,
                        help=help['refine'])
    parser.add_argument('-f',
                        '--format',
                        metavar='format',
                        action='store',
                        type=str,
                        dest='format',
                        default=None,
                        help=help['format'])
    parser.add_argument('-l',
                        '--list',
                        action='store_true',
                        dest='list',
                        help=help['list'])
    parser.add_argument('filename_in')
    parser.add_argument('filename_out')
    options = parser.parse_args()

    if options.list:
        output('Supported readable mesh formats:')
        output('--------------------------------')
        output_mesh_formats('r')
        output('')
        output('Supported writable mesh formats:')
        output('--------------------------------')
        output_mesh_formats('w')
        sys.exit(0)

    scale = _parse_val_or_vec(options.scale, 'scale', parser)
    center = _parse_val_or_vec(options.center, 'center', parser)

    filename_in = options.filename_in
    filename_out = options.filename_out

    mesh = Mesh.from_file(filename_in)

    if scale is not None:
        if len(scale) == 1:
            tr = nm.eye(mesh.dim, dtype=nm.float64) * scale
        elif len(scale) == mesh.dim:
            tr = nm.diag(scale)
        else:
            raise ValueError('bad scale! (%s)' % scale)
        mesh.transform_coors(tr)

    if center is not None:
        cc = 0.5 * mesh.get_bounding_box().sum(0)
        shift = center - cc
        tr = nm.c_[nm.eye(mesh.dim, dtype=nm.float64), shift[:, None]]
        mesh.transform_coors(tr)

    if options.refine > 0:
        domain = FEDomain(mesh.name, mesh)
        output('initial mesh: %d nodes %d elements' %
               (domain.shape.n_nod, domain.shape.n_el))

        for ii in range(options.refine):
            output('refine %d...' % ii)
            domain = domain.refine()
            output('... %d nodes %d elements' %
                   (domain.shape.n_nod, domain.shape.n_el))

        mesh = domain.mesh

    io = MeshIO.for_format(filename_out, format=options.format, writable=True)

    cell_types = ', '.join(supported_cell_types[io.format])
    output('writing [%s] %s...' % (cell_types, filename_out))
    mesh.write(filename_out, io=io)
    output('...done')
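
Hypothetical invocations of the script above (the script and file names are
made up):

#   python convert_mesh.py --scale 0.01 --refine 1 cylinder.mesh cylinder.vtk
#   python convert_mesh.py --list in.mesh out.mesh  # positionals still required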
Exemple #42
def verify_task_dof_maps(dof_maps,
                         id_map,
                         field,
                         use_expand_dofs=False,
                         verbose=False):
    """
    Verify the counts and values of DOFs in `dof_maps` and `id_map`
    corresponding to `field`.

    Returns the vector with a task number for each DOF.
    """
    tt = time.clock()
    if verbose:
        output('verifying...')
        output('total number of DOFs:', field.n_nod)
        output('number of tasks:', len(dof_maps))

    count = count2 = 0
    dofs = []
    if use_expand_dofs:
        vec = nm.empty(field.n_nod * field.n_components, dtype=nm.float64)

    else:
        vec = nm.empty(field.n_nod, dtype=nm.float64)
    for ir, dof_map in ordered_iteritems(dof_maps):
        n_owned = dof_map[3]
        offset = dof_map[4]
        o2 = offset + n_owned

        if verbose:
            output('task %d: %d owned on offset %d' % (ir, n_owned, offset))

        if not use_expand_dofs:
            aux = dof_map[0]
            assert_(nm.all((id_map[aux] >= offset) & (id_map[aux] < o2)))

        count2 += dof_map[3]

        count += len(dof_map[0])

        dofs.append(dof_map[0])
        vec[dof_map[0]] = ir
        for aux in dof_map[1]:
            if not use_expand_dofs:
                assert_(nm.all((id_map[aux] >= offset) & (id_map[aux] < o2)))

            count += len(aux)
            dofs.append(aux)
            vec[aux] = ir

    dofs = nm.concatenate(dofs)

    n_dof = vec.shape[0]

    assert_(n_dof == len(dofs))
    if not use_expand_dofs:
        assert_(nm.all(nm.sort(dofs) == nm.sort(id_map)))

    dofs = nm.unique(dofs)
    assert_(n_dof == len(dofs))

    assert_(n_dof == dofs[-1] + 1)
    assert_(n_dof == count)
    assert_(n_dof == count2)
    assert_(n_dof == len(id_map))
    assert_(n_dof == len(nm.unique(id_map)))

    output('...done in', time.clock() - tt, verbose=verbose)

    return vec
Exemple #43
    def evaluate_at(self, coors, source_vals, mode='val', strategy='general',
                    close_limit=0.1, get_cells_fun=None, cache=None,
                    ret_cells=False, ret_status=False, ret_ref_coors=False,
                    verbose=False):
        """
        Evaluate source DOF values corresponding to the field in the given
        coordinates using the field interpolation.

        Parameters
        ----------
        coors : array, shape ``(n_coor, dim)``
            The coordinates the source values should be interpolated into.
        source_vals : array, shape ``(n_nod, n_components)``
            The source DOF values corresponding to the field.
        mode : {'val', 'grad'}, optional
            The evaluation mode: the field value (default) or the field value
            gradient.
        strategy : {'general', 'convex'}, optional
            The strategy for finding the elements that contain the
            coordinates. For convex meshes, the 'convex' strategy might be
            faster than the 'general' one.
        close_limit : float, optional
            The maximum limit distance of a point from the closest
            element allowed for extrapolation.
        get_cells_fun : callable, optional
            If given, a function with signature ``get_cells_fun(coors, cmesh,
            **kwargs)`` returning cells and offsets that potentially contain
            points with the coordinates `coors`. Applicable only when
            `strategy` is 'general'. When not given,
            :func:`get_potential_cells()
            <sfepy.discrete.common.global_interp.get_potential_cells>` is used.
        cache : Struct, optional
            To speed up a sequence of evaluations, the field mesh and other
            data can be cached. Optionally, the cache can also contain the
            reference element coordinates as `cache.ref_coors`, `cache.cells`
            and `cache.status`, if the evaluation occurs in the same
            coordinates repeatedly. In that case the mesh related data are
            ignored. See :func:`Field.get_evaluate_cache()
            <sfepy.discrete.fem.fields_base.FEField.get_evaluate_cache()>`.
        ret_ref_coors : bool, optional
            If True, return also the found reference element coordinates.
        ret_status : bool, optional
            If True, return also the enclosing cell status for each point.
        ret_cells : bool, optional
            If True, return also the cell indices the coordinates are in.
        verbose : bool
            If False, reduce verbosity.

        Returns
        -------
        vals : array
            The interpolated values with shape ``(n_coor, n_components)`` or
            gradients with shape ``(n_coor, n_components, dim)`` according to
            the `mode`. If `ret_status` is False, the values where the status
            is greater than one are set to ``numpy.nan``.
        ref_coors : array
            The found reference element coordinates, if `ret_ref_coors` is True.
        cells : array
            The cell indices, if `ret_ref_coors` or `ret_cells` or `ret_status`
            are True.
        status : array
            The status, if `ret_ref_coors` or `ret_status` are True, with the
            following meaning: 0 is success, 1 is extrapolation within
            `close_limit`, 2 is extrapolation outside `close_limit`, 3 is
            failure, 4 is failure due to non-convergence of the Newton
            iteration in tensor product cells. If `close_limit` is 0, then for
            the 'general' strategy the status 5 indicates points outside of the
            field domain that had no potential cells.
        """
        from sfepy.discrete.common.global_interp import get_ref_coors
        from sfepy.discrete.common.extmods.crefcoors import evaluate_in_rc
        from sfepy.base.base import complex_types

        output('evaluating in %d points...' % coors.shape[0], verbose=verbose)

        ref_coors, cells, status = get_ref_coors(self, coors,
                                                 strategy=strategy,
                                                 close_limit=close_limit,
                                                 get_cells_fun=get_cells_fun,
                                                 cache=cache,
                                                 verbose=verbose)

        timer = Timer(start=True)

        # Interpolate to the reference coordinates.
        source_dtype = nm.float64 if source_vals.dtype in complex_types\
            else source_vals.dtype
        if mode == 'val':
            vals = nm.empty((coors.shape[0], source_vals.shape[1], 1),
                            dtype=source_dtype)
            cmode = 0

        elif mode == 'grad':
            vals = nm.empty((coors.shape[0], source_vals.shape[1],
                             coors.shape[1]),
                            dtype=source_dtype)
            cmode = 1

        ctx = self.create_basis_context()

        if source_vals.dtype in complex_types:
            valsi = vals.copy()
            evaluate_in_rc(vals, ref_coors, cells, status,
                           nm.ascontiguousarray(source_vals.real),
                           self.get_econn('volume', self.region), cmode, ctx)
            evaluate_in_rc(valsi, ref_coors, cells, status,
                           nm.ascontiguousarray(source_vals.imag),
                           self.get_econn('volume', self.region), cmode, ctx)
            vals = vals + valsi * 1j
        else:
            evaluate_in_rc(vals, ref_coors, cells, status, source_vals,
                           self.get_econn('volume', self.region), cmode, ctx)

        output('interpolation: %f s' % timer.stop(), verbose=verbose)

        output('...done', verbose=verbose)

        if mode == 'val':
            vals.shape = (coors.shape[0], source_vals.shape[1])

        if not ret_status:
            ii = nm.where(status > 1)[0]
            vals[ii] = nm.nan

        if ret_ref_coors:
            return vals, ref_coors, cells, status

        elif ret_status:
            return vals, cells, status

        elif ret_cells:
            return vals, cells

        else:
            return vals
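
A minimal usage sketch, assuming a recent sfepy with gen_block_mesh()
available; the field and the source values below are made up:

import numpy as nm
from sfepy.mesh.mesh_generators import gen_block_mesh
from sfepy.discrete.fem import FEDomain, Field

mesh = gen_block_mesh([1.0, 1.0], [11, 11], [0.5, 0.5], name='block')
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
field = Field.from_args('t', nm.float64, 1, omega, approx_order=1)

# Nodal source values: x + y sampled at the field nodes.
coors = field.get_coor()
source_vals = (coors[:, 0] + coors[:, 1])[:, None]

pts = nm.array([[0.25, 0.25], [0.7, 0.3]])
vals = field.evaluate_at(pts, source_vals, mode='val')
print(vals)  # should be close to [[0.5], [1.0]]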
Exemple #44
    def solve_eigen_problem(self):
        opts = self.app_options
        pb = self.problem

        pb.set_equations(pb.conf.equations)
        pb.time_update()

        output('assembling lhs...')
        timer = Timer(start=True)
        mtx_a = pb.evaluate(pb.conf.equations['lhs'], mode='weak',
                            auto_init=True, dw_mode='matrix')
        output('...done in %.2f s' % timer.stop())

        if 'rhs' in pb.conf.equations:
            output('assembling rhs...')
            timer.start()
            mtx_b = pb.evaluate(pb.conf.equations['rhs'], mode='weak',
                                dw_mode='matrix')
            output('...done in %.2f s' % timer.stop())

        else:
            mtx_b = None

        _n_eigs = get_default(opts.n_eigs, mtx_a.shape[0])

        output('solving eigenvalue problem for {} values...'.format(_n_eigs))
        eig = Solver.any_from_conf(pb.get_solver_conf(opts.evps))
        if opts.eigs_only:
            eigs = eig(mtx_a, mtx_b, opts.n_eigs, eigenvectors=False)
            svecs = None

        else:
            eigs, svecs = eig(mtx_a, mtx_b, opts.n_eigs, eigenvectors=True)

        output('...done')

        vecs = self.make_full(svecs)
        self.save_results(eigs, vecs)

        return Struct(pb=pb, eigs=eigs, vecs=vecs)
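
The assembled matrices define a generalized eigenvalue problem,
lhs * u = lambda * rhs * u; a plain SciPy sketch of the same computation on
tiny made-up matrices:

import scipy.sparse as sps
from scipy.sparse.linalg import eigsh

mtx_a = sps.diags([2.0, 3.0, 5.0, 7.0]).tocsc()  # stands in for 'lhs'
mtx_b = sps.eye(4, format='csc')                 # stands in for 'rhs'

# Shift-invert around zero returns the eigenvalues closest to zero.
eigs, vecs = eigsh(mtx_a, k=2, M=mtx_b, sigma=0.0, which='LM')
print(eigs)  # -> [2. 3.]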
Exemple #45
    def make_global_operator(self, adi, new_only=False):
        """
        Assemble all LCBC operators into a single matrix.

        Parameters
        ----------
        adi : DofInfo
            The active DOF information.
        new_only : bool
            If True, the operator columns will contain only new DOFs.

        Returns
        -------
        mtx_lc : csr_matrix
            The global LCBC operator in the form of a CSR matrix.
        rhs_lc : array
            The right-hand side for non-homogeneous LCBCs.
        lcdi : DofInfo
            The global active LCBC-constrained DOF information.
        """
        self.finalize()

        if len(self) == 0: return (None,) * 3

        n_dof = self.variables.adi.ptr[-1]
        n_constrained = nm.sum([val for val in six.itervalues(self.n_master)])
        n_dof_free = nm.sum([val for val in six.itervalues(self.n_free)])
        n_dof_new = nm.sum([val for val in six.itervalues(self.n_new)])
        n_dof_active = nm.sum([val for val in six.itervalues(self.n_active)])

        output('dofs: total %d, free %d, constrained %d, new %d'\
               % (n_dof, n_dof_free, n_constrained, n_dof_new))
        output(' -> active %d' % (n_dof_active))

        adi = self.variables.adi
        lcdi, ndi, fdi = self.lcdi, self.ndi, self.fdi

        rows = []
        cols = []
        data = []

        lcbc_mask = nm.ones(n_dof, dtype=bool)
        is_homogeneous = True
        for ii, op in enumerate(self):
            rvar_name = op.var_names[0]
            roff = adi.indx[rvar_name].start

            irs = roff + op.ameq
            lcbc_mask[irs] = False

            if op.get('rhs', None) is not None:
                is_homogeneous = False

        if not is_homogeneous:
            vec_lc = nm.zeros(n_dof, dtype=nm.float64)

        else:
            vec_lc = None

        for ii, op in enumerate(self):
            rvar_name = op.var_names[0]
            roff = adi.indx[rvar_name].start

            irs = roff + op.ameq

            cvar_name = op.var_names[1]
            if cvar_name is None:
                if new_only:
                    coff = ndi.indx[rvar_name].start

                else:
                    coff = lcdi.indx[rvar_name].start + fdi.n_dof[rvar_name]

                iis, icols = self.ics[rvar_name]
                ici = nm.searchsorted(iis, ii)
                ics = nm.arange(coff + icols[ici], coff + icols[ici+1])
                if isinstance(op.mtx, sp.spmatrix):
                    lr, lc, lv = sp.find(op.mtx)
                    rows.append(irs[lr])
                    cols.append(ics[lc])
                    data.append(lv)

                else:
                    _irs, _ics = nm.meshgrid(irs, ics)
                    rows.append(_irs.ravel())
                    cols.append(_ics.ravel())
                    data.append(op.mtx.T.ravel())

            else:
                coff = lcdi.indx[cvar_name].start

                lr, lc, lv = sp.find(op.mtx)
                ii1 = nm.where(lcbc_mask[adi.indx[cvar_name]])[0]
                ii2 = nm.searchsorted(ii1, lc)

                rows.append(roff + lr)
                cols.append(coff + ii2)
                data.append(lv)

            if vec_lc is not None:
                vec_lc[irs] += op.get('rhs', 0)

        rows = nm.concatenate(rows)
        cols = nm.concatenate(cols)
        data = nm.concatenate(data)

        if new_only:
            mtx_lc = sp.coo_matrix((data, (rows, cols)),
                                   shape=(n_dof, n_dof_new))

        else:
            mtx_lc = sp.coo_matrix((data, (rows, cols)),
                                   shape=(n_dof, n_dof_active))

            ir = nm.where(lcbc_mask)[0]
            ic = nm.empty((n_dof_free,), dtype=nm.int32)
            for var_name in adi.var_names:
                ii = nm.arange(fdi.n_dof[var_name], dtype=nm.int32)
                ic[fdi.indx[var_name]] = lcdi.indx[var_name].start + ii

            mtx_lc2 = sp.coo_matrix((nm.ones((ir.shape[0],)), (ir, ic)),
                                    shape=(n_dof, n_dof_active),
                                    dtype=nm.float64)

            mtx_lc = mtx_lc + mtx_lc2

        mtx_lc = mtx_lc.tocsr()

        return mtx_lc, vec_lc, lcdi
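
The returned operator maps reduced (LCBC-constrained) DOF vectors to full
ones; a toy sketch with a stand-in matrix in place of the real mtx_lc:

import numpy as nm
import scipy.sparse as sp

mtx_lc = sp.eye(5, 3, format='csr')  # stand-in for the real operator
u_reduced = nm.arange(3, dtype=nm.float64)

u_full = mtx_lc @ u_reduced          # add rhs_lc for non-homogeneous LCBCs
print(u_full)  # -> [0. 1. 2. 0. 0.]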
Exemple #46
def main():
    parser = ArgumentParser(description=__doc__.rstrip(),
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('output_dir', help=helps['output_dir'])
    parser.add_argument('-d', '--dims', metavar='l,w,t',
                        action='store', dest='dims',
                        default='0.2,0.01,0.001', help=helps['dims'])
    parser.add_argument('-n', '--nx', metavar='start,stop,step',
                        action='store', dest='nx',
                        default='2,103,10', help=helps['nx'])
    parser.add_argument('-t', '--transform', choices=['none', 'bend', 'twist'],
                        action='store', dest='transform',
                        default='none', help=helps['transform'])
    parser.add_argument('--young', metavar='float', type=float,
                        action='store', dest='young',
                        default=210e9, help=helps['young'])
    parser.add_argument('--poisson', metavar='float', type=float,
                        action='store', dest='poisson',
                        default=0.3, help=helps['poisson'])
    parser.add_argument('--force', metavar='float', type=float,
                        action='store', dest='force',
                        default=-1.0, help=helps['force'])
    parser.add_argument('-p', '--plot',
                        action="store_true", dest='plot',
                        default=False, help=helps['plot'])
    parser.add_argument('--u-scaling', metavar='float', type=float,
                        action='store', dest='scaling',
                        default=1.0, help=helps['scaling'])
    parser.add_argument('-s', '--show',
                        action="store_true", dest='show',
                        default=False, help=helps['show'])
    parser.add_argument('--silent',
                        action='store_true', dest='silent',
                        default=False, help=helps['silent'])
    options = parser.parse_args()

    dims = nm.array([float(ii) for ii in options.dims.split(',')],
                    dtype=nm.float64)
    nxs = tuple([int(ii) for ii in options.nx.split(',')])
    young = options.young
    poisson = options.poisson
    force = options.force

    output_dir = options.output_dir

    odir = lambda filename: os.path.join(output_dir, filename)

    filename = odir('output_log.txt')
    ensure_path(filename)
    output.set_output(filename=filename, combined=not options.silent)

    output('output directory:', output_dir)
    output('using values:')
    output("  dimensions:", dims)
    output("  nx range:", nxs)
    output("  Young's modulus:", options.young)
    output("  Poisson's ratio:", options.poisson)
    output('  force:', options.force)
    output('  transform:', options.transform)

    if options.transform == 'none':
        options.transform = None

    u_exact = get_analytical_displacement(dims, young, force,
                                          transform=options.transform)

    if options.transform is None:
        ilog = 2
        labels = ['u_3']

    elif options.transform == 'bend':
        ilog = 0
        labels = ['u_1']

    elif options.transform == 'twist':
        ilog = [0, 1, 2]
        labels = ['u_1', 'u_2', 'u_3']

    label = ', '.join(labels)

    log = []
    for nx in range(*nxs):
        shape = (nx, 2)

        pb, state, u, gamma2 = solve_problem(shape, dims, young, poisson, force,
                                             transform=options.transform)

        dofs = u.get_state_in_region(gamma2)
        output('DOFs along the loaded edge:')
        output('\n%s' % dofs)

        log.append([nx - 1] + nm.array(dofs[0, ilog], ndmin=1).tolist())

    pb.save_state(odir('shell10x_cantilever.vtk'), state)

    log = nm.array(log)

    output('max. %s displacement w.r.t. number of cells:' % label)
    output('\n%s' % log)
    output('analytical value:', u_exact)

    if options.plot:
        import matplotlib.pyplot as plt

        plt.rcParams.update({
            'lines.linewidth' : 3,
            'font.size' : 16,
        })

        fig, ax1 = plt.subplots()
        fig.suptitle('max. $%s$ displacement' % label)

        for ic in range(log.shape[1] - 1):
            ax1.plot(log[:, 0], log[:, ic + 1], label=r'$%s$' % labels[ic])
        ax1.set_xlabel('# of cells')
        ax1.set_ylabel(r'$%s$' % label)
        ax1.grid(which='both')

        lines1, labels1 = ax1.get_legend_handles_labels()

        if u_exact is not None:
            ax1.hlines(u_exact, log[0, 0], log[-1, 0],
                       'r', 'dotted', label=r'$%s^{analytical}$' % label)

            ax2 = ax1.twinx()
            # Assume single log column.
            ax2.semilogy(log[:, 0], nm.abs(log[:, 1] - u_exact), 'g',
                         label=r'$|%s - %s^{analytical}|$' % (label, label))
            ax2.set_ylabel(r'$|%s - %s^{analytical}|$' % (label, label))

            lines2, labels2 = ax2.get_legend_handles_labels()

        else:
            lines2, labels2 = [], []

        ax1.legend(lines1 + lines2, labels1 + labels2, loc='best')

        plt.tight_layout()
        ax1.set_xlim([log[0, 0] - 2, log[-1, 0] + 2])

        suffix = {None: 'straight',
                  'bend' : 'bent', 'twist' : 'twisted'}[options.transform]
        fig.savefig(odir('shell10x_cantilever_convergence_%s.png' % suffix))

        plt.show()

    if options.show:
        from sfepy.postprocess.viewer import Viewer
        from sfepy.postprocess.domain_specific import DomainSpecificPlot

        ds = {'u_disp' :
                  DomainSpecificPlot('plot_displacements',
                                     ['rel_scaling=%f' % options.scaling])}
        view = Viewer(odir('shell10x_cantilever.vtk'))
        view(domain_specific=ds, is_scalar_bar=True, is_wireframe=True,
             opacity={'wireframe' : 0.5})
Exemple #47
def main():
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s')
    parser.add_argument('-n',
                        '--dry-run',
                        action='store_true',
                        dest='dry_run',
                        default=False,
                        help=helps['dry_run'])
    parser.add_argument('doc_dir')
    parser.add_argument('top_dir')
    options = parser.parse_args()

    doc_dir, top_dir = [
        os.path.realpath(ii) for ii in [options.doc_dir, options.top_dir]
    ]

    docs = set(ii for ii in locate_files('*.rst', root_dir=doc_dir))

    sources = set(
        ii
        for ii in locate_files('*.py', root_dir=os.path.join(top_dir, 'sfepy'))
        if os.path.basename(ii) not in omits)
    sources.update(ii for ii in locate_files(
        '*.pyx', root_dir=os.path.join(top_dir, 'sfepy'))
                   if os.path.basename(ii) not in omits_pyx)
    scripts = set(ii for ii in locate_files(
        '*.py', root_dir=os.path.join(top_dir, 'script'))
                  if os.path.basename(ii) not in omits)
    top_scripts = set(
        os.path.realpath(ii)
        for ii in fnmatch.filter(os.listdir(top_dir), '*.py')
        if os.path.basename(ii) not in omits)

    all_sources = set()
    all_sources.update(sources, scripts, top_scripts)

    cwd = os.path.realpath(os.path.curdir) + os.path.sep

    output.prefix = 'smd:'
    output('removing unneeded rst files in "%s"...' % doc_dir)
    for doc in sorted(docs):
        aux = edit_filename(doc, new_ext='.py')
        src1 = os.path.normpath(aux.replace(doc_dir, top_dir))

        aux = edit_filename(doc, new_ext='.pyx')
        src2 = os.path.normpath(aux.replace(doc_dir, top_dir))

        if (src1 not in all_sources) and (src2 not in all_sources):
            output('remove: %s' % doc.replace(cwd, ''))
            if not options.dry_run:
                os.remove(doc)
    output('...done')

    output('creating missing rst files in "%s"...' % doc_dir)
    for src in sorted(all_sources):
        aux = edit_filename(src, new_ext='.rst')
        doc = os.path.normpath(aux.replace(top_dir, doc_dir))

        if doc not in docs:
            output('create: %s' % doc.replace(cwd, ''))
            if not options.dry_run:
                mod_filename = src.replace(top_dir + os.path.sep, '')
                mod_name = mod_filename.replace(os.path.sep, '.')
                mod_name = edit_filename(mod_name, new_ext='')
                if mod_name.startswith('sfepy'):  # Module.
                    title = mod_name + ' module'

                else:  # Script.
                    title = mod_filename + ' script'
                    mod_name = mod_name.split('.')[-1]

                underlines = '=' * len(title)

                contents = doc_template % (title, underlines, mod_name)

                ensure_path(doc)
                with open(doc, 'w') as fd:
                    fd.write(contents)

    output('...done')
Exemple #48
    def _region_leaf(level, op):

        token, details = op['token'], op['orig']

        if token != 'KW_Region':
            parse_def = token + '<' + ' '.join(details) + '>'
            region = Region('leaf', rdef, domain, parse_def=parse_def)

        if token == 'KW_Region':
            details = details[1][2:]
            aux = regions.find(details)
            if not aux:
                raise ValueError('region %s does not exist' % details)
            else:
                if rdef[:4] == 'copy':
                    region = aux.copy()
                else:
                    region = aux

        elif token == 'KW_All':
            region.set_vertices(nm.arange(domain.mesh.n_nod, dtype=nm.int32))
        elif token == 'E_NIR':
            where = details[2]

            if where[0] == '[':
                out = nm.array(eval(where), dtype=nm.int32)
                assert_(nm.amin(out) >= 0)
                assert_(nm.amax(out) < domain.mesh.n_nod)
            else:
                coors = domain.get_mesh_coors()
                x = coors[:, 0]
                y = coors[:, 1]
                if domain.mesh.dim == 3:
                    z = coors[:, 2]
                else:
                    z = None
                coor_dict = {'x': x, 'y': y, 'z': z}

                out = nm.where(eval(where, {}, coor_dict))[0]
            region.set_vertices(out)

        elif token == 'E_NOS':

            if domain.fa:  # 3D.
                fa = domain.fa
            else:
                fa = domain.ed

            flag = fa.mark_surface_facets()
            ii = nm.where(flag > 0)[0]
            aux = nm.unique(fa.facets[ii])
            if aux[0] == -1:  # Triangular faces have -1 as the 4th point.
                aux = aux[1:]

            region.can_cells = False
            region.set_vertices(aux)

        elif token == 'E_NBF':
            where = details[2]

            coors = domain.get_mesh_coors()

            fun = functions[where]
            out = fun(coors, domain=domain)

            region.set_vertices(out)

        elif token == 'E_EBF':
            where = details[2]

            coors = domain.get_mesh_coors()

            fun = functions[where]
            out = fun(coors, domain=domain)

            region.set_cells(out)

        elif token == 'E_EOG':

            group = int(details[3])

            ig = domain.mat_ids_to_i_gs[group]
            group = domain.groups[ig]
            region.set_from_group(ig, group.vertices, group.shape.n_el)

        elif token == 'E_NOG':

            try:
                group = int(details[3])
                group_nodes = nm.where(domain.mesh.ngroups == group)[0]

            except ValueError:
                try:
                    group_nodes = domain.mesh.nodal_bcs[details[3]]

                except KeyError:
                    msg = 'undefined nodal group! (%s)' % details[3]
                    raise ValueError(msg)

            region.set_vertices(group_nodes)

        elif token == 'E_ONIR':
            aux = regions[details[3][2:]]
            region.set_vertices(aux.all_vertices[0:1])

        elif token == 'E_NI':
            region.set_vertices(
                nm.array([int(ii) for ii in details[1:]], dtype=nm.int32))

        elif token == 'E_EI1':
            region.set_cells(
                {0: nm.array([int(ii) for ii in details[1:]], dtype=nm.int32)})

        elif token == 'E_EI2':
            num = len(details[1:]) // 2

            cells = {}
            for ii in range(num):
                ig, iel = int(details[1 + 2 * ii]), int(details[2 + 2 * ii])
                cells.setdefault(ig, []).append(iel)

            region.set_cells(cells)

        else:
            output('token "%s" unkown - check regions!' % token)
            raise NotImplementedError
        return region
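
The 'E_NIR' branch above selects vertices by evaluating a coordinate
expression; a self-contained illustration of that mechanism with made-up
coordinates:

import numpy as nm

coors = nm.array([[0.0, 0.0], [0.4, 0.9], [0.8, 0.1]])
coor_dict = {'x': coors[:, 0], 'y': coors[:, 1], 'z': None}

# The same eval() idiom as in _region_leaf() above.
out = nm.where(eval('x < 0.5', {}, coor_dict))[0]
print(out)  # -> [0 1]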
Exemple #49
def main():
    # Aluminium and epoxy.
    default_pars = '70e9,0.35,2.799e3, 3.8e9,0.27,1.142e3'
    default_solver_conf = ("kind='eig.scipy',method='eigsh',tol=1.0e-5,"
                           "maxiter=1000,which='LM',sigma=0.0")

    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--pars',
                        metavar='young1,poisson1,density1'
                        ',young2,poisson2,density2',
                        action='store',
                        dest='pars',
                        default=default_pars,
                        help=helps['pars'])
    parser.add_argument('--conf',
                        metavar='filename',
                        action='store',
                        dest='conf',
                        default=None,
                        help=helps['conf'])
    parser.add_argument('--mesh-size',
                        type=float,
                        metavar='float',
                        action='store',
                        dest='mesh_size',
                        default=None,
                        help=helps['mesh_size'])
    parser.add_argument('--unit-multipliers',
                        metavar='c_time,c_length,c_mass',
                        action='store',
                        dest='unit_multipliers',
                        default='1.0,1.0,1.0',
                        help=helps['unit_multipliers'])
    parser.add_argument('--plane',
                        action='store',
                        dest='plane',
                        choices=['strain', 'stress'],
                        default='strain',
                        help=helps['plane'])
    parser.add_argument('--wave-dir',
                        metavar='float,float[,float]',
                        action='store',
                        dest='wave_dir',
                        default='1.0,0.0,0.0',
                        help=helps['wave_dir'])
    parser.add_argument('--mode',
                        action='store',
                        dest='mode',
                        choices=['omega', 'kappa'],
                        default='omega',
                        help=helps['mode'])
    parser.add_argument('--range',
                        metavar='start,stop,count',
                        action='store',
                        dest='range',
                        default='0,6.4,33',
                        help=helps['range'])
    parser.add_argument('--order',
                        metavar='int',
                        type=int,
                        action='store',
                        dest='order',
                        default=1,
                        help=helps['order'])
    parser.add_argument('--refine',
                        metavar='int',
                        type=int,
                        action='store',
                        dest='refine',
                        default=0,
                        help=helps['refine'])
    parser.add_argument('-n',
                        '--n-eigs',
                        metavar='int',
                        type=int,
                        action='store',
                        dest='n_eigs',
                        default=6,
                        help=helps['n_eigs'])
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--eigs-only',
                       action='store_true',
                       dest='eigs_only',
                       default=False,
                       help=helps['eigs_only'])
    group.add_argument('--post-process',
                       action='store_true',
                       dest='post_process',
                       default=False,
                       help=helps['post_process'])
    parser.add_argument('--solver-conf',
                        metavar='dict-like',
                        action='store',
                        dest='solver_conf',
                        default=default_solver_conf,
                        help=helps['solver_conf'])
    parser.add_argument('--save-regions',
                        action='store_true',
                        dest='save_regions',
                        default=False,
                        help=helps['save_regions'])
    parser.add_argument('--save-materials',
                        action='store_true',
                        dest='save_materials',
                        default=False,
                        help=helps['save_materials'])
    parser.add_argument('--log-std-waves',
                        action='store_true',
                        dest='log_std_waves',
                        default=False,
                        help=helps['log_std_waves'])
    parser.add_argument('--no-legends',
                        action='store_false',
                        dest='show_legends',
                        default=True,
                        help=helps['no_legends'])
    parser.add_argument('--no-show',
                        action='store_false',
                        dest='show',
                        default=True,
                        help=helps['no_show'])
    parser.add_argument('--silent',
                        action='store_true',
                        dest='silent',
                        default=False,
                        help=helps['silent'])
    parser.add_argument('-c',
                        '--clear',
                        action='store_true',
                        dest='clear',
                        default=False,
                        help=helps['clear'])
    parser.add_argument('-o',
                        '--output-dir',
                        metavar='path',
                        action='store',
                        dest='output_dir',
                        default='output',
                        help=helps['output_dir'])
    parser.add_argument('mesh_filename',
                        default='',
                        help=helps['mesh_filename'])
    options = parser.parse_args()

    output_dir = options.output_dir

    output.set_output(filename=os.path.join(output_dir, 'output_log.txt'),
                      combined=not options.silent)

    if options.conf is not None:
        mod = import_file(options.conf)

    else:
        mod = sys.modules[__name__]

    apply_units = mod.apply_units
    define = mod.define
    set_wave_dir = mod.set_wave_dir
    setup_n_eigs = mod.setup_n_eigs
    build_evp_matrices = mod.build_evp_matrices
    save_materials = mod.save_materials
    get_std_wave_fun = mod.get_std_wave_fun
    get_stepper = mod.get_stepper
    process_evp_results = mod.process_evp_results

    options.pars = [float(ii) for ii in options.pars.split(',')]
    options.unit_multipliers = [
        float(ii) for ii in options.unit_multipliers.split(',')
    ]
    options.wave_dir = [float(ii) for ii in options.wave_dir.split(',')]
    aux = options.range.split(',')
    options.range = [float(aux[0]), float(aux[1]), int(aux[2])]
    options.solver_conf = dict_from_string(options.solver_conf)

    if options.clear:
        remove_files_patterns(output_dir, ['*.h5', '*.vtk', '*.txt'],
                              ignores=['output_log.txt'],
                              verbose=True)

    filename = os.path.join(output_dir, 'options.txt')
    ensure_path(filename)
    save_options(filename, [('options', vars(options))],
                 quote_command_line=True)

    pars = apply_units(options.pars, options.unit_multipliers)
    output('material parameters with applied unit multipliers:')
    output(pars)

    if options.mode == 'omega':
        rng = copy(options.range)
        rng[:2] = apply_unit_multipliers(options.range[:2],
                                         ['wave_number', 'wave_number'],
                                         options.unit_multipliers)
        output('wave number range with applied unit multipliers:', rng)

    else:
        rng = copy(options.range)
        rng[:2] = apply_unit_multipliers(options.range[:2],
                                         ['frequency', 'frequency'],
                                         options.unit_multipliers)
        output('frequency range with applied unit multipliers:', rng)

    pb, wdir, bzone, mtxs = assemble_matrices(define, mod, pars, set_wave_dir,
                                              options)
    dim = pb.domain.shape.dim

    if dim != 2:
        options.plane = 'strain'

    if options.save_regions:
        pb.save_regions_as_groups(os.path.join(output_dir, 'regions'))

    if options.save_materials:
        save_materials(output_dir, pb, options)

    conf = pb.solver_confs['eig']
    eig_solver = Solver.any_from_conf(conf)

    n_eigs, options.n_eigs = setup_n_eigs(options, pb, mtxs)

    get_color = lambda ii: plt.cm.viridis((float(ii) / (options.n_eigs - 1)))
    plot_kwargs = [{
        'color': get_color(ii),
        'ls': '',
        'marker': 'o'
    } for ii in range(options.n_eigs)]

    log_names = []
    log_plot_kwargs = []
    if options.log_std_waves:
        std_wave_fun, log_names, log_plot_kwargs = get_std_wave_fun(
            pb, options)

    else:
        std_wave_fun = None

    stepper = get_stepper(rng, pb, options)

    if options.mode == 'omega':
        eigenshapes_filename = os.path.join(
            output_dir, 'frequency-eigenshapes-%s.vtk' % stepper.suffix)

        log = Log(
            [[r'$\lambda_{%d}$' % ii for ii in range(options.n_eigs)],
             [r'$\omega_{%d}$' % ii
              for ii in range(options.n_eigs)] + log_names],
            plot_kwargs=[plot_kwargs, plot_kwargs + log_plot_kwargs],
            formats=[['{:.5e}'] * options.n_eigs,
                     ['{:.5e}'] * (options.n_eigs + len(log_names))],
            yscales=['linear', 'linear'],
            xlabels=[r'$\kappa$', r'$\kappa$'],
            ylabels=[r'eigenvalues $\lambda_i$', r'frequencies $\omega_i$'],
            show_legends=options.show_legends,
            is_plot=options.show,
            log_filename=os.path.join(output_dir, 'frequencies.txt'),
            aggregate=1000,
            sleep=0.1)

        for iv, wmag in stepper:
            output('step %d: wave vector %s' % (iv, wmag * wdir))

            evp_mtxs = build_evp_matrices(mtxs, wmag, options.mode, pb)

            if options.eigs_only:
                eigs = eig_solver(*evp_mtxs, n_eigs=n_eigs, eigenvectors=False)
                svecs = None

            else:
                eigs, svecs = eig_solver(*evp_mtxs,
                                         n_eigs=n_eigs,
                                         eigenvectors=True)

            omegas, svecs, out = process_evp_results(eigs,
                                                     svecs,
                                                     wmag,
                                                     options.mode,
                                                     wdir,
                                                     bzone,
                                                     pb,
                                                     mtxs,
                                                     std_wave_fun=std_wave_fun)
            log(*out, x=[wmag, wmag])

            save_eigenvectors(eigenshapes_filename % iv, svecs, wmag, wdir, pb)

            gc.collect()

        log(save_figure=os.path.join(output_dir, 'frequencies.png'))
        log(finished=True)

    else:
        eigenshapes_filename = os.path.join(
            output_dir, 'wave-number-eigenshapes-%s.vtk' % stepper.suffix)

        log = Log([[r'$\kappa_{%d}$' % ii
                    for ii in range(options.n_eigs)] + log_names],
                  plot_kwargs=[plot_kwargs + log_plot_kwargs],
                  formats=[['{:.5e}'] * (options.n_eigs + len(log_names))],
                  yscales=['linear'],
                  xlabels=[r'$\omega$'],
                  ylabels=[r'wave numbers $\kappa_i$'],
                  show_legends=options.show_legends,
                  is_plot=options.show,
                  log_filename=os.path.join(output_dir, 'wave-numbers.txt'),
                  aggregate=1000,
                  sleep=0.1)
        for io, omega in stepper:
            output('step %d: frequency %s' % (io, omega))

            evp_mtxs = build_evp_matrices(mtxs, omega, options.mode, pb)

            if options.eigs_only:
                eigs = eig_solver(*evp_mtxs, n_eigs=n_eigs, eigenvectors=False)
                svecs = None

            else:
                eigs, svecs = eig_solver(*evp_mtxs,
                                         n_eigs=n_eigs,
                                         eigenvectors=True)

            kappas, svecs, out = process_evp_results(eigs,
                                                     svecs,
                                                     omega,
                                                     options.mode,
                                                     wdir,
                                                     bzone,
                                                     pb,
                                                     mtxs,
                                                     std_wave_fun=std_wave_fun)
            log(*out, x=[omega])

            save_eigenvectors(eigenshapes_filename % io, svecs, kappas, wdir,
                              pb)

            gc.collect()

        log(save_figure=os.path.join(output_dir, 'wave-numbers.png'))
        log(finished=True)
Exemple #50
def main():
    from sfepy import data_dir

    parser = OptionParser(usage=usage, version='%prog')
    parser.add_option('--young',
                      metavar='float',
                      type=float,
                      action='store',
                      dest='young',
                      default=2000.0,
                      help=helps['young'])
    parser.add_option('--poisson',
                      metavar='float',
                      type=float,
                      action='store',
                      dest='poisson',
                      default=0.4,
                      help=helps['poisson'])
    parser.add_option('--load',
                      metavar='float',
                      type=float,
                      action='store',
                      dest='load',
                      default=-1000.0,
                      help=helps['load'])
    parser.add_option('--order',
                      metavar='int',
                      type=int,
                      action='store',
                      dest='order',
                      default=1,
                      help=helps['order'])
    parser.add_option('-r',
                      '--refine',
                      metavar='int',
                      type=int,
                      action='store',
                      dest='refine',
                      default=0,
                      help=helps['refine'])
    parser.add_option('-s',
                      '--show',
                      action="store_true",
                      dest='show',
                      default=False,
                      help=helps['show'])
    parser.add_option('-p',
                      '--probe',
                      action="store_true",
                      dest='probe',
                      default=False,
                      help=helps['probe'])
    options, args = parser.parse_args()

    assert_((0.0 < options.poisson < 0.5),
            "Poisson's ratio must be in ]0, 0.5[!")
    assert_((0 < options.order),
            'displacement approximation order must be at least 1!')

    output('using values:')
    output("  Young's modulus:", options.young)
    output("  Poisson's ratio:", options.poisson)
    output('  vertical load:', options.load)
    output('uniform mesh refinement level:', options.refine)

    # Build the problem definition.
    mesh = Mesh.from_file(data_dir + '/meshes/2d/its2D.mesh')
    domain = FEDomain('domain', mesh)

    if options.refine > 0:
        for ii in range(options.refine):
            output('refine %d...' % ii)
            domain = domain.refine()
            output('... %d nodes %d elements' %
                   (domain.shape.n_nod, domain.shape.n_el))

    omega = domain.create_region('Omega', 'all')
    left = domain.create_region('Left', 'vertices in x < 0.001', 'facet')
    bottom = domain.create_region('Bottom', 'vertices in y < 0.001', 'facet')
    top = domain.create_region('Top', 'vertex 2', 'vertex')

    field = Field.from_args('fu',
                            nm.float64,
                            'vector',
                            omega,
                            approx_order=options.order)

    u = FieldVariable('u', 'unknown', field)
    v = FieldVariable('v', 'test', field, primary_var_name='u')

    D = stiffness_from_youngpoisson(2, options.young, options.poisson)

    asphalt = Material('Asphalt', D=D)
    load = Material('Load', values={'.val': [0.0, options.load]})

    integral = Integral('i', order=2 * options.order)
    integral0 = Integral('i', order=0)

    t1 = Term.new('dw_lin_elastic(Asphalt.D, v, u)',
                  integral,
                  omega,
                  Asphalt=asphalt,
                  v=v,
                  u=u)
    t2 = Term.new('dw_point_load(Load.val, v)', integral0, top, Load=load, v=v)
    eq = Equation('balance', t1 - t2)
    eqs = Equations([eq])

    xsym = EssentialBC('XSym', bottom, {'u.1': 0.0})
    ysym = EssentialBC('YSym', left, {'u.0': 0.0})

    ls = ScipyDirect({})

    nls_status = IndexedStruct()
    nls = Newton({}, lin_solver=ls, status=nls_status)

    pb = Problem('elasticity', equations=eqs, nls=nls, ls=ls)

    pb.time_update(ebcs=Conditions([xsym, ysym]))

    # Solve the problem.
    state = pb.solve()
    output(nls_status)

    # Postprocess the solution.
    out = state.create_output_dict()
    out = stress_strain(out, pb, state, extend=True)
    pb.save_state('its2D_interactive.vtk', out=out)

    gdata = geometry_data['2_3']
    nc = len(gdata.coors)

    integral_vn = Integral('ivn',
                           coors=gdata.coors,
                           weights=[gdata.volume / nc] * nc)

    nodal_stress(out, pb, state, integrals=Integrals([integral_vn]))

    if options.probe:
        # Probe the solution.
        probes, labels = gen_lines(pb)

        sfield = Field.from_args('sym_tensor',
                                 nm.float64,
                                 3,
                                 omega,
                                 approx_order=options.order - 1)
        stress = FieldVariable('stress',
                               'parameter',
                               sfield,
                               primary_var_name='(set-to-None)')
        strain = FieldVariable('strain',
                               'parameter',
                               sfield,
                               primary_var_name='(set-to-None)')

        cfield = Field.from_args('component',
                                 nm.float64,
                                 1,
                                 omega,
                                 approx_order=options.order - 1)
        component = FieldVariable('component',
                                  'parameter',
                                  cfield,
                                  primary_var_name='(set-to-None)')

        ev = pb.evaluate
        order = 2 * (options.order - 1)
        strain_qp = ev('ev_cauchy_strain.%d.Omega(u)' % order, mode='qp')
        stress_qp = ev('ev_cauchy_stress.%d.Omega(Asphalt.D, u)' % order,
                       mode='qp',
                       copy_materials=False)

        project_by_component(strain, strain_qp, component, order)
        project_by_component(stress, stress_qp, component, order)

        all_results = []
        for ii, probe in enumerate(probes):
            fig, results = probe_results(u, strain, stress, probe, labels[ii])

            fig.savefig('its2D_interactive_probe_%d.png' % ii)
            all_results.append(results)

        for ii, results in enumerate(all_results):
            output('probe %d:' % ii)
            output.level += 2
            for key, res in ordered_iteritems(results):
                output(key + ':')
                val = res[1]
                output('  min: %+.2e, mean: %+.2e, max: %+.2e' %
                       (val.min(), val.mean(), val.max()))
            output.level -= 2

    if options.show:
        # Show the solution. If the approximation order is greater than 1, the
        # extra DOFs are simply thrown away.
        from sfepy.postprocess.viewer import Viewer

        view = Viewer('its2D_interactive.vtk')
        view(vector_mode='warp_norm',
             rel_scaling=1,
             is_scalar_bar=True,
             is_wireframe=True)
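
The plane-strain stiffness used above can be inspected directly; a small
sketch with the default parameters of this example:

from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson

# 2D plane-strain stiffness for E = 2000, nu = 0.4.
D = stiffness_from_youngpoisson(2, 2000.0, 0.4)
print(D.shape)  # -> (3, 3)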
Exemple #51
    def __call__(self,
                 mtx_m,
                 mtx_d,
                 mtx_k,
                 n_eigs=None,
                 eigenvectors=None,
                 status=None,
                 conf=None):
        if conf.debug:
            ssym = status['matrix_info'] = {}
            ssym['|M - M^T|'] = max_diff_csr(mtx_m, mtx_m.T)
            ssym['|D - D^T|'] = max_diff_csr(mtx_d, mtx_d.T)
            ssym['|K - K^T|'] = max_diff_csr(mtx_k, mtx_k.T)
            ssym['|M - M^H|'] = max_diff_csr(mtx_m, mtx_m.H)
            ssym['|D - D^H|'] = max_diff_csr(mtx_d, mtx_d.H)
            ssym['|K - K^H|'] = max_diff_csr(mtx_k, mtx_k.H)

        if conf.method == 'companion':
            mtx_eye = -sps.eye(mtx_m.shape[0], dtype=mtx_m.dtype)

            mtx_a = sps.bmat([[mtx_d, mtx_k], [mtx_eye, None]])
            mtx_b = sps.bmat([[-mtx_m, None], [None, mtx_eye]])

        elif conf.method == 'cholesky':
            from sksparse.cholmod import cholesky

            factor = cholesky(mtx_m)
            perm = factor.P()
            ir = nm.arange(len(perm))
            mtx_p = sps.coo_matrix((nm.ones_like(perm), (ir, perm)))
            mtx_l = mtx_p.T * factor.L()

            if conf.debug:
                ssym['|S - LL^T|'] = max_diff_csr(mtx_m, mtx_l * mtx_l.T)

            mtx_eye = sps.eye(mtx_l.shape[0], dtype=nm.float64)

            mtx_a = sps.bmat([[-mtx_k, None], [None, mtx_eye]])
            mtx_b = sps.bmat([[mtx_d, mtx_l], [mtx_l.T, None]])

        else:
            raise ValueError('unknown method! (%s)' % conf.method)

        if conf.debug:
            ssym['|A - A^T|'] = max_diff_csr(mtx_a, mtx_a.T)
            ssym['|A - A^H|'] = max_diff_csr(mtx_a, mtx_a.H)
            ssym['|B - B^T|'] = max_diff_csr(mtx_b, mtx_b.T)
            ssym['|B - B^H|'] = max_diff_csr(mtx_b, mtx_b.H)

            for key, val in sorted(ssym.items()):
                output('{}: {}'.format(key, val))

        if conf.mode == 'normal':
            out = self.solver(mtx_a,
                              mtx_b,
                              n_eigs=n_eigs,
                              eigenvectors=eigenvectors,
                              status=status)

            if eigenvectors:
                eigs, vecs = out
                out = (eigs, vecs[:mtx_m.shape[0], :])

                if conf.debug:
                    res = mtx_a.dot(vecs) - eigs * mtx_b.dot(vecs)
                    status['lin. error'] = nm.linalg.norm(res, nm.inf)

        else:
            out = self.solver(mtx_b,
                              mtx_a,
                              n_eigs=n_eigs,
                              eigenvectors=eigenvectors,
                              status=status)

            if eigenvectors:
                eigs, vecs = out
                out = (1.0 / eigs, vecs[:mtx_m.shape[0], :])

                if conf.debug:
                    res = (1.0 / eigs) * mtx_b.dot(vecs) - mtx_a.dot(vecs)
                    status['lin. error'] = nm.linalg.norm(res, nm.inf)

            else:
                out = 1.0 / out

        if conf.debug and eigenvectors:
            eigs, vecs = out
            res = ((eigs**2 * (mtx_m.dot(vecs))) + (eigs * (mtx_d.dot(vecs))) +
                   (mtx_k.dot(vecs)))
            status['error'] = nm.linalg.norm(res, nm.inf)

        return out
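
For reference, the 'companion' branch above rewrites the quadratic eigenvalue problem (lam^2 M + lam D + K) x = 0 as a linear pencil A z = lam B z with z = [lam x; x]. A minimal standalone check of that linearization (plain NumPy/SciPy, not the SfePy solver API), using small diagonal matrices as assumptions:

import numpy as nm
import scipy.sparse as sps
from scipy.linalg import eig

n = 4
mtx_m = sps.identity(n, format='csr')
mtx_d = sps.diags(nm.linspace(0.1, 0.4, n)).tocsr()
mtx_k = sps.diags(nm.linspace(1.0, 2.0, n)).tocsr()

mtx_eye = -sps.eye(n)
mtx_a = sps.bmat([[mtx_d, mtx_k], [mtx_eye, None]]).toarray()
mtx_b = sps.bmat([[-mtx_m, None], [None, mtx_eye]]).toarray()

lams, vecs = eig(mtx_a, mtx_b)
xs = vecs[:n, :]  # keep the physical part, as done above

# The quadratic residual should vanish for each eigenpair.
lam, x = lams[0], xs[:, 0]
res = lam**2 * mtx_m.dot(x) + lam * mtx_d.dot(x) + mtx_k.dot(x)
print(nm.abs(res).max())  # ~ machine precision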
Example #52
def main():
    parser = ArgumentParser(description=__doc__)
    parser.add_argument('--version', action='version', version='%(prog)s')
    parser.add_argument('-b',
                        '--basis',
                        metavar='name',
                        action='store',
                        dest='basis',
                        default='lagrange',
                        help=helps['basis'])
    parser.add_argument('-n',
                        '--max-order',
                        metavar='order',
                        type=int,
                        action='store',
                        dest='max_order',
                        default=10,
                        help=helps['max_order'])
    parser.add_argument('-m',
                        '--matrix',
                        action='store',
                        dest='matrix_type',
                        choices=['laplace', 'elasticity', 'smass', 'vmass'],
                        default='laplace',
                        help=helps['matrix_type'])
    parser.add_argument('-g',
                        '--geometry',
                        metavar='name',
                        action='store',
                        dest='geometry',
                        default='2_4',
                        help=helps['geometry'])
    parser.add_argument('-o',
                        '--output-dir',
                        metavar='path',
                        action='store',
                        dest='output_dir',
                        default=None,
                        help=helps['output_dir'])
    parser.add_argument('--no-show',
                        action='store_false',
                        dest='show',
                        default=True,
                        help=helps['no_show'])
    options = parser.parse_args()

    dim, n_ep = int(options.geometry[0]), int(options.geometry[2])
    output('reference element geometry:')
    output('  dimension: %d, vertices: %d' % (dim, n_ep))

    n_c = {
        'laplace': 1,
        'elasticity': dim,
        'smass': 1,
        'vmass': dim
    }[options.matrix_type]

    output('matrix type:', options.matrix_type)
    output('number of variable components:', n_c)

    output('polynomial space:', options.basis)

    output('max. order:', options.max_order)

    mesh = Mesh.from_file(data_dir +
                          '/meshes/elements/%s_1.mesh' % options.geometry)
    domain = FEDomain('domain', mesh)
    omega = domain.create_region('Omega', 'all')

    orders = nm.arange(1, options.max_order + 1, dtype=nm.int32)
    conds = []

    for order in orders:
        output('order:', order, '...')

        field = Field.from_args('fu',
                                nm.float64,
                                n_c,
                                omega,
                                approx_order=order,
                                space='H1',
                                poly_space_base=options.basis)

        quad_order = 2 * field.approx_order
        output('quadrature order:', quad_order)

        integral = Integral('i', order=quad_order)
        qp, _ = integral.get_qp(options.geometry)
        output('number of quadrature points:', qp.shape[0])

        u = FieldVariable('u', 'unknown', field)
        v = FieldVariable('v', 'test', field, primary_var_name='u')

        m = Material('m', D=stiffness_from_lame(dim, 1.0, 1.0))

        if options.matrix_type == 'laplace':
            term = Term.new('dw_laplace(v, u)', integral, omega, v=v, u=u)
            n_zero = 1

        elif options.matrix_type == 'elasticity':
            term = Term.new('dw_lin_elastic(m.D, v, u)',
                            integral,
                            omega,
                            m=m,
                            v=v,
                            u=u)
            n_zero = (dim + 1) * dim // 2

        elif options.matrix_type in ('smass', 'vmass'):
            term = Term.new('dw_dot(v, u)', integral, omega, v=v, u=u)
            n_zero = 0

        term.setup()

        output('assembling...')
        timer = Timer(start=True)
        mtx, iels = term.evaluate(mode='weak', diff_var='u')
        output('...done in %.2f s' % timer.stop())
        mtx = mtx[0, 0]

        try:
            assert_(nm.max(nm.abs(mtx - mtx.T)) < 1e-10)

        except Exception:
            from sfepy.base.base import debug
            debug()

        output('matrix shape:', mtx.shape)

        eigs = eig(mtx, method='eig.sgscipy', eigenvectors=False)
        eigs.sort()

        # Zero 'true' zeros.
        eigs[:n_zero] = 0.0

        ii = nm.where(eigs < 0.0)[0]
        if len(ii):
            output('matrix is not positive semi-definite!')

        ii = nm.where(eigs[n_zero:] < 1e-12)[0]
        if len(ii):
            output('matrix has more than %d zero eigenvalues!' % n_zero)

        output('smallest eigs:\n', eigs[:10])

        ii = nm.where(eigs > 0.0)[0]
        emin, emax = eigs[ii[[0, -1]]]

        output('min:', emin, 'max:', emax)

        cond = emax / emin
        conds.append(cond)

        output('condition number:', cond)

        output('...done')

    if options.output_dir is not None:
        indir = partial(op.join, options.output_dir)

    else:
        indir = None

    plt.rcParams['font.size'] = 12
    plt.rcParams['lines.linewidth'] = 3

    fig, ax = plt.subplots()
    ax.semilogy(orders, conds)
    ax.set_xticks(orders)
    ax.set_xticklabels(orders)
    ax.set_xlabel('polynomial order')
    ax.set_ylabel('condition number')
    ax.set_title(f'{options.basis.capitalize()} basis')
    ax.grid()
    plt.tight_layout()
    if indir is not None:
        fig.savefig(indir(f'{options.basis}-{options.matrix_type}-'
                          f'{options.geometry}-{options.max_order}-xlin.png'),
                    bbox_inches='tight')

    fig, ax = plt.subplots()
    ax.loglog(orders, conds)
    ax.set_xticks(orders)
    ax.set_xticklabels(orders)
    ax.set_xlabel('polynomial order')
    ax.set_ylabel('condition number')
    ax.set_title(f'{options.basis.capitalize()} basis')
    ax.grid()
    plt.tight_layout()
    if indir is not None:
        fig.savefig(indir(f'{options.basis}-{options.matrix_type}-'
                          f'{options.geometry}-{options.max_order}-xlog.png'),
                    bbox_inches='tight')

    if options.show:
        plt.show()
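
The condition number printed above is the ratio of the largest to the smallest nonzero eigenvalue. A standalone illustration of the same computation (plain NumPy) on a small symmetric positive definite matrix:

import numpy as nm

mtx = nm.array([[2.0, -1.0, 0.0],
                [-1.0, 2.0, -1.0],
                [0.0, -1.0, 2.0]])
eigs = nm.linalg.eigvalsh(mtx)  # ascending order
positive = eigs[eigs > 0.0]
cond = positive[-1] / positive[0]
print(cond)  # agrees with nm.linalg.cond(mtx) for SPD matrices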
Example #53
def get_ref_coors(field,
                  coors,
                  strategy='kdtree',
                  close_limit=0.1,
                  cache=None,
                  verbose=True):
    """
    Get reference element coordinates and elements corresponding to given
    physical coordinates.

    Parameters
    ----------
    field : Field instance
        The field defining the approximation.
    coors : array
        The physical coordinates.
    strategy : str, optional
        The strategy for finding the elements that contain the
        coordinates. Only 'kdtree' is supported for the moment.
    close_limit : float, optional
        The maximum limit distance of a point from the closest
        element allowed for extrapolation.
    cache : Struct, optional
        To speed up a sequence of evaluations, the field mesh, the inverse
        connectivity of the field mesh and the KDTree instance can be cached as
        `cache.mesh`, `cache.offsets`, `cache.iconn` and
        `cache.kdtree`. Optionally, the cache can also contain the reference
        element coordinates as `cache.ref_coors`, `cache.cells` and
        `cache.status`, if the evaluation occurs in the same coordinates
        repeatedly. In that case the KDTree related data are ignored.
    verbose : bool
        If False, reduce verbosity.

    Returns
    -------
    ref_coors : array
        The reference coordinates.
    cells : array
        The cell indices corresponding to the reference coordinates.
    status : array
        The status: 0 is success, 1 is extrapolation within `close_limit`, 2 is
        extrapolation outside `close_limit`, 3 is failure.
    """
    ref_coors = get_default_attr(cache, 'ref_coors', None)
    if ref_coors is None:
        mesh = get_default_attr(cache, 'mesh', None)
        if mesh is None:
            mesh = field.create_mesh(extra_nodes=False)

        scoors = mesh.coors
        output('reference field: %d vertices' % scoors.shape[0],
               verbose=verbose)

        iconn = get_default_attr(cache, 'iconn', None)
        if iconn is None:
            offsets, iconn = make_inverse_connectivity(mesh.conns,
                                                       mesh.n_nod,
                                                       ret_offsets=True)

            ii = nm.where(offsets[1:] == offsets[:-1])[0]
            if len(ii):
                raise ValueError('some vertices not in any element! (%s)' % ii)

        else:
            offsets = cache.offsets

        if strategy == 'kdtree':
            kdtree = get_default_attr(cache, 'kdtree', None)
            if kdtree is None:
                from scipy.spatial import cKDTree as KDTree

                # time.clock() was removed in Python 3.8; use perf_counter().
                tt = time.perf_counter()
                kdtree = KDTree(scoors)
                output('kdtree: %f s' % (time.perf_counter() - tt),
                       verbose=verbose)

            tt = time.perf_counter()
            ics = kdtree.query(coors)[1]
            output('kdtree query: %f s' % (time.perf_counter() - tt),
                   verbose=verbose)

            tt = time.perf_counter()
            ics = nm.asarray(ics, dtype=nm.int32)

            vertex_coorss, nodess, mtx_is = [], [], []
            conns = []
            for ig, ap in field.aps.items():
                ps = ap.interp.gel.interp.poly_spaces['v']

                vertex_coorss.append(ps.geometry.coors)
                nodess.append(ps.nodes)
                mtx_is.append(ps.get_mtx_i())

                conns.append(mesh.conns[ig].copy())

            # Get reference element coordinates corresponding to
            # destination coordinates.
            ref_coors = nm.empty_like(coors)
            cells = nm.empty((coors.shape[0], 2), dtype=nm.int32)
            status = nm.empty((coors.shape[0], ), dtype=nm.int32)

            find_ref_coors(ref_coors, cells, status, coors, ics, offsets,
                           iconn, scoors, conns, vertex_coorss, nodess, mtx_is,
                           1, close_limit, 1e-15, 100, 1e-8)
            output('ref. coordinates: %f s' % (time.perf_counter() - tt),
                   verbose=verbose)

        elif strategy == 'crawl':
            raise NotImplementedError

        else:
            raise ValueError('unknown search strategy! (%s)' % strategy)

    else:
        ref_coors = cache.ref_coors
        cells = cache.cells
        status = cache.status

    return ref_coors, cells, status
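
A hedged usage sketch: the caller may pre-fill the cache to speed up repeated evaluations; `field` and `points` are assumed names, and only the keys listed in the docstring are meaningful:

from sfepy.base.base import Struct

# Pre-fill what is reused; missing items are recomputed by the call.
cache = Struct(mesh=field.create_mesh(extra_nodes=False))
ref_coors, cells, status = get_ref_coors(field, points, cache=cache,
                                         close_limit=0.1)
n_extrapolated = (status == 1).sum()  # points found by extrapolation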
Example #54
def assemble_matrices(define, mod, pars, set_wave_dir, options):
    """
    Assemble the blocks of dispersion eigenvalue problem matrices.
    """
    define_problem = functools.partial(define,
                                       filename_mesh=options.mesh_filename,
                                       pars=pars,
                                       approx_order=options.order,
                                       refinement_level=options.refine,
                                       solver_conf=options.solver_conf,
                                       plane=options.plane,
                                       post_process=options.post_process)

    conf = ProblemConf.from_dict(define_problem(), mod)

    pb = Problem.from_conf(conf)
    pb.dispersion_options = options
    pb.set_output_dir(options.output_dir)
    dim = pb.domain.shape.dim

    # Set the normalized wave vector direction to the material(s).
    wdir = nm.asarray(options.wave_dir[:dim], dtype=nm.float64)
    wdir = wdir / nm.linalg.norm(wdir)
    set_wave_dir(pb, wdir)

    bbox = pb.domain.mesh.get_bounding_box()
    size = (bbox[1] - bbox[0]).max()
    scaling0 = apply_unit_multipliers([1.0], ['length'],
                                      options.unit_multipliers)[0]
    scaling = scaling0
    if options.mesh_size is not None:
        scaling *= options.mesh_size / size
    output('scaling factor of periodic cell mesh coordinates:', scaling)
    output('new mesh size with applied unit multipliers:', scaling * size)
    pb.domain.mesh.coors[:] *= scaling
    pb.set_mesh_coors(pb.domain.mesh.coors, update_fields=True)

    bzone = 2.0 * nm.pi / (scaling * size)
    output('1. Brillouin zone size:', bzone * scaling0)
    output('1. Brillouin zone size with applied unit multipliers:', bzone)

    pb.time_update()
    pb.update_materials()

    # Assemble the matrices.
    mtxs = {}
    for key, eq in pb.equations.items():
        mtxs[key] = mtx = pb.mtx_a.copy()
        mtx = eq.evaluate(mode='weak', dw_mode='matrix', asm_obj=mtx)
        mtx.eliminate_zeros()
        output_array_stats(mtx.data, 'nonzeros in %s' % key)

        output('symmetry checks:')
        output('%s - %s^T:' % (key, key), max_diff_csr(mtx, mtx.T))
        output('%s - %s^H:' % (key, key), max_diff_csr(mtx, mtx.H))

    return pb, wdir, bzone, mtxs
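
`max_diff_csr()` used in the symmetry checks above is imported elsewhere in SfePy; a rough stand-in (an assumption, not the library's exact implementation) just takes the largest absolute entry of the sparse difference:

import numpy as nm

def max_diff_csr(mtx1, mtx2):
    # Largest absolute difference between two sparse matrices.
    diff = (mtx1 - mtx2).tocsr()
    return nm.abs(diff.data).max() if diff.nnz else 0.0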
Example #55
    def get_evaluate_cache(self,
                           cache=None,
                           share_geometry=False,
                           verbose=False):
        """
        Get the evaluate cache for :func:`Variable.evaluate_at()
        <sfepy.discrete.variables.Variable.evaluate_at()>`.

        Parameters
        ----------
        cache : Struct instance, optional
            Optionally, use the provided instance to store the cache data.
        share_geometry : bool
            Set to True to indicate that all the evaluations will work on the
            same region. Certain data are then computed only for the first
            probe and cached.
        verbose : bool
            If False, reduce verbosity.

        Returns
        -------
        cache : Struct instance
            The evaluate cache.
        """
        import time

        try:
            from scipy.spatial import cKDTree as KDTree
        except ImportError:
            from scipy.spatial import KDTree

        from sfepy.discrete.fem.geometry_element import create_geometry_elements

        if cache is None:
            cache = Struct(name='evaluate_cache')

        tt = time.perf_counter()
        if (cache.get('cmesh', None) is None) or not share_geometry:
            mesh = self.create_mesh(extra_nodes=False)
            cache.cmesh = cmesh = mesh.cmesh

            gels = create_geometry_elements()

            cmesh.set_local_entities(gels)
            cmesh.setup_entities()

            cache.centroids = cmesh.get_centroids(cmesh.tdim)

            if self.gel.name != '3_8':
                cache.normals0 = cmesh.get_facet_normals()
                cache.normals1 = None

            else:
                cache.normals0 = cmesh.get_facet_normals(0)
                cache.normals1 = cmesh.get_facet_normals(1)

        output('cmesh setup: %f s' % (time.perf_counter() - tt),
               verbose=verbose)

        tt = time.perf_counter()
        if (cache.get('kdtree', None) is None) or not share_geometry:
            cache.kdtree = KDTree(cmesh.coors)

        output('kdtree: %f s' % (time.perf_counter() - tt), verbose=verbose)

        return cache
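
A hedged usage sketch, assuming `u` is a FieldVariable on this field and `points` is an (n, dim) coordinate array (both names are illustrative):

cache = field.get_evaluate_cache(share_geometry=True)
# Reuse the cache across probes; cmesh and kdtree are built only once.
vals = u.evaluate_at(points, cache=cache)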
Example #56
def generate_images(images_dir, examples_dir):
    """
    Generate images from results of running examples found in
    `examples_dir` directory.

    The generated images are stored in `images_dir`.
    """
    from sfepy.applications import solve_pde
    from sfepy.postprocess.viewer import Viewer
    from sfepy.postprocess.utils import mlab

    prefix = output.prefix

    output_dir = tempfile.mkdtemp()
    trunk = os.path.join(output_dir, 'result')
    options = Struct(output_filename_trunk=trunk,
                     output_format='vtk',
                     save_ebc=False,
                     save_ebc_nodes=False,
                     save_regions=False,
                     save_field_meshes=False,
                     save_regions_as_groups=False,
                     solve_not=False)
    default_views = {'': {}}

    ensure_path(images_dir + os.path.sep)

    view = Viewer('', offscreen=False)

    for ex_filename in locate_files('*.py', examples_dir):
        if _omit(ex_filename): continue

        output.level = 0
        output.prefix = prefix
        ebase = ex_filename.replace(examples_dir, '')[1:]
        output('trying "%s"...' % ebase)

        try:
            problem, state = solve_pde(ex_filename, options=options)

        except KeyboardInterrupt:
            raise

        except Exception:
            problem = None
            output('***** failed! *****')

        if problem is not None:
            if ebase in custom:
                views = custom[ebase]

            else:
                views = default_views

            tsolver = problem.get_time_solver()
            if tsolver.ts is None:
                suffix = None

            else:
                suffix = tsolver.ts.suffix % (tsolver.ts.n_step - 1)

            filename = problem.get_output_name(suffix=suffix)

            for suffix, kwargs in six.iteritems(views):
                fig_filename = _get_fig_filename(ebase, images_dir, suffix)

                fname = edit_filename(filename, suffix=suffix)
                output('displaying results from "%s"' % fname)
                disp_name = fig_filename.replace(sfepy.data_dir, '')
                output('to "%s"...' % disp_name.lstrip(os.path.sep))

                view.filename = fname
                view(scene=view.scene,
                     show=False,
                     is_scalar_bar=True,
                     **kwargs)
                view.save_image(fig_filename)
                mlab.clf()

                output('...done')

            remove_files(output_dir)

        output('...done')
Example #57
def generate_rst_files(rst_dir, examples_dir, images_dir):
    """
    Generate Sphinx rst files for examples in `examples_dir` with images
    in `images_dir` and put them into `rst_dir`.

    Returns
    -------
    dir_map : dict
        The directory mapping of examples and corresponding rst files.
    """
    ensure_path(rst_dir + os.path.sep)

    output('generating rst files...')

    dir_map = {}
    for ex_filename in locate_files('*.py', examples_dir):
        if _omit(ex_filename): continue

        ebase = ex_filename.replace(examples_dir, '')[1:]
        base_dir = os.path.dirname(ebase)

        rst_filename = os.path.basename(ex_filename).replace('.py', '.rst')

        dir_map.setdefault(base_dir, []).append((ex_filename, rst_filename))

    for dirname, filenames in six.iteritems(dir_map):
        filenames = sorted(filenames, key=lambda a: a[1])
        dir_map[dirname] = filenames

    # Main index.
    mfd = open(os.path.join(rst_dir, 'index.rst'), 'w')
    mfd.write(_index % ('sfepy', 'Examples', '=' * 8))

    for dirname, filenames in ordered_iteritems(dir_map):
        full_dirname = os.path.join(rst_dir, dirname)
        ensure_path(full_dirname + os.path.sep)

        # Subdirectory index.
        ifd = open(os.path.join(full_dirname, 'index.rst'), 'w')
        ifd.write(_index % (dirname, dirname, '=' * len(dirname)))

        for ex_filename, rst_filename in filenames:
            full_rst_filename = os.path.join(full_dirname, rst_filename)
            output('"%s"' % full_rst_filename.replace(rst_dir, '')[1:])
            rst_filename_ns = rst_filename.replace('.rst', '')
            ebase = ex_filename.replace(examples_dir, '')[1:]

            rst_ex_filename = _make_sphinx_path(ex_filename)
            docstring = get_default(
                import_file(ex_filename).__doc__, 'missing description!')

            ifd.write('    %s\n' % rst_filename_ns)
            fig_include = ''
            fig_base = next(_get_fig_filenames(ebase, images_dir))
            for fig_filename in _get_fig_filenames(ebase, images_dir):
                rst_fig_filename = _make_sphinx_path(fig_filename)

                if os.path.exists(fig_filename):
                    fig_include += _image % rst_fig_filename + '\n'

            # Example rst file.
            fd = open(full_rst_filename, 'w')
            fd.write(_include %
                     (fig_base, ebase, '=' * len(ebase), docstring,
                      fig_include, rst_ex_filename, rst_ex_filename))
            fd.close()

        ifd.close()

        mfd.write('    %s/index\n' % dirname)

    mfd.close()

    output('...done')

    return dir_map
Example #58
def generate_gallery_html(examples_dir, output_filename, gallery_dir, rst_dir,
                          thumbnails_dir, dir_map, link_prefix):
    """
    Generate the gallery html file with thumbnail images and links to
    examples.

    Parameters
    ----------
    examples_dir : str
        The top level directory of the examples.
    output_filename : str
        The output html file name.
    gallery_dir : str
        The top level directory of gallery files.
    rst_dir : str
        The full path to rst files of examples within `gallery_dir`.
    thumbnails_dir : str
        The full path to thumbnail images within `gallery_dir`.
    dir_map : dict
        The directory mapping returned by `generate_rst_files()`.
    link_prefix : str
        The prefix to prepend to links to individual pages of examples.
    """
    output('generating %s...' % output_filename)

    with open(_gallery_template_file, 'r') as fd:
        gallery_template = fd.read()

    div_lines = []
    sidebar = []
    for dirname, filenames in ordered_iteritems(dir_map):
        full_dirname = os.path.join(rst_dir, dirname)
        dirnamenew = dirname.replace("_", " ")
        sidebarline = _side_links % (dirname, dirnamenew.title())
        lines = []
        for ex_filename, rst_filename in filenames:
            full_rst_filename = os.path.join(full_dirname, rst_filename)

            ebase = full_rst_filename.replace(rst_dir, '')[1:]
            ebase = edit_filename(ebase, new_ext='.py')

            link_base = full_rst_filename.replace(gallery_dir, '')[1:]
            link = os.path.join(link_prefix,
                                os.path.splitext(link_base)[0] + '.html')

            next(_get_fig_filenames(ebase, thumbnails_dir))
            for thumbnail_filename in _get_fig_filenames(
                    ebase, thumbnails_dir):
                if not os.path.isfile(thumbnail_filename):
                    # Skip examples with no image (= failed examples).
                    continue

                thumbnail_name = thumbnail_filename.replace(gallery_dir,
                                                            '')[1:]
                path_to_file = os.path.join(examples_dir, ebase)
                docstring = get_default(
                    import_file(path_to_file).__doc__, 'missing description!')
                docstring = docstring.replace('e.g.', 'eg:')
                docstring = docstring.split('.')
                line = _link_template % (link, os.path.splitext(ebase)[0],
                                         thumbnail_name, link,
                                         docstring[0] + '.')
                lines.append(line)

        if (len(lines) != 0):
            div_lines.append(
                _div_line %
                (dirname, dirnamenew.title(), dirname, '\n'.join(lines)))
            sidebar.append(sidebarline)

    fd = open(output_filename, 'w')
    fd.write(gallery_template % ((link_prefix, ) * 7 +
                                 ('\n'.join(sidebar), '\n'.join(div_lines))))
    fd.close()

    output('...done')
Example #59
def adapt_time_step(ts, status, adt, problem=None):
    """
    Adapt the time step of `ts` according to the exit status of the
    nonlinear solver.

    The time step dt is reduced if the nonlinear solver did not converge. If
    it converged in fewer than a specified number of iterations for several
    consecutive time steps, the time step is increased. This is governed by
    the following parameters:

    - red_factor : time step reduction factor
    - red_max : maximum time step reduction factor
    - inc_factor : time step increase factor
    - inc_on_iter : increase time step if the nonlinear solver converged in
      less than this amount of iterations...
    - inc_wait : ...for this number of consecutive time steps

    Parameters
    ----------
    ts : VariableTimeStepper instance
        The time stepper.
    status : IndexedStruct instance
        The nonlinear solver exit status.
    adt : Struct instance
        The adaptivity parameters of the time solver.
    problem : Problem instance, optional
        This can be used in user-defined adaptivity functions. Not used here.

    Returns
    -------
    is_break : bool
        If True, the adaptivity loop should stop.
    """
    is_break = False

    if status.condition == 0:
        if status.n_iter <= adt.inc_on_iter:
            adt.wait += 1

            if adt.wait > adt.inc_wait:
                if adt.red < 1.0:
                    adt.red = adt.red * adt.inc_factor
                    ts.set_time_step(adt.dt0 * adt.red)
                    output('+++++ new time step: %e +++++' % ts.dt)
                adt.wait = 0

        else:
            adt.wait = 0

        is_break = True

    else:
        adt.red = adt.red * adt.red_factor
        if adt.red < adt.red_max:
            is_break = True

        else:
            ts.set_time_step(adt.dt0 * adt.red, update_time=True)
            output('----- new time step: %e -----' % ts.dt)
            adt.wait = 0

    return is_break
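
The `adt` argument mirrors the parameters in the docstring plus the running state (`red`, `wait`, `dt0`) used above. A hypothetical configuration sketch (the numeric values are assumptions):

from sfepy.base.base import Struct

adt = Struct(red_factor=0.2,   # multiply the reduction by this on failure
             red_max=1e-3,     # give up when the reduction drops below this
             inc_factor=1.25,  # grow the step by this factor on increase
             inc_on_iter=4,    # increase if converged within 4 iterations...
             inc_wait=5,       # ...for 5 consecutive time steps
             red=1.0, wait=0, dt0=None)  # running state; dt0 is set from ts.dt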
Example #60
    def __call__(self,
                 rhs,
                 x0=None,
                 conf=None,
                 eps_a=None,
                 eps_r=None,
                 i_max=None,
                 mtx=None,
                 status=None,
                 context=None,
                 **kwargs):
        solver_kwargs = self.build_solver_kwargs(conf)

        eps_r = get_default(eps_r, self.conf.eps_r)
        i_max = get_default(i_max, self.conf.i_max)

        setup_precond = get_default(kwargs.get('setup_precond', None),
                                    self.conf.setup_precond)
        callback = get_default(kwargs.get('callback', lambda sol: None),
                               self.conf.callback)

        self.iter = 0

        def iter_callback(sol):
            self.iter += 1
            msg = '%s: iteration %d' % (self.conf.name, self.iter)
            if conf.verbose > 2:
                if conf.method not in self._callbacks_res:
                    res = mtx * sol - rhs

                else:
                    res = sol

                rnorm = nm.linalg.norm(res)
                msg += ': |Ax-b| = %e' % rnorm
            output(msg, verbose=conf.verbose > 1)

            # Call an optional user-defined callback.
            callback(sol)

        precond = setup_precond(mtx, context)

        if conf.method == 'qmr':
            prec_args = {'M1': precond, 'M2': precond}

        else:
            prec_args = {'M': precond}

        solver_kwargs.update(prec_args)

        try:
            sol, info = self.solver(mtx,
                                    rhs,
                                    x0=x0,
                                    atol=eps_a,
                                    rtol=eps_r,
                                    maxiter=i_max,
                                    callback=iter_callback,
                                    **solver_kwargs)
        except TypeError:
            sol, info = self.solver(mtx,
                                    rhs,
                                    x0=x0,
                                    tol=eps_r,
                                    maxiter=i_max,
                                    callback=iter_callback,
                                    **solver_kwargs)

        output('%s: %s convergence: %s (%s, %d iterations)' %
               (self.conf.name, self.conf.method, info,
                self.converged_reasons[nm.sign(info)], self.iter),
               verbose=conf.verbose)

        return sol, self.iter
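
The `setup_precond` hook above returns the preconditioner that is passed to SciPy as `M` (or `M1`/`M2` for `qmr`). A minimal sketch of such a hook using an incomplete-LU factorization (an illustrative choice, not the configured default):

from scipy.sparse.linalg import LinearOperator, spilu

def setup_precond(mtx, context=None):
    # Wrap the ILU solve as a LinearOperator usable as M in SciPy solvers.
    ilu = spilu(mtx.tocsc())
    return LinearOperator(mtx.shape, matvec=ilu.solve)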