Example #1
def steady_state(pb):
    print("""
    ##################### steady state ##################
    """)
    pb.flag = 'linear'

    outbase = tools.get_output_base(pb)

    conf = pb.conf.copy()
    conf.equations, _ = define_steady_state_equations()
    ebcs_steady, _, _, _ = define_bc(conf.cg1,
                                     conf.cg2,
                                     is_steady=True,
                                     val_in=conf.val_w1,
                                     val_out=conf.val_w2)

    conf.edit('ebcs', ebcs_steady)
    lpb = Problem.from_conf(conf)
    lpb.time_update()
    lpb.init_solvers(nls_conf=lpb.solver_confs['newton'])
    lpb.set_linear(False)
    state = lpb.solve().get_parts()
    state_data = {ii: state[ii] for ii in lpb.conf.state_vars}

    eval_data = tools.eval_macro(lpb, state_data, is_steady=True)
    tools.write_macro_fields(outbase + '_steady.vtk', eval_data,
                             lpb.domain.mesh)
    return state_data
Example #2
    def from_conf( conf, options ):
        from sfepy.discrete import Problem

        problem = Problem.from_conf(conf, init_equations=False)
        test = Test( problem = problem,
                     conf = conf, options = options )
        return test
Example #3
def main():
    from sfepy.base.base import output
    from sfepy.base.conf import ProblemConf, get_standard_keywords
    from sfepy.discrete import Problem

    output.prefix = "therel:"

    required, other = get_standard_keywords()
    conf = ProblemConf.from_file(__file__, required, other)

    problem = Problem.from_conf(conf, init_equations=False)

    # Setup output directory according to options above.
    problem.setup_default_output()

    # First solve the stationary electric conduction problem.
    problem.set_equations({"eq": conf.equations["1"]})
    problem.time_update()
    state_el = problem.solve()
    problem.save_state(problem.get_output_name(suffix="el"), state_el)

    # Then solve the evolutionary heat conduction problem, using state_el.
    problem.set_equations({"eq": conf.equations["2"]})
    phi_var = problem.get_variables()["phi_known"]
    phi_var.set_data(state_el())
    time_solver = problem.get_time_solver()
    time_solver()

    output("results saved in %s" % problem.get_output_name(suffix="*"))
Example #4
    def from_conf(conf, options):
        from sfepy.discrete import Problem

        problem = Problem.from_conf(conf, init_equations=False)
        test = Test(problem=problem,
                    conf=conf, options=options)
        return test
Example #5
def main():
    from sfepy.base.base import output
    from sfepy.base.conf import ProblemConf, get_standard_keywords
    from sfepy.discrete import Problem

    output.prefix = 'therel:'

    required, other = get_standard_keywords()
    conf = ProblemConf.from_file(__file__, required, other)

    problem = Problem.from_conf(conf, init_equations=False)

    # Setup output directory according to options above.
    problem.setup_default_output()

    # First solve the stationary electric conduction problem.
    problem.set_equations({'eq' : conf.equations['1']})
    problem.time_update()
    state_el = problem.solve()
    problem.save_state(problem.get_output_name(suffix = 'el'), state_el)

    # Then solve the evolutionary heat conduction problem, using state_el.
    problem.set_equations({'eq' : conf.equations['2']})
    phi_var = problem.get_variables()['phi_known']
    phi_var.set_data(state_el())
    time_solver = problem.get_time_solver()
    time_solver.init_time()
    for _ in time_solver():
        pass

    output('results saved in %s' % problem.get_output_name(suffix = '*'))
Example #6
def main():
    from sfepy.base.base import output
    from sfepy.base.conf import ProblemConf, get_standard_keywords
    from sfepy.discrete import Problem

    output.prefix = 'therel:'

    required, other = get_standard_keywords()
    conf = ProblemConf.from_file(__file__, required, other)

    problem = Problem.from_conf(conf, init_equations=False)

    # Setup output directory according to options above.
    problem.setup_default_output()

    # First solve the stationary electric conduction problem.
    problem.set_equations({'eq' : conf.equations['1']})
    state_el = problem.solve()
    problem.save_state(problem.get_output_name(suffix = 'el'), state_el)

    # Then solve the evolutionary heat conduction problem, using state_el.
    problem.set_equations({'eq' : conf.equations['2']})
    phi_var = problem.get_variables()['phi_known']
    phi_var.set_data(state_el())
    problem.solve()

    output('results saved in %s' % problem.get_output_name(suffix = '*'))
def vary_omega1_size(problem):
    r"""Vary the size of \Omega1. Also saves the regions into
    options['output_dir'].

    Input:
      problem: Problem instance
    Return:
      a generator object:
      1. creates a new (modified) problem
      2. yields the new (modified) problem and an output container
      3. the caller uses the output container for some logging
      4. yields None (to signal the next iteration to Application)
    """
    from sfepy.discrete import Problem
    from sfepy.solvers.ts import get_print_info

    output.prefix = 'vary_omega1_size:'

    diameters = nm.linspace(0.1, 0.6, 7) + 0.001
    ofn_trunk, output_format = problem.ofn_trunk, problem.output_format
    output_dir = problem.output_dir
    join = os.path.join

    conf = problem.conf
    cf = conf.get_raw('functions')
    n_digit, aux, d_format = get_print_info(len(diameters) + 1)
    for ii, diameter in enumerate(diameters):
        output('iteration %d: diameter %3.2f' % (ii, diameter))

        cf['select_circ'] = (lambda coors, domain=None:
                             select_circ(coors[:, 0], coors[:, 1], 0, diameter),)
        conf.edit('functions', cf)
        problem = Problem.from_conf(conf)

        problem.save_regions(join(output_dir, ('regions_' + d_format) % ii),
                             ['Omega_1'])
        region = problem.domain.regions['Omega_1']
        if not region.has_cells():
            raise ValueError('region %s has no cells!' % region.name)

        ofn_trunk = ofn_trunk + '_' + (d_format % ii)
        problem.setup_output(output_filename_trunk=ofn_trunk,
                             output_dir=output_dir,
                             output_format=output_format)

        out = []
        yield problem, out

        out_problem, state = out[-1]

        filename = join(output_dir, ('log_%s.txt' % d_format) % ii)
        fd = open(filename, 'w')
        log_item = '$r(\\Omega_1)$: %f\n' % diameter
        fd.write(log_item)
        fd.write('solution:\n')
        nm.savetxt(fd, state())
        fd.close()

        yield None
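The docstring above describes the parametric-hook protocol: the generator yields a (problem, out) pair, the caller solves the problem and appends its results to out, and a trailing yield None hands control back for the next iteration. A minimal sketch of a driver consuming such a hook, with solve_problem() as a hypothetical stand-in for what sfepy's Application does internally:

def drive_parametric_hook(problem, parametric_hook):
    gen = parametric_hook(problem)
    for new_problem, out in gen:
        state = solve_problem(new_problem)  # hypothetical solve step
        out.append((new_problem, state))    # results used by the hook for logging
        next(gen)                           # consume the trailing `yield None`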
def main():
    from sfepy.base.base import output
    from sfepy.base.conf import ProblemConf, get_standard_keywords
    from sfepy.discrete import Problem
    from sfepy.base.plotutils import plt

    parser = OptionParser(usage=usage, version='%prog')
    parser.add_option('-n', '--no-plot',
                      action="store_true", dest='no_plot',
                      default=False, help=helps['no_plot'])
    options, args = parser.parse_args()

    required, other = get_standard_keywords()
    # Use this file as the input file.
    conf = ProblemConf.from_file(__file__, required, other)

    # Create problem instance, but do not set equations.
    problem = Problem.from_conf(conf, init_equations=False)

    # Solve the problem. Output is ignored, results stored by using the
    # step_hook.
    u_t = solve_branch(problem, linear_tension)
    u_c = solve_branch(problem, linear_compression)

    # Get pressure load by calling linear_*() for each time step.
    ts = problem.get_timestepper()
    load_t = nm.array([linear_tension(ts, nm.array([[0.0]]), 'qp')['val']
                       for aux in ts.iter_from(0)],
                      dtype=nm.float64).squeeze()
    load_c = nm.array([linear_compression(ts, nm.array([[0.0]]), 'qp')['val']
                       for aux in ts.iter_from(0)],
                      dtype=nm.float64).squeeze()

    # Join the branches.
    displacements = {}
    for key in u_t.keys():
        displacements[key] = nm.r_[u_c[key][::-1], u_t[key]]
    load = nm.r_[load_c[::-1], load_t]


    if plt is None:
        output('matplotlib cannot be imported, printing raw data!')
        output(displacements)
        output(load)
    else:
        legend = []
        for key, val in displacements.items():
            plt.plot(load, val)
            legend.append(key)

        plt.legend(legend, loc = 2)
        plt.xlabel('tension [kPa]')
        plt.ylabel('displacement [mm]')
        plt.grid(True)

        plt.gcf().savefig('pressure_displacement.png')

        if not options.no_plot:
            plt.show()
Example #9
def vary_omega1_size( problem ):
    r"""Vary the size of \Omega1. Also saves the regions into
    options['output_dir'].

    Input:
      problem: Problem instance
    Return:
      a generator object:
      1. creates a new (modified) problem
      2. yields the new (modified) problem and an output container
      3. the caller uses the output container for some logging
      4. yields None (to signal the next iteration to Application)
    """
    from sfepy.discrete import Problem
    from sfepy.solvers.ts import get_print_info
    
    output.prefix = 'vary_omega1_size:'

    diameters = nm.linspace( 0.1, 0.6, 7 ) + 0.001
    ofn_trunk, output_format = problem.ofn_trunk, problem.output_format
    output_dir = problem.output_dir
    join = os.path.join

    conf = problem.conf
    cf = conf.get_raw( 'functions' )
    n_digit, aux, d_format = get_print_info( len( diameters ) + 1 )
    for ii, diameter in enumerate( diameters ):
        output( 'iteration %d: diameter %3.2f' % (ii, diameter) )

        cf['select_circ'] = (lambda coors, domain=None: 
                             select_circ(coors[:,0], coors[:,1], 0, diameter),)
        conf.edit('functions', cf)
        problem = Problem.from_conf(conf)

        problem.save_regions( join( output_dir, ('regions_' + d_format) % ii ),
                              ['Omega_1'] )
        region = problem.domain.regions['Omega_1']
        if not region.has_cells():
            raise ValueError('region %s has no cells!' % region.name)

        ofn_trunk = ofn_trunk + '_' + (d_format % ii)
        problem.setup_output(output_filename_trunk=ofn_trunk,
                             output_dir=output_dir,
                             output_format=output_format)

        out = []
        yield problem, out

        out_problem, state = out[-1]

        filename = join( output_dir,
                         ('log_%s.txt' % d_format) % ii )
        fd = open( filename, 'w' )
        log_item = '$r(\\Omega_1)$: %f\n' % diameter
        fd.write( log_item )
        fd.write( 'solution:\n' )
        nm.savetxt(fd, state())
        fd.close()

        yield None
def main():
    from sfepy.base.base import output
    from sfepy.base.conf import ProblemConf, get_standard_keywords
    from sfepy.discrete import Problem
    from sfepy.base.plotutils import plt

    parser = OptionParser(usage=usage, version='%prog')
    parser.add_option('-n', '--no-plot',
                      action="store_true", dest='no_plot',
                      default=False, help=helps['no_plot'])
    options, args = parser.parse_args()

    required, other = get_standard_keywords()
    # Use this file as the input file.
    conf = ProblemConf.from_file(__file__, required, other)

    # Create problem instance, but do not set equations.
    problem = Problem.from_conf(conf, init_equations=False)

    # Solve the problem. Output is ignored, results stored by using the
    # step_hook.
    u_t = solve_branch(problem, linear_tension)
    u_c = solve_branch(problem, linear_compression)

    # Get pressure load by calling linear_*() for each time step.
    ts = problem.get_timestepper()
    load_t = nm.array([linear_tension(ts, nm.array([[0.0]]), 'qp')['val']
                       for aux in ts.iter_from(0)],
                      dtype=nm.float64).squeeze()
    load_c = nm.array([linear_compression(ts, nm.array([[0.0]]), 'qp')['val']
                       for aux in ts.iter_from(0)],
                      dtype=nm.float64).squeeze()

    # Join the branches.
    displacements = {}
    for key in u_t.keys():
        displacements[key] = nm.r_[u_c[key][::-1], u_t[key]]
    load = nm.r_[load_c[::-1], load_t]


    if plt is None:
        output('matplotlib cannot be imported, printing raw data!')
        output(displacements)
        output(load)
    else:
        legend = []
        for key, val in six.iteritems(displacements):
            plt.plot(load, val)
            legend.append(key)

        plt.legend(legend, loc = 2)
        plt.xlabel('tension [kPa]')
        plt.ylabel('displacement [mm]')
        plt.grid(True)

        plt.gcf().savefig('pressure_displacement.png')

        if not options.no_plot:
            plt.show()
Example #11
    def from_conf(conf, options):
        from sfepy.discrete import Problem

        problem = Problem.from_conf(conf)
        problem.time_update()

        test = Test(problem=problem, conf=conf, options=options)
        return test
Example #12
    def from_conf(conf, options):
        from sfepy.discrete import Problem

        problem = Problem.from_conf(conf)
        problem.time_update()

        test = Test(problem=problem, conf=conf, options=options)
        return test
Example #13
def assemble_matrices(define, mod, pars, set_wave_dir, options, wdir=None):
    """
    Assemble the blocks of dispersion eigenvalue problem matrices.
    """
    define_dict = define(filename_mesh=options.mesh_filename,
                         pars=pars,
                         approx_order=options.order,
                         refinement_level=options.refine,
                         solver_conf=options.solver_conf,
                         plane=options.plane,
                         post_process=options.post_process,
                         **options.define_kwargs)

    conf = ProblemConf.from_dict(define_dict, mod)

    pb = Problem.from_conf(conf)
    pb.dispersion_options = options
    pb.set_output_dir(options.output_dir)
    dim = pb.domain.shape.dim

    # Set the normalized wave vector direction to the material(s).
    if wdir is None:
        wdir = nm.asarray(options.wave_dir[:dim], dtype=nm.float64)
        wdir = wdir / nm.linalg.norm(wdir)
    set_wave_dir(pb, wdir)

    bbox = pb.domain.mesh.get_bounding_box()
    size = (bbox[1] - bbox[0]).max()
    scaling0 = apply_unit_multipliers([1.0], ['length'],
                                      options.unit_multipliers)[0]
    scaling = scaling0
    if options.mesh_size is not None:
        scaling *= options.mesh_size / size
    output('scaling factor of periodic cell mesh coordinates:', scaling)
    output('new mesh size with applied unit multipliers:', scaling * size)
    pb.domain.mesh.coors[:] *= scaling
    pb.set_mesh_coors(pb.domain.mesh.coors, update_fields=True)

    bzone = 2.0 * nm.pi / (scaling * size)
    output('1. Brillouin zone size:', bzone * scaling0)
    output('1. Brillouin zone size with applied unit multipliers:', bzone)

    pb.time_update()
    pb.update_materials()

    # Assemble the matrices.
    mtxs = {}
    for key, eq in pb.equations.iteritems():
        mtxs[key] = mtx = pb.mtx_a.copy()
        mtx = eq.evaluate(mode='weak', dw_mode='matrix', asm_obj=mtx)
        mtx.eliminate_zeros()
        output_array_stats(mtx.data, 'nonzeros in %s' % key)

        output('symmetry checks:')
        output('%s - %s^T:' % (key, key), max_diff_csr(mtx, mtx.T))
        output('%s - %s^H:' % (key, key), max_diff_csr(mtx, mtx.H))

    return pb, wdir, bzone, mtxs
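assemble_matrices() relies on a max_diff_csr() helper defined elsewhere in the dispersion-analysis script. A minimal sketch of such a helper, assuming SciPy sparse matrix inputs, could look like:

import numpy as nm

def max_diff_csr(mtx1, mtx2):
    # Maximum absolute entry of the difference of two sparse matrices; used
    # above as a cheap (anti)symmetry check such as A - A^T or A - A^H.
    aux = (mtx1 - mtx2).tocsr()
    return nm.abs(aux.data).max() if aux.nnz > 0 else 0.0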
    def test_stokes_slip_bc(self):
        import scipy.sparse as sp

        from sfepy.base.conf import ProblemConf
        from sfepy.discrete import Problem
        import examples.navier_stokes.stokes_slip_bc as ssb

        conf = ProblemConf.from_module(ssb)
        pb = Problem.from_conf(conf, init_solvers=False)
        pb.time_update()
        variables = pb.get_variables()

        adi = variables.adi
        lcdi = variables.lcdi
        mtx = variables.mtx_lcbc

        ok = adi.var_names == lcdi.var_names
        self.report('same adi-lcdi ordering:', ok)

        ublock = mtx[adi.indx['u']]
        ir, ic = ublock.nonzero()
        ir += adi.indx['u'].start

        i0, i1 = adi.indx['u'].start, adi.indx['u'].stop
        _ok0 = (i0 <= ir).all() and (ir < i1).all()
        self.report('u block rows in [%d %d[: %s' % (i0, i1, _ok0))

        i0, i1 = lcdi.indx['u'].start, lcdi.indx['u'].stop
        _ok1 = (i0 <= ic).all() and (ic < i1).all()
        self.report('u block cols in [%d %d[: %s' % (i0, i1, _ok1))

        ok = ok and _ok0 and _ok1

        pblock = mtx[adi.indx['p']]
        ir, ic, iv = sp.find(pblock)
        ir += adi.indx['p'].start

        i0, i1 = adi.indx['p'].start, adi.indx['p'].stop
        _ok0 = (i0 <= ir).all() and (ir < i1).all()
        self.report('p block rows in [%d %d[: %s' % (i0, i1, _ok0))

        i0, i1 = lcdi.indx['p'].start, lcdi.indx['p'].stop
        _ok1 = (i0 <= ic).all() and (ic < i1).all()
        self.report('p block cols in [%d %d[: %s' % (i0, i1, _ok1))

        ok = ok and _ok0 and _ok1

        _ok0 = (len(ir) == adi.n_dof['p'])
        self.report('p block size correct:', _ok0)
        _ok1 = ((ir - adi.indx['p'].start) == (ic -
                                               lcdi.indx['p'].start)).all()
        self.report('p block diagonal:', _ok1)
        _ok2 = (iv == 1.0).all()
        self.report('p block identity:', _ok2)

        ok = ok and _ok0 and _ok1 and _ok2

        return ok
Example #15
    def test_stokes_slip_bc(self):
        import scipy.sparse as sp

        from sfepy.base.conf import ProblemConf
        from sfepy.discrete import Problem
        import examples.navier_stokes.stokes_slip_bc as ssb

        conf = ProblemConf.from_module(ssb)
        pb = Problem.from_conf(conf, init_solvers=False)
        pb.time_update()
        variables = pb.get_variables()

        adi = variables.adi
        lcdi = variables.lcdi
        mtx = variables.mtx_lcbc

        ok = adi.var_names == lcdi.var_names
        self.report('same adi-lcdi ordering:', ok)

        ublock = mtx[adi.indx['u']]
        ir, ic = ublock.nonzero()
        ir += adi.indx['u'].start

        i0, i1 = adi.indx['u'].start, adi.indx['u'].stop
        _ok0 = (i0 <= ir).all() and  (ir < i1).all()
        self.report('u block rows in [%d %d[: %s' % (i0, i1, _ok0))

        i0, i1 = lcdi.indx['u'].start, lcdi.indx['u'].stop
        _ok1 = (i0 <= ic).all() and  (ic < i1).all()
        self.report('u block cols in [%d %d[: %s' % (i0, i1, _ok1))

        ok = ok and _ok0 and _ok1

        pblock = mtx[adi.indx['p']]
        ir, ic, iv = sp.find(pblock)
        ir += adi.indx['p'].start

        i0, i1 = adi.indx['p'].start, adi.indx['p'].stop
        _ok0 = (i0 <= ir).all() and  (ir < i1).all()
        self.report('p block rows in [%d %d[: %s' % (i0, i1, _ok0))

        i0, i1 = lcdi.indx['p'].start, lcdi.indx['p'].stop
        _ok1 = (i0 <= ic).all() and  (ic < i1).all()
        self.report('p block cols in [%d %d[: %s' % (i0, i1, _ok1))

        ok = ok and _ok0 and _ok1

        _ok0 = (len(ir) == adi.n_dof['p'])
        self.report('p block size correct:', _ok0)
        _ok1 = ((ir - adi.indx['p'].start) == (ic - lcdi.indx['p'].start)).all()
        self.report('p block diagonal:', _ok1)
        _ok2 = (iv == 1.0).all()
        self.report('p block identity:', _ok2)

        ok = ok and _ok0 and _ok1 and _ok2

        return ok
Example #16
def assemble_matrices(define, mod, pars, set_wave_dir, options):
    """
    Assemble the blocks of dispersion eigenvalue problem matrices.
    """
    define_problem = functools.partial(define,
                                       filename_mesh=options.mesh_filename,
                                       pars=pars,
                                       approx_order=options.order,
                                       refinement_level=options.refine,
                                       solver_conf=options.solver_conf,
                                       plane=options.plane,
                                       post_process=options.post_process)

    conf = ProblemConf.from_dict(define_problem(), mod)

    pb = Problem.from_conf(conf)
    pb.dispersion_options = options
    pb.set_output_dir(options.output_dir)
    dim = pb.domain.shape.dim

    # Set the normalized wave vector direction to the material(s).
    wdir = nm.asarray(options.wave_dir[:dim], dtype=nm.float64)
    wdir = wdir / nm.linalg.norm(wdir)
    set_wave_dir(pb, wdir)

    bbox = pb.domain.mesh.get_bounding_box()
    size = (bbox[1] - bbox[0]).max()
    scaling0 = apply_unit_multipliers([1.0], ['length'],
                                      options.unit_multipliers)[0]
    scaling = scaling0
    if options.mesh_size is not None:
        scaling *= options.mesh_size / size
    output('scaling factor of periodic cell mesh coordinates:', scaling)
    output('new mesh size with applied unit multipliers:', scaling * size)
    pb.domain.mesh.coors[:] *= scaling
    pb.set_mesh_coors(pb.domain.mesh.coors, update_fields=True)

    bzone = 2.0 * nm.pi / (scaling * size)
    output('1. Brillouin zone size:', bzone * scaling0)
    output('1. Brillouin zone size with applied unit multipliers:', bzone)

    pb.time_update()
    pb.update_materials()

    # Assemble the matrices.
    mtxs = {}
    for key, eq in pb.equations.iteritems():
        mtxs[key] = mtx = pb.mtx_a.copy()
        mtx = eq.evaluate(mode='weak', dw_mode='matrix', asm_obj=mtx)
        mtx.eliminate_zeros()
        output_array_stats(mtx.data, 'nonzeros in %s' % key)

        output('symmetry checks:')
        output('%s - %s^T:' % (key, key), max_diff_csr(mtx, mtx.T))
        output('%s - %s^H:' % (key, key), max_diff_csr(mtx, mtx.H))

    return pb, wdir, bzone, mtxs
    def from_conf(conf, options):
        from sfepy.discrete import Problem

        pb = Problem.from_conf(conf, init_solvers=False)
        pb.time_update()
        mtx = pb.equations.evaluate(mode='weak', dw_mode='matrix',
                                    asm_obj=pb.mtx_a)

        test = Test(mtx=mtx, conf=conf, options=options)
        return test
    def from_conf(conf, options):
        from sfepy.discrete import Problem

        pb = Problem.from_conf(conf, init_solvers=False)
        pb.time_update()
        mtx = pb.equations.evaluate(mode='weak',
                                    dw_mode='matrix',
                                    asm_obj=pb.mtx_a)

        test = Test(mtx=mtx, conf=conf, options=options)
        return test
Example #19
def one_simulation(material_type,
                   define_args,
                   coef_tension=0.25,
                   coef_compression=-0.25,
                   plot_mesh_bool=False,
                   return_load=False):

    #parser = ArgumentParser(description=__doc__,
    #                        formatter_class=RawDescriptionHelpFormatter)
    #parser.add_argument('--version', action='version', version='%(prog)s')
    #options = parser.parse_args()
    output.set_output(filename='sfepy_log.txt', quiet=True)

    required, other = get_standard_keywords()
    # Use this file as the input file.
    conf = ProblemConf.from_file(__file__,
                                 required,
                                 other,
                                 define_args=define_args)

    # Create problem instance, but do not set equations.
    problem = Problem.from_conf(conf, init_equations=False)
    if plot_mesh_bool:
        plot_mesh(problem)

    # Solve the problem. Output is ignored, results stored by using the
    # step_hook.
    linear_tension = partial(linear_pressure, coef=coef_tension)
    u_t = solve_branch(problem, linear_tension, material_type)
    linear_compression = partial(linear_pressure, coef=coef_compression)
    u_c = solve_branch(problem, linear_compression, material_type)

    # Get pressure load by calling linear_*() for each time step.
    ts = problem.get_timestepper()
    load_t = np.array([
        linear_tension(ts, np.array([[0.0]]), 'qp')['val']
        for aux in ts.iter_from(0)
    ],
                      dtype=np.float64).squeeze()
    load_c = np.array([
        linear_compression(ts, np.array([[0.0]]), 'qp')['val']
        for aux in ts.iter_from(0)
    ],
                      dtype=np.float64).squeeze()

    # Join the branches.
    displacements = np.r_[u_c[::-1], u_t]
    load = np.r_[load_c[::-1], load_t]

    if return_load:
        return displacements, load
    else:
        return displacements
Example #20
    def __init__(self, conf, options, output_prefix, init_equations=True, **kwargs):
        """`kwargs` are passed to  Problem.from_conf()

        Command-line options have precedence over conf.options."""
        Application.__init__(self, conf, options, output_prefix)
        self.setup_options()

        is_eqs = init_equations
        if hasattr(options, "solve_not") and options.solve_not:
            is_eqs = False
        self.problem = Problem.from_conf(conf, init_equations=is_eqs, **kwargs)

        self.setup_output_info(self.problem, self.options)
Example #21
    def __init__(self, conf, options, output_prefix,
                 init_equations=True, **kwargs):
        """`kwargs` are passed to  Problem.from_conf()

        Command-line options have precedence over conf.options."""
        Application.__init__( self, conf, options, output_prefix )
        self.setup_options()

        is_eqs = init_equations
        if hasattr(options, 'solve_not') and options.solve_not:
            is_eqs = False
        self.problem = Problem.from_conf(conf, init_equations=is_eqs, **kwargs)

        self.setup_output_info( self.problem, self.options )
Example #22
def create_problem(conf):
    try:
        conf.options.save_times = 0
    except AttributeError:
        pass
    pb = Problem.from_conf(conf)
    try:
        conf.options.pre_process_hook(pb)
    except AttributeError:
        pass
    n_cells = pb.domain.shape.n_el
    vols = pb.domain.cmesh.get_volumes(1)
    h = nm.mean(vols)
    if "2_3" in pb.domain.geom_els:
        h = nm.mean(nm.sqrt(4 * vols))
    elif "2_4" in pb.domain.geom_els:
        h = nm.mean(nm.sqrt(2 * vols))
    return h, n_cells, pb, vols
Example #23
def save_only(conf, save_names, problem=None):
    """
    Save information available prior to setting equations and
    solving them.
    """
    if problem is None:
        problem = Problem.from_conf(conf, init_equations=False)

    if save_names.regions is not None:
        problem.save_regions(save_names.regions)

    if save_names.regions_as_groups is not None:
        problem.save_regions_as_groups(save_names.regions_as_groups)

    if save_names.field_meshes is not None:
        problem.save_field_meshes(save_names.field_meshes)

    if save_names.ebc is not None:
        problem.save_ebc(save_names.ebc, force=False)

    if save_names.ebc_nodes is not None:
        problem.save_ebc(save_names.ebc_nodes, force=True)
def save_only(conf, save_names, problem=None):
    """
    Save information available prior to setting equations and
    solving them.
    """
    if problem is None:
        problem = Problem.from_conf(conf, init_equations=False)

    if save_names.regions is not None:
        problem.save_regions(save_names.regions)

    if save_names.regions_as_groups is not None:
        problem.save_regions_as_groups(save_names.regions_as_groups)

    if save_names.field_meshes is not None:
        problem.save_field_meshes(save_names.field_meshes)

    if save_names.ebc is not None:
        problem.save_ebc(save_names.ebc, force=False)

    if save_names.ebc_nodes is not None:
        problem.save_ebc(save_names.ebc_nodes, force=True)
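save_only() only reads attributes of save_names, each being either None or an output file name. A hedged usage sketch, with the attribute values chosen purely for illustration:

from sfepy.base.base import Struct

# Hypothetical save_names container; attributes left as None are skipped.
save_names = Struct(regions='regions',
                    regions_as_groups=None,
                    field_meshes=None,
                    ebc='ebc.vtk',
                    ebc_nodes=None)
save_only(conf, save_names)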
    def test_ebc(self):
        import numpy as nm
        from sfepy.discrete import Problem

        pb = Problem.from_conf(self.test_conf)
        pb.time_update()

        vvs = pb.get_variables()
        setv = vvs.set_state_part
        make_full = vvs.make_full_vec

        svec_u = nm.ones((vvs.adi.n_dof['u'], ), dtype=nm.float64)
        svec_phi = nm.empty((vvs.adi.n_dof['phi'], ), dtype=nm.float64)
        svec_phi.fill(2.0)

        svec = vvs.create_stripped_state_vector()
        setv(svec, svec_u, 'u', stripped=True)
        setv(svec, svec_phi, 'phi', stripped=True)

        vec = make_full(svec)

        ii_u = vvs.di.indx['u'].start + vvs['u'].eq_map.eqi
        ii_phi = vvs.di.indx['phi'].start + vvs['phi'].eq_map.eqi

        ok_ebc = vvs.has_ebc(vec)
        ok_u = nm.all(vec[ii_u] == svec_u)
        ok_phi = nm.all(vec[ii_phi] == svec_phi)

        msg = '%s: %s'
        self.report(msg % ('ebc', ok_ebc))
        self.report(msg % ('u', ok_u))
        self.report(msg % ('phi', ok_phi))

        ok = ok_ebc and ok_u and ok_phi

        return ok
    def test_ebc( self ):
        import numpy as nm
        from sfepy.discrete import Problem

        pb = Problem.from_conf(self.test_conf)
        pb.time_update()

        vvs = pb.get_variables()
        setv = vvs.set_state_part
        make_full = vvs.make_full_vec

        svec_u = nm.ones( (vvs.adi.n_dof['u'],), dtype = nm.float64 )
        svec_phi = nm.empty( (vvs.adi.n_dof['phi'],), dtype = nm.float64 )
        svec_phi.fill( 2.0 )

        svec = vvs.create_stripped_state_vector()
        setv( svec, svec_u, 'u', stripped = True )
        setv( svec, svec_phi, 'phi', stripped = True )

        vec = make_full( svec )

        ii_u = vvs.di.indx['u'].start + vvs['u'].eq_map.eqi
        ii_phi = vvs.di.indx['phi'].start + vvs['phi'].eq_map.eqi

        ok_ebc = vvs.has_ebc( vec )
        ok_u = nm.all( vec[ii_u] == svec_u )
        ok_phi = nm.all( vec[ii_phi] == svec_phi )

        msg = '%s: %s'
        self.report( msg % ('ebc', ok_ebc) )
        self.report( msg % ('u', ok_u) )
        self.report( msg % ('phi', ok_phi) )

        ok = ok_ebc and ok_u and ok_phi

        return ok
Example #27
def main():
    # Aluminium and epoxy.
    default_pars = '70e9,0.35,2.799e3, 3.8e9,0.27,1.142e3'
    default_solver_conf = ("kind='eig.scipy',method='eigh',tol=1.0e-5,"
                           "maxiter=1000,which='LM',sigma=0.0")

    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--pars',
                        metavar='young1,poisson1,density1'
                        ',young2,poisson2,density2',
                        action='store',
                        dest='pars',
                        default=default_pars,
                        help=helps['pars'])
    parser.add_argument('--conf',
                        metavar='filename',
                        action='store',
                        dest='conf',
                        default=None,
                        help=helps['conf'])
    parser.add_argument('--mesh-size',
                        type=float,
                        metavar='float',
                        action='store',
                        dest='mesh_size',
                        default=None,
                        help=helps['mesh_size'])
    parser.add_argument('--unit-multipliers',
                        metavar='c_time,c_length,c_mass',
                        action='store',
                        dest='unit_multipliers',
                        default='1.0,1.0,1.0',
                        help=helps['unit_multipliers'])
    parser.add_argument('--plane',
                        action='store',
                        dest='plane',
                        choices=['strain', 'stress'],
                        default='strain',
                        help=helps['plane'])
    parser.add_argument('--wave-dir',
                        metavar='float,float[,float]',
                        action='store',
                        dest='wave_dir',
                        default='1.0,0.0,0.0',
                        help=helps['wave_dir'])
    parser.add_argument('--mode',
                        action='store',
                        dest='mode',
                        choices=['omega', 'kappa'],
                        default='omega',
                        help=helps['mode'])
    parser.add_argument('--range',
                        metavar='start,stop,count',
                        action='store',
                        dest='range',
                        default='10,100,10',
                        help=helps['range'])
    parser.add_argument('--order',
                        metavar='int',
                        type=int,
                        action='store',
                        dest='order',
                        default=1,
                        help=helps['order'])
    parser.add_argument('--refine',
                        metavar='int',
                        type=int,
                        action='store',
                        dest='refine',
                        default=0,
                        help=helps['refine'])
    parser.add_argument('-n',
                        '--n-eigs',
                        metavar='int',
                        type=int,
                        action='store',
                        dest='n_eigs',
                        default=6,
                        help=helps['n_eigs'])
    parser.add_argument('--eigs-only',
                        action='store_true',
                        dest='eigs_only',
                        default=False,
                        help=helps['eigs_only'])
    parser.add_argument('--solver-conf',
                        metavar='dict-like',
                        action='store',
                        dest='solver_conf',
                        default=default_solver_conf,
                        help=helps['solver_conf'])
    parser.add_argument('--save-materials',
                        action='store_true',
                        dest='save_materials',
                        default=False,
                        help=helps['save_materials'])
    parser.add_argument('--log-std-waves',
                        action='store_true',
                        dest='log_std_waves',
                        default=False,
                        help=helps['log_std_waves'])
    parser.add_argument('--silent',
                        action='store_true',
                        dest='silent',
                        default=False,
                        help=helps['silent'])
    parser.add_argument('-c',
                        '--clear',
                        action='store_true',
                        dest='clear',
                        default=False,
                        help=helps['clear'])
    parser.add_argument('-o',
                        '--output-dir',
                        metavar='path',
                        action='store',
                        dest='output_dir',
                        default='output',
                        help=helps['output_dir'])
    parser.add_argument('mesh_filename',
                        default='',
                        help=helps['mesh_filename'])
    options = parser.parse_args()

    output_dir = options.output_dir

    output.set_output(filename=os.path.join(output_dir, 'output_log.txt'),
                      combined=options.silent == False)

    if options.conf is not None:
        mod = import_file(options.conf)
        apply_units = mod.apply_units
        define = mod.define
        set_wave_dir = mod.set_wave_dir

    else:
        apply_units = apply_units_le
        define = define_le
        set_wave_dir = set_wave_dir_le

    options.pars = [float(ii) for ii in options.pars.split(',')]
    options.unit_multipliers = [
        float(ii) for ii in options.unit_multipliers.split(',')
    ]
    options.wave_dir = [float(ii) for ii in options.wave_dir.split(',')]
    aux = options.range.split(',')
    options.range = [float(aux[0]), float(aux[1]), int(aux[2])]
    options.solver_conf = dict_from_string(options.solver_conf)

    if options.clear:
        remove_files_patterns(output_dir, ['*.h5', '*.vtk', '*.txt'],
                              ignores=['output_log.txt'],
                              verbose=True)

    filename = os.path.join(output_dir, 'options.txt')
    ensure_path(filename)
    save_options(filename, [('options', vars(options))])

    pars = apply_units(options.pars, options.unit_multipliers)
    output('material parameters with applied unit multipliers:')
    output(pars)

    if options.mode == 'omega':
        rng = copy(options.range)
        rng[:2] = apply_unit_multipliers(options.range[:2],
                                         ['wave_number', 'wave_number'],
                                         options.unit_multipliers)
        output('wave number range with applied unit multipliers:', rng)

    else:
        rng = copy(options.range)
        rng[:2] = apply_unit_multipliers(options.range[:2],
                                         ['frequency', 'frequency'],
                                         options.unit_multipliers)
        output('frequency range with applied unit multipliers:', rng)

    define_problem = functools.partial(define,
                                       filename_mesh=options.mesh_filename,
                                       pars=pars,
                                       approx_order=options.order,
                                       refinement_level=options.refine,
                                       solver_conf=options.solver_conf,
                                       plane=options.plane)

    conf = ProblemConf.from_dict(define_problem(), sys.modules[__name__])

    pb = Problem.from_conf(conf)
    dim = pb.domain.shape.dim

    if dim != 2:
        options.plane = 'strain'

    wdir = nm.asarray(options.wave_dir[:dim], dtype=nm.float64)
    wdir = wdir / nm.linalg.norm(wdir)

    stepper = TimeStepper(rng[0], rng[1], dt=None, n_step=rng[2])

    bbox = pb.domain.mesh.get_bounding_box()
    size = (bbox[1] - bbox[0]).max()
    scaling0 = apply_unit_multipliers([1.0], ['length'],
                                      options.unit_multipliers)[0]
    scaling = scaling0
    if options.mesh_size is not None:
        scaling *= options.mesh_size / size
    output('scaling factor of periodic cell mesh coordinates:', scaling)
    output('new mesh size with applied unit multipliers:', scaling * size)
    pb.domain.mesh.coors[:] *= scaling
    pb.set_mesh_coors(pb.domain.mesh.coors, update_fields=True)

    bzone = 2.0 * nm.pi / (scaling * size)
    output('1. Brillouin zone size:', bzone * scaling0)
    output('1. Brillouin zone size with applied unit multipliers:', bzone)

    pb.time_update()
    pb.update_materials()

    if options.save_materials or options.log_std_waves:
        stiffness = pb.evaluate('ev_integrate_mat.2.Omega(m.D, u)',
                                mode='el_avg',
                                copy_materials=False,
                                verbose=False)
        young, poisson = mc.youngpoisson_from_stiffness(stiffness,
                                                        plane=options.plane)
        density = pb.evaluate('ev_integrate_mat.2.Omega(m.density, u)',
                              mode='el_avg',
                              copy_materials=False,
                              verbose=False)

    if options.save_materials:
        out = {}
        out['young'] = Struct(name='young',
                              mode='cell',
                              data=young[..., None, None])
        out['poisson'] = Struct(name='poisson',
                                mode='cell',
                                data=poisson[..., None, None])
        out['density'] = Struct(name='density', mode='cell', data=density)
        materials_filename = os.path.join(output_dir, 'materials.vtk')
        pb.save_state(materials_filename, out=out)

    # Set the normalized wave vector direction to the material(s).
    set_wave_dir(pb.get_materials(), wdir)

    conf = pb.solver_confs['eig']
    eig_solver = Solver.any_from_conf(conf)

    # Assemble the matrices.
    mtx_m = pb.mtx_a.copy()
    eq_m = pb.equations['M']
    mtx_m = eq_m.evaluate(mode='weak', dw_mode='matrix', asm_obj=mtx_m)
    mtx_m.eliminate_zeros()

    mtx_k = pb.mtx_a.copy()
    eq_k = pb.equations['K']
    mtx_k = eq_k.evaluate(mode='weak', dw_mode='matrix', asm_obj=mtx_k)
    mtx_k.eliminate_zeros()

    mtx_s = pb.mtx_a.copy()
    eq_s = pb.equations['S']
    mtx_s = eq_s.evaluate(mode='weak', dw_mode='matrix', asm_obj=mtx_s)
    mtx_s.eliminate_zeros()

    mtx_r = pb.mtx_a.copy()
    eq_r = pb.equations['R']
    mtx_r = eq_r.evaluate(mode='weak', dw_mode='matrix', asm_obj=mtx_r)
    mtx_r.eliminate_zeros()

    output('symmetry checks of real blocks:')
    output('M - M^T:', _max_diff_csr(mtx_m, mtx_m.T))
    output('K - K^T:', _max_diff_csr(mtx_k, mtx_k.T))
    output('S - S^T:', _max_diff_csr(mtx_s, mtx_s.T))
    output('R + R^T:', _max_diff_csr(mtx_r, -mtx_r.T))

    n_eigs = options.n_eigs
    if options.n_eigs > mtx_k.shape[0]:
        options.n_eigs = mtx_k.shape[0]
        n_eigs = None

    if options.mode == 'omega':
        eigenshapes_filename = os.path.join(
            output_dir, 'frequency-eigenshapes-%s.vtk' % stepper.suffix)

        extra = []
        extra_plot_kwargs = []
        if options.log_std_waves:
            lam, mu = mc.lame_from_youngpoisson(young,
                                                poisson,
                                                plane=options.plane)
            alam = nm.average(lam)
            amu = nm.average(mu)
            adensity = nm.average(density)

            cp = nm.sqrt((alam + 2.0 * amu) / adensity)
            cs = nm.sqrt(amu / adensity)
            output('average p-wave speed:', cp)
            output('average shear wave speed:', cs)

            extra = [r'$\omega_p$', r'$\omega_s$']
            extra_plot_kwargs = [{
                'ls': '--',
                'color': 'k'
            }, {
                'ls': '--',
                'color': 'gray'
            }]

        log = Log(
            [[r'$\lambda_{%d}$' % ii for ii in range(options.n_eigs)],
             [r'$\omega_{%d}$' % ii for ii in range(options.n_eigs)] + extra],
            plot_kwargs=[{}, [{}] * options.n_eigs + extra_plot_kwargs],
            yscales=['linear', 'linear'],
            xlabels=[r'$\kappa$', r'$\kappa$'],
            ylabels=[r'eigenvalues $\lambda_i$', r'frequencies $\omega_i$'],
            log_filename=os.path.join(output_dir, 'frequencies.txt'),
            aggregate=1000,
            sleep=0.1)

        for iv, wmag in stepper:
            output('step %d: wave vector %s' % (iv, wmag * wdir))

            mtx_a = mtx_k + wmag**2 * mtx_s + (1j * wmag) * mtx_r
            mtx_b = mtx_m

            output('A - A^H:', _max_diff_csr(mtx_a, mtx_a.H))

            if options.eigs_only:
                eigs = eig_solver(mtx_a,
                                  mtx_b,
                                  n_eigs=n_eigs,
                                  eigenvectors=False)
                svecs = None

            else:
                eigs, svecs = eig_solver(mtx_a,
                                         mtx_b,
                                         n_eigs=options.n_eigs,
                                         eigenvectors=True)
            omegas = nm.sqrt(eigs)

            output('eigs, omegas:\n', nm.c_[eigs, omegas])

            out = tuple(eigs) + tuple(omegas)
            if options.log_std_waves:
                out = out + (cp * wmag, cs * wmag)
            log(*out, x=[wmag, wmag])

            save_eigenvectors(eigenshapes_filename % iv, svecs, pb)

        log(save_figure=os.path.join(output_dir, 'frequencies.png'))
        log(finished=True)

    else:
        import scipy.sparse as sps
        from sksparse.cholmod import cholesky

        eigenshapes_filename = os.path.join(
            output_dir, 'wave-number-eigenshapes-%s.vtk' % stepper.suffix)

        factor = cholesky(mtx_s)
        perm = factor.P()
        ir = nm.arange(len(perm))
        mtx_p = sps.coo_matrix((nm.ones_like(perm), (ir, perm)))
        mtx_l = mtx_p.T * factor.L()
        mtx_eye = sps.eye(mtx_l.shape[0], dtype=nm.float64)

        output('S - LL^T:', _max_diff_csr(mtx_s, mtx_l * mtx_l.T))

        log = Log([[r'$\kappa_{%d}$' % ii for ii in range(options.n_eigs)]],
                  plot_kwargs=[{
                      'ls': 'None',
                      'marker': 'o'
                  }],
                  yscales=['linear'],
                  xlabels=[r'$\omega$'],
                  ylabels=[r'wave numbers $\kappa_i$'],
                  log_filename=os.path.join(output_dir, 'wave-numbers.txt'),
                  aggregate=1000,
                  sleep=0.1)
        for io, omega in stepper:
            output('step %d: frequency %s' % (io, omega))

            mtx_a = sps.bmat([[mtx_k - omega**2 * mtx_m, None],
                              [None, mtx_eye]])
            mtx_b = sps.bmat([[1j * mtx_r, mtx_l], [mtx_l.T, None]])

            output('A - A^T:', _max_diff_csr(mtx_a, mtx_a.T))
            output('A - A^H:', _max_diff_csr(mtx_a, mtx_a.H))
            output('B - B^H:', _max_diff_csr(mtx_b, mtx_b.H))

            if options.eigs_only:
                eigs = eig_solver(mtx_a,
                                  mtx_b,
                                  n_eigs=n_eigs,
                                  eigenvectors=False)
                svecs = None

            else:
                eigs, svecs = eig_solver(mtx_a,
                                         mtx_b,
                                         n_eigs=options.n_eigs,
                                         eigenvectors=True)
            kappas = eigs

            output('kappas:\n', kappas[:, None])

            out = tuple(kappas)
            log(*out, x=[omega])

            save_eigenvectors(eigenshapes_filename % io, svecs, pb)

        log(save_figure=os.path.join(output_dir, 'wave-numbers.png'))
        log(finished=True)
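In the omega mode above, each step solves the generalized eigenvalue problem (K + kappa^2 S + i kappa R) x = omega^2 M x for a fixed wave-vector magnitude kappa, using the eigensolver configured in the problem file. For orientation only, a stand-alone sketch of one such step written directly with SciPy (an assumption, not what the script itself calls):

import numpy as nm
import scipy.sparse.linalg as sla

def omega_step_sketch(mtx_m, mtx_k, mtx_s, mtx_r, wmag, n_eigs=6):
    # Assemble A(kappa) and solve A x = lambda M x; frequencies are sqrt(lambda).
    mtx_a = mtx_k + wmag**2 * mtx_s + (1j * wmag) * mtx_r
    eigs, svecs = sla.eigs(mtx_a.tocsc(), k=n_eigs, M=mtx_m.tocsc(),
                           sigma=0.0, which='LM')
    return nm.sqrt(eigs), svecs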
Example #28
def recover_micro_hook(micro_filename, region, macro,
                       naming_scheme='step_iel',
                       recovery_file_tag='',
                       define_args=None):
    # Create a micro-problem instance.
    required, other = get_standard_keywords()
    required.remove('equations')
    conf = ProblemConf.from_file(micro_filename, required, other,
                                 verbose=False, define_args=define_args)

    coefs_filename = conf.options.get('coefs_filename', 'coefs')
    output_dir = conf.options.get('output_dir', '.')
    coefs_filename = op.join(output_dir, coefs_filename) + '.h5'

    # Coefficients and correctors
    coefs = Coefficients.from_file_hdf5(coefs_filename)
    corrs = get_correctors_from_file(dump_names=coefs.dump_names)

    recovery_hook = conf.options.get('recovery_hook', None)

    if recovery_hook is not None:
        recovery_hook = conf.get_function(recovery_hook)
        pb = Problem.from_conf(conf, init_equations=False, init_solvers=False)

        format = get_print_info(pb.domain.mesh.n_el, fill='0')[1]

        for ii, iel in enumerate(region.cells):
            print('ii: %d, iel: %d' % (ii, iel))

            local_macro = {}
            for k, v in six.iteritems(macro):
                local_macro[k] = v[ii, 0]

            out = recovery_hook(pb, corrs, local_macro)

            if ii == 0:
                new_keys = []
                new_data = {}
                new_idxs = []
                for k in six.iterkeys(local_macro):
                    if k not in macro:
                        new_keys.append(k)
                        new_data[k] = []

            new_idxs.append(ii)
            for jj in new_keys:
                new_data[jj].append(local_macro[jj])

            # save data
            if out is not None:
                suffix = format % iel
                micro_name = pb.get_output_name(extra='recovered_'
                                                + recovery_file_tag + suffix)
                filename = op.join(output_dir, op.basename(micro_name))
                fpv = pb.conf.options.get('file_per_var', False)
                pb.save_state(filename, out=out,
                              file_per_var=fpv)

        for jj in new_keys:
            lout = new_data[jj]
            macro[jj] = nm.zeros((nm.max(new_idxs) + 1, 1) + lout[0].shape,
                                 dtype=lout[0].dtype)
            out = macro[jj]
            for kk, ii in enumerate(new_idxs):
                out[ii, 0] = lout[kk]
Example #29
def recover_micro_hook_eps(micro_filename, region,
                           eval_var, nodal_values, const_values, eps0,
                           recovery_file_tag='',
                           define_args=None):
    # Create a micro-problem instance.
    required, other = get_standard_keywords()
    required.remove('equations')
    conf = ProblemConf.from_file(micro_filename, required, other,
                                 verbose=False, define_args=define_args)

    coefs_filename = conf.options.get('coefs_filename', 'coefs')
    output_dir = conf.options.get('output_dir', '.')
    coefs_filename = op.join(output_dir, coefs_filename) + '.h5'

    # Coefficients and correctors
    coefs = Coefficients.from_file_hdf5(coefs_filename)
    corrs = get_correctors_from_file(dump_names=coefs.dump_names)

    recovery_hook = conf.options.get('recovery_hook', None)

    if recovery_hook is not None:
        recovery_hook = conf.get_function(recovery_hook)
        pb = Problem.from_conf(conf, init_equations=False, init_solvers=False)

        # Get tiling of a given region
        rcoors = region.domain.mesh.coors[region.get_entities(0), :]
        rcmin = nm.min(rcoors, axis=0)
        rcmax = nm.max(rcoors, axis=0)
        nn = nm.round((rcmax - rcmin) / eps0)
        if nm.prod(nn) == 0:
            output('inconsistency in recovery region and microstructure size!')
            return

        cs = []
        for ii, n in enumerate(nn):
            cs.append(nm.arange(n) * eps0 + rcmin[ii])

        x0 = nm.empty((int(nm.prod(nn)), nn.shape[0]), dtype=nm.float64)
        for ii, icoor in enumerate(nm.meshgrid(*cs, indexing='ij')):
            x0[:, ii] = icoor.flatten()

        mesh = pb.domain.mesh
        coors, conn, outs, ndoffset = [], [], [], 0
        # Recover region
        for ii, c0 in enumerate(x0):
            local_macro = {'eps0': eps0}
            local_coors = pb.domain.mesh.coors * eps0 + c0
            # Inside recovery region?
            v = nm.ones((eval_var.field.region.entities[0].shape[0], 1))
            v[region.entities[0]] = 0
            no = nm.sum(v)
            aux = eval_var.field.evaluate_at(local_coors, v)
            if (nm.sum(aux) / no) > 1e-3:
                continue

            output('ii: %d' % ii)

            for k, v in six.iteritems(nodal_values):
                local_macro[k] = eval_var.field.evaluate_at(local_coors, v)
            for k, v in six.iteritems(const_values):
                local_macro[k] = v

            outs.append(recovery_hook(pb, corrs, local_macro))
            coors.append(local_coors)
            conn.append(mesh.get_conn(mesh.descs[0]) + ndoffset)
            ndoffset += mesh.n_nod

    # Collect output variables
    outvars = {}
    for k, v in six.iteritems(outs[0]):
        if v.var_name in outvars:
            outvars[v.var_name].append(k)
        else:
            outvars[v.var_name] = [k]

    # Split output by variables/regions
    pvs = pb.create_variables(outvars.keys())
    outregs = {k: pvs[k].field.region.get_entities(-1) for k in outvars.keys()}
    nrve = len(coors)
    coors = nm.vstack(coors)
    ngroups = nm.tile(mesh.cmesh.vertex_groups.squeeze(), (nrve,))
    conn = nm.vstack(conn)
    cgroups = nm.tile(mesh.cmesh.cell_groups.squeeze(), (nrve,))

    # Get region mesh and data
    for k, cidxs in six.iteritems(outregs):
        gcidxs = nm.hstack([cidxs + mesh.n_el * ii for ii in range(nrve)])
        rconn = conn[gcidxs]
        remap = -nm.ones((coors.shape[0],), dtype=nm.int32)
        remap[rconn] = 1
        vidxs = nm.where(remap > 0)[0]
        remap[vidxs] = nm.arange(len(vidxs))
        rconn = remap[rconn]
        rcoors = coors[vidxs, :]

        out = {}
        for ifield in outvars[k]:
            data = [outs[ii][ifield].data for ii in range(nrve)]
            out[ifield] = Struct(name='output_data',
                                 mode=outs[0][ifield].mode,
                                 dofs=None,
                                 var_name=k,
                                 data=nm.vstack(data))

        micro_name = pb.get_output_name(extra='recovered%s_%s'
                                        % (recovery_file_tag, k))
        filename = op.join(output_dir, op.basename(micro_name))
        mesh_out = Mesh.from_data('recovery_%s' % k, rcoors, ngroups[vidxs],
                                  [rconn], [cgroups[gcidxs]], [mesh.descs[0]])
        mesh_out.write(filename, io='auto', out=out)
Example #30
def generate_probes(filename_input, filename_results, options,
                    conf=None, problem=None, probes=None, labels=None,
                    probe_hooks=None):
    """
    Generate probe figures and data files.
    """
    if conf is None:
        required, other = get_standard_keywords()
        conf = ProblemConf.from_file(filename_input, required, other)

    opts = conf.options

    if options.auto_dir:
        output_dir = opts.get_('output_dir', '.')
        filename_results = os.path.join(output_dir, filename_results)

    output('results in: %s' % filename_results)

    io = MeshIO.any_from_filename(filename_results)
    step = options.step if options.step >= 0 else io.read_last_step()
    all_data = io.read_data(step)
    output('loaded:', all_data.keys())
    output('from step:', step)

    if options.only_names is None:
        data = all_data
    else:
        data = {}
        for key, val in all_data.items():
            if key in options.only_names:
                data[key] = val

    if problem is None:
        problem = Problem.from_conf(conf,
                                    init_equations=False, init_solvers=False)

    if probes is None:
        gen_probes = conf.get_function(conf.options.gen_probes)
        probes, labels = gen_probes(problem)

    if probe_hooks is None:
        probe_hooks = {None : conf.get_function(conf.options.probe_hook)}

    if options.output_filename_trunk is None:
        options.output_filename_trunk = problem.ofn_trunk

    filename_template = options.output_filename_trunk \
                        + ('_%%d.%s' % options.output_format)
    if options.same_dir:
        filename_template = os.path.join(os.path.dirname(filename_results),
                                         filename_template)

    output_dir = os.path.dirname(filename_results)

    for ip, probe in enumerate(probes):
        output(ip, probe.name)

        probe.set_options(close_limit=options.close_limit)

        for key, probe_hook in probe_hooks.items():

            out = probe_hook(data, probe, labels[ip], problem)
            if out is None: continue
            if isinstance(out, tuple):
                fig, results = out
            else:
                fig = out

            if key is not None:
                filename = filename_template % (key, ip)

            else:
                filename = filename_template % ip

            if fig is not None:
                if isinstance(fig, dict):
                    for fig_name, fig_fig in fig.items():
                        fig_filename = edit_filename(filename,
                                                     suffix='_' + fig_name)
                        fig_fig.savefig(fig_filename)
                        output('figure ->', os.path.normpath(fig_filename))

                else:
                    fig.savefig(filename)
                    output('figure ->', os.path.normpath(filename))

            if results is not None:
                txt_filename = edit_filename(filename, new_ext='.txt')

                write_results(txt_filename, probe, results)

                output('data ->', os.path.normpath(txt_filename))
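generate_probes() gets its probes from the user hook named in conf.options.gen_probes, which is expected to return matching lists of probes and labels. A minimal sketch of such a hook, assuming a single line probe across the domain's bounding box:

def gen_probes(problem):
    from sfepy.discrete.probes import LineProbe

    bbox = problem.domain.mesh.get_bounding_box()
    p0, p1 = bbox[0], bbox[1]
    probes = [LineProbe(p0, p1, n_point=100)]
    labels = ['line %s -> %s' % (p0, p1)]
    return probes, labels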
Example #31
def main():
    import os
    from sfepy.base.base import spause, output
    from sfepy.base.conf import ProblemConf, get_standard_keywords
    from sfepy.discrete import Problem
    import sfepy.homogenization.coefs_base as cb

    parser = ArgumentParser(description=__doc__)
    parser.add_argument('--version', action='version', version='%(prog)s')
    parser.add_argument('-n', '--no-pauses',
                        action="store_true", dest='no_pauses',
                        default=False, help=helps['no_pauses'])
    options = parser.parse_args()

    if options.no_pauses:
        def spause(*args):
            output(*args)

    nm.set_printoptions(precision=3)

    spause(r""">>>
First, this file will be read in place of an input
(problem description) file.
Press 'q' to quit the example, press any other key to continue...""")
    required, other = get_standard_keywords()
    required.remove('equations')
    # Use this file as the input file.
    conf = ProblemConf.from_file(__file__, required, other)
    print(list(conf.to_dict().keys()))
    spause(r""">>>
...the read input as a dict (keys only for brevity).
['q'/other key to quit/continue...]""")

    spause(r""">>>
Now the input will be used to create a Problem instance.
['q'/other key to quit/continue...]""")
    problem = Problem.from_conf(conf, init_equations=False)
    # The homogenization mini-apps need the output_dir.
    output_dir = ''
    problem.output_dir = output_dir
    print(problem)
    spause(r""">>>
...the Problem instance.
['q'/other key to quit/continue...]""")

    spause(r""">>>
The homogenized elastic coefficient $E_{ijkl}$ is expressed
using $\Pi$ operators, computed now. In fact, those operators are permuted
coordinates of the mesh nodes.
['q'/other key to quit/continue...]""")
    req = conf.requirements['pis']
    mini_app = cb.ShapeDimDim('pis', problem, req)
    pis = mini_app()
    print(pis)
    spause(r""">>>
...the $\Pi$ operators.
['q'/other key to quit/continue...]""")

    spause(r""">>>
Next, $E_{ijkl}$ needs the so-called steady state correctors $\bar{\omega}^{rs}$,
computed now.
['q'/other key to quit/continue...]""")
    req = conf.requirements['corrs_rs']

    save_name = req.get('save_name', '')
    name = os.path.join(output_dir, save_name)

    mini_app = cb.CorrDimDim('steady rs correctors', problem, req)
    mini_app.setup_output(save_format='vtk',
                          file_per_var=False)
    corrs_rs = mini_app(data={'pis': pis})
    print(corrs_rs)
    spause(r""">>>
...the $\bar{\omega}^{rs}$ correctors.
The results are saved in: %s.%s

Try to display them with:

   python postproc.py %s.%s

['q'/other key to quit/continue...]""" % (2 * (name, problem.output_format)))

    spause(r""">>>
Then the volume of the domain is needed.
['q'/other key to quit/continue...]""")
    volume = problem.evaluate('d_volume.i3.Y(uc)')
    print(volume)

    spause(r""">>>
...the volume.
['q'/other key to quit/continue...]""")

    spause(r""">>>
Finally, $E_{ijkl}$ can be computed.
['q'/other key to quit/continue...]""")
    mini_app = cb.CoefSymSym('homogenized elastic tensor',
                             problem, conf.coefs['E'])
    c_e = mini_app(volume, data={'pis': pis, 'corrs_rs' : corrs_rs})
    print(r""">>>
The homogenized elastic coefficient $E_{ijkl}$, symmetric storage
with rows, columns in 11, 22, 12 ordering:""")
    print(c_e)
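
For orientation, the quantities computed in the walkthrough above (the $\Pi$ operators, the correctors $\bar{\omega}^{rs}$ and the volume $|Y|$) enter the homogenized coefficient through the standard cell-problem formula, sketched here; the exact convention used is the one encoded in conf.coefs['E']:

$$E_{ijkl} = \frac{1}{|Y|} \int_Y D_{pqrs}\, e_{pq}(\Pi^{ij} + \bar{\omega}^{ij})\, e_{rs}(\Pi^{kl} + \bar{\omega}^{kl})\, \mathrm{d}Y$$
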
Example #32
0
    def init_subproblems(self, conf, **kwargs):
        from sfepy.discrete.state import State
        from sfepy.discrete import Problem
        from sfepy.base.conf import ProblemConf, get_standard_keywords
        from scipy.spatial import cKDTree as KDTree

        # init subproblems
        problem = self.context
        pb_vars = problem.get_variables()
        # get "master" DofInfo and last index
        pb_adi_indx = problem.equations.variables.adi.indx
        self.adi_indx = pb_adi_indx.copy()
        last_indx = -1
        for ii in six.itervalues(self.adi_indx):
            last_indx = nm.max([last_indx, ii.stop])

        # coupling variables
        self.cvars_to_pb = {}
        for jj in conf.coupling_variables:
            self.cvars_to_pb[jj] = [None, None]
            if jj in pb_vars.names:
                if pb_vars[jj].dual_var_name is not None:
                    self.cvars_to_pb[jj][0] = -1

                else:
                    self.cvars_to_pb[jj][1] = -1

        # init subproblems
        self.subpb = []
        required, other = get_standard_keywords()
        master_prefix = output.get_output_prefix()
        for ii, ifname in enumerate(conf.others):
            sub_prefix = master_prefix[:-1] + '-sub%d:' % (ii + 1)
            output.set_output_prefix(sub_prefix)
            kwargs['master_problem'] = problem
            confi = ProblemConf.from_file(ifname,
                                          required,
                                          other,
                                          define_args=kwargs)
            pbi = Problem.from_conf(confi, init_equations=True)
            sti = State(pbi.equations.variables)
            pbi.equations.set_data(None, ignore_unknown=True)
            pbi.time_update()
            pbi.update_materials()
            sti.apply_ebc()
            pbi_vars = pbi.get_variables()
            output.set_output_prefix(master_prefix)
            self.subpb.append([pbi, sti, None])

            # append "slave" DofInfo
            for jj in pbi_vars.names:
                if not (pbi_vars[jj].is_state()):
                    continue

                didx = pbi.equations.variables.adi.indx[jj]
                ndof = didx.stop - didx.start
                if jj in self.adi_indx:
                    if ndof != \
                      (self.adi_indx[jj].stop - self.adi_indx[jj].start):
                        raise ValueError('DOFs do not match!')

                else:
                    self.adi_indx.update(
                        {jj: slice(last_indx, last_indx + ndof, None)})
                    last_indx += ndof

            for jj in conf.coupling_variables:
                if jj in pbi_vars.names:
                    if pbi_vars[jj].dual_var_name is not None:
                        self.cvars_to_pb[jj][0] = ii

                    else:
                        self.cvars_to_pb[jj][1] = ii

        self.subpb.append([problem, None, None])

        self.cvars_to_pb_map = {}
        for varname, pbs in six.iteritems(self.cvars_to_pb):
            # match field nodes
            coors = []
            for ii in pbs:
                pbi = self.subpb[ii][0]
                pbi_vars = pbi.get_variables()
                fcoors = pbi_vars[varname].field.coors
                dc = nm.abs(nm.max(fcoors, axis=0)\
                            - nm.min(fcoors, axis=0))
                ax = nm.where(dc > 1e-9)[0]
                coors.append(fcoors[:, ax])

            if len(coors[0]) != len(coors[1]):
                raise ValueError('number of nodes does not match!')

            kdtree = KDTree(coors[0])
            map_12 = kdtree.query(coors[1])[1]

            pbi1 = self.subpb[pbs[0]][0]
            pbi1_vars = pbi1.get_variables()
            eq_map_1 = pbi1_vars[varname].eq_map

            pbi2 = self.subpb[pbs[1]][0]
            pbi2_vars = pbi2.get_variables()
            eq_map_2 = pbi2_vars[varname].eq_map

            dpn = eq_map_2.dpn
            nnd = map_12.shape[0]

            map_12_nd = nm.zeros((nnd * dpn, ), dtype=nm.int32)
            if dpn > 1:
                for ii in range(dpn):
                    map_12_nd[ii::dpn] = map_12 * dpn + ii
            else:
                map_12_nd = map_12

            idx = nm.where(eq_map_2.eq >= 0)[0]
            self.cvars_to_pb_map[varname] = eq_map_1.eq[map_12[idx]]
Example #33
0
def recover_micro_hook(micro_filename, region, macro,
                       naming_scheme='step_iel',
                       recovery_file_tag='',
                       define_args=None, verbose=False):
    # Create a micro-problem instance.
    required, other = get_standard_keywords()
    required.remove('equations')
    conf = ProblemConf.from_file(micro_filename, required, other,
                                 verbose=False, define_args=define_args)

    coefs_filename = conf.options.get('coefs_filename', 'coefs')
    output_dir = conf.options.get('output_dir', '.')
    coefs_filename = op.join(output_dir, coefs_filename) + '.h5'

    # Coefficients and correctors
    coefs = Coefficients.from_file_hdf5(coefs_filename)
    corrs = get_correctors_from_file(dump_names=coefs.dump_names)

    recovery_hook = conf.options.get('recovery_hook', None)

    if recovery_hook is not None:
        recovery_hook = conf.get_function(recovery_hook)
        pb = Problem.from_conf(conf, init_equations=False, init_solvers=False)

        format = get_print_info(pb.domain.mesh.n_el, fill='0')[1]

        output('recovering microstructures...')
        tt = time.clock()
        output_fun = output.output_function
        output_level = output.level
        for ii, iel in enumerate(region.cells):
            output.level = output_level
            output('micro: %d (el=%d)' % (ii, iel))

            local_macro = {}
            for k, v in six.iteritems(macro):
                local_macro[k] = v[ii, 0]

            output.set_output(quiet=not(verbose))
            out = recovery_hook(pb, corrs, local_macro)
            output.output_function = output_fun

            if ii == 0:
                new_keys = []
                new_data = {}
                new_idxs = []
                for k in six.iterkeys(local_macro):
                    if k not in macro:
                        new_keys.append(k)
                        new_data[k] = []

            new_idxs.append(ii)
            for jj in new_keys:
                new_data[jj].append(local_macro[jj])

            # save data
            if out is not None:
                suffix = format % iel
                micro_name = pb.get_output_name(extra='recovered_'
                                                + recovery_file_tag + suffix)
                filename = op.join(output_dir, op.basename(micro_name))
                fpv = pb.conf.options.get('file_per_var', False)
                pb.save_state(filename, out=out,
                              file_per_var=fpv)

        output('...done in %.2f s' % (time.clock() - tt))

        for jj in new_keys:
            lout = new_data[jj]
            macro[jj] = nm.zeros((nm.max(new_idxs) + 1, 1) + lout[0].shape,
                                 dtype=lout[0].dtype)
            out = macro[jj]
            for kk, ii in enumerate(new_idxs):
                out[ii, 0] = lout[kk]
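
A minimal sketch of what the micro-problem description file is expected to provide so that recover_micro_hook() above has something to call: an options entry naming the recovery hook, and the hook itself returning a dict of output Structs accepted by pb.save_state(). All names below are hypothetical and the placeholder reconstruction is model-specific:

import numpy as nm
from sfepy.base.base import Struct

options = {
    'coefs_filename': 'coefs',
    'recovery_hook': 'recover_le',  # hypothetical hook name, defined below
}

def recover_le(pb, corrs, macro):
    # Combine the correctors in `corrs` with the local macroscopic data in
    # `macro` into nodal data; a zero placeholder stands in for the real
    # model-specific reconstruction.
    n_nod, dim = pb.domain.mesh.n_nod, pb.domain.mesh.dim
    u_rec = nm.zeros((n_nod, dim), dtype=nm.float64)
    return {'u': Struct(name='output_data', mode='vertex',
                        data=u_rec, dofs=None)}
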
Example #34
0
def generate_probes(filename_input,
                    filename_results,
                    options,
                    conf=None,
                    problem=None,
                    probes=None,
                    labels=None,
                    probe_hooks=None):
    """
    Generate probe figures and data files.
    """
    if conf is None:
        required, other = get_standard_keywords()
        conf = ProblemConf.from_file(filename_input, required, other)

    opts = conf.options

    if options.auto_dir:
        output_dir = opts.get_('output_dir', '.')
        filename_results = os.path.join(output_dir, filename_results)

    output('results in: %s' % filename_results)

    io = MeshIO.any_from_filename(filename_results)
    step = options.step if options.step >= 0 else io.read_last_step()
    all_data = io.read_data(step)
    output('loaded:', list(all_data.keys()))
    output('from step:', step)

    if options.only_names is None:
        data = all_data
    else:
        data = {}
        for key, val in six.iteritems(all_data):
            if key in options.only_names:
                data[key] = val

    if problem is None:
        problem = Problem.from_conf(conf,
                                    init_equations=False,
                                    init_solvers=False)

    if probes is None:
        gen_probes = conf.get_function(conf.options.gen_probes)
        probes, labels = gen_probes(problem)

    if probe_hooks is None:
        probe_hooks = {None: conf.get_function(conf.options.probe_hook)}

    if options.output_filename_trunk is None:
        options.output_filename_trunk = problem.ofn_trunk

    filename_template = options.output_filename_trunk \
                        + ('_%%d.%s' % options.output_format)
    if options.same_dir:
        filename_template = os.path.join(os.path.dirname(filename_results),
                                         filename_template)

    output_dir = os.path.dirname(filename_results)

    for ip, probe in enumerate(probes):
        output(ip, probe.name)

        probe.set_options(close_limit=options.close_limit)

        for key, probe_hook in six.iteritems(probe_hooks):

            out = probe_hook(data, probe, labels[ip], problem)
            if out is None: continue
            if isinstance(out, tuple):
                fig, results = out
            else:
                fig = out
                results = None  # ensure `results` is defined when the hook returns only a figure

            if key is not None:
                filename = filename_template % (key, ip)

            else:
                filename = filename_template % ip

            if fig is not None:
                if isinstance(fig, dict):
                    for fig_name, fig_fig in six.iteritems(fig):
                        fig_filename = edit_filename(filename,
                                                     suffix='_' + fig_name)
                        fig_fig.savefig(fig_filename)
                        output('figure ->', os.path.normpath(fig_filename))

                else:
                    fig.savefig(filename)
                    output('figure ->', os.path.normpath(filename))

            if results is not None:
                txt_filename = edit_filename(filename, new_ext='.txt')

                write_results(txt_filename, probe, results)

                output('data ->', os.path.normpath(txt_filename))
Example #35
0
def recover_micro_hook_eps(micro_filename, region,
                           eval_var, nodal_values, const_values, eps0,
                           recovery_file_tag='',
                           define_args=None, verbose=False):
    # Create a micro-problem instance.
    required, other = get_standard_keywords()
    required.remove('equations')
    conf = ProblemConf.from_file(micro_filename, required, other,
                                 verbose=False, define_args=define_args)

    coefs_filename = conf.options.get('coefs_filename', 'coefs')
    output_dir = conf.options.get('output_dir', '.')
    coefs_filename = op.join(output_dir, coefs_filename) + '.h5'

    # Coefficients and correctors
    coefs = Coefficients.from_file_hdf5(coefs_filename)
    corrs = get_correctors_from_file(dump_names=coefs.dump_names)

    recovery_hook = conf.options.get('recovery_hook', None)

    if recovery_hook is not None:
        recovery_hook = conf.get_function(recovery_hook)
        pb = Problem.from_conf(conf, init_equations=False, init_solvers=False)

        # Get tiling of a given region
        rcoors = region.domain.mesh.coors[region.get_entities(0), :]
        rcmin = nm.min(rcoors, axis=0)
        rcmax = nm.max(rcoors, axis=0)
        nn = nm.round((rcmax - rcmin) / eps0)
        if nm.prod(nn) == 0:
            output('inconsistency in recovery region and microstructure size!')
            return

        cs = []
        for ii, n in enumerate(nn):
            cs.append(nm.arange(n) * eps0 + rcmin[ii])

        x0 = nm.empty((int(nm.prod(nn)), nn.shape[0]), dtype=nm.float64)
        for ii, icoor in enumerate(nm.meshgrid(*cs, indexing='ij')):
            x0[:, ii] = icoor.flatten()

        mesh = pb.domain.mesh
        coors, conn, outs, ndoffset = [], [], [], 0
        # Recover region
        mic_coors = (mesh.coors - mesh.get_bounding_box()[0, :]) * eps0
        evfield = eval_var.field

        output('recovering microstructures...')
        tt = time.clock()
        output_fun = output.output_function
        output_level = output.level

        for ii, c0 in enumerate(x0):
            local_macro = {'eps0': eps0}
            local_coors = mic_coors + c0
            # Inside recovery region?
            v = nm.ones((evfield.region.entities[0].shape[0], 1))
            v[evfield.vertex_remap[region.entities[0]]] = 0
            no = nm.sum(v)
            aux = evfield.evaluate_at(local_coors, v)
            if (nm.sum(aux) / no) > 1e-3:
                continue

            output.level = output_level
            output('micro: %d' % ii)

            for k, v in six.iteritems(nodal_values):
                local_macro[k] = evfield.evaluate_at(local_coors, v)
            for k, v in six.iteritems(const_values):
                local_macro[k] = v

            output.set_output(quiet=not(verbose))
            outs.append(recovery_hook(pb, corrs, local_macro))
            output.output_function = output_fun
            coors.append(local_coors)
            conn.append(mesh.get_conn(mesh.descs[0]) + ndoffset)
            ndoffset += mesh.n_nod

    output('...done in %.2f s' % (time.clock() - tt))

    # Collect output variables
    outvars = {}
    for k, v in six.iteritems(outs[0]):
        if v.var_name in outvars:
            outvars[v.var_name].append(k)
        else:
            outvars[v.var_name] = [k]

    # Split output by variables/regions
    pvs = pb.create_variables(outvars.keys())
    outregs = {k: pvs[k].field.region.get_entities(-1) for k in outvars.keys()}
    nrve = len(coors)
    coors = nm.vstack(coors)
    ngroups = nm.tile(mesh.cmesh.vertex_groups.squeeze(), (nrve,))
    conn = nm.vstack(conn)
    cgroups = nm.tile(mesh.cmesh.cell_groups.squeeze(), (nrve,))

    # Get region mesh and data
    for k, cidxs in six.iteritems(outregs):
        gcidxs = nm.hstack([cidxs + mesh.n_el * ii for ii in range(nrve)])
        rconn = conn[gcidxs]
        remap = -nm.ones((coors.shape[0],), dtype=nm.int32)
        remap[rconn] = 1
        vidxs = nm.where(remap > 0)[0]
        remap[vidxs] = nm.arange(len(vidxs))
        rconn = remap[rconn]
        rcoors = coors[vidxs, :]

        out = {}
        for ifield in outvars[k]:
            data = [outs[ii][ifield].data for ii in range(nrve)]
            out[ifield] = Struct(name='output_data',
                                 mode=outs[0][ifield].mode,
                                 dofs=None,
                                 var_name=k,
                                 data=nm.vstack(data))

        micro_name = pb.get_output_name(extra='recovered%s_%s'
                                        % (recovery_file_tag, k))
        filename = op.join(output_dir, op.basename(micro_name))
        mesh_out = Mesh.from_data('recovery_%s' % k, rcoors, ngroups[vidxs],
                                  [rconn], [cgroups[gcidxs]], [mesh.descs[0]])
        mesh_out.write(filename, io='auto', out=out)
Example #36
0
File: ls.py Project: LeiDai/sfepy
    def __init__(self, conf, problem, **kwargs):
        from sfepy.discrete.state import State
        from sfepy.discrete import Problem
        from sfepy.base.conf import ProblemConf, get_standard_keywords
        from scipy.spatial import cKDTree as KDTree

        ScipyDirect.__init__(self, conf, **kwargs)

        # init subproblems
        pb_vars = problem.get_variables()
        # get "master" DofInfo and last index
        pb_adi_indx = problem.equations.variables.adi.indx
        self.adi_indx = pb_adi_indx.copy()
        last_indx = -1
        for ii in self.adi_indx.itervalues():
            last_indx = nm.max([last_indx, ii.stop])

        # coupling variables
        self.cvars_to_pb = {}
        for jj in conf.coupling_variables:
            self.cvars_to_pb[jj] = [None, None]
            if jj in pb_vars.names:
                if pb_vars[jj].dual_var_name is not None:
                    self.cvars_to_pb[jj][0] = -1

                else:
                    self.cvars_to_pb[jj][1] = -1

        # init subproblems
        self.subpb = []
        required, other = get_standard_keywords()
        master_prefix = output.get_output_prefix()
        for ii, ifname in enumerate(conf.others):
            sub_prefix = master_prefix[:-1] + '-sub%d:' % (ii + 1)
            output.set_output_prefix(sub_prefix)
            kwargs['master_problem'] = problem
            confi = ProblemConf.from_file(ifname, required, other,
                                          define_args=kwargs)
            pbi = Problem.from_conf(confi, init_equations=True)
            sti = State(pbi.equations.variables)
            pbi.equations.set_data(None, ignore_unknown=True)
            pbi.time_update()
            pbi.update_materials()
            sti.apply_ebc()
            pbi_vars = pbi.get_variables()
            output.set_output_prefix(master_prefix)
            self.subpb.append([pbi, sti, None])

            # append "slave" DofInfo
            for jj in pbi_vars.names:
                if not(pbi_vars[jj].is_state()):
                    continue

                didx = pbi.equations.variables.adi.indx[jj]
                ndof = didx.stop - didx.start
                if jj in self.adi_indx:
                    if ndof != \
                      (self.adi_indx[jj].stop - self.adi_indx[jj].start):
                        raise ValueError('DOFs do not match!')

                else:
                    self.adi_indx.update({
                        jj: slice(last_indx, last_indx + ndof, None)})
                    last_indx += ndof

            for jj in conf.coupling_variables:
                if jj in pbi_vars.names:
                    if pbi_vars[jj].dual_var_name is not None:
                        self.cvars_to_pb[jj][0] = ii

                    else:
                        self.cvars_to_pb[jj][1] = ii

        self.subpb.append([problem, None, None])

        self.cvars_to_pb_map = {}
        for varname, pbs in self.cvars_to_pb.iteritems():
            # match field nodes
            coors = []
            for ii in pbs:
                pbi = self.subpb[ii][0]
                pbi_vars = pbi.get_variables()
                fcoors = pbi_vars[varname].field.coors
                dc = nm.abs(nm.max(fcoors, axis=0)\
                            - nm.min(fcoors, axis=0))
                ax = nm.where(dc > 1e-9)[0]
                coors.append(fcoors[:,ax])

            if len(coors[0]) != len(coors[1]):
                raise ValueError('number of nodes does not match!')

            kdtree = KDTree(coors[0])
            map_12 = kdtree.query(coors[1])[1]

            pbi1 = self.subpb[pbs[0]][0]
            pbi1_vars = pbi1.get_variables()
            eq_map_1 = pbi1_vars[varname].eq_map

            pbi2 = self.subpb[pbs[1]][0]
            pbi2_vars = pbi2.get_variables()
            eq_map_2 = pbi2_vars[varname].eq_map

            dpn = eq_map_2.dpn
            nnd = map_12.shape[0]

            map_12_nd = nm.zeros((nnd * dpn,), dtype=nm.int32)
            if dpn > 1:
                for ii in range(dpn):
                    map_12_nd[ii::dpn] = map_12 * dpn + ii
            else:
                map_12_nd = map_12

            idx = nm.where(eq_map_2.eq >= 0)[0]
            self.cvars_to_pb_map[varname] = eq_map_1.eq[map_12[idx]]
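
A minimal sketch of the solver configuration that would reach the __init__() above, assuming this class is registered as 'ls.cm_pb'; the sub-problem files and the coupling variable name are hypothetical:

solvers = {
    'ls': ('ls.cm_pb', {
        # sub-problem description files coupled to the master problem
        'others': ['sub_problem_1.py', 'sub_problem_2.py'],
        # variables shared (via matched field nodes) between the sub-problems
        'coupling_variables': ['p'],
    }),
    'newton': ('nls.newton', {'i_max': 1, 'eps_a': 1e-10}),
}
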
Example #37
0
def main():
    import os
    from sfepy.base.base import spause, output
    from sfepy.base.conf import ProblemConf, get_standard_keywords
    from sfepy.discrete import Problem
    import sfepy.homogenization.coefs_base as cb

    parser = OptionParser(usage=usage, version='%prog')
    parser.add_option('-n', '--no-pauses',
                      action="store_true", dest='no_pauses',
                      default=False, help=help['no_pauses'])
    options, args = parser.parse_args()

    if options.no_pauses:
        def spause(*args):
            output(*args)

    nm.set_printoptions(precision=3)

    spause(r""">>>
First, this file will be read in place of an input
(problem description) file.
Press 'q' to quit the example, press any other key to continue...""")
    required, other = get_standard_keywords()
    required.remove('equations')
    # Use this file as the input file.
    conf = ProblemConf.from_file(__file__, required, other)
    print(list(conf.to_dict().keys()))
    spause(r""">>>
...the read input as a dict (keys only for brevity).
['q'/other key to quit/continue...]""")

    spause(r""">>>
Now the input will be used to create a Problem instance.
['q'/other key to quit/continue...]""")
    problem = Problem.from_conf(conf, init_equations=False)
    # The homogenization mini-apps need the output_dir.
    output_dir = ''
    problem.output_dir = output_dir
    print(problem)
    spause(r""">>>
...the Problem instance.
['q'/other key to quit/continue...]""")

    spause(r""">>>
The homogenized elastic coefficient $E_{ijkl}$ is expressed
using $\Pi$ operators, computed now. In fact, those operators are permuted
coordinates of the mesh nodes.
['q'/other key to quit/continue...]""")
    req = conf.requirements['pis']
    mini_app = cb.ShapeDimDim('pis', problem, req)
    pis = mini_app()
    print(pis)
    spause(r""">>>
...the $\Pi$ operators.
['q'/other key to quit/continue...]""")

    spause(r""">>>
Next, $E_{ijkl}$ needs the so-called steady state correctors $\bar{\omega}^{rs}$,
computed now.
['q'/other key to quit/continue...]""")
    req = conf.requirements['corrs_rs']

    save_name = req.get('save_name', '')
    name = os.path.join(output_dir, save_name)

    mini_app = cb.CorrDimDim('steady rs correctors', problem, req)
    mini_app.setup_output(save_format='vtk',
                          file_per_var=False)
    corrs_rs = mini_app(data={'pis': pis})
    print(corrs_rs)
    spause(r""">>>
...the $\bar{\omega}^{rs}$ correctors.
The results are saved in: %s.%s

Try to display them with:

   python postproc.py %s.%s

['q'/other key to quit/continue...]""" % (2 * (name, problem.output_format)))

    spause(r""">>>
Then the volume of the domain is needed.
['q'/other key to quit/continue...]""")
    volume = problem.evaluate('d_volume.i3.Y(uc)')
    print(volume)

    spause(r""">>>
...the volume.
['q'/other key to quit/continue...]""")

    spause(r""">>>
Finally, $E_{ijkl}$ can be computed.
['q'/other key to quit/continue...]""")
    mini_app = cb.CoefSymSym('homogenized elastic tensor',
                             problem, conf.coefs['E'])
    c_e = mini_app(volume, data={'pis': pis, 'corrs_rs' : corrs_rs})
    print(r""">>>
The homogenized elastic coefficient $E_{ijkl}$, symmetric storage
with rows, columns in 11, 22, 12 ordering:""")
    print(c_e)