Example #1
    def evaluate(self, annotate=True):
        """ Computes the functional value by running the forward model. """

        log(INFO, 'Start evaluation of j')
        timer = Timer("j evaluation")

        farm = self.solver.problem.parameters.tidal_farm

        # Configure dolfin-adjoint
        adj_reset()
        parameters["adjoint"]["record_all"] = True

        # Solve the shallow water system and integrate the functional of
        # interest.
        final_only = (not self.solver.problem._is_transient or
                      self._problem_params.functional_final_time_only)
        self.time_integrator = TimeIntegrator(self.solver.problem,
                                              self._functional, final_only)

        for sol in self.solver.solve(annotate=annotate):
            self.time_integrator.add(sol["time"], sol["state"], sol["tf"],
                                     sol["is_final"])

        j = self.time_integrator.integrate()

        timer.stop()

        log(INFO, 'Runtime: %f s.' % timer.elapsed()[0])
        log(INFO, 'j = %e.' % float(j))

        return j
Example #2
    def evaluate(self, annotate=True):
        """ Computes the functional value by running the forward model. """

        log(INFO, 'Start evaluation of j')
        timer = Timer("j evaluation")

        farm = self.solver.problem.parameters.tidal_farm

        # Configure dolfin-adjoint
        adj_reset()
        parameters["adjoint"]["record_all"] = True

        # Solve the shallow water system and integrate the functional of
        # interest.
        final_only = (not self.solver.problem._is_transient
                      or self._problem_params.functional_final_time_only)
        self.time_integrator = TimeIntegrator(self.solver.problem,
                                              self._functional, final_only)

        for sol in self.solver.solve(annotate=annotate):
            self.time_integrator.add(sol["time"], sol["state"], sol["tf"],
                                     sol["is_final"])

        j = self.time_integrator.integrate()

        timer.stop()

        log(INFO, 'Runtime: %f s.' % timer.elapsed()[0])
        log(INFO, 'j = %e.' % float(j))

        return j
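All of these examples lean on the same dolfin Timer pattern. A minimal sketch of that pattern, assuming the dolfin API (a named Timer starts on construction, stop() returns the elapsed wall-clock time in seconds, and elapsed() returns a (wall, user, system) tuple, as used in Example #1):

from dolfin import Timer

t = Timer('my task')                    # construction starts the clock
s = sum(i * i for i in range(10**6))    # stand-in for the timed work
wall = t.stop()                         # stop; returns elapsed seconds
print('timed work took %g s' % wall)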
Example #3
    def __init__(self, grid, fields):
        timer = Timer('setup'); timer.start()
        mesh = fields[0].function_space().mesh()
        # Same meshes for all
        assert all(mesh.id() == f.function_space().mesh().id() for f in fields)

        # Locate each point
        limit = mesh.num_entities_global(mesh.topology().dim())
        bbox_tree = mesh.bounding_box_tree()

        npoints = np.prod(grid.ns)
        cells_for_x = [None]*npoints
        for i, x in enumerate(grid.points()):
            cell = bbox_tree.compute_first_entity_collision(Point(*x))
            if -1 < cell < limit:
                cells_for_x[i] = Cell(mesh, cell)
        assert not any(c is None for c in cells_for_x)

        # For each field, build a function which evaluates
        # it at all the points
        self.data = {}
        self.eval_fields = []
        for u in fields:
            # Alloc
            key = u.name()
            self.data[key] = [np.zeros(npoints) for _ in range(u.value_size())]
            # Attach the eval for u
            self.eval_fields.append(eval_u(u, grid.points(), cells_for_x, self.data[key]))
        info('Probe setup took %g' % timer.stop())
        self.grid = grid
        # Get values at construction
        self.update()
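The eval_u helper is not shown in this snippet. A hypothetical sketch of what it plausibly does, given how it is called above: it returns a callable that fills the per-component data arrays with the field's values at the grid points.

import numpy as np

def eval_u(u, points, cells, data):
    # Hypothetical sketch. u is a dolfin Function, points an iterable of
    # coordinates, cells the pre-located cells (unused in this sketch),
    # and data a list of arrays, one per value component of u.
    def evaluate():
        for i, x in enumerate(points):
            values = np.atleast_1d(u(*x))  # dolfin Functions are callable
            for j, v in enumerate(values):
                data[j][i] = v
        return True
    return evaluate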
Example #4
def python_timings(mesh, Nrange, read_matrix=False):
    '''Run across meshes solving lumped EVP and recording their sizes and exec time.'''
    # We know from the Julia version that this idea (lumping) works,
    # so we are only after timings here
    data = []

    if read_matrix:
        matrices = read_matrices(mesh, root='./jl_matrices')
    else:
        matrices = (get_1d_matrices(mesh, N) for N in Nrange)

    for A, M in matrices:
        row = [A.shape[0]]

        M0 = lump(M, -0.5)
        A0 = M0.dot(A.dot(M0))
        d, u = np.diagonal(A0, 0), np.diagonal(A0, 1)

        t = Timer('EVP')
        eigw, eigv = s3d_eig(d, u)
        row.append(t.stop())

        # Assembling the preconditioner
        t = Timer('ASSEMBLE')
        H = eigv.dot(np.diag(eigw**-0.5).dot(eigv.T))
        row.append(t.stop())

        # Action of preconditioner (matrix)
        x = np.random.rand(H.shape[1])
        t = Timer('ACTION')
        y = H*x
        row.append(t.stop())

        # The original GEVP
        t = Timer('GEVP')
        eigw, eigv = eigh(A, M)
        row.append(t.stop())

        print(row)
        data.append(row)

    return data
Example #5
def python_timings(mesh, Nrange, read_matrix=False):
    '''Run across meshes solving lumped EVP and recording their sizes and exec time.'''
    # We know from the Julia version that this idea (lumping) works,
    # so we are only after timings here
    data = []

    if read_matrix:
        matrices = read_matrices(mesh, root='./jl_matrices')
    else:
        matrices = (get_1d_matrices(mesh, N) for N in Nrange)

    for A, M in matrices:
        row = [A.shape[0]]

        M0 = lump(M, -0.5)
        A0 = M0.dot(A.dot(M0))
        d, u = np.diagonal(A0, 0), np.diagonal(A0, 1)

        t = Timer('EVP')
        eigw, eigv = s3d_eig(d, u)
        row.append(t.stop())

        # Assembling the preconditioner
        t = Timer('ASSEMBLE')
        H = eigv.dot(np.diag(eigw**-0.5).dot(eigv.T))
        row.append(t.stop())

        # Action of preconditioner (matrix)
        x = np.random.rand(H.shape[1])
        t = Timer('ACTION')
        y = H * x
        row.append(t.stop())

        # The original GEVP
        t = Timer('GEVP')
        eigw, eigv = eigh(A, M)
        row.append(t.stop())

        print(row)
        data.append(row)

    return data
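The lump helper is likewise not shown. The idea the comments refer to: lumping M into a diagonal matrix D of row sums lets D^(-1/2) A D^(-1/2) replace the generalized problem A u = lambda M u with a standard (here tridiagonal) one. A hypothetical sketch consistent with the calls above, assuming dense arrays:

import numpy as np

def lump(M, power=1.0):
    # Hypothetical sketch: collapse the mass matrix into a diagonal of
    # row sums raised to the given power (power=-0.5 gives D^(-1/2)).
    d = np.asarray(M).sum(axis=1)
    return np.diag(d**power)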
Example #6
    def derivative(self, forget=False, **kwargs):
        """ Computes the first derivative of the functional with respect to its
        controls by solving the adjoint equations. """

        log(INFO, 'Start evaluation of dj')
        timer = Timer("dj evaluation")

        self.functional = self.time_integrator.dolfin_adjoint_functional(self.solver.state)
        dj = compute_gradient(self.functional, self.controls, forget=forget, **kwargs)
        parameters["adjoint"]["stop_annotating"] = False

        log(INFO, "Runtime: " + str(timer.stop()) + " s")

        return enlisting.enlist(dj)
Example #7
    def derivative(self, forget=False, **kwargs):
        """ Computes the first derivative of the functional with respect to its
        controls by solving the adjoint equations. """

        log(INFO, 'Start evaluation of dj')
        timer = Timer("dj evaluation")

        if not hasattr(self, "time_integrator"):
            self.evaluate()
        self.functional = self.time_integrator.dolfin_adjoint_functional(
            self.solver.state)
        dj = compute_gradient(self.functional,
                              self.controls,
                              forget=forget,
                              **kwargs)
        parameters["adjoint"]["stop_annotating"] = False

        log(INFO, "Runtime: " + str(timer.stop()) + " s")

        return enlisting.enlist(dj)
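Adjoint gradients like dj are easy to get wrong, so a finite-difference sanity check is common before trusting them. A self-contained sketch (the helper and its arguments are hypothetical, not part of dolfin-adjoint), for a scalar functional j over an array of controls m:

import numpy as np

def fd_check(j, dj, m, h=1e-6):
    # Compare an adjoint gradient dj(m) against central differences of
    # the functional j, one control direction at a time.
    g = np.asarray(dj(m))
    for i in range(len(m)):
        e = np.zeros_like(m)
        e[i] = h
        fd = (j(m + e) - j(m - e)) / (2 * h)
        print(i, g[i], fd)  # the two columns should agree to O(h^2)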
Example #8
    def update(self):
        '''Evaluate now (with the fields as they are at the moment)'''
        timer = Timer('update'); timer.start()
        status = [f() for f in self.eval_fields]
        info('Probe update took %g' % timer.stop())
        return all(status)
Example #9
    def solve(self):
        """
        Solve optimal control problem

        """

        msg = "You need to build the problem before solving it"
        assert hasattr(self, "opt_type"), msg

        module, method = self.opt_type.split("_")

        logger.info("\n" + "Starting optimization".center(100, "-"))
        logger.info(
            "Scale: {}, \nDerivative Scale: {}".format(
                self.rd.scale, self.rd.derivative_scale
            )
        )
        logger.info(
            "Tolerace: {}, \nMaximum iterations: {}\n".format(self.tol, self.max_iter)
        )

        t = Timer()
        t.start()

        if self.oneD:

            res = minimize_1d(self.rd, self.x[0], **self.options)
            x = res["x"]

        else:

            if module == "scipy":
                res = scipy_minimize(self.rd, self.x, **self.options)
                x = res["x"]

            elif module == "pyOpt":

                obj, x, d = self.problem(**self.options)

            elif module == "moola":

                sol = self.solver.solve()
                x = sol["control"].data

            elif module == "ipopt":
                x = self.solver.solve(self.x)

            else:
                msg = (
                    "Unknown optimization type {}. "
                    "Define the optimization type as 'module_method', "
                    "where module is e.g. scipy or pyOpt and method is "
                    "e.g. slsqp."
                )
                raise ValueError(msg.format(self.opt_type))

        run_time = t.stop()

        opt_result = {}

        opt_result["x"] = x
        opt_result["nfev"] = self.rd.iter
        opt_result["nit"] = self.rd.iter
        opt_result["njev"] = self.rd.nr_der_calls
        opt_result["ncrash"] = self.rd.nr_crashes
        opt_result["run_time"] = run_time
        opt_result["controls"] = self.rd.controls_lst
        opt_result["func_vals"] = self.rd.func_values_lst
        opt_result["forward_times"] = self.rd.forward_times
        opt_result["backward_times"] = self.rd.backward_times
        opt_result["grad_norm"] = self.rd.grad_norm

        return self.rd, opt_result
Example #10
    print(dx, 'vs', x.max(axis=0) - x.min(axis=0), xmin, x.min(axis=0))

    data = {}
    cell_dim = tile.topology().dim()
    facet_dim = cell_dim - 1

    if args.facet_tags:
        data = load_data(tile, (h5, args.facet_tags), facet_dim, data)

    if args.cell_tags:
        data = load_data(tile, (h5, args.cell_tags), cell_dim, data)

    t = Timer('tile')
    mesh, mesh_data = TileMesh(tile, shape, mesh_data=data)
    info('\nTiling took %g s; nvertices %d, nfacets %d, ncells %d' %
         (t.stop(), mesh.num_vertices(), mesh.init(2), mesh.num_cells()))

    x_ = mesh.coordinates()
    print('Final mesh size', x_.min(axis=0), x_.max(axis=0) - x_.min(axis=0))

    # Saving
    t = Timer('save')
    h5_file = '%s_%d_%d.h5' % (root, shape[0], shape[1])

    out = HDF5File(mesh.mpi_comm(), h5_file, 'w')
    out.write(mesh, 'mesh')

    tt = Timer('data')
    # To mesh functions
    if mesh_data:
        mfs = mf_from_data(mesh, mesh_data)
Example #11
    tile = Mesh()
    h5.read(tile, 'mesh', False)

    data = {}
    cell_dim = tile.topology().dim()
    facet_dim = cell_dim - 1

    if args.facet_tags: 
        data = load_data(tile, h5, args.facet_tags, facet_dim, data)
    
    if args.cell_tags: 
        data = load_data(tile, h5, args.cell_tags, cell_dim, data)

    t = Timer('tile')
    mesh, mesh_data = TileMesh(tile, shape, mesh_data=data)
    info('\nTiling took %g s; nvertices %d, ncells %d' % (t.stop(),
                                                          mesh.num_vertices(),
                                                          mesh.num_cells()))

    # Saving
    t = Timer('save')
    h5_file = '%s_%d_%d.h5' % (root, shape[0], shape[1])
        
    out = HDF5File(mesh.mpi_comm(), h5_file, 'w')
    out.write(mesh, 'mesh')
    
    tt = Timer('data')
    # To mesh functions
    if mesh_data:
        mfs = mf_from_data(mesh, mesh_data)
Example #12
            # Store field
            if step % store_step == 0 or step == 1:
                output_field.write(psi_h, t)

            # Avoid getting accused of cheating, compute
            # L2 error and mass error at half rotation
            if int(np.floor(2 * step - num_steps)) == 0:
                psi0_expression.t = step * float(dt)
                l2_error_half = sqrt(
                    assemble(
                        dot(psi_h - psi0_expression, psi_h - psi0_expression) *
                        dx))
                area_half = assemble(psi_h * dx)
            del t1
        timer.stop()

        # Compute error (we should accurately recover initial condition)
        psi0_expression.t = step * float(dt)
        l2_error = sqrt(
            assemble(
                dot(psi_h - psi0_expression, psi_h - psi0_expression) * dx))

        # The global mass conservation error should be zero
        area_end = assemble(psi_h * dx)

        if comm.Get_rank() == 0:
            print("l2 error " + str(l2_error))

            # Store in the error table
            num_cells_t = mesh.num_entities_global(2)
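The hand-rolled L2 error above, sqrt(assemble(dot(e, e) * dx)), can be cross-checked against dolfin's built-in errornorm; a short sketch, assuming psi0_expression and psi_h as in the snippet (errornorm interpolates into a higher-degree space, so the two agree only up to quadrature):

from dolfin import errornorm

l2_error = errornorm(psi0_expression, psi_h, 'L2')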
Example #13
def TileMesh(tile, shape, mesh_data={}, TOL=1E-9):
    '''
    [tile tile;
     tile tile;
     tile tile;
     tile tile]

    The shape is an ntuple describing the number of pieces put next 
    to each other in the i-th axis. mesh_data : (tdim, tag) -> [entities] 
    is the way to encode mesh data of the tile.
    '''
    # Sanity checks for gluing
    gdim = tile.geometry().dim()
    assert len(shape) <= gdim, (shape, gdim)
    # While evolve is general, mesh writing is limited to simplices only
    # (FIXME), so we bail out early
    assert str(tile.ufl_cell()) in ('interval', 'triangle', 'tetrahedron')

    t = Timer('evolve')
    # Do nothing
    if all(v == 1 for v in shape): return tile, mesh_data

    # We want to evolve cells, vertices of the mesh using geometry information
    # and periodicity info
    x = tile.coordinates()
    min_x = np.min(x, axis=0)
    max_x = np.max(x, axis=0)
    shifts = max_x - min_x
    
    shifts_x = []  # Geometry
    vertex_mappings = []  # Periodicity
    # Compute geometrical shift for EVERY direction:
    for axis in range(len(shape)):
        shift = shifts[axis]
        # Vector to shift cell vertices
        shift_x = np.zeros(gdim); shift_x[axis] = shift
        shifts_x.append(shift_x)

        # Compute periodicity in the vertices
        to_master = lambda x, shift=shift_x: x - shift
        # Mapping facets
        master = CompiledSubDomain('near(x[i], A, tol)', i=axis, A=min_x[axis], tol=TOL)
        slave = CompiledSubDomain('near(x[i], A, tol)', i=axis, A=max_x[axis], tol=TOL)

        error, vertex_mapping = compute_vertex_periodicity(tile, master, slave, to_master)
        # Fail when the extended direction is not periodic
        assert error < 10*TOL, error
        
        vertex_mappings.append(vertex_mapping)
    # The final piece of information is cells
    cells = np.empty((tile.num_cells(), tile.ufl_cell().num_vertices()), dtype='uintp')
    cells.ravel()[:] = tile.cells().flat
        
    # Evolve the mesh by tiling along the last direction in shape
    while shape:
        x, cells, vertex_mappings, shape = \
            evolve(x, cells, vertex_mappings, shape, shifts_x, mesh_data=mesh_data)
        
    info('\tEvolve took %g s ' % t.stop())

    # We evolve data but the mesh function is made out of it outside
    mesh = make_mesh(x, cells, tdim=tile.topology().dim(), gdim=gdim)

    return mesh, mesh_data
Example #14
    def solve(self):
        """
        Solve optimal control problem

        """

        # msg = "You need to build the problem before solving it"
        # assert hasattr(self, "opt_type"), msg

        module = self.parameters["opt_lib"]

        logger.info("Starting optimization")
        # logger.info(
        #     "Scale: {}, \nDerivative Scale: {}".format(
        #         self.J.scale, self.J.derivative_scale
        #     )
        # )
        # logger.info(
        #     "Tolerace: {}, \nMaximum iterations: {}\n".format(self.tol, self.max_iter)
        # )

        t = Timer()
        t.start()

        if self.parameters["nvar"] == 1:

            res = minimize_1d(self.J, self.x[0], **self.options)
            x = res["x"]

        else:

            if module == "scipy":
                res = scipy_minimize(self.J, self.x, **self.options)
                x = res["x"]

            elif module == "pyOpt":
                obj, x, d = self.problem(**self.options)

            elif module == "moola":
                sol = self.solver.solve()
                x = sol["control"].data

            elif module == "ipopt":
                x = self.solver.solve(self.x)

            else:
                msg = (
                    "Unknown optimization type {}. "
                    "Define the optimization type as 'module_method', "
                    "where module is e.g. scipy or pyOpt and method is "
                    "e.g. slsqp."
                )
                raise ValueError(msg.format(module))

        run_time = t.stop()
        # opt_result = {}
        # opt_result['initial_contorl'] = self.initial_control
        # opt_result["optimal_control"] = x
        # opt_result["run_time"] = run_time
        # opt_result["nfev"] = self.J.iter
        # opt_result["nit"] = self.J.iter
        # opt_result["njev"] = self.J.nr_der_calls
        # opt_result["ncrash"] = self.J.nr_crashes
        # opt_result["controls"] = self.J.controls_lst
        # opt_result["func_vals"] = self.J.func_values_lst
        # opt_result["forwaJ_times"] = self.J.forwaJ_times
        # opt_result["backwaJ_times"] = self.J.backwaJ_times
        # opt_result["grad_norm"] = self.J.grad_norm
        return optimization_results(
            initial_control=self.initial_control, optimal_control=x, run_time=run_time
        )
Example #15
def test(path, type='mf'):
    '''Evolve the tile in (n, n) pattern checking volume/surface properties'''

    comm = mpi_comm_world()
    h5 = HDF5File(comm, path, 'r')
    tile = Mesh()
    h5.read(tile, 'mesh', False)

    init_container = lambda type, dim: (
        MeshFunction('size_t', tile, dim, 0)
        if type == 'mf' else MeshValueCollection('size_t', tile, dim))

    for n in (2, 4):
        data = {}
        checks = {}
        for dim, name in zip((2, 3), ('surfaces', 'volumes')):
            # Get the collection
            collection = init_container(type, dim)
            h5.read(collection, name)

            if type == 'mvc': collection = as_meshf(collection)

            # Data to evolve
            tile.init(dim, 0)
            e2v = tile.topology()(dim, 0)
            # Only want to evolve tag 1 (interfaces) for the facets.
            data[(dim, 1)] = np.array(
                [e2v(e.index()) for e in SubsetIterator(collection, 1)],
                dtype='uintp')

            if dim == 2:
                check = lambda m, f: assemble(
                    FacetArea(m) * ds(domain=m,
                                      subdomain_data=f,
                                      subdomain_id=1) + avg(FacetArea(m)) *
                    dS(domain=m, subdomain_data=f, subdomain_id=1))
            else:
                check = lambda m, f: assemble(
                    CellVolume(m) * dx(
                        domain=m, subdomain_data=f, subdomain_id=1))

            checks[
                dim] = lambda m, f, t=tile, c=collection, n=n, check=check: abs(
                    check(m, f) - n**2 * check(t, c)) / (n**2 * check(t, c))

        t = Timer('x')
        mesh, mesh_data = TileMesh(tile, (n, n), mesh_data=data)
        info('\tTiling took %g s. Nvertices %d, ncells %d\n' %
             (t.stop(), mesh.num_vertices(), mesh.num_cells()))

        foos = mf_from_data(mesh, mesh_data)
        # Mesh Functions
        from_mf = np.array([checks[dim](mesh, foos[dim]) for dim in (2, 3)])

        mvcs = mvc_from_data(mesh, mesh_data)
        foos = as_meshf(mvcs)
        # Mesh ValueCollections
        from_mvc = np.array([checks[dim](mesh, foos[dim]) for dim in (2, 3)])

        assert np.linalg.norm(from_mf - from_mvc) < 1E-13
        # I ignore shared facets so there is bound to be some error in facets
        # Volume should match well
        print(from_mf)
Example #16
    h5.read(mesh, 'mesh', False)

    surfaces = MeshFunction('size_t', mesh, mesh.topology().dim()-1, 0)
    h5.read(surfaces, 'surfaces')

    volumes = MeshFunction('size_t', mesh, mesh.topology().dim(), 0)
    h5.read(volumes, 'volumes')
    
    h5.close()

    # Remove layer
    tt = Timer('cleanup')
    ncells_x, ncells_y = args.m, args.n
    surfaces, volumes = deactivate_cells(surfaces, volumes, ncells_x, ncells_y)

    info('Removing boundary took %g s' % tt.stop())

    # Write 
    tt = Timer('write')
    # Completely new
    if not args.in_place:
        h5_file = ''.join(['_'.join([root, 'noBdry']), ext])

        h5 = HDF5File(get_comm_world(), h5_file, 'w')
        # FIXME: replacing
        h5.write(mesh, 'mesh')
        h5.write(surfaces, 'surfaces')
        h5.write(volumes, 'volumes')
        h5.close()
    else:
        import h5py
Example #17
def main(module_name, ncases, params, petsc_params):
    '''
    Run the test case in module with ncases. Optionally store results
    in save_dir. For some modules there are multiple choices of
    preconditioners (selected via which).
    '''

    # Unpack
    for k, v in params.items():
        exec(k + '=v', locals())

    RED = '\033[1;37;31m%s\033[0m'
    print(RED % ('\tRunning %s' % module_name))

    module = __import__(module_name)  # no importlib in python2.7

    # Setup the MMS case
    u_true, rhs_data = module.setup_mms(eps)

    # Setup the convergence monitor
    if log:
        params = [('solver', solver), ('precond', str(precond)),
                  ('eps', str(eps))]

        path = '_'.join([module_name] + ['%s=%s' % pv for pv in params])
        path = os.path.join(save_dir if save_dir else '.', path)
        path = '.'.join([path, 'txt'])
    else:
        path = ''

    memory, residuals = [], []
    monitor = module.setup_error_monitor(u_true, memory, path=path)

    # Sometimes it is useful to transform the solution before computing
    # the error, e.g. to consider subdomains
    if hasattr(module, 'setup_transform'):
        # NOTE: transform takes two args: the case and the currently
        # computed solution
        transform = module.setup_transform
    else:
        transform = lambda i, x: x

    print('=' * 79)
    print('\t\t\tProblem eps = %g' % eps)
    print('=' * 79)
    for i in ncases:
        a, L, W = module.setup_problem(i, rhs_data, eps=eps)

        # Assemble blocks
        t = Timer('assembly')
        t.start()
        AA, bb = map(ii_assemble, (a, L))
        print('\tAssembled blocks in %g s' % t.stop())

        wh = ii_Function(W)

        if solver == 'direct':
            # Turn into a (monolithic) PETScMatrix/Vector
            t = Timer('conversion')
            t.start()
            AAm, bbm = map(ii_convert, (AA, bb))
            print('\tConversion to PETScMatrix/Vector took %g s' % t.stop())

            t = Timer('solve')
            t.start()
            LUSolver('umfpack').solve(AAm, wh.vector(), bbm)
            print('\tSolver took %g s' % t.stop())

            niters = 1
        else:
            # Here we define a Krylov solver using PETSc
            BB = module.setup_preconditioner(W, precond, eps=eps)
            ## AA and BB as block_mat
            ksp = PETSc.KSP().create()

            # Default is minres
            if '-ksp_type' not in petsc_params:
                petsc_params['-ksp_type'] = 'minres'

            opts = PETSc.Options()
            for key, value in petsc_params.items():
                opts.setValue(key, None if value == 'none' else value)

            ksp.setOperators(ii_PETScOperator(AA))

            ksp.setNormType(PETSc.KSP.NormType.NORM_PRECONDITIONED)
            # ksp.setTolerances(rtol=1E-6, atol=None, divtol=None, max_it=300)
            ksp.setConvergenceHistory()
            # We attach the wrapped preconditioner defined by the module
            ksp.setPC(ii_PETScPreconditioner(BB, ksp))

            ksp.setFromOptions()

            print(ksp.getTolerances())

            # Want the iterations to start from random
            wh.block_vec().randomize()
            # Solve, note the past object must be PETSc.Vec
            t = Timer('solve')
            t.start()
            ksp.solve(as_petsc_nest(bb), wh.petsc_vec())
            print('\tSolver took %g s' % t.stop())

            niters = ksp.getIterationNumber()

            residuals.append(ksp.getConvergenceHistory())

        # Let's check the final size of the residual
        r_norm = (bb - AA * wh.block_vec()).norm()

        # Convergence?
        monitor.send((transform(i, wh), niters, r_norm))

    # Only send the final
    if save_dir:
        path = os.path.join(save_dir, module_name)
        for i, wh_i in enumerate(wh):
            # Renaming to make it easier to save state in VisIt/ParaView
            wh_i.rename('u', str(i))

            File('%s_%d.pvd' % (path, i)) << wh_i

    # Plot relative residual norm
    if plot:
        plt.figure()
        [
            plt.semilogy(res / res[0], label=str(i))
            for i, res in enumerate(residuals, 1)
        ]
        plt.legend(loc='best')
        plt.show()
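The iterative branch drives PETSc entirely through the options database; a minimal petsc4py sketch of that mechanism, detached from any particular problem:

from petsc4py import PETSc

opts = PETSc.Options()
opts.setValue('-ksp_type', 'minres')   # the same default as above
opts.setValue('-ksp_monitor', None)    # flag-style options take None

ksp = PETSc.KSP().create()
ksp.setFromOptions()                   # the solver picks the options up here
print(ksp.getType())                   # 'minres'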
Example #18
data = MeshFunction('int', Mesh(path), 'widths.xml.gz')

out = subprocess.check_output(['gmsh', '--version'], stderr=subprocess.STDOUT)
assert out.split('.')[0] == '3', 'Gmsh 3+ is required'

ccall = 'gmsh -3 -optimize %s' % geo
subprocess.call(ccall, shell=True)

# Convert
xml_file = 'vasc_GMSH.xml'
msh_file = 'vacs_GMSH.msh'
convert2xml(msh_file, xml_file)

# Throw away the line function as it is expensive to store all the edges
meshXd = Mesh(xml_file)
# Make sure that the assumption of correspondence hold
assert np.linalg.norm(mesh.coordinates() -
                      meshXd.coordinates()[:mesh.num_vertices()]) < 1E-13

line_f = MeshFunction('size_t', meshXd, 'vasc_GMSH_edge_region.xml')

# Just a list of edges which are 1
tag_file = 'vasc_GMSH_vessel_tags.txt'
np.savetxt(tag_file, [edge.index() for edge in SubsetIterator(line_f, 1)])

# Transfer is not done here because I don't want to create an embedded
# mesh etc.
dt = timer.stop()
print('Done in %g s' % dt, 'look for', xml_file, 'and', tag_file)
Example #19
def main(module_name, ncases, params, petsc_params):
    '''
    Run the test case in module with ncases. Optionally store results
    in save_dir. For some modules there are multiple choices of
    preconditioners (selected via which).
    '''
    
    # Unpack
    for k, v in params.items(): exec(k + '=v', locals())

    RED = '\033[1;37;31m%s\033[0m'
    print(RED % ('\tRunning %s with preconditioner %d' % (module_name, precond)))

    module = __import__(module_name)  # no importlib in python2.7

    # Setup the MMS case
    u_true, rhs_data = module.setup_mms(eps)

    # Setup the convergence monitor
    if log:
        params = [('solver', solver), ('precond', str(precond)), ('eps', str(eps))]
        
        path = '_'.join([module_name] + ['%s=%s' % pv for pv in params])
        path = os.path.join(save_dir if save_dir else '.', path)
        path = '.'.join([path, 'txt'])
    else:
        path = ''

    memory, residuals = [], []
    monitor = module.setup_error_monitor(u_true, memory, path=path)

    # Sometimes it is useful to transform the solution before computing
    # the error, e.g. to consider subdomains
    if hasattr(module, 'setup_transform'):
        # NOTE: transform takes two args: the case and the currently
        # computed solution
        transform = module.setup_transform
    else:
        transform = lambda i, x: x

    print('=' * 79)
    print('\t\t\tProblem eps = %r' % eps)
    print('=' * 79)
    for i in ncases:
        a, L, W = module.setup_problem(i, rhs_data, eps=eps)
        
        # Assemble blocks
        t = Timer('assembly'); t.start()
        AA, bb = map(ii_assemble, (a, L))
        print('\tAssembled blocks in %g s' % t.stop())

        # Check the symmetry
        wh = ii_Function(W)

        assert (AA*wh.block_vec() - (AA.T)*wh.block_vec()).norm() < 1E-10

        if solver == 'direct':
            # Turn into a (monolithic) PETScMatrix/Vector
            t = Timer('conversion'); t.start()        
            AAm, bbm = map(ii_convert, (AA, bb))
            print('\tConversion to PETScMatrix/Vector took %g s' % t.stop())

            t = Timer('solve'); t.start()
            LUSolver('umfpack').solve(AAm, wh.vector(), bbm)
            print('\tSolver took %g s' % t.stop())

            niters = 1

        if solver == 'iterative':
            # Here we define a Krylov solver using PETSc
            BB = module.setup_preconditioner(W, precond, eps=eps)

            ## AA and BB as block_mat
            ksp = PETSc.KSP().create()

            # Default is minres
            if '-ksp_type' not in petsc_params: petsc_params['-ksp_type'] = 'minres'
            
            opts = PETSc.Options()
            for key, value in petsc_params.items():
                opts.setValue(key, None if value == 'none' else value)

            ksp.setOperators(ii_PETScOperator(AA))

            ksp.setNormType(PETSc.KSP.NormType.NORM_PRECONDITIONED)
            # ksp.setTolerances(rtol=1E-6, atol=None, divtol=None, max_it=300)
            ksp.setConvergenceHistory()
            # We attach the wrapped preconditioner defined by the module
            ksp.setPC(ii_PETScPreconditioner(BB, ksp))
            
            ksp.setFromOptions()
            
            print(ksp.getTolerances())
            
            # Want the iterations to start from random
            wh.block_vec().randomize()

            # Solve, note the past object must be PETSc.Vec
            t = Timer('solve'); t.start()            
            ksp.solve(as_petsc_nest(bb), wh.petsc_vec())
            print('\tSolver took %g s' % t.stop())

            niters = ksp.getIterationNumber()

            residuals.append(ksp.getConvergenceHistory())
            
        # Let's check the final size of the residual
        r_norm = (bb - AA*wh.block_vec()).norm()
        # Convergence?
        monitor.send((transform(i, wh), W, niters, r_norm))
        
    # Only send the final
    if save_dir:
        path = os.path.join(save_dir, module_name)
        for i, wh_i in enumerate(wh):
            # Renaming to make it easier to save state in VisIt/ParaView
            wh_i.rename('u', str(i))
            
            File('%s_%d.pvd' % (path, i)) << wh_i

    # Plot relative residual norm
    if plot:
        plt.figure()
        [plt.semilogy(res/res[0], label=str(i)) for i, res in enumerate(residuals, 1)]
        plt.legend(loc='best')
        plt.show()
Example #20
    def get_bcs(self, weak_dof_values=None):
        """
        Get bc type and bc value for each dof in the function space.
        If a weak_dof_values array is given then this is used for Dirichlet
        BCs instead of the strong BCs given by the BC function (these will
        typically differ slightly)
        """
        sim = self.simulation
        im = self.function_space.dofmap().index_map()
        num_owned_dofs = im.size(im.MapSize.OWNED)
        boundary_dof_type = numpy.zeros(num_owned_dofs, numpy.intc)
        boundary_dof_value = numpy.zeros(num_owned_dofs, float)

        if not self.active:
            return boundary_dof_type, boundary_dof_value

        # This is potentially slow, so we time this code
        timer = Timer("Ocellaris get slope limiter boundary conditions")

        # Collect BCs - field name for u0 can be u (FreeSlip) and u0 (CppCodedValue)
        dirichlet = {}
        neumann = {}
        robin = {}
        slip = {}
        for field_name in self.field_names:
            # Collect Dirichlet BCs for this field
            for bc in sim.data['dirichlet_bcs'].get(field_name, []):
                region_number = bc.subdomain_id - 1
                dirichlet[region_number] = bc

            # Collect Neumann BCs for this field
            for bc in sim.data['neumann_bcs'].get(field_name, []):
                region_number = bc.subdomain_id - 1
                neumann[region_number] = bc

            # Collect Robin BCs for this field
            for bc in sim.data['robin_bcs'].get(field_name, []):
                region_number = bc.subdomain_id - 1
                robin[region_number] = bc

            # Collect Slip BCs for this field
            for bc in sim.data['slip_bcs'].get(field_name, []):
                region_number = bc.subdomain_id - 1
                slip[region_number] = bc

        fname = ', '.join(self.field_names)
        regions = sim.data['boundary']
        for region_number, dofs in self.region_dofs.items():
            boundary_region = regions[region_number]

            # Get the BC object
            if region_number in dirichlet:
                bc_type = self.BC_TYPE_DIRICHLET
                value = dirichlet[region_number].func()
            elif region_number in neumann:
                bc_type = self.BC_TYPE_NEUMANN
                value = neumann[region_number].func()
            elif region_number in robin:
                bc_type = self.BC_TYPE_ROBIN
                value = robin[region_number].dfunc()
            elif region_number in slip:
                value = None
                for dof in dofs:
                    boundary_dof_type[dof] = self.BC_TYPE_OTHER
                continue
            else:
                self._warn(
                    'WARNING: Slope limiter found no BC for field %s '
                    'in region %s' % (fname, boundary_region.name)
                )
                continue

            if bc_type == self.BC_TYPE_DIRICHLET and weak_dof_values is not None:
                # Take values from a field which has (weak) Dirichlet BCs applied
                for dof in dofs:
                    boundary_dof_type[dof] = bc_type
                    boundary_dof_value[dof] = weak_dof_values[dof]

            elif isinstance(value, Constant):
                # Get constant value
                val = value.values()
                assert val.size == 1
                val = val[0]

                for dof in dofs:
                    boundary_dof_type[dof] = bc_type
                    boundary_dof_value[dof] = val

            elif hasattr(value, 'eval_cell'):
                # Get values from an Expression of some sort
                dof_to_cell = self._get_dof_to_cell_mapping()
                mesh = self.function_space.mesh()
                val = numpy.zeros(1, float)
                for dof in dofs:
                    cid, coords = dof_to_cell[dof]
                    cell = Cell(mesh, cid)
                    value.eval_cell(val, coords, cell)
                    boundary_dof_type[dof] = bc_type
                    boundary_dof_value[dof] = val[0]

            else:
                self._warn(
                    'WARNING: Field %s has unsupported limiter BC %r in '
                    'region %s' % (fname, type(value), boundary_region.name)
                )

        timer.stop()
        return boundary_dof_type, boundary_dof_value
Example #21
from dolfin import UnitSquareMesh, Timer
import matplotlib.pyplot as plt
import numpy as np
from tieler import TileMesh

tile = UnitSquareMesh(1, 1)

ns = [128, 256, 1024, 2048, 4096]
dts = []
for n in ns:
    shape = (n + 1, n - 1)  # To get odd as well

    t = Timer('s')
    mesh, mesh_data = TileMesh(tile, shape, mesh_data={})
    # Get tile for mesh write as well
    dts.append(t.stop())
    print(mesh.num_cells())

a, b = np.polyfit(np.log(ns), np.log(dts), 1)

ns, dts = map(np.array, (ns, dts))

plt.figure()
plt.loglog(ns, dts, basex=2., basey=2., marker='x')
plt.loglog(ns,
           np.exp(b) * ns**a,
           basex=2.,
           basey=2.,
           linestyle='dashed',
           label='O(N^%.2f)' % a)
plt.ylabel('T')
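The polyfit above is the standard trick for reading off empirical complexity: if dts ~ C * ns**a then log(dts) = a*log(ns) + log(C), so the slope of a line fitted in log-log space estimates the exponent a. A synthetic check of the idea:

import numpy as np

ns = np.array([128, 256, 512, 1024])
dts = 3e-7 * ns**2.0                           # planted quadratic timings
a, b = np.polyfit(np.log(ns), np.log(dts), 1)
print(a)                                       # ~2.0, the planted exponent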
Example #22
def analyze(problem, cases, alpha, norm_type, logfile):
    '''Convergence study of module over ncases for fixed alpha'''
    mms_data = problem.setup_mms(alpha)  # Only depend on physical parameters

    # Compat
    try:
        error_monitor, error_types = problem.setup_error_monitor(mms_data, alpha, norm_type)
    except TypeError:
        error_monitor, error_types = problem.setup_error_monitor(mms_data, alpha)
    
    # Annotate columns
    columns = ['ndofs', 'h'] + sum((['e[%s]' % t, 'r[%s]' % t] for t in error_types), []) + ['subspaces']
    header = ' '.join(columns)
    print(GREEN % header)
    
    # Stuff for command line printing as we go
    formats = ['%d', '%.2E'] + sum((['%.4E', '\033[1;37;34m%.2f\033[0m'] for _ in error_types), [])
    msg = ' '.join(formats)

    # Make a record of what the result columns are
    with open(logfile, 'a') as f: f.write('# %s\n' % header)

    msg_has_subspaces = False
    case0, ncases = cases
    
    e0, h0, rate = None, None, None
    # Exec in context so that results not lost on crash
    msg_history = []
    with open(logfile, 'a') as stream:
        for n in [4*2**i for i in range(case0, case0 + ncases)]:
            # Setting up the problem means obtaining a block_mat, block_vec
            # and a list space or matrix, vector and function space
            AA, bb, W = problem.setup_problem(n, mms_data, alpha)
            # Since direct solver expects monolithic
            info('\tConversion'); cvrt_timer = Timer('cvrt')
            A, b = map(ii_convert, (AA, bb))  # This is do nothing for monolithic

            # print np.sort(np.abs(np.linalg.eigvalsh(A.array())))
            info('\tDone (Conversion) %g' % cvrt_timer.stop())
            
            # wh = direct_solve(A, b, W, 'mumps')

            wh = direct_solve(A, b, W)  # Output is always iiFunction
            
            print([np.any(np.isnan(whi.vector().get_local())) for whi in wh])
            print([np.any(np.isinf(whi.vector().get_local())) for whi in wh])
            print([whi.vector().norm('l2') for whi in wh])
            
            # And later want list space
            W = wh.function_space()

            error = np.fromiter(error_monitor(wh), dtype=float)
            h = W[0].mesh().hmin()
            subspaces = [Wi.dim() for Wi in W]
            ndofs = sum(subspaces)
        
            if e0 is None:
                rate = np.nan*np.ones_like(error)
            else:
                rate = np.log(error/e0)/np.log(h/h0)
            h0, e0 = h, error

            # ndofs h zip of errors and rates
            row = [ndofs, h] + list(sum(zip(error, rate), ())) + subspaces

            if not msg_has_subspaces:
                msg = ' '.join([msg] + ['%d']*len(subspaces))
            msg_has_subspaces = True

            msg_history.append(row)
            print('=' * 79)
            print(RED % str(alpha))
            print(GREEN % header)
            for msg_row in msg_history:
                print(msg % tuple(msg_row))
            print('=' * 79)
                
            stream.write('%s\n' % ' '.join(map(str, row)))
    # Out for plotting
    return wh, mms_data
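The rate column uses the standard two-mesh estimate: if e ~ C*h^p then p ~ log(e1/e0) / log(h1/h0) between consecutive refinements. A synthetic check:

import numpy as np

h = np.array([0.1, 0.05, 0.025, 0.0125])
e = 2.0 * h**2                                   # planted O(h^2) errors
rates = np.log(e[1:] / e[:-1]) / np.log(h[1:] / h[:-1])
print(rates)                                     # ~[2., 2., 2.]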