def evaluate(self, annotate=True):
        """ Computes the functional value by running the forward model. """

        log(INFO, 'Start evaluation of j')
        timer = Timer("j evaluation")

        farm = self.solver.problem.parameters.tidal_farm

        # Configure dolfin-adjoint
        adj_reset()
        parameters["adjoint"]["record_all"] = True

        # Solve the shallow water system and integrate the functional of
        # interest.
        final_only = (not self.solver.problem._is_transient
                      or self._problem_params.functional_final_time_only)
        self.time_integrator = TimeIntegrator(self.solver.problem,
                                              self._functional, final_only)

        for sol in self.solver.solve(annotate=annotate):
            self.time_integrator.add(sol["time"], sol["state"], sol["tf"],
                                     sol["is_final"])

        j = self.time_integrator.integrate()

        timer.stop()

        log(INFO, 'Runtime: %f s.' % timer.elapsed()[0])
        log(INFO, 'j = %e.' % float(j))

        return j
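
The examples on this page all revolve around dolfin's Timer. Three idioms recur; here is a minimal sketch of all three, assuming a legacy FEniCS/dolfin build where a named Timer starts on construction (some snippets below still call start() explicitly):

from dolfin import Timer

t = Timer("explicit")
# ... work ...
wall_time = t.stop()         # stop() returns the elapsed wall-clock time

t1 = Timer("scoped")
# ... work ...
del t1                       # the destructor stops and records the timer

with Timer("managed"):       # stopped automatically when the block exits
    pass                     # ... work ...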
    def setup_ksp(self, ksp, assemble_func, iset, spd=False, const=False):
        """Assemble into operator of given ksp if not yet assembled"""
        mat = ksp.getOperators()[0]
        prefix = ksp.getOptionsPrefix()
        if mat.type is None or not mat.isAssembled():
            # Assemble matrix
            destruction = True if const else None
            dolfin_mat = self.get_work_dolfin_mat(assemble_func,
                                                  mat.comm,
                                                  can_be_destroyed=destruction,
                                                  can_be_shared=True)
            assemble_func(dolfin_mat)
            mat = self._get_deep_submat(dolfin_mat.mat(), iset, submat=None)

            # Set the SPD flag if provided
            mat.setOption(PETSc.Mat.Option.SPD, spd)

            # Set correct options prefix
            mat.setOptionsPrefix(prefix)

            # Use also as preconditioner matrix
            ksp.setOperators(mat, mat)
            assert ksp.getOperators()[0].isAssembled()

            # Set up ksp
            with Timer("FENaPack: {} setup".format(prefix)):
                ksp.setUp()

        elif not const:
            # Assemble matrix and set up ksp
            mat = self._assemble_operator_deep(assemble_func, iset, submat=mat)
            assert mat.getOptionsPrefix() == prefix
            ksp.setOperators(mat, mat)
            with Timer("FENaPack: {} setup".format(prefix)):
                ksp.setUp()
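
setup_ksp reuses an already-assembled operator and only rebuilds it when it is not constant. The operator/preconditioner wiring in isolation, as a toy petsc4py sketch (no FENaPack helpers involved):

from petsc4py import PETSc

A = PETSc.Mat().createAIJ([4, 4], nnz=1)
A.setUp()
for i in range(4):
    A.setValue(i, i, 2.0)                    # toy SPD diagonal operator
A.assemble()
A.setOption(PETSc.Mat.Option.SPD, True)      # hint the solver about symmetry

ksp = PETSc.KSP().create()
ksp.setOperators(A, A)                       # same matrix as operator and preconditioner
ksp.setUp()
assert ksp.getOperators()[0].isAssembled()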
Example #4
    def __init__(self, grid, fields):
        timer = Timer('setup'); timer.start()
        mesh = fields[0].function_space().mesh()
        # Same meshes for all
        assert all(mesh.id() == f.function_space().mesh().id() for f in fields)

        # Locate each point
        limit = mesh.num_entities_global(mesh.topology().dim())
        bbox_tree = mesh.bounding_box_tree()

        npoints = np.prod(grid.ns)
        cells_for_x = [None]*npoints
        for i, x in enumerate(grid.points()):
            cell = bbox_tree.compute_first_entity_collision(Point(*x))
            if -1 < cell < limit:
                cells_for_x[i] = Cell(mesh, cell)
        assert not any(c is None for c in cells_for_x)

        # For each field I want to build a function which evaluates
        # it at all the points
        self.data = {}
        self.eval_fields = []
        for u in fields:
            # Alloc
            key = u.name()
            self.data[key] = [np.zeros(npoints) for _ in range(u.value_size())]
            # Attach the eval for u
            self.eval_fields.append(eval_u(u, grid.points(), cells_for_x, self.data[key]))
        info('Probe setup took %g' % timer.stop())
        self.grid = grid
        # Get values at construction
        self.update()
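
The constructor's point location relies on the mesh's bounding box tree. The same dolfin calls on a toy mesh, as a self-contained sketch:

from dolfin import UnitSquareMesh, Point, Cell
import numpy as np

mesh = UnitSquareMesh(8, 8)
tree = mesh.bounding_box_tree()
limit = mesh.num_entities_global(mesh.topology().dim())

x = np.array([0.5, 0.5])
cell_id = tree.compute_first_entity_collision(Point(*x))
if -1 < cell_id < limit:     # a miss returns an out-of-range index
    cell = Cell(mesh, cell_id)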
Example #5
    def derivative(self, forget=False, **kwargs):
        """ Computes the first derivative of the functional with respect to its
        controls by solving the adjoint equations. """

        log(INFO, 'Start evaluation of dj')
        timer = Timer("dj evaluation")

        self.functional = self.time_integrator.dolfin_adjoint_functional(self.solver.state)
        dj = compute_gradient(self.functional, self.controls, forget=forget, **kwargs)
        parameters["adjoint"]["stop_annotating"] = False

        log(INFO, "Runtime: " + str(timer.stop()) + " s")

        return enlisting.enlist(dj)
Example #6
    def update(self):
        if not self.active:
            return

        with Timer('Ocellaris update hydrostatic pressure'):
            A = self.tensor_lhs
            b = assemble(self.form_rhs)

            if self.null_space is None:
                # Create vector that spans the null space, and normalize
                null_space_vector = b.copy()
                null_space_vector[:] = sqrt(1.0 / null_space_vector.size())

                # Create null space basis object and attach to PETSc matrix
                self.null_space = VectorSpaceBasis([null_space_vector])
                as_backend_type(A).set_nullspace(self.null_space)

            self.null_space.orthogonalize(b)
            self.solver.solve(A, self.func.vector(), b)

        if not self.every_timestep:
            # Give initial values for p, but do not continuously compute p_hydrostatic
            sim = self.simulation
            p = sim.data['p']
            if p.vector().max() == p.vector().min() == 0.0:
                sim.log.info(
                    'Initial pressure field is identically zero, initializing to hydrostatic'
                )
                p.interpolate(self.func)

            # Disable further hydrostatic pressure calculations
            self.func.vector().zero()
            del sim.data['p_hydrostatic']
            del self.func
            self.active = False
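
The null-space handling above pins down the constant mode of a singular system. A self-contained sketch for a pure-Neumann Poisson problem using the same dolfin calls (assuming legacy FEniCS):

from math import sqrt
from dolfin import (UnitSquareMesh, FunctionSpace, TrialFunction, TestFunction,
                    Constant, assemble, as_backend_type, VectorSpaceBasis,
                    dot, grad, dx)

mesh = UnitSquareMesh(8, 8)
V = FunctionSpace(mesh, 'CG', 1)
u, v = TrialFunction(V), TestFunction(V)
A = assemble(dot(grad(u), grad(v)) * dx)   # singular: constants are in the null space
b = assemble(Constant(0.0) * v * dx)

null_space_vector = b.copy()
null_space_vector[:] = sqrt(1.0 / null_space_vector.size())

null_space = VectorSpaceBasis([null_space_vector])
as_backend_type(A).set_nullspace(null_space)
null_space.orthogonalize(b)                # make the right-hand side consistent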
    def setup_ksp_Rp(self, ksp, Mu, Bt):
        """Setup pressure Laplacian ksp based on velocity mass matrix ``Mu``
        and discrete gradient ``Bt`` and assemble matrix
        """
        mat = ksp.getOperators()[0]
        prefix = ksp.getOptionsPrefix()
        const = self.assembler.get_pcd_form("mu").is_constant() \
                  and self.assembler.get_pcd_form("gp").is_constant()
        if mat.type is None or not mat.isAssembled() or not const:
            # Get approximate Laplacian
            mat = self._build_approx_Ap(Mu, Bt, mat)

            # Set the SPD flag (the approximate Laplacian is SPD)
            mat.setOption(PETSc.Mat.Option.SPD, True)

            # Set correct options prefix
            mat.setOptionsPrefix(prefix)

            # Use also as preconditioner matrix
            ksp.setOperators(mat, mat)
            assert ksp.getOperators()[0].isAssembled()

            # Setup ksp
            with Timer("FENaPack: {} setup".format(prefix)):
                ksp.setUp()
    def derivative(self, forget=False, **kwargs):
        """ Computes the first derivative of the functional with respect to its
        controls by solving the adjoint equations. """

        log(INFO, 'Start evaluation of dj')
        timer = Timer("dj evaluation")

        if not hasattr(self, "time_integrator"):
            self.evaluate()
        self.functional = self.time_integrator.dolfin_adjoint_functional(
            self.solver.state)
        dj = compute_gradient(self.functional,
                              self.controls,
                              forget=forget,
                              **kwargs)
        parameters["adjoint"]["stop_annotating"] = False

        log(INFO, "Runtime: " + str(timer.stop()) + " s")

        return enlisting.enlist(dj)
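
Unlike the variant in Example #5, this one lazily runs the forward model when derivative() is called first. Stripped of the dolfin-adjoint machinery, the guard is just this pattern (a generic sketch, not the actual class):

class LazyFunctional(object):
    # hypothetical stand-in; only the guard pattern is the point here
    def evaluate(self):
        self.time_integrator = object()   # created as a side effect of the forward run
        return 0.0

    def derivative(self):
        if not hasattr(self, "time_integrator"):
            self.evaluate()               # ensure the forward model has run first
        return 0.0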
Example #10
    def __call__(self, assemb_rhs=True):
        """
        Compute the projection
        """
        timer = Timer("Projecting {}".format(self.name()))

        if assemb_rhs:
            self.assemble_rhs()

        for bc in self.bcs:
            bc.apply(self.rhs)

        if self.method.lower() == "default":
            self.sol.solve(self.A, self.vector(), self.rhs)

        else:
            self.vector().zero()
            self.vector().axpy(1.0, self.rhs * self.ML)
Example #11
def python_timings(mesh, Nrange, read_matrix=False):
    '''Run across meshes solving lumped EVP and recording their sizes and exec time.'''
    # We know from the Julia experiments that this idea (lumping) works,
    # so we are only after timing here
    data = []

    if read_matrix:
        matrices = read_matrices(mesh, root='./jl_matrices')
    else:
        matrices = (get_1d_matrices(mesh, N) for N in Nrange)

    for A, M in matrices:
        row = [A.shape[0]]

        M0 = lump(M, -0.5)
        A0 = M0.dot(A.dot(M0))
        d, u = np.diagonal(A0, 0), np.diagonal(A0, 1)

        t = Timer('EVP')
        eigw, eigv = s3d_eig(d, u)
        row.append(t.stop())

        # Assembling the preconditioner
        t = Timer('ASSEMBLE')
        H = eigv.dot(np.diag(eigw**-0.5).dot(eigv.T))
        row.append(t.stop())

        # Action of preconditioner(matrix)
        x = np.random.rand(H.shape[1])
        t = Timer('ACTION')
        y = H.dot(x)  # matrix-vector product; H * x would broadcast elementwise
        row.append(t.stop())

        # The original GEVP
        t = Timer('GEVP')
        eigw, eigv = eigh(A, M)
        row.append(t.stop())

        print(row)
        data.append(row)

    return data
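
The key step, lump(M, -0.5), replaces the mass matrix by a diagonal approximation so the generalized problem A x = lambda M x turns into a standard one for A0 = M0 A M0. A minimal numpy sketch of that transformation (the semantics of lump, taking row sums, is an assumption):

import numpy as np

def lump(M, power):
    # diagonal matrix of the row sums of M, raised elementwise to `power`
    return np.diag(np.sum(M, axis=1) ** power)

A = np.array([[2.0, -1.0], [-1.0, 2.0]])   # toy stiffness matrix
M = np.array([[2.0, 1.0], [1.0, 2.0]])     # toy mass matrix
M0 = lump(M, -0.5)                         # M0 = diag(rowsum(M))^(-1/2)
A0 = M0.dot(A.dot(M0))                     # standard eigenproblem operator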
Example #12
# Loop and output
step = 0
t = 0.

# Store at step 0
xdmf_rho.write(rho0, t)
xdmf_u.write(Uh.sub(0), t)
xdmf_p.write(Uh.sub(1), t)

p.dump2file(mesh, fname_list, property_list, 'wb')

dump_list = [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]
with open(pressure_table, "wb") as PT:
    pickle.dump(dump_list, PT)

timer = Timer("[P] Total time consumed")
timer.start()

while step < num_steps:
    step += 1
    t += float(dt)

    if comm.Get_rank() == 0:
        print("Step " + str(step) + ', time = ' + str(t))

    # Advect
    t1 = Timer("[P] advect particles")
    ap.do_step(float(dt))
    del t1

    # Project density and specific momentum
    def solve(self):
        """
        Solve optimal control problem

        """

        # msg = "You need to build the problem before solving it"
        # assert hasattr(self, "opt_type"), msg

        module = self.parameters["opt_lib"]

        logger.info("Starting optimization")
        # logger.info(
        #     "Scale: {}, \nDerivative Scale: {}".format(
        #         self.J.scale, self.J.derivative_scale
        #     )
        # )
        # logger.info(
        #     "Tolerace: {}, \nMaximum iterations: {}\n".format(self.tol, self.max_iter)
        # )

        t = Timer()
        t.start()

        if self.parameters["nvar"] == 1:

            res = minimize_1d(self.J, self.x[0], **self.options)
            x = res["x"]

        else:

            if module == "scipy":
                res = scipy_minimize(self.J, self.x, **self.options)
                x = res["x"]

            elif module == "pyOpt":
                obj, x, d = self.problem(**self.options)

            elif module == "moola":
                sol = self.solver.solve()
                x = sol["control"].data

            elif module == "ipopt":
                x = self.solver.solve(self.x)

            else:
                msg = (
                    "Unknown optimization type {}. "
                    "Define the optimization type as 'module-method', "
                    "where module is e.g. scipy or pyOpt and method is "
                    "e.g. slsqp."
                )
                raise ValueError(msg)

        run_time = t.stop()
        # opt_result = {}
        # opt_result['initial_contorl'] = self.initial_control
        # opt_result["optimal_control"] = x
        # opt_result["run_time"] = run_time
        # opt_result["nfev"] = self.J.iter
        # opt_result["nit"] = self.J.iter
        # opt_result["njev"] = self.J.nr_der_calls
        # opt_result["ncrash"] = self.J.nr_crashes
        # opt_result["controls"] = self.J.controls_lst
        # opt_result["func_vals"] = self.J.func_values_lst
        # opt_result["forwaJ_times"] = self.J.forwaJ_times
        # opt_result["backwaJ_times"] = self.J.backwaJ_times
        # opt_result["grad_norm"] = self.J.grad_norm
        return optimization_results(
            initial_control=self.initial_control, optimal_control=x, run_time=run_time
        )
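
The scipy branch unpacks res["x"] from the optimizer result. A toy, self-contained sketch of that branch, assuming scipy_minimize wraps scipy.optimize.minimize as used above:

from scipy.optimize import minimize

def J(x):
    # toy quadratic objective standing in for the reduced functional
    return (x[0] - 1.0) ** 2 + (x[1] + 2.0) ** 2

res = minimize(J, [0.0, 0.0], method='SLSQP')
x = res["x"]  # OptimizeResult is dict-like, so this matches the unpacking above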
    write_file.write("%-12s %-12s %-15s %-20s %-15s %-15s \n" %
                     ("Time step", "Number of steps", "Number of cells",
                      "Number of particles", "Projection", "Solver"))
    write_file.write("%-12.5g %-15d %-15d %-20d %-15s %-15s \n" %
                     (float(dt), num_steps, nc, npt, projection_type, solver))

with open(conservation_data, "w") as write_file:
    writer = csv.writer(write_file)
    writer.writerow([
        "Time", "Total mass", "Mass conservation (incl. bndry flux)",
        "Mass conservation (excl. bndry flux)",
        "Momentum conservation (incl. bndry flux)",
        "Momentum conservation (excl. bndry flux)", "Rho_min", "Rho_max"
    ])

timer = Timer("[P] Total time consumed")
timer.start()

while step < num_steps:
    step += 1
    t += float(dt)

    if comm.Get_rank() == 0:
        print("Step " + str(step) + ', time = ' + str(t))

    # Advect
    t1 = Timer("[P] advect particles")
    ap.do_step(float(dt))
    del (t1)

    # Project density and specific momentum
def test(path, type='mf'):
    '''Evolve the tile in (n, n) pattern checking volume/surface properties'''

    comm = mpi_comm_world()
    h5 = HDF5File(comm, path, 'r')
    tile = Mesh()
    h5.read(tile, 'mesh', False)

    init_container = lambda type, dim: (
        MeshFunction('size_t', tile, dim, 0)
        if type == 'mf' else MeshValueCollection('size_t', tile, dim))

    for n in (2, 4):
        data = {}
        checks = {}
        for dim, name in zip((2, 3), ('surfaces', 'volumes')):
            # Get the collection
            collection = init_container(type, dim)
            h5.read(collection, name)

            if type == 'mvc': collection = as_meshf(collection)

            # Data to evolve
            tile.init(dim, 0)
            e2v = tile.topology()(dim, 0)
            # Only want to evolve tag 1 (interfaces) for the facets.
            data[(dim, 1)] = np.array(
                [e2v(e.index()) for e in SubsetIterator(collection, 1)],
                dtype='uintp')

            if dim == 2:
                check = lambda m, f: assemble(
                    FacetArea(m) * ds(domain=m,
                                      subdomain_data=f,
                                      subdomain_id=1) + avg(FacetArea(m)) *
                    dS(domain=m, subdomain_data=f, subdomain_id=1))
            else:
                check = lambda m, f: assemble(
                    CellVolume(m) * dx(
                        domain=m, subdomain_data=f, subdomain_id=1))

            checks[dim] = lambda m, f, t=tile, c=collection, n=n, check=check: \
                abs(check(m, f) - n**2 * check(t, c)) / (n**2 * check(t, c))

        t = Timer('x')
        mesh, mesh_data = TileMesh(tile, (n, n), mesh_data=data)
        info('\tTiling took %g s. Nvertices %d, ncells %d\n' %
             (t.stop(), mesh.num_vertices(), mesh.num_cells()))

        foos = mf_from_data(mesh, mesh_data)
        # Mesh Functions
        from_mf = np.array([checks[dim](mesh, foos[dim]) for dim in (2, 3)])

        mvcs = mvc_from_data(mesh, mesh_data)
        foos = as_meshf(mvcs)
        # Mesh ValueCollections
        from_mvc = np.array([checks[dim](mesh, foos[dim]) for dim in (2, 3)])

        assert np.linalg.norm(from_mf - from_mvc) < 1E-13
        # I ignore shared facets so there is bound to be some error in facets
        # Volume should match well
        print(from_mf)
Example #16
def main(module_name, ncases, params, petsc_params):
    '''
    Run the test case in the module with ncases. Optionally store results
    in save_dir. For some modules there are multiple choices of
    preconditioner.
    '''

    # Unpack
    for k, v in params.items():
        exec(k + '=v', locals())

    RED = '\033[1;37;31m%s\033[0m'
    print(RED % ('\tRunning %s' % module_name))

    module = __import__(module_name)  # no importlib in python2.7

    # Setup the MMS case
    u_true, rhs_data = module.setup_mms(eps)

    # Setup the convergence monitor
    if log:
        params = [('solver', solver), ('precond', str(precond)),
                  ('eps', str(eps))]

        path = '_'.join([module_name] + ['%s=%s' % pv for pv in params])
        path = os.path.join(save_dir if save_dir else '.', path)
        path = '.'.join([path, 'txt'])
    else:
        path = ''

    memory, residuals = [], []
    monitor = module.setup_error_monitor(u_true, memory, path=path)

    # Sometimes it is useful to transform the solution before computing
    # the error, e.g. to consider subdomains
    if hasattr(module, 'setup_transform'):
        # NOTE: transform takes two args: the case and the current computed
        # solution
        transform = module.setup_transform
    else:
        transform = lambda i, x: x

    print('=' * 79)
    print('\t\t\tProblem eps = %g' % eps)
    print('=' * 79)
    for i in ncases:
        a, L, W = module.setup_problem(i, rhs_data, eps=eps)

        # Assemble blocks
        t = Timer('assembly')
        t.start()
        AA, bb = map(ii_assemble, (a, L))
        print('\tAssembled blocks in %g s' % t.stop())

        wh = ii_Function(W)

        if solver == 'direct':
            # Turn into a (monolithic) PETScMatrix/Vector
            t = Timer('conversion')
            t.start()
            AAm, bbm = map(ii_convert, (AA, bb))
            print('\tConversion to PETScMatrix/Vector took %g s' % t.stop())

            t = Timer('solve')
            t.start()
            LUSolver('umfpack').solve(AAm, wh.vector(), bbm)
            print('\tSolver took %g s' % t.stop())

            niters = 1
        else:
            # Here we define a Krylov solver using PETSc
            BB = module.setup_preconditioner(W, precond, eps=eps)
            ## AA and BB as block_mat
            ksp = PETSc.KSP().create()

            # Default is minres
            if '-ksp_type' not in petsc_params:
                petsc_params['-ksp_type'] = 'minres'

            opts = PETSc.Options()
            for key, value in petsc_params.items():
                opts.setValue(key, None if value == 'none' else value)

            ksp.setOperators(ii_PETScOperator(AA))

            ksp.setNormType(PETSc.KSP.NormType.NORM_PRECONDITIONED)
            # ksp.setTolerances(rtol=1E-6, atol=None, divtol=None, max_it=300)
            ksp.setConvergenceHistory()
            # We attach the wrapped preconditioner defined by the module
            ksp.setPC(ii_PETScPreconditioner(BB, ksp))

            ksp.setFromOptions()

            print(ksp.getTolerances())

            # Want the iterations to start from random
            wh.block_vec().randomize()
            # Solve; note the passed object must be a PETSc.Vec
            t = Timer('solve')
            t.start()
            ksp.solve(as_petsc_nest(bb), wh.petsc_vec())
            print('\tSolver took %g s' % t.stop())

            niters = ksp.getIterationNumber()

            residuals.append(ksp.getConvergenceHistory())

        # Let's check the final size of the residual
        r_norm = (bb - AA * wh.block_vec()).norm()

        # Convergence?
        monitor.send((transform(i, wh), niters, r_norm))

    # Only send the final
    if save_dir:
        path = os.path.join(save_dir, module_name)
        for i, wh_i in enumerate(wh):
            # Renaming to make it easier to save state in VisIt/ParaView
            wh_i.rename('u', str(i))

            File('%s_%d.pvd' % (path, i)) << wh_i

    # Plot relative residual norm
    if plot:
        plt.figure()
        [
            plt.semilogy(res / res[0], label=str(i))
            for i, res in enumerate(residuals, 1)
        ]
        plt.legend(loc='best')
        plt.show()
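
Both solver branches push user options into the global PETSc options database before setFromOptions(). That pattern in isolation, as a short petsc4py sketch:

from petsc4py import PETSc

petsc_params = {'-ksp_type': 'minres', '-ksp_monitor': 'none'}

opts = PETSc.Options()
for key, value in petsc_params.items():
    # 'none' encodes a flag-style option with no value, as in main() above
    opts.setValue(key, None if value == 'none' else value)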
Example #17
def TileMesh(tile, shape, mesh_data={}, TOL=1E-9):
    '''
    [tile tile;
     tile tile;
     tile tile;
     tile tile]

    The shape is an ntuple describing the number of pieces put next 
    to each other in the i-th axis. mesh_data : (tdim, tag) -> [entities] 
    is the way to encode mesh data of the tile.
    '''
    # Sanity check for gluing
    gdim = tile.geometry().dim()
    assert len(shape) <= gdim, (shape, gdim)
    # While evolve is general, mesh writing is limited to simplices only (FIXME),
    # so we bail out early
    assert str(tile.ufl_cell()) in ('interval', 'triangle', 'tetrahedron')

    t = Timer('evolve')
    # Do nothing
    if all(v == 1 for v in shape): return tile, mesh_data

    # We want to evolve cells, vertices of the mesh using geometry information
    # and periodicity info
    x = tile.coordinates()
    min_x = np.min(x, axis=0)
    max_x = np.max(x, axis=0)
    shifts = max_x - min_x
    
    shifts_x = []  # Geometry
    vertex_mappings = []  # Periodicity
    # Compute geometrical shift for EVERY direction:
    for axis in range(len(shape)):
        shift = shifts[axis]
        # Vector to shift cell vertices
        shift_x = np.zeros(gdim); shift_x[axis] = shift
        shifts_x.append(shift_x)

        # Compute periodicity in the vertices
        to_master = lambda x, shift=shift_x: x - shift
        # Mapping facets
        master = CompiledSubDomain('near(x[i], A, tol)', i=axis, A=min_x[axis], tol=TOL)
        slave = CompiledSubDomain('near(x[i], A, tol)', i=axis, A=max_x[axis], tol=TOL)

        error, vertex_mapping = compute_vertex_periodicity(tile, master, slave, to_master)
        # Fail when the extended direction is not periodic
        assert error < 10*TOL, error
        
        vertex_mappings.append(vertex_mapping)
    # The final piece of information is cells
    cells = np.empty((tile.num_cells(), tile.ufl_cell().num_vertices()), dtype='uintp')
    cells.ravel()[:] = tile.cells().flat
        
    # Evolve the mesh by tiling along the last direction in shape
    while shape:
        x, cells, vertex_mappings, shape = \
            evolve(x, cells, vertex_mappings, shape, shifts_x, mesh_data=mesh_data)
        
    info('\tEvolve took %g s ' % t.stop())

    # We evolve the data, but the mesh functions are built from it outside
    mesh = make_mesh(x, cells, tdim=tile.topology().dim(), gdim=gdim)

    return mesh, mesh_data
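
The geometric core of TileMesh is shifting the tile's vertex coordinates by its extent along an axis and stacking the copies. A numpy-only sketch of one doubling step:

import numpy as np

x = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])  # unit tile vertices
shifts = np.max(x, axis=0) - np.min(x, axis=0)          # extent per axis

axis = 0
shift_x = np.zeros(2); shift_x[axis] = shifts[axis]     # shift along axis 0
x_tiled = np.vstack([x, x + shift_x])                   # two tiles side by side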
Example #18
    # Load the original mesh
    h5 = HDF5File(get_comm_world(), args.mesh, 'r')
    mesh = Mesh()
    h5.read(mesh, 'mesh', False)

    surfaces = MeshFunction('size_t', mesh, mesh.topology().dim()-1, 0)
    h5.read(surfaces, 'surfaces')

    volumes = MeshFunction('size_t', mesh, mesh.topology().dim(), 0)
    h5.read(volumes, 'volumes')
    
    h5.close()

    # Remove layer
    tt = Timer('cleanup')
    ncells_x, ncells_y = args.m, args.n
    surfaces, volumes = deactivate_cells(surfaces, volumes, ncells_x, ncells_y)

    info('Removing boundary took %g s' % tt.stop())

    # Write 
    tt = Timer('write')
    # Completely new
    if not args.in_place:
        h5_file = ''.join(['_'.join([root, 'noBdry']), ext])

        h5 = HDF5File(get_comm_world(), h5_file, 'w')
        # FIXME: replacing
        h5.write(mesh, 'mesh')
        h5.write(surfaces, 'surfaces')
Example #19
    def get_bcs(self, weak_dof_values=None):
        """
        Get the BC type and BC value for each dof in the function space.
        If a weak_dof_values array is given then it is used for Dirichlet BCs
        instead of the strong BCs given by the BC function (the two will
        typically differ by a small amount)
        """
        sim = self.simulation
        im = self.function_space.dofmap().index_map()
        num_owned_dofs = im.size(im.MapSize.OWNED)
        boundary_dof_type = numpy.zeros(num_owned_dofs, numpy.intc)
        boundary_dof_value = numpy.zeros(num_owned_dofs, float)

        if not self.active:
            return boundary_dof_type, boundary_dof_value

        # This is potentially slow, so we time this code
        timer = Timer("Ocellaris get slope limiter boundary conditions")

        # Collect BCs - field name for u0 can be u (FreeSlip) and u0 (CppCodedValue)
        dirichlet = {}
        neumann = {}
        robin = {}
        slip = {}
        for field_name in self.field_names:
            # Collect Dirichlet BCs for this field
            for bc in sim.data['dirichlet_bcs'].get(field_name, []):
                region_number = bc.subdomain_id - 1
                dirichlet[region_number] = bc

            # Collect Neumann BCs for this field
            for bc in sim.data['neumann_bcs'].get(field_name, []):
                region_number = bc.subdomain_id - 1
                neumann[region_number] = bc

            # Collect Robin BCs for this field
            for bc in sim.data['robin_bcs'].get(field_name, []):
                region_number = bc.subdomain_id - 1
                robin[region_number] = bc

            # Collect Slip BCs for this field
            for bc in sim.data['slip_bcs'].get(field_name, []):
                region_number = bc.subdomain_id - 1
                slip[region_number] = bc

        fname = ', '.join(self.field_names)
        regions = sim.data['boundary']
        for region_number, dofs in self.region_dofs.items():
            boundary_region = regions[region_number]

            # Get the BC object
            if region_number in dirichlet:
                bc_type = self.BC_TYPE_DIRICHLET
                value = dirichlet[region_number].func()
            elif region_number in neumann:
                bc_type = self.BC_TYPE_NEUMANN
                value = neumann[region_number].func()
            elif region_number in robin:
                bc_type = self.BC_TYPE_ROBIN
                value = robin[region_number].dfunc()
            elif region_number in slip:
                value = None
                for dof in dofs:
                    boundary_dof_type[dof] = self.BC_TYPE_OTHER
                continue
            else:
                self._warn(
                    'WARNING: Slope limiter found no BC for field %s '
                    'in region %s' % (fname, boundary_region.name)
                )
                continue

            if bc_type == self.BC_TYPE_DIRICHLET and weak_dof_values is not None:
                # Take values from a field which has (weak) Dirichlet BCs applied
                for dof in dofs:
                    boundary_dof_type[dof] = bc_type
                    boundary_dof_value[dof] = weak_dof_values[dof]

            elif isinstance(value, Constant):
                # Get constant value
                val = value.values()
                assert val.size == 1
                val = val[0]

                for dof in dofs:
                    boundary_dof_type[dof] = bc_type
                    boundary_dof_value[dof] = val

            elif hasattr(value, 'eval_cell'):
                # Get values from an Expression of some sort
                dof_to_cell = self._get_dof_to_cell_mapping()
                mesh = self.function_space.mesh()
                val = numpy.zeros(1, float)
                for dof in dofs:
                    cid, coords = dof_to_cell[dof]
                    cell = Cell(mesh, cid)
                    value.eval_cell(val, coords, cell)
                    boundary_dof_type[dof] = bc_type
                    boundary_dof_value[dof] = val[0]

            else:
                self._warn(
                    'WARNING: Field %s has unsupported limiter BC %r in '
                    'region %s' % (fname, type(value), boundary_region.name)
                )

        timer.stop()
        return boundary_dof_type, boundary_dof_value
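
The eval_cell branch above evaluates an Expression inside a known cell. The same dolfin calls on a toy mesh (a sketch; the point is assumed to lie in the chosen cell):

from dolfin import UnitSquareMesh, Expression, Cell
import numpy

mesh = UnitSquareMesh(4, 4)
expr = Expression('x[0] + x[1]', degree=1)

cell = Cell(mesh, 0)                 # a known cell
coords = numpy.array([0.1, 0.1])     # a point assumed to lie inside it
val = numpy.zeros(1, float)
expr.eval_cell(val, coords, cell)    # writes the value into val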
Example #20
def main(module_name, ncases, params, petsc_params):
    '''
    Run the test case in the module with ncases. Optionally store results
    in save_dir. For some modules there are multiple choices of
    preconditioner.
    '''
    
    # Unpack
    for k, v in params.items(): exec(k + '=v', locals())

    RED = '\033[1;37;31m%s\033[0m'
    print(RED % ('\tRunning %s with preconditioner %d' % (module_name, precond)))

    module = __import__(module_name)  # no importlib in python2.7

    # Setup the MMS case
    u_true, rhs_data = module.setup_mms(eps)

    # Setup the convergence monitor
    if log:
        params = [('solver', solver), ('precond', str(precond)), ('eps', str(eps))]
        
        path = '_'.join([module_name] + ['%s=%s' % pv for pv in params])
        path = os.path.join(save_dir if save_dir else '.', path)
        path = '.'.join([path, 'txt'])
    else:
        path = ''

    memory, residuals = [], []
    monitor = module.setup_error_monitor(u_true, memory, path=path)

    # Sometimes it is useful to transform the solution before computing
    # the error, e.g. to consider subdomains
    if hasattr(module, 'setup_transform'):
        # NOTE: transform takes two args: the case and the current computed
        # solution
        transform = module.setup_transform
    else:
        transform = lambda i, x: x

    print('=' * 79)
    print('\t\t\tProblem eps = %r' % eps)
    print('=' * 79)
    for i in ncases:
        a, L, W = module.setup_problem(i, rhs_data, eps=eps)
        
        # Assemble blocks
        t = Timer('assembly'); t.start()
        AA, bb = map(ii_assemble, (a, L))
        print('\tAssembled blocks in %g s' % t.stop())

        # Check the symmetry
        wh = ii_Function(W)

        assert (AA*wh.block_vec() - (AA.T)*wh.block_vec()).norm() < 1E-10

        if solver == 'direct':
            # Turn into a (monolithic) PETScMatrix/Vector
            t = Timer('conversion'); t.start()
            AAm, bbm = map(ii_convert, (AA, bb))
            print('\tConversion to PETScMatrix/Vector took %g s' % t.stop())

            t = Timer('solve'); t.start()
            LUSolver('umfpack').solve(AAm, wh.vector(), bbm)
            print('\tSolver took %g s' % t.stop())

            niters = 1

        if solver == 'iterative':
            # Here we define a Krylov solver using PETSc
            BB = module.setup_preconditioner(W, precond, eps=eps)

            ## AA and BB as block_mat
            ksp = PETSc.KSP().create()

            # Default is minres
            if '-ksp_type' not in petsc_params:
                petsc_params['-ksp_type'] = 'minres'
            
            opts = PETSc.Options()
            for key, value in petsc_params.items():
                opts.setValue(key, None if value == 'none' else value)

            ksp.setOperators(ii_PETScOperator(AA))

            ksp.setNormType(PETSc.KSP.NormType.NORM_PRECONDITIONED)
            # ksp.setTolerances(rtol=1E-6, atol=None, divtol=None, max_it=300)
            ksp.setConvergenceHistory()
            # We attach the wrapped preconditioner defined by the module
            ksp.setPC(ii_PETScPreconditioner(BB, ksp))
            
            ksp.setFromOptions()
            
            print(ksp.getTolerances())
            
            # Want the iterations to start from random
            wh.block_vec().randomize()

            # Solve; note the passed object must be a PETSc.Vec
            t = Timer('solve'); t.start()
            ksp.solve(as_petsc_nest(bb), wh.petsc_vec())
            print('\tSolver took %g s' % t.stop())

            niters = ksp.getIterationNumber()

            residuals.append(ksp.getConvergenceHistory())
            
        # Let's check the final size of the residual
        r_norm = (bb - AA*wh.block_vec()).norm()
        # Convergence?
        monitor.send((transform(i, wh), W, niters, r_norm))
        
    # Only send the final
    if save_dir:
        path = os.path.join(save_dir, module_name)
        for i, wh_i in enumerate(wh):
            # Renaming to make it easier to save state in VisIt/ParaView
            wh_i.rename('u', str(i))
            
            File('%s_%d.pvd' % (path, i)) << wh_i

    # Plot relative residual norm
    if plot:
        plt.figure()
        [plt.semilogy(res/res[0], label=str(i)) for i, res in enumerate(residuals, 1)]
        plt.legend(loc='best')
        plt.show()
s = assign_particle_values(x, psi0_expr)

p = particles(x, [s], mesh)
# Initialize advection class, use RK3 scheme
ap = advect_rk3(p, V, uh, 'closed')
# Init projection
lstsq_psi = l2projection(p, W, 1)

# Do projection to get initial field
lstsq_psi.project(psi_h.cpp_object(), lb, ub)
AD = AddDelete(p, 10, 20, [psi_h], [1], [lb, ub])

step = 0
t = 0.
area_0 = assemble(psi_h * dx)
timer = Timer()
timer.start()

outfile.write(psi_h, t)
while step < num_steps:
    step += 1
    t += float(dt)

    if comm.Get_rank() == 0:
        print("Step " + str(step))

    AD.do_sweep()
    ap.do_step(float(dt))
    AD.do_sweep_failsafe(4)

    lstsq_psi.project(psi_h.cpp_object(), lb, ub)
Example #22
    if comm.rank == 0:
        with open(fname, "a" if append else "w") as fi:
            fi.write(",".join(map(lambda v: "%.6e" % v, vals)) + "\n")


# Compute and output functionals
def output_data_step(append=False):
    urms = (1.0 / (lmbdax*lmbdaz) * assemble(dot(u_vec, u_vec) * dx)) ** 0.5
    conservation = abs(assemble(phi * dx) - conservation0)
    entrainment = assemble(1.0 / (lmbdax * lmbdaz * Constant(db)) * phi * dx(de))
    output_functionals(data_filename, [float(t), float(dt), urms, conservation, entrainment],
                       append=append)


# Initial Stokes solve
time = Timer("ZZZ Stokes assemble")
ssc.assemble_global_system(True)
del time
time = Timer("ZZZ Stokes solve")
for bc in bcs:
    ssc.apply_boundary(bc)
ssc.solve_problem(Uhbar.cpp_object(), Uh.cpp_object(), "mumps", "default")
del time

# Transfer the computed velocity function and compute functionals
velocity_assigner.assign(u_vec, Uh.sub(0))
output_data_step(append=False)

time_snap_shot_interval = 5.0
next_snap_shot_time = time_snap_shot_interval
Example #23
# This script illustrates how the Xd-1d mesh could be made from 1d
from dolfin import Mesh, MeshFunction, Timer
from mesh_around_1d import mesh_around_1d
from meshconvert import convert2xml
import subprocess, os
import numpy as np

timer = Timer('Meshing')
timer.start()

path = 'vasc_mesh.xml.gz'
geo, _ = mesh_around_1d(path, size=1.)

mesh = Mesh(path)  # keep the 1d mesh; it is compared against meshXd below
data = MeshFunction('int', mesh, 'widths.xml.gz')

out = subprocess.check_output(['gmsh', '--version'], stderr=subprocess.STDOUT)
assert out.split('.')[0] == '3', 'Gmsh 3+ is required'

ccall = 'gmsh -3 -optimize %s' % geo
subprocess.call(ccall, shell=True)

# Convert
xml_file = 'vasc_GMSH.xml'
msh_file = 'vacs_GMSH.msh'
convert2xml(msh_file, xml_file)

# Throw away the line function as it is expensive to store all the edges
meshXd = Mesh(xml_file)
# Make sure that the assumption of correspondence hold
assert np.linalg.norm(mesh.coordinates() -
                      meshXd.coordinates()[:mesh.num_vertices()]) < 1E-13
Example #24
    def update(self):
        '''Evaluate now (with the fields as they are at the moment)'''
        timer = Timer('update'); timer.start()
        status = [f() for f in self.eval_fields]
        info('Probe update took %g' % timer.stop())
        return all(status)
Example #25
        )

        # Initialize the l2 projection
        lstsq_psi = l2projection(p, W, property_idx)

        # Set initial condition at mesh and particles
        psi0_h.interpolate(psi0_expression)
        p.interpolate(psi0_h.cpp_object(), property_idx)

        # Initialize add/delete particle
        AD = AddDelete(p, 15, 25, [psi0_h])

        step = 0
        t = 0.0
        area_0 = assemble(psi0_h * dx)
        timer = Timer()

        timer.start()
        while step < num_steps:
            step += 1
            t += float(dt)

            if comm.rank == 0:
                print("Step " + str(step))

            # Advect particle, assemble and solve pde projection
            t1 = Timer("[P] Advect particles step")
            AD.do_sweep()
            ap.do_step(float(dt))
            AD.do_sweep_failsafe(4)
            del t1
Example #26
    comm = mpi_comm_world()
    h5 = HDF5File(comm, args.tile, 'r')
    tile = Mesh()
    h5.read(tile, 'mesh', False)

    data = {}
    cell_dim = tile.topology().dim()
    facet_dim = cell_dim - 1

    if args.facet_tags: 
        data = load_data(tile, h5, args.facet_tags, facet_dim, data)
    
    if args.cell_tags: 
        data = load_data(tile, h5, args.cell_tags, cell_dim, data)

    t = Timer('tile')
    mesh, mesh_data = TileMesh(tile, shape, mesh_data=data)
    info('\nTiling took %g s; nvertices %d, ncells %d' % (t.stop(),
                                                          mesh.num_vertices(),
                                                          mesh.num_cells()))

    # Saving
    t = Timer('save')
    h5_file = '%s_%d_%d.h5' % (root, shape[0], shape[1])
        
    out = HDF5File(mesh.mpi_comm(), h5_file, 'w')
    out.write(mesh, 'mesh')
    
    tt = Timer('data')
    # To mesh functions
    if mesh_data:
Example #27
    x[:] *= args.scale_x

    # Did it work?
    print(dx, 'vs', x.max(axis=0) - x.min(axis=0), xmin, x.min(axis=0))

    data = {}
    cell_dim = tile.topology().dim()
    facet_dim = cell_dim - 1

    if args.facet_tags:
        data = load_data(tile, (h5, args.facet_tags), facet_dim, data)

    if args.cell_tags:
        data = load_data(tile, (h5, args.cell_tags), cell_dim, data)

    t = Timer('tile')
    mesh, mesh_data = TileMesh(tile, shape, mesh_data=data)
    info('\nTiling took %g s; nvertices %d, nfacets %d, ncells %d' %
         (t.stop(), mesh.num_vertices(), mesh.init(2), mesh.num_cells()))

    x_ = mesh.coordinates()
    print('Final mesh size', x_.min(axis=0), x_.max(axis=0) - x_.min(axis=0))

    # Saving
    t = Timer('save')
    h5_file = '%s_%d_%d.h5' % (root, shape[0], shape[1])

    out = HDF5File(mesh.mpi_comm(), h5_file, 'w')
    out.write(mesh, 'mesh')

    tt = Timer('data')
Example #28
    forms_stokes["A_S"],
    forms_stokes["G_S"],
    forms_stokes["B_S"],
    forms_stokes["Q_S"],
    forms_stokes["S_S"],
)

ex = as_vector((1.0, 0.0))
ey = as_vector((0.0, 1.0))

# Prepare time stepping loop
num_steps = np.rint(Tend / float(dt))
step = 0
t = 0.0

timer = Timer()
timer.start()

while step < num_steps:
    step += 1
    t += float(dt)

    if comm.Get_rank() == 0:
        print("Step number " + str(step))

    t1 = Timer("[P] Sweep and step")
    # Limit number of particles
    AD.do_sweep()

    # Advect particles
    ap.do_step(float(dt))
Example #29
            forms_pde["R_a"],
            forms_pde["S_a"],
            [],
            property_idx,
        )

        # Set initial condition at mesh and particles
        psi0_h.interpolate(psi0_expression)
        p.interpolate(psi0_h.cpp_object(), property_idx)

        # Add/Delete particles
        AD = AddDelete(p, 10, 20, [psi0_h])

        step = 0
        area_0 = assemble(psi0_h * dx)
        timer = Timer()

        timer.start()
        while step < num_steps:
            step += 1

            # Add/delete particles, must be done before advection!
            AD.do_sweep()
            # Advect particle, assemble and solve pde projection
            ap.do_step(float(dt))

            pde_projection.assemble(True, True)
            pde_projection.solve_problem(psibar_h.cpp_object(),
                                         psi_h.cpp_object(), "gmres",
                                         "hypre_amg")
            # Update old solution
Example #30
def create_multicable(cable_pos, res=0.1):
    geo = pygmsh.built_in.geometry.Geometry()
    num_cables = int(len(cable_pos) / 2)
    metal_circles = []
    rubber_circles = []
    for i in range(num_cables):
        metal_circles.append(
            geo.add_circle((cable_pos[2 * i], cable_pos[2 * i + 1], 0),
                           metal_radius,
                           lcar=res / 4))
        rubber_circles.append(
            geo.add_circle((cable_pos[2 * i], cable_pos[2 * i + 1], 0),
                           rubber_radius,
                           lcar=res / 2,
                           holes=[metal_circles[i]]))
        geo.add_physical_surface(metal_circles[i].plane_surface,
                                 label=int(metal_marker + i))
        geo.add_physical_surface(rubber_circles[i].plane_surface,
                                 label=int(rubber_marker + i))
        geo.add_physical_line(metal_circles[i].line_loop.lines,
                              label=int(metaliso + i))
        geo.add_physical_line(rubber_circles[i].line_loop.lines,
                              label=int(isofill + i))

    fill_circle = geo.add_circle((0, 0, 0),
                                 outer_radius,
                                 lcar=res,
                                 holes=metal_circles + rubber_circles)

    geo.add_physical_surface(fill_circle.plane_surface, label=fill_marker)

    geo.add_physical_line(fill_circle.line_loop.lines, label=ext)

    with Timer("USER_TIMING: Generate mesh") as t:
        (points, cells, point_data,
         cell_data, field_data) = pygmsh.generate_mesh(geo,
                                                       prune_z_0=True,
                                                       verbose=True)  #,
        # geo_filename="mesh.geo")

    meshio.write(
        "multicable.xdmf",
        meshio.Mesh(points=points, cells={"triangle": cells["triangle"]}))
    meshio.write(
        "mf.xdmf",
        meshio.Mesh(points=points,
                    cells={"line": cells["line"]},
                    cell_data={
                        "line": {
                            "name_to_read": cell_data["line"]["gmsh:physical"]
                        }
                    }))
    meshio.write(
        "cf.xdmf",
        meshio.Mesh(points=points,
                    cells={"triangle": cells["triangle"]},
                    cell_data={
                        "triangle": {
                            "name_to_read":
                            cell_data["triangle"]["gmsh:physical"]
                        }
                    }))
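
A hypothetical call, assuming the module-level constants the function reads (metal_radius, rubber_radius, outer_radius and the marker labels) are defined elsewhere:

# Two cables centred at (0, 0) and (5, 0); positions are passed flat as (x0, y0, x1, y1)
create_multicable([0.0, 0.0, 5.0, 0.0], res=0.1)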
            forms_pde["R_a"],
            forms_pde["S_a"],
            [],
            property_idx,
        )

        # Initialize the l2 projection
        lstsq_psi = l2projection(p, W, property_idx)

        # Set initial condition at mesh and particles
        psi0_h.interpolate(psi0_expression)
        p.interpolate(psi0_h, property_idx)

        step = 0
        area_0 = assemble(psi0_h * dx)
        timer = Timer("[P] Advection loop")
        timer.start()
        while step < num_steps:
            step += 1

            # Advect particle, assemble and solve pde projection
            t1 = Timer("[P] Advect particles step")
            ap.do_step(float(dt))
            del t1

            if projection_type == "PDE":
                t1 = Timer("[P] Assemble PDE system")
                pde_projection.assemble(True, True)
                del t1
                t1 = Timer("[P] Solve projection")
                pde_projection.solve_problem(psibar_h, psi_h, "superlu_dist", "default")
    def solve(self):
        """
        Solve optimal control problem

        """

        msg = "You need to build the problem before solving it"
        assert hasattr(self, "opt_type"), msg

        module, method = self.opt_type.split("_")

        logger.info("\n" + "Starting optimization".center(100, "-"))
        logger.info(
            "Scale: {}, \nDerivative Scale: {}".format(
                self.rd.scale, self.rd.derivative_scale
            )
        )
        logger.info(
            "Tolerace: {}, \nMaximum iterations: {}\n".format(self.tol, self.max_iter)
        )

        t = Timer()
        t.start()

        if self.oneD:

            res = minimize_1d(self.rd, self.x[0], **self.options)
            x = res["x"]

        else:

            if module == "scipy":
                res = scipy_minimize(self.rd, self.x, **self.options)
                x = res["x"]

            elif module == "pyOpt":

                obj, x, d = self.problem(**self.options)

            elif module == "moola":

                sol = self.solver.solve()
                x = sol["control"].data

            elif module == "ipopt":
                x = self.solver.solve(self.x)

            else:
                msg = (
                    "Unknown optimization type {}. "
                    "Define the optimization type as 'module-method', "
                    "where module is e.g. scipy or pyOpt and method is "
                    "e.g. slsqp."
                )
                raise ValueError(msg)

        run_time = t.stop()

        opt_result = {}

        opt_result["x"] = x
        opt_result["nfev"] = self.rd.iter
        opt_result["nit"] = self.rd.iter
        opt_result["njev"] = self.rd.nr_der_calls
        opt_result["ncrash"] = self.rd.nr_crashes
        opt_result["run_time"] = run_time
        opt_result["controls"] = self.rd.controls_lst
        opt_result["func_vals"] = self.rd.func_values_lst
        opt_result["forward_times"] = self.rd.forward_times
        opt_result["backward_times"] = self.rd.backward_times
        opt_result["grad_norm"] = self.rd.grad_norm

        return self.rd, opt_result
Example #33
from dolfin import UnitSquareMesh, Timer
import matplotlib.pyplot as plt
import numpy as np
from tieler import TileMesh

tile = UnitSquareMesh(1, 1)

ns = [128, 256, 1024, 2048, 4096]
dts = []
for n in ns:
    shape = (n + 1, n - 1)  # To get odd as well

    t = Timer('s')
    mesh, mesh_data = TileMesh(tile, shape, mesh_data={})
    # Get tile for mesh write as well
    dts.append(t.stop())
    print(mesh.num_cells())

a, b = np.polyfit(np.log(ns), np.log(dts), 1)

ns, dts = map(np.array, (ns, dts))

plt.figure()
plt.loglog(ns, dts, basex=2., basey=2., marker='x')
plt.loglog(ns,
           np.exp(b) * ns**a,
           basex=2.,
           basey=2.,
           linestyle='dashed',
           label='O(N^%.2f)' % a)
plt.ylabel('T')
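
The slope a of the log-log fit is the empirical complexity exponent: dts is approximately exp(b) * ns**a, so doubling n multiplies the runtime by 2**a. A toy check of that readout:

import numpy as np

ns = np.array([128., 256., 512.])
dts = 0.1 * (ns / 128.) ** 2        # synthetic timings with quadratic growth
a, b = np.polyfit(np.log(ns), np.log(dts), 1)
assert abs(a - 2.0) < 1e-6          # the fitted slope recovers the exponent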
Example #34
    def init_pcd(self, pcd_assembler, pcd_pc_class=None):
        """Initialize from ``PCDAssembler`` instance. Needs to be called
        after ``setOperators`` and ``setUp``. That's why two-phase
        initialization is needed: first ``__init__``, then ``init_pcd``

        Note that this function automatically calls setFromOptions on
        all subKSP objects.
        """

        # Get subfield index sets
        V = pcd_assembler.function_space()
        is0 = dofmap_dofs_is(V.sub(0).dofmap())
        is1 = dofmap_dofs_is(V.sub(1).dofmap())

        assert self.comm == V.mesh().mpi_comm(), "Non-matching MPI comm"

        # Set subfields index sets
        # NOTE: Doing only so late here so that user has a chance to
        # set options prefix, see PETSc issue #160
        self.pc.setFieldSplitIS(["u", is0], ["p", is1])

        # From now on forbid setting options prefix (at least from Python)
        self.setOptionsPrefix = self._forbid_setOptionsPrefix

        # Setup fieldsplit preconditioner
        pc_prefix = self.pc.getOptionsPrefix() or ""
        with Timer("FENaPack: PCDKSP PC {} setup".format(pc_prefix)):
            self.pc.setUp()

        # Extract fieldsplit subKSPs (only once self.pc is set up)
        ksp0, ksp1 = self.pc.getFieldSplitSubKSP()

        # Set some sensible defaults
        ksp0.setType(PETSc.KSP.Type.PREONLY)
        ksp0.pc.setType(PETSc.PC.Type.LU)
        ksp0.pc.setFactorSolverPackage(
            get_default_factor_solver_package(self.comm))
        ksp1.setType(PETSc.KSP.Type.PREONLY)
        ksp1.pc.setType(PETSc.PC.Type.PYTHON)

        # Setup 0,0-block pc so that we have accurate timing
        ksp0_prefix = ksp0.getOptionsPrefix()
        with Timer("FENaPack: {} setup".format(ksp0_prefix)):
            ksp0.setFromOptions()  # Override defaults above by user's options
            ksp0.pc.setUp()

        # Initialize PCD PC context
        pcd_pc_prefix = ksp1.pc.getOptionsPrefix()
        pcd_pc_opt = PETSc.Options(pcd_pc_prefix).getString(
            "pc_python_type", "")
        # Use PCDPC class given by option
        if pcd_pc_opt != "":
            ksp1.setFromOptions()  # Override defaults above by user's options
            pcd_pc = ksp1.pc.getPythonContext()
        # Use PCDPC class specified as argument
        elif pcd_pc_class is not None:
            pcd_pc = pcd_pc_class()
            ksp1.pc.setPythonContext(pcd_pc)
            ksp1.setFromOptions()  # Override defaults above by user's options
        # Use default PCDPC class
        else:
            pcd_pc = PCDPC_BRM1()
            ksp1.pc.setPythonContext(pcd_pc)
            ksp1.setFromOptions()  # Override defaults above by user's options

        # Get backend implementation of PCDAssembler
        # FIXME: Make me parameter
        #deep_submats = False
        deep_submats = True
        A = self.getOperators()[0]
        pcd_interface = PCDInterface(pcd_assembler,
                                     A,
                                     is0,
                                     is1,
                                     deep_submats=deep_submats)

        # Provide assembling routines to PCD
        try:
            pcd_pc.init_pcd(pcd_interface)
        except Exception:
            print("Initialization of PCD PC from PCDAssembler failed!")
            print("Maybe wrong PCD PC class or PCDAssembler instance.")
            raise

        # Setup PCD PC so that we have accurate timing
        with Timer("FENaPack: {} setup".format(pcd_pc_prefix)):
            ksp1.pc.setUp()
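
The docstring's two-phase initialization (first __init__, then init_pcd once setOperators/setUp have run) reduces to an ordering constraint. A generic sketch of the pattern, not the FENaPack class itself:

class TwoPhaseKSP(object):
    def __init__(self):
        self._operators_set = False

    def setOperators(self, A):
        self.A = A
        self._operators_set = True

    def init_pcd(self, pcd_assembler):
        # must come second: the PC layout depends on the operators being set
        assert self._operators_set, "call setOperators/setUp before init_pcd"
        self.pcd_assembler = pcd_assembler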