Code Example #1
File: cpp_expression.py Project: jakobes/Ocellaris
def ocellaris_interpolate(simulation,
                          cpp_code,
                          description,
                          V,
                          function=None,
                          params=None):
    """
    Create a C++ expression with parameters like time and all scalars in
    simulation.data available (nu and rho for single phase simulations)

    Interpolate the expression into a dolfin.Function. The results can be
    returned in a provided function, or a new function will be returned
    """
    # Compile the C++ code
    with dolfin.Timer('Ocellaris make expression'):
        expr = make_expression(simulation,
                               cpp_code,
                               description,
                               element=V.ufl_element(),
                               params=params)

    if function is None:
        function = dolfin.Function(V)
    else:
        Vf = function.function_space()
        if (not Vf.ufl_element().family() == V.ufl_element().family()
                and Vf.dim() == V.dim()):
            ocellaris_error(
                'Error in ocellaris_interpolate',
                'Provided function is not in the specified function space V',
            )

    with dolfin.Timer('Ocellaris interpolate expression'):
        return function.interpolate(expr)
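
The Ocellaris snippet above times a block with "with dolfin.Timer(...)", so the timer starts on entry and stops when the block exits. A minimal standalone sketch of the same pattern, assuming a legacy DOLFIN release (e.g. 2018.1) where Timer supports the with-statement; the mesh, space and expression are illustrative only:

# Minimal sketch (not from the project): timing interpolation with a
# context-managed dolfin.Timer, assuming legacy DOLFIN (e.g. 2018.1).
import dolfin

mesh = dolfin.UnitSquareMesh(32, 32)
V = dolfin.FunctionSpace(mesh, 'CG', 1)
expr = dolfin.Expression('sin(x[0])*cos(x[1])', degree=2)

with dolfin.Timer('Example: interpolate expression'):
    f = dolfin.interpolate(expr, V)
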
Code Example #2
File: finfet-levels.py Project: jhwnkim/nanopores
def solve(h):
    # --- create mesh and geometrical/physical context information
    tic()
    t = dolfin.Timer("mesh")
    geo = finfet.create_geometry(lc=h)
    print "Number of elements:", geo.mesh.num_cells()
    print "Number of vertices:", geo.mesh.num_vertices()
    #finfet.plot()
    phys = nanopores.Physics("finfet",
                             geo,
                             dopants=dopants(Ndop),
                             vD=None,
                             vG=0.,
                             vS=None)
    #phys.add_dopants
    t.stop()

    # --- definition and solution of PDE
    t = dolfin.Timer("init")
    pde = nanopores.NonstandardPB(geo, phys)
    pde.tolnewton = 1e-5
    pde.newtondamp = 1.
    t.stop()
    t = dolfin.Timer("solve")
    pde.solve()
    t.stop()
    u = pde.solution
    return u, toc()
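
Each named dolfin.Timer also accumulates into DOLFIN's global timing table, so the per-stage timers above ("mesh", "init", "solve") can be summarized at the end of a run. A short sketch, assuming the 2018.1 API for list_timings (older releases accept no arguments):

# Sketch (assumption: legacy DOLFIN 2018.1 API; older versions call
# dolfin.list_timings() with no arguments).
import dolfin

t = dolfin.Timer("solve")   # constructing a named Timer starts it
# ... the work being timed goes here ...
elapsed = t.stop()          # stop() returns the elapsed wall time in seconds
print("solve took %.3f s" % elapsed)

# Print the accumulated table of all named timers on this process
dolfin.list_timings(dolfin.TimingClear.keep, [dolfin.TimingType.wall])
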
Code Example #3
    def step(self, t0: float, t1: float) -> None:
        """Solve on the given time step (t0, t1).

        *Arguments*
          interval (:py:class:`tuple`)
            The time interval (t0, t1) for the step

        *Invariants*
          Assuming that v\_ is in the correct state for t0, gives
          self.v in correct state at t1.
        """
        timer = df.Timer("PDE Step")

        # Extract interval and thus time-step
        dt = t1 - t0
        theta = self.parameters["theta"]
        t = t0 + theta * dt
        self.time.assign(t)

        # Update matrix and linear solvers etc as needed
        self._update_solver(dt)

        # Assemble right-hand-side
        timer0 = df.Timer("Assemble rhs")
        df.assemble(self._rhs, tensor=self._rhs_vector)
        del timer0  # deleting the Timer stops it and records the assembly time

        # Solve problem
        self.linear_solver.solve(self.v.vector(), self._rhs_vector)
        timer.stop()
Code Example #4
    def step(self, u, dt):
        'Move particles by forward Euler x += u*dt'
        start = df.Timer('shift')
        for cwp in self.particle_map.itervalues():
            # Restrict once per cell
            u.restrict(self.coefficients,
                       self.element,
                       cwp,
                       cwp.get_vertex_coordinates(),
                       cwp)
            for particle in cwp.particles:
                x = particle.position
                # Compute velocity at position x
                self.element.evaluate_basis_all(self.basis_matrix,
                                                x,
                                                cwp.get_vertex_coordinates(),
                                                cwp.orientation())
                x[:] = x[:] + dt*np.dot(self.coefficients, self.basis_matrix)[:]
        # Recompute the map
        stop_shift = start.stop()
        start = df.Timer('relocate')
        info = self.relocate()
        stop_reloc = start.stop()
        # We return computation time per process
        return (stop_shift, stop_reloc)
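
The returned (stop_shift, stop_reloc) pair holds elapsed times on the local MPI rank only. If a single number per step is wanted, the per-rank times can be reduced across the communicator, in the spirit of the dolfin.MPI.max call in Code Example #16 below. A hedged sketch; "mover" is a hypothetical instance of the class above, and mpi_comm_world matches the older DOLFIN API used elsewhere in these examples:

# Sketch (assumption: older DOLFIN API with dolfin.mpi_comm_world();
# "mover" is a hypothetical object providing the step() method shown above).
import dolfin as df

shift_time, reloc_time = mover.step(u, dt)   # per-rank wall times
comm = df.mpi_comm_world()
max_shift = df.MPI.max(comm, shift_time)     # slowest rank dominates the step
max_reloc = df.MPI.max(comm, reloc_time)
if df.MPI.rank(comm) == 0:
    print("shift: %.3f s, relocate: %.3f s" % (max_shift, max_reloc))
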
Code Example #5
def solve(w_, t, dt, q_rhs, solvers, enable_EC, enable_NS,
          use_iterative_solvers, bcs,
          **namespace):
    """ Solve equations. """
    # Update the time-dependent source terms
    for qi in q_rhs.values():
        qi.t = t+dt
    # Update the time-dependent boundary conditions
    for boundary_name, bcs_fields in bcs.iteritems():
        for field, bc in bcs_fields.iteritems():
            if isinstance(bc.value, df.Expression):
                bc.value.t = t+dt

    timer_outer = df.Timer("Solve system")
    for subproblem, enable in zip(["EC", "NS"], [enable_EC, enable_NS]):
        if enable:
            timer_inner = df.Timer("Solve subproblem " + subproblem)
            df.mpi_comm_world().barrier()
            if subproblem == "NS" and use_iterative_solvers:
                solver, a, L, bcs = solvers[subproblem]
                A = df.assemble(a)
                b = df.assemble(L)
                for bc in bcs:
                    bc.apply(A)
                    bc.apply(b)
                solver.set_operator(A)
                solver.solve(w_["NS"].vector(), b)
            else:
                solvers[subproblem].solve()
            timer_inner.stop()

    timer_outer.stop()
Code Example #6
def solve(tstep, w_, w_1, solvers, enable_EC, enable_NS, **namespace):
    """ Solve equations. """
    timer_outer = df.Timer("Solve system")
    if enable_EC:
        timer_inner = df.Timer("Solve subproblem EC")
        df.mpi_comm_world().barrier()
        solvers["EC"].solve()
        timer_inner.stop()
    if enable_NS:
        # Step 1: Predict u
        timer = df.Timer("NS: Predict velocity.")
        solvers["NSu"]["predict"].solve()
        timer.stop()

        # Step 2: Pressure correction
        timer = df.Timer("NS: Pressure correction")
        solvers["NSp"].solve()
        timer.stop()

        # Step 3: Velocity correction
        timer = df.Timer("NS: Velocity correction")
        solvers["NSu"]["correct"].solve()
        timer.stop()

    timer_outer.stop()
Code Example #7
File: custom_linear.py Project: NREL/pfibs
    def init_solver_options(self):
 
        ## Start the timer ##
        timer = df.Timer("pFibs: Setup Solver Options")

        if self.log_level >= 1:
            timer_fieldsplit = df.Timer("pFibs: Setup Solver Options - set_fieldsplit")
        ## Manually construct fieldsplit if more than one field detected ##
        if not self.solver and self.split_0 != "":
            self._set_fieldsplit(self.options_prefix,self.split_0,self.ksp(),True)
        
        ## Define fieldsplit if no split was called ##
        elif self.split_0 == "" and self.num_fields > 1 and not self.solver:
            self.vbp.split('s',list(self.block_field.keys()))
            self.split_0 = self.vbp.split_0
            self._set_fieldsplit(self.options_prefix,self.split_0,self.ksp(),True)
        
        ## Or setup all solver and fieldsplit options via solver dict ##
        elif self.solver:
            self._set_petsc_options(self.options_prefix,self.solver)
        if self.log_level >= 1:
            timer_fieldsplit.stop()

        if self.log_level >= 1:
            timer1 = df.Timer("pFibs: Setup Solver Options - setup PETSC commandline options")

        ## Set PETSc commandline options ##
        self.ksp().setFromOptions()
        self.ksp().setUp()

        if self.log_level >= 1:
            timer1.stop()

        ## Stop the timer ##
        timer.stop()
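
The pFibs code guards each timer behind a log_level check, which keeps timing overhead out of production runs at the cost of some repetition. One way to factor the pattern out is a helper that returns either a real dolfin.Timer or a no-op stand-in; this is a hypothetical sketch, not part of pFibs:

# Hypothetical helper (not part of pFibs): create a timer only when the
# verbosity threshold is met, otherwise return a do-nothing object.
import dolfin as df


class _NullTimer(object):
    """Do-nothing stand-in so callers can always call start()/stop()."""
    def start(self):
        pass

    def stop(self):
        return 0.0


def maybe_timer(task, log_level, min_level):
    """Return a df.Timer for task if log_level >= min_level, else a no-op."""
    return df.Timer(task) if log_level >= min_level else _NullTimer()


# Usage, mirroring the structure of init_solver_options() above (hypothetical):
# timer_fieldsplit = maybe_timer("pFibs: Setup Solver Options - set_fieldsplit",
#                                self.log_level, 1)
# ...
# timer_fieldsplit.stop()
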
Code Example #8
    def fixedpoint(self, tol=None, damp=None):
        if tol is None: tol = self.params["tolnewton"]
        if damp is None: damp = self.params["damp"]
        imax = self.params["ipicard"]
        inewton = self.params["inewton"]
        nverbose = self.params["nverbose"]
        verbose = self.params["verbose"]
        ntol = tol

        times = {name : 0. for name in self.solvers}
        tcum = 0.
        U = self.coupled.solutions
        Uold = self.coupled.oldsolutions
        self.converged = False

        for i in range(1, imax+1):
            if verbose:
                print("\n-- Fixed-Point Loop %d of max. %d" % (i, imax))
            tloop = dolfin.Timer("loop")
            for name, solver in self.solvers.items():
                if verbose:
                    print("    Solving %s." % name)
                t = dolfin.Timer(name)
                if solver.is_linear:
                    solver.solve()
                else:
                    for _ in newtoniteration(
                        solver, ntol, damp, inewton, nverbose):
                        pass
                times[name] += t.stop()
            # cumulative time
            tcum += tloop.stop()

            yield i

            # calculate the error
            errors = [(name, error(U[name], Uold[name])) for name in U]
            if verbose:
                for item in errors: print("    error %s: %s" % item)
            err = sum(err for _, err in errors)/len(errors)

            # check for stopping
            if err < tol:
                self.converged = True
                if verbose:
                    print("- Break at iteration %d because err %.3g < tol %.3g" %(i, err, tol))
                break

            self.save_estimate("err hybrid i", err, N=i)
            self.save_estimate("err hybrid time", err, N=tcum)
            self.coupled.update_uold()
        else:
            raise Exception("Error: Fixed-Point Loop did not converge.")

        Tt = sum(times.values())
        if verbose:
            print("\n CPU Time (solve): %.2f s" % Tt)
            for name in self.solvers:
                print("  -) %s: %.2f s" %(name, times[name]))
Code Example #9
    def step(self, u, dt):
        'Move particles by forward Euler x += u*dt'
        start = df.Timer('shift')

        #print self.particle_map.total_number_of_particles()

        for cwp in self.particle_map.itervalues():

            # Restrict once per cell

            u.restrict(self.coefficients, self.element, cwp,
                       cwp.get_vertex_coordinates(), cwp)

            for particle in cwp.particles:

                x = particle.position
                self.element.evaluate_basis_all(self.basis_matrix, x,
                                                cwp.get_vertex_coordinates(),
                                                cwp.orientation())

                u_f = np.dot(self.coefficients, self.basis_matrix)[:]
                u_p = particle.properties['u_p']
                particle.properties['u_1'] = u_f
                particle.properties['x1'] = 1 * x[:]
                particle.properties['up1'] = 1 * u_p

                forces = self.g

                u_new = self.drag_model_1(u_p, u_f, u_f, forces, dt)
                x[:] = x[:] + dt * 0.5 * (u_new + u_p[:])

                self.element.evaluate_basis_all(self.basis_matrix, x,
                                                cwp.get_vertex_coordinates(),
                                                cwp.orientation())
                # Fluid velocity
                u_f = np.dot(self.coefficients, self.basis_matrix)[:]
                u_p = particle.properties['u_p']
                u_f_1 = particle.properties['u_1']
                x1 = particle.properties['x1']

                forces = self.g
                u_new = self.drag_model_2(u_p, u_f, u_f_1, forces, dt)
                x[:] = x1[:] + dt * 0.5 * (u_new[:] + u_p[:])
                particle.properties['u_p'] = u_new[:]

                #cell_index, distance = self.boundary_tree.compute_closest_entity(df.Point(x) )
                #particle.properties['dist'] = distance

                #if u_p[0] > 0.2 + 1e-8:

                #    return 1

        # Recompute the map
        stop_shift = start.stop()
        start = df.Timer('relocate')
        info = self.relocate()
        stop_reloc = start.stop()
Code Example #10
    def setup_fields(self):

        if self.log_level >= 1:
            timer = df.Timer("pFibs: Setup fields")

        ## Default if empty ##
        if not self.block_field:
            for i in range(self.V.num_sub_spaces()):
                self.block_field.update({i: [i, i, {}]})
            self.num_fields = len(self.block_field)
        if self.num_fields == 0:
            self.num_fields += 1
        ## Create PetscSection ##
        self.section = PETSc.Section().create()
        self.section.setNumFields(self.num_fields)
        self.section.setChart(0, len(self.V.dofmap().dofs()))

        if self.log_level >= 2:
            timer_iterBlockFields = df.Timer(
                "pFibs: Setup fields - Iterate through block fields")

        ## Iterate through all the block fields ##
        for key in self.block_field:
            self.section.setFieldName(self.block_field[key][0], str(key))

            ## Extract dofs ##
            (dofs, ndof) = self.extract_dofs(key)

            ## Record dof count for each field ##
            self.field_size.update({self.block_field[key][0]: ndof})

            if self.log_level >= 3:
                timer_assignDof = df.Timer(
                    "pFibs: Setup fields - Iterate through block fields - assign dof"
                )

            assign_dof(self.section, dofs, self.goffset,
                       self.block_field[key][0])

            if self.log_level >= 3:
                timer_assignDof.stop()

        if self.log_level >= 2:
            timer_iterBlockFields.stop()

        ## Create DM and assign PetscSection ##
        self.section.setUp()
        self.dm = PETSc.DMShell().create()
        self.dm.setDefaultSection(self.section)
        self.dm.setUp()

        ## Prevent any further modification to block_field ##
        self.finalize_field = True

        if self.log_level >= 1:
            timer.stop()
Code Example #11
def calculate(**params):
    # this time we do it the simple way: just pass every parameter
    # have to be careful though
    globals().update(params)
    params.update(_globals())

    # use some of the parameters
    params["x0"] = [r0, 0., z0]
    params["l3"] = l3*domscale
    params["R"] = R*domscale
    # TODO: does this do anything?
    nanopores.IllposedNonlinearSolver.newtondamp = newtondamp
    nanopores.PNPS.tolnewton = tolnewton

    t = dolfin.Timer("meshing")
    geo = nanopores.geo_from_xml_threadsafe(geo_name, **params)
    print "Mesh generation time:",t.stop()

    #dolfin.plot(geo.submesh("solid"), interactive=True)
    phys = nanopores.Physics(phys_name, geo, **params)

    t = dolfin.Timer("PNPS")
    pnps = nanopores.PNPS(geo, phys)
    if skip_stokes:
        pnps.solvers.pop("Stokes")
    pnps.alwaysstokes = True
    pnps.solve()
    print "Time to calculate F:",t.stop()
    #pnps.visualize("fluid")

    (v, cp, cm, u, p) = pnps.solutions(deepcopy=True)
    # F = phys.Feff(v, u)
    # def avg(u, dx):
    #     return dolfin.assemble(u*dx)/dolfin.assemble(dolfin.Constant(1.)*dx)

    Jcomp = ["Jzdiff", "Jzdrift", "Jzstokes"]
    lPore = geo.params["ltop"]+geo.params["lctr"]+geo.params["lbtm"]
    Jzdiff = dolfin.Constant((1.0/phys.lscale)**2) * phys.cFarad*phys.D*phys.rDPore*phys.grad(-cp+cm)[2] /lPore * geo.dx("pore")
    Jzdrift = dolfin.Constant((1.0/phys.lscale)**2) * phys.cFarad*phys.mu*phys.rDPore*(-cp-cm)*phys.grad(v)[2]/lPore * geo.dx("pore")
    Jzstokes = dolfin.Constant((1.0/phys.lscale)**2) * phys.cFarad*phys.stokesdampPore*(cp-cm)*u[2]/lPore * geo.dx("pore")

    Jcomponents = [j+p for j in Jcomp for p in ["top","btm"]]
    Jzdifftop = dolfin.Constant((1.0/phys.lscale)**2) * phys.cFarad*phys.D*phys.rDPore*phys.grad(-cp+cm)[2] /geo.params["ltop"] * geo.dx("poretop")
    Jzdrifttop = dolfin.Constant((1.0/phys.lscale)**2) * phys.cFarad*phys.mu*phys.rDPore*(-cp-cm)*phys.grad(v)[2]/geo.params["ltop"] * geo.dx("poretop")
    Jzstokestop = dolfin.Constant((1.0/phys.lscale)**2) * phys.cFarad*phys.stokesdampPore*(cp-cm)*u[2]/geo.params["ltop"] * geo.dx("poretop")
    Jzdiffbtm = dolfin.Constant((1.0/phys.lscale)**2) * phys.cFarad*phys.D*phys.rDPore*phys.grad(-cp+cm)[2] /geo.params["lbtm"] * geo.dx("porebottom")
    Jzdriftbtm = dolfin.Constant((1.0/phys.lscale)**2) * phys.cFarad*phys.mu*phys.rDPore*(-cp-cm)*phys.grad(v)[2]/geo.params["lbtm"] * geo.dx("porebottom")
    Jzstokesbtm = dolfin.Constant((1.0/phys.lscale)**2) * phys.cFarad*phys.stokesdampPore*(cp-cm)*u[2]/geo.params["lbtm"] * geo.dx("porebottom")

    result = pnps.get_functionals()

    for j in Jcomp+Jcomponents:
        result.update({j: 1e12*dolfin.assemble(locals()[j])})

    return result
Code Example #12
def calculate(**params):
    # this time we do it the simple way: just pass every parameter
    # have to be careful though
    globals().update(params)
    params.update(_globals())

    # use some of the parameters
    params["x0"] = [r0, 0., z0]
    params["l3"] = l3 * domscale
    params["R"] = R * domscale
    # does this do anything?
    nanopores.IllposedNonlinearSolver.newtondamp = newtondamp
    nanopores.PNPS.tolnewton = tolnewton

    t = dolfin.Timer("meshing")
    geo = nanopores.geo_from_xml_threadsafe(geo_name, **params)
    print "Mesh generation time:", t.stop()

    #dolfin.plot(geo.submesh("solid"), interactive=True)
    phys = nanopores.Physics(phys_name, geo, **params)

    t = dolfin.Timer("PNPS")
    pnps = nanopores.PNPS(geo, phys)
    if skip_stokes:
        pnps.solvers.pop("Stokes")
    pnps.solve()
    print "Time to calculate F:", t.stop()
    #pnps.visualize("fluid")

    (v, cp, cm, u, p) = pnps.solutions(deepcopy=True)
    F = phys.Feff(v, u)

    def avg(u, dx):
        return dolfin.assemble(u * dx) / dolfin.assemble(
            dolfin.Constant(1.) * dx)

    t = dolfin.Timer("SE")
    nanopores.SurvivalProblem.method["iterative"] = iterative
    steadysurv = nanopores.LinearPDE(geo,
                                     nanopores.SurvivalProblem,
                                     phys,
                                     F=F,
                                     goodexit=goodexit,
                                     badexit=badexit)
    steadysurv.solve(verbose=False)
    print "Time to solve SE problem:", t.stop()
    #steadysurv.visualize("exittime")
    psteady = steadysurv.solution

    result = {}
    for domain in ["poretop", "porecenter", "fluid_bulk_top"]:
        result["SER_%s" % domain] = 100. * avg(psteady, geo.dx(domain))
    result["SER_molecule"] = 100. * avg(psteady, geo.dS("moleculeb"))

    return result
Code Example #13
File: basic.py Project: mstiegl/BERNAISE
def solve(w_, solvers, enable_PF, enable_EC, enable_NS, **namespace):
    """ Solve equations. """
    timer_outer = df.Timer("Solve system")
    for subproblem, enable in zip(["PF", "EC", "NS"],
                                  [enable_PF, enable_EC, enable_NS]):
        if enable:
            timer_inner = df.Timer("Solve subproblem " + subproblem)
            mpi_barrier()
            solvers[subproblem].solve()
            timer_inner.stop()

    timer_outer.stop()
Code Example #14
File: custom_linear.py Project: NREL/pfibs
    def _extract_IS(self,sub_field_array,full_field_array):
        if self.log_level >= 3:
            timer = df.Timer("pFibs: Setup Solver Options - set_fieldsplit - create PCFieldSplit - extract_IS")
        num_sub_fields = len(sub_field_array)
        sub_field_indx = np.zeros((num_sub_fields,),dtype=np.int32)
        ISarray = np.array([],dtype=np.int32)
        ISsplit = np.zeros(num_sub_fields-1,dtype=np.int32)
        total_field_indx = 0

        if self.log_level >= 4:
            timer_allocNParrays = df.Timer("pFibs: Setup Solver Options - set_fieldsplit - create PCFieldSplit - extract_IS - allocate numpy arrays")
        ## Allocate the numpy arrays ##
        for i in range(num_sub_fields):
            dof_sum = 0
            if isinstance(sub_field_array[i][1],int):
                dof_sum = self.field_size[sub_field_array[i][1]]
            else:
                for field in sub_field_array[i][1]:
                    dof_sum += self.field_size[field]
            ISarray = np.append(ISarray,np.zeros(dof_sum,dtype=np.int32))
            if i == 0:
                ISsplit[0] = dof_sum
            elif i < num_sub_fields - 1:
                ISsplit[i] = dof_sum + ISsplit[i-1]
        ISarray = np.split(ISarray,ISsplit)
        if self.log_level >= 4:
            timer_allocNParrays.stop()

        if self.log_level >= 4:
            timer_iterSecChart = df.Timer("pFibs: Setup Solver Options - set_fieldsplit - create PCFieldSplit - extract_IS - iterate through Section Chart")
        total_field_indx = iterate_section(self.section, full_field_array, num_sub_fields, sub_field_array, total_field_indx, ISarray, sub_field_indx)
        if self.log_level >= 4:
            timer_iterSecChart.stop()

        if self.log_level >= 4:
            timer_PETScIS = df.Timer("pFibs: Setup Solver Options - set_fieldsplit - create PCFieldSplit - extract_IS - create PETSc IS")
        ## Global offsets communicated via MPI_Scan ##
        goffset = comm.scan(total_field_indx) - total_field_indx

        # Create PETSc IS #
        agg_IS = []
        for i in range(num_sub_fields):
            ISset = np.add(ISarray[i],goffset)
            agg_IS.append((sub_field_array[i][0],PETSc.IS().createGeneral([ISset])))

        if self.log_level >= 4:
            timer_PETScIS.stop()

        if self.log_level >= 3:
            timer.stop()
        return agg_IS
Code Example #15
    def field(self, *args, **kwargs):

        if self.log_level >= 1:
            timer = df.Timer("pFibs: Add block problem field")

        ## Check if splits already defined ##
        if self.finalize_field:
            raise RuntimeError(
                "Cannot add anymore fields after split has been called")

        ## Required input ##
        field_name = args[0]
        field_indx = args[1]

        ## Optional solver parameters ##
        solver_params = kwargs.get("solver", {})

        ## Check types ##
        if not isinstance(field_name, str):
            raise TypeError("Field name must be of type str")
        if not isinstance(solver_params, dict):
            raise TypeError("Solver parameters must be of type dict")

        ## Add to dictionary ##
        self.block_field.update(
            {field_name: [self.num_fields, field_indx, solver_params]})
        self.num_fields += 1

        if self.log_level >= 1:
            timer.stop()
Code Example #16
    def update(self, dt, velocity):
        """
        Update the values of the blending function beta at the facets
        according to the HRIC algorithm. Several versions of HRIC
        are implemented
        """
        degree_b = self.blending_function.ufl_element().degree()
        degree_u = velocity[0].ufl_element().degree()
        assert degree_b == 0, (
            'Only facetwise constant blending factors are supported! Got order %d'
            % degree_b
        )
        assert degree_u == 0, (
            'VelocityDGT0Projector must be enabled! Got order %d' % degree_u
        )

        # Check that the input is supported by the C++ code
        degree_a = self.alpha_function.ufl_element().degree()
        if degree_a != 0:
            ocellaris_error(
                'HRIC scalar field order must be 0',
                'HRIC implementation does not support order %d fields' % degree_a,
            )

        with dolfin.Timer('Ocellaris update HRIC'):
            if self.use_cpp:
                Co_max = self.update_cpp(dt, velocity)
            else:
                Co_max = self.update_python(dt, velocity)

        Co_max = dolfin.MPI.max(self.mesh.mpi_comm(), Co_max)
        self.simulation.reporting.report_timestep_value('Cof_max', Co_max)
Code Example #17
File: pcd.py Project: NREL/pfibs
    def apply(self, pc, x, y):
        ## Start the timer ##
        timer = df.Timer("pFibs PythonPC: Apply Preconditioner")

        ## Get working vectors ##
        z = x.duplicate()
        x.copy(result=z)

        ## Apply the boundary conditions to the RHS ##
        z[self.bc_dofs] = self.bc_value

        ## Perform: y = K_submat^{-1} z ##
        self.K_ksp.solve(z, y)

        ## Apply A_submat: z = A_submat y ##
        self.A_submat.mult(y, z)

        ## Add in x: z = z + x ##
        z.axpy(1.0, x)

        ## Perform: y = M_submat^{-1} z ##
        self.M_ksp.solve(z, y)
        
        ## Negate ##
        y.scale(-1.0) 

        ## Stop the timer ##
        timer.stop()
Code Example #18
File: fe_space_op.py Project: MiroK/fenics_ii
    def collapse(self):
        '''Return a matrix representation'''
        # Check cache
        if hasattr(self, 'matrix_repr'): return self.matrix_repr

        # Otherwise compute it, but only once
        x = self.create_vec(dim=1)
        x_values = x.get_local()

        columns = []
        df.info('Collapsing to %d x %d matrix' %
                (self.V0.dim(), self.V1.dim()))
        timer = df.Timer('Collapse')
        for i in range(self.V1.dim()):
            # Basis of row space
            x_values[i] = 1.
            x.set_local(x_values)

            column = sparse.csc_matrix((self * x).get_local())
            columns.append(column)
            # Reset
            x_values[i] = 0.
        # Altogether
        mat = (sparse.vstack(columns).T).tocsr()
        df.info('\tDone in %g' % timer.stop())

        # As PETSc.Mat
        A = PETSc.Mat().createAIJ(comm=PETSc.COMM_WORLD,
                                  size=mat.shape,
                                  csr=(mat.indptr, mat.indices, mat.data))
        # Finally for dolfin
        self.matrix_repr = df.PETScMatrix(A)

        return self.matrix_repr
Code Example #19
    def derivative(self, *args, **kwargs):

        logger.debug("\nEvaluate gradient...")
        self.collector["nr_derivative_calls"] += 1

        t = dolfin.Timer("Backward run")
        t.start()
        out = super().derivative()
        back_time = t.stop()
        logger.info(("Evaluating gradient done. "
                     "Time to evaluate = {} seconds".format(back_time)))
        self.collector["backward_times"].append(back_time)

        # Multiply by some small number so that we take smaller steps
        gathered_out = numpy_mpi.gather_broadcast(out.vector().get_local())

        self.collector["gradient_norm"].append(np.linalg.norm(gathered_out))
        self.collector["gradient_norm_scaled"].append(
            np.linalg.norm(gathered_out) * self.scale * self.derivative_scale)
        logger.info(("|dJ|(actual) = {}\t"
                     "|dJ|(scaled) = {}").format(
                         self.collector["gradient_norm"][-1],
                         self.collector["gradient_norm_scaled"][-1],
                     ))

        return self.scale * gathered_out * self.derivative_scale
Code Example #20
File: block_solver.py Project: NREL/pfibs
    def solve(self):
        ## Start the timer ##
        timer = df.Timer("pFibs: Solve")

        ## Apply bcs ##
        self.applyBC()

        ## Create nonlinear problem ##
        if not self._init_nlp:
            self.problem = NLP(self.a,
                               self.L,
                               self.aP,
                               bcs=self.bcs_u,
                               ident_zeros=self.ident_zeros,
                               ksp=self.linear_solver.ksp())
            self._init_nlp = True

        ## Actual solve ##
        its, converged = self.newton_solver.solve(self.problem,
                                                  self.u.vector())

        ## Stop the timer ##
        timer.stop()

        return its, converged
Code Example #21
    def step(self, interval, v):
        """
        Solve on the given time step (t0, t1).
        
        End users are recommended to use solve instead.
        
        Arguments:
        interval : tuple
          The time interval (t0, t1) for the step
        """

        assert isinstance(v, d.Function), "expected a Function as the 'v' argument"
        # FIXME: Add proper check!
        #assert v in self._state_space, "expected v to be in the state space"

        timer = d.Timer("ODE step")
        (t0, t1) = interval
        dt = t1 - t0

        # Update local field states
        self.to_field_states(v)

        # Update any changed field_parameters
        self.update_parameters()

        # Step solvers
        for label, ode_system_solver in self._ode_system_solvers.items():
            ode_system_solver.forward(t0, dt)

        # Copy solution from local field states
        self.from_field_states(v)
Code Example #22
    def single_solve(self, tol=None, damp=None, inside_loop=_pass):
        if tol is None: tol = self.params["tolnewton"]
        if damp is None: damp = self.params["damp"]
        I = self.params["ipicard"]
        J = self.params["inewton"]
        nverbose = self.params["nverbose"]
        verbose = self.params["verbose"]
        times = {name : 0. for name in self.solvers}

        for i in range(1, I+1):
            if verbose:
                print("\n-- Fixed-Point Loop %d of max. %d" % (i, I))
            for name, solver in self.solvers.items():
                if verbose:
                    print("    Solving %s." % name)
                t = dolfin.Timer(name)
                if solver.is_linear:
                    solver.solve()
                    times[name] += t.stop()
                else:
                    j, con = newtonsolve(solver, tol, damp, J, nverbose, lambda: inside_loop(self))
                    times[name] += t.stop()
                    if j==1 and con:
                        print("- Break at iteration %d because Newton stopped changing." %i)
                        break
            else:
                inside_loop(self)
                self.coupled.update_uold()
                continue
            break
        Tt = sum(times.values())
        if verbose:
            print("\n CPU Time (solve): %.2f s" % Tt)
            for name in self.solvers:
                print("  -) %s: %.2f s" %(name, times[name]))
Code Example #23
    def update(self, timestep_number, t, dt):
        """
        Update the density field by advecting it for a time dt
        using the given divergence free velocity field
        """
        timer = dolfin.Timer('Ocellaris update rho')
        sim = self.simulation

        if timestep_number != 1:
            # Update the previous values
            self.rho_pp.assign(self.rho_p)
            self.rho_p.assign(self.rho)

        # Check for steady solution every timestep, this can change over time
        force_static = sim.input.get_value('multiphase_solver/force_static',
                                           FORCE_STATIC, 'bool')

        if force_static:
            # Keep the existing solution
            self.rho.assign(self.rho_p)

        elif self.use_analytical_solution:
            # Use an analytical density field for testing other parts of Ocellaris
            cpp_code = sim.input.get_value('initial_conditions/rho_p/cpp_code',
                                           required_type='string')
            description = 'initial condition for rho_p'
            V = sim.data['Vrho']
            ocellaris_interpolate(sim, cpp_code, description, V, self.rho)

        elif self.use_rk_method:
            # Strong-Stability-Preserving Runge-Kutta DG time integration
            self.rho.assign(self.rho_p)
            self.rho_explicit.assign(self.rho_p)
            self.rk.step(dt)

        else:
            # Compute global bounds
            if self.is_first_timestep:
                lo, hi = self.slope_limiter.set_global_bounds(self.rho)
                if self.slope_limiter.has_global_bounds:
                    sim.log.info(
                        'Setting global bounds [%r, %r] in VariableDensity' %
                        (lo, hi))

            # Solve the implicit advection equation
            A = self.eq.assemble_lhs()
            b = self.eq.assemble_rhs()
            self.solver.solve(A, self.rho.vector(), b)
            self.slope_limiter.run()
            self.time_coeffs.assign(Constant([3 / 2, -2, 1 / 2]))

        sim.reporting.report_timestep_value('min(rho)',
                                            self.rho.vector().min())
        sim.reporting.report_timestep_value('max(rho)',
                                            self.rho.vector().max())

        timer.stop()  # Stop timer before hook
        self.simulation.hooks.run_custom_hook('MultiPhaseModelUpdated')
        self.is_first_timestep = False
Code Example #24
File: io.py Project: jakobes/Ocellaris
    def load_restart_file_functions(self, h5_file_name):
        """
        Load only the Functions stored on the given restart file
        Returns a dictionary of functions, does not affect the
        Simulation object itself (for switching meshes etc.)
        """
        with dolfin.Timer('Ocellaris load hdf5'):
            return self.restart.read_functions(h5_file_name)
Code Example #25
File: io.py Project: TormodLandet/Ocellaris
    def load_restart_file_input(self, h5_file_name):
        """
        Load the input used in the given restart file
        """
        with dolfin.Timer('Ocellaris load hdf5'):
            self.restart.read(h5_file_name,
                              read_input=True,
                              read_results=False)
Code Example #26
File: io.py Project: TormodLandet/Ocellaris
    def load_restart_file_results(self, h5_file_name):
        """
        Load the results stored on the given restart file
        """
        with dolfin.Timer('Ocellaris load hdf5'):
            self.restart.read(h5_file_name,
                              read_input=False,
                              read_results=True)
Code Example #27
    def _process_call_after(self):
        """
        Run any delayed action after a normal hook is done
        """
        while self._call_after:
            func, description, args, kwargs = self._call_after.popleft()
            with dolfin.Timer('Ocellaris delayed %s' % description):
                func(*args, **kwargs)
Code Example #28
    def __init__(self, linear_solver):

        ## Time function execution ##
        timer = df.Timer("pFibs: Init Newton solver")
        timer.start()

        comm = linear_solver.ksp().comm.tompi4py()
        factory = df.PETScFactory.instance()
        super(NS, self).__init__(comm, linear_solver, factory)
        self._solver = linear_solver
Code Example #29
    def solver_setup(self, A, P, nlp, iteration):

        ## Time function execution ##
        timer = df.Timer("pFibs: Newton solver setup")
        timer.start()

        if nlp.aP is not None:
            timer2 = df.Timer("pFibs: Newton solver setup - preconditioner")
            df.assemble(nlp.aP, tensor=P)
            for bc in nlp.bcs:
                bc.apply(P)
            timer2.stop()
        if iteration > 0 or getattr(self, "_initialized", False):
            return
        P = A if P.empty() else P
        self._solver.set_operators(A, P)
        self._initialized = True
        self._solver.init_solver_options()

        timer.stop()
Code Example #30
File: xdmf.py Project: TormodLandet/Ocellaris
    def write(self):
        """
        Write a file that can be used for visualization. The fluid fields will
        be automatically downgraded (interpolated) into something that
        dolfin.XDMFFile can write, typically linear CG elements.
        """
        with dolfin.Timer('Ocellaris save xdmf'):
            if self.xdmf_file is None:
                self._setup_xdmf()
            self._write_xdmf()
        return self.file_name