Example #1
0
File: ss22.py  Project: pk-organics/OOF3D
    def nonlinearstep(self, subproblem, linsys, time, unknowns, endtime,
                      nonlinearMethod):
        # Advance the solution one SS22 step of size dt = endtime-time.
        # The nonlinear solver finds the acceleration-like variable
        # alpha; the fields and their first time derivatives at endtime
        # are then reconstructed from alpha and returned in a
        # StepResult.
        #
        # M d2u/dt2 + C du/dt + F(u,t) = 0
        # F might depend on time and u explicitly
        # M and C might depend on time

        dt = endtime - time

        # Bundle everything the nonlinear callbacks (precomputeNL,
        # compute_residual, compute_jacobian, ...) need.
        data = NLDataSS22(subproblem, linsys, endtime, dt, unknowns,
                          self.theta1, self.theta2)

        alpha = doublevec.DoubleVec(linsys.n_unknowns_MCK())  # not MCKd

        # Solve the nonlinear system for alpha in place.
        nonlinearMethod.solve(
            subproblem.matrix_method(_asymmetric, subproblem),
            self.precomputeNL, self.compute_residual, self.compute_jacobian,
            self.compute_linear_coef_mtx, data, alpha)

        # Update a and adot, reusing the storage in data.a0 and data.a0dot:
        #   a    <- a + dt adot + 1/2 dt^2 alpha
        #   adot <- adot + dt alpha
        data.a0.axpy(dt, data.a0dot)
        data.a0.axpy(0.5 * dt * dt, alpha)
        data.a0dot.axpy(dt, alpha)
        endValues = doublevec.DoubleVec(unknowns.size())
        linsys.set_fields_MCKd(data.a0, endValues)
        linsys.set_derivs_MCKd(data.a0dot, endValues)
        return timestepper.StepResult(endTime=endtime,
                                      nextStep=dt,
                                      endValues=endValues,
                                      linsys=linsys)
Example #2
0
    def linearstep(self, subprobctxt, linsys, time, unknowns, endtime):
        # Advance the linear system one SS22 step of size
        # dt = endtime - time, returning a StepResult containing the
        # fields and their first time derivatives at endtime.
        dt = endtime - time
        ## The matrix equation to be solved (for alpha) is A*alpha=x,
        ## where
        ## A = M + theta1 dt C + 1/2 theta2 dt^2 K
        ## x = -(C + theta1 dt K) adot - K a + rhs

        ## Note that   rhs = -f.
        ## We work with   M (d2/dt2) u + C (d/dt) u + K u = rhs
        ## instead of     M (d2/dt2) u + C (d/dt) u + K u + f = 0

        ## a is the known vector of dofs, adot is its known first time
        ## derivative.  rhs is a weighted average of the right hand sides
        ## over the time interval:
        ## rhs = (1-theta1) rhs(t) + theta1 rhs(t+dt)

        ## Values at t+dt are
        ##    a    <- a + dt adot + 1/2 dt^2 alpha
        ##    adot <- adot + dt alpha

        K = linsys.K_MCK()
        C = linsys.C_MCK()
        M = linsys.M_MCK()

        ## This relies on the Fields and their time derivative Fields
        ## having the same global dof ordering!
        # Assemble A = M + theta1 dt C + 1/2 theta2 dt^2 K.
        A = M.clone()
        A.add(self.theta1*dt, C)
        A.add(0.5*self.theta2*dt*dt, K)
        A.consolidate()

        x  = linsys.rhs_MCK() # x = rhs(t_n)
        x *= (1.0 - self.theta1)        # x = (1-theta1) rhs(t_n)
        ## Compute the rhs at t = endtime.
        linsys1 = subprobctxt.make_linear_system(endtime, linsys)
        # x = (1-theta1) rhs(t_n) + theta1  rhs(t_{n+1})
        x.axpy(self.theta1, linsys1.rhs_MCK())

        # Separate the field and deriv parts of unknowns
        a    = linsys.get_fields_MCKd(unknowns)
        adot = linsys.get_derivs_MCKd(unknowns)

        K.axpy(-1.0, a, x)               # x = - K a + rhs
        C.axpy(-1.0, adot, x)            # x = -C adot - K a + rhs
        K.axpy(-self.theta1*dt, adot, x) # ... -theta1 dt K adot

        # Solve A*alpha = x with the subproblem's asymmetric solver.
        alpha = doublevec.DoubleVec(len(a))
        subprobctxt.matrix_method(_asymmetric, subprobctxt).solve(A, x, alpha)

        ## update a and adot, then copy them into endValues
        a.axpy(dt, adot)
        a.axpy(0.5*dt*dt, alpha)
        adot.axpy(dt, alpha)

        endValues = doublevec.DoubleVec(unknowns.size())
        linsys.set_fields_MCKd(a, endValues)
        linsys.set_derivs_MCKd(adot, endValues)

        return timestepper.StepResult(endTime=endtime, nextStep=dt,
                                      endValues=endValues, linsys=linsys)
Example #3
0
 def BiCGStab_ILUT(self):
     # Solve self.matrix * x = self.rhs with stabilized biconjugate
     # gradient and an ILUT preconditioner, then compare the norm of
     # the computed solution with the reference norm to 9 places.
     result = doublevec.DoubleVec(0)
     method = matrixmethod.StabilizedBiConjugateGradient(
         preconditioner.ILUTPreconditioner(),
         self.tolerance, self.max_iterations)
     method.solveMatrix(self.matrix, self.rhs, result)
     self.assertAlmostEqual(self.ref.norm(), result.norm(), 9)
Example #4
0
 def CG_Jacobi(self):
     # Solve self.matrix * x = self.rhs with conjugate gradient and a
     # Jacobi preconditioner, then compare the norm of the computed
     # solution with the reference norm to 9 places.
     result = doublevec.DoubleVec(0)
     method = matrixmethod.ConjugateGradient(
         preconditioner.JacobiPreconditioner(),
         self.tolerance, self.max_iterations)
     method.solveMatrix(self.matrix, self.rhs, result)
     self.assertAlmostEqual(self.ref.norm(), result.norm(), 9)
Example #5
0
    def Basics(self):
        # Exercise DoubleVec construction, initialization, unit(),
        # size(), and resize(), checking saved output against the
        # reference files in datadir.
        def compare(vector, refname):
            # Save 'vector' and compare the file with reference 'refname'.
            saveVector(vector, 'vector.out')
            self.assert_(fp_file_compare('vector.out',
                                         os.path.join(datadir, refname),
                                         1.e-8))

        vec = doublevec.DoubleVec(10)
        compare(vec, 'vec_zero')        # default construction

        vec = doublevec.DoubleVec(10, 0.5)
        compare(vec, 'vec_init')        # construction with initial value

        vec.unit()
        compare(vec, 'vec_unit')        # normalization in place

        self.assert_(vec.size() == 10)

        vec.resize(5)
        self.assert_(vec.size() == 5)
Example #6
0
def _doublevectest(menuitem):
    # Debugging menu command: allocate, resize, clone, clear, and
    # delete DoubleVecs, printing the vector-size statistics after
    # each operation so leaks are visible.
    from ooflib.SWIG.common import doublevec
    doublevec.printVecSizes("initial")
    vec = doublevec.DoubleVec(10)
    doublevec.printVecSizes("allocated")
    vec.resize(100)
    doublevec.printVecSizes("resized")
    copy = vec.clone()
    doublevec.printVecSizes("cloned")
    vec.clear()
    doublevec.printVecSizes("cleared")
    del vec
    doublevec.printVecSizes("deleted")
    del copy
    doublevec.printVecSizes("deleted again")
Example #7
0
File: ss22.py  Project: pk-organics/OOF3D
    def precomputeNL(self, data, alphas, nlsolver):
        # Callback invoked by the nonlinear solver before residual and
        # Jacobian evaluations.  Build the trial solution vector from
        # the stored step data and the current guess 'alphas', rebuild
        # the linear system there, and cache the matrices needed by the
        # other callbacks on 'data'.
        values = doublevec.DoubleVec(data.linsys.n_unknowns_MCKd())
        # Bug fix: this previously read 'data.a11', an attribute that
        # appears nowhere else; the start-of-step field values are kept
        # in data.a0 (cf. nonlinearstep, which pairs data.a0 with
        # data.a0dot exactly as data.a0dot is used on the next line).
        data.linsys.set_fields_MCKd(
            data.a0 + (0.5 * data.dt * data.dt) * alphas, values)
        data.linsys.set_derivs_MCKd(data.a0dot + data.dt * alphas, values)

        # Base class precomputeNL computes linsys.
        timestepper.NonLinearStepper.precomputeNL(self, data, values, nlsolver)

        # Cache the matrices and static residual evaluated at the
        # trial solution for compute_residual/compute_jacobian.
        data.M1 = data.linsys.M_MCK()
        data.C1 = data.linsys.C_MCK()

        data.F1 = data.linsys.static_residual_MCK()

        # Effective matrix: Mstar = Mstar0 + theta1 M1 + theta2 dt C1.
        data.Mstar = data.Mstar0.clone()
        data.Mstar.add(self.theta1, data.M1)
        data.Mstar.add(data.dt * self.theta2, data.C1)
Example #8
0
    def _do_step(self, subproblem, linsys, time, unknowns, endtime, get_res):
        # Take one explicit step of size dt = endtime - time.
        # get_res(linsys, dt, unknowns) supplies the right hand side
        # v = dt*(f - K u_n) (see the commented-out computation below);
        # this routine then solves for the increment and re-solves any
        # purely static ('K') equations at endtime.
        #
        # Solve C(u_{n+1} - u_n) = dt*(f - K u_n)
        # C and K are effective matrices, coming from the reduction of
        # a second order problem to a system of first order problems.

        # If C has empty rows, the static solution of those rows has
        # already been computed by initializeStaticFields() or by a
        # previous Euler step, and we only have to solve the remaining
        # nonempty rows here.  Unlike GeneralizedEuler, here we're
        # solving an unmodified C, so it can't contain empty rows.
        staticEqns = linsys.n_unknowns_part('K') > 0

        dt = endtime - time
        # v = dt * linsys.rhs_MCa() # v = dt*f

        # # K_MCa is really (MCa)x(MCKa), so we can multiply by
        # # unknowns, which includes the K part.
        # K = linsys.K_MCa()
        # K.axpy(-dt, unknowns, v) # v = dt*(f - K u)
        v = get_res(linsys, dt, unknowns)

        C = linsys.C_MCa()
        # solve() stores u_{n+1}-u_n in endValues.  Before calling
        # solve, set endValues to a good guess for u_{n+1}-u_n.
        # Assuming that the step size is small, a good guess is zero.
        endValues = doublevec.DoubleVec(unknowns.size())
        endValues.zero()

        # Solve only over the MCa dofs, then scatter the increment back
        # into the full endValues vector.
        x = linsys.extract_MCa_dofs(endValues)
        subproblem.matrix_method(_asymmetricFE, subproblem,
                                 linsys).solve(C, v, x)
        linsys.inject_MCa_dofs(x, endValues)

        endValues += unknowns

        if staticEqns:
            # Re-solve the static equations at endtime.
            subproblem.installValues(linsys, endValues, endtime)
            linsys = subproblem.make_linear_system(endtime, linsys)
            subproblem.computeStaticFields(linsys, endValues)

        return timestepper.StepResult(endTime=endtime,
                                      nextStep=dt,
                                      endValues=endValues,
                                      linsys=linsys)
Example #9
0
    def __init__(self, name, classname, obj, parent, subptype, secretFlag=0):
        # Wrap a CSubProblem ('obj') in a Who context belonging to the
        # mesh context 'parent'.  Initializes solver settings, solution
        # storage, timestamps, and switchboard callbacks.
        whoville.Who.__init__(self, name, classname, obj, parent, secretFlag)
        obj.set_rwlock(self.rwLock)
        # Obj is a CSubProblem.

        obj.set_mesh(parent)
        obj.set_nnodes(self.nfuncnodes())

        # These shouldn't be accessed directly.  They store the values
        # of properties defined below.
        self._solver_mode = None
        self._time_stepper = None
        self._nonlinear_solver = None
        self._symmetric_solver = None
        self._asymmetric_solver = None

        # Timestamp updated whenever the stepper is (re)assigned.
        self.stepperSet = timestamp.TimeStamp()

        # Solution time interval and the values at its endpoints.
        self.startTime = 0.0
        self.endTime = None
        self.installedTime = None
        self.fieldsInstalled = timestamp.TimeStamp()
        self.startValues = doublevec.DoubleVec(0)
        self.endValues = doublevec.DoubleVec(0)
        self.subptype = subptype
        self._precomputing = False
        self._timeDependent = False
        self._second_order_fields = set()

        # solveOrder determines the order in which subproblems are to
        # be solved.
        self.solveOrder = parent.nSubproblems()
        # solveFlag indicates whether or not the subproblem is to be
        # solved at all.
        self.solveFlag = True

        self.defnChanged = timestamp.TimeStamp() # fields or eqns changed
        self.solutiontimestamp = timestamp.TimeStamp() # last "solve" command
        # Backdate so that the first solve is never skipped as stale.
        self.solutiontimestamp.backdate()

        self.solverStats = solverstats.SolverStats()
        self.newMatrixCount = 0 # no. of time matrices have been rebuilt.

        # Switchboard callbacks for subproblem removal/redefinition.
        self.requestCallback(("preremove who", "SubProblem"),
                             self.preremoveCB)
        self.requestCallback("subproblem redefined", self.redefinedCB)

        # Symmetry state of each of the three global matrices.
        self.matrix_symmetry_K = symstate.SymState()
        self.matrix_symmetry_C = symstate.SymState()
        self.matrix_symmetry_M = symstate.SymState()

        # If a SubProblemType's Registration has a callable 'startup'
        # member, it's invoked here.  This can be used to set up
        # switchboard signals, for instance.  A corresponding
        # 'cleanup' function is called when the subproblem is
        # destroyed.
        try:
            startupfn = self.subptype.getRegistration().startup
        except AttributeError:
            pass
        else:
            startupfn(self)
Example #10
0
    def solve(self, matrix_method, precompute, compute_residual,
              compute_jacobian, compute_linear_coef_mtx, data, values):
        # Solve the nonlinear system by Picard iteration: repeatedly
        # solve K*update = -residual, where K is the linear coefficient
        # matrix from compute_linear_coef_mtx (not the full Jacobian),
        # and apply a line-searched fraction of the update.  'values'
        # holds the initial guess on entry and the solution on exit.
        # Returns (iteration count, final residual norm).  Raises
        # ErrInterrupted if the user stops the progress bar, and
        # ErrConvergenceFailure if maximum_iterations is exhausted.
        # Note: compute_jacobian is accepted for interface
        # compatibility with the Newton solver but is not used here.
        #
        # initialize: set soln = startValues, update = 0
        # debug.fmsg("-----------------------------")
        # debug.fmsg("initial values=", values.norm())
        n = values.size()
        update   = doublevec.DoubleVec(n)
        # residual = doublevec.DoubleVec(n)

        # compute the residual = -K*startValues + rhs
        self.requireResidual(True)
        self.requireJacobian(False)
        precompute(data, values, self)
        residual = compute_residual(data, values, self)

        res_norm0 = residual.norm() # this is the norm of the initial residual
        # debug.fmsg("res_norm0=%g:" % res_norm0)
        res_norm  = res_norm0       # we will keep comparing current residual
                                    # with res_norm0 to judge convergence

        # Convergence target mixes relative and absolute tolerances.
        target_res = self.relative_tolerance*res_norm0 + self.absolute_tolerance
        prog = progress.getProgress('Picard Solver', progress.LOGDEFINITE)
        if res_norm > target_res:
            prog.setRange(res_norm0, target_res)

        try:
            # compute Picard updates while residual is large and
            # self.maximum_iterations is not exceeded
            s = 1.0
            i = 0
            while (res_norm > target_res and i < self.maximum_iterations
                   and not prog.stopped()):
                # debug.fmsg("iteration %d" % i)
                update.zero()  # start with a zero update vector

                # solve for the Picard step:  K * update = -residual
                K = compute_linear_coef_mtx(data, self)
                residual *= -1.0

                # debug.fmsg("residual=", residual.norm())
                matrix_method.solve( K, residual, update )
                # debug.fmsg("update=", update.norm())

                # choose step size for the Picard update
                s, residual = self.choose_step_size(
                    data, values, update, res_norm,
                    precompute, compute_residual)
                # debug.fmsg("line search s=", s)

                # correct the soln with the Picard update
                values += s * update
                res_norm = residual.norm()
                if res_norm <= target_res:
                    break

                # update the linear system
                self.requireResidual(True)
                self.requireJacobian(False)
                precompute(data, values, self)

                # compute the residual
                residual = compute_residual(data, values, self)
                res_norm = residual.norm()
                # debug.fmsg("Current residual:", res_norm)
                prog.setMessage("%g/%g" % (res_norm, target_res))
                prog.setFraction(res_norm)

                i = i+1
                # end of Picard iterations

            if prog.stopped():
                prog.setMessage("Picard solver interrupted")
                #prog.finish()
                raise ooferror2.ErrInterrupted()
        finally:
            prog.finish()

        # debug.fmsg("Done: res_norm=%g" % res_norm)
        # raise error if Picard's iterations did not converge in
        # maximum_iterations
        if i >= self.maximum_iterations and res_norm > target_res:
             raise ooferror2.ErrConvergenceFailure(
                 'Nonlinear solver - Picard iterations',
                 self.maximum_iterations)
        return i, res_norm
Example #11
0
    def solve(self, matrix_method, precompute, compute_residual,
              compute_jacobian, compute_linear_coef_mtx, data, values):
        # Solve the nonlinear system by Newton iteration: repeatedly
        # solve J*update = -residual with the full Jacobian and apply
        # a line-searched fraction of the update.  'values' holds the
        # initial guess on entry and the solution on exit.  Returns
        # (iteration count, final residual norm).  Raises
        # ErrInterrupted on user interrupt and ErrConvergenceFailure
        # if maximum_iterations is exhausted.

        # matrix_method is function that takes a matrix A, a rhs b and
        # a vector of unknows x and sets x so that A*x = b.

        # 'data' is user defined object that can be used to pass
        # information from the calling function to the callback
        # functions if necessary.  The callback functions are
        # precompute, compute_residual, and compute_jacobian.

        # precompute will be called before calling compute_residual
        # and compute_jacobian, and should perform any calculations
        # shared by those two functions.  Its arguments are 'data'
        # (the user defined object), 'values' (a DoubleVec containing
        # a trial solution) and 'needJacobian' (a boolean indicating
        # whether or not the Jacobian needs to be recomputed).

        # compute_residual is called only after precompute.  Its
        # arguments are 'data' (the same object that was used for
        # precompute), 'values' (the trial solution), and residual
        # (the resulting vector of residuals).

        # compute_jacobian is also called only after precompute.  It
        # only takes the 'data' argument.

        # TODO OPT: The vectors and matrices computed by
        # compute_residual, compute_jacobian, and
        # compute_linear_coef_mtx can be preallocated here and passed
        # to the functions, instead of being repeatedly reallocated on
        # each function call.

        # debug.fmsg("initial values=", values.norm())
        n = values.size()

        update   = doublevec.DoubleVec(n)

        # compute the residual = -K*startValues + rhs
        self.requireResidual(True)
        self.requireJacobian(True)
        precompute(data, values, self)
        residual = compute_residual(data, values, self)

        res_norm0 = residual.norm() # norm of the initial residual
        res_norm  = res_norm0       # we will keep comparing current residual
                                    # with res_norm0 to judge convergence
        # debug.fmsg("initial residual:", res_norm0)

        prog = progress.getProgress("Newton Solver", progress.LOGDEFINITE)
        # Convergence target mixes relative and absolute tolerances.
        target_res = self.relative_tolerance*res_norm0 + self.absolute_tolerance
        if res_norm0 > target_res:
            prog.setRange(res_norm0, target_res)
        try:
            # compute Newton updates while residual is large and
            # self.maximum_iterations is not exceeded
            s = 1.
            i = 0
            while (res_norm > target_res and i < self.maximum_iterations
                   and not prog.stopped()):
                # debug.fmsg("iter =", i, ",  res =", res_norm, " s =", s)
                update.zero()
                # solve for the Newton step:  Jacobian * update = -residual
                J = compute_jacobian(data, self)
                # debug.fmsg("J=\n", J.norm())
                residual *= -1.0
                matrix_method.solve( J, residual, update )
                # debug.fmsg("update=", update.norm())

                # choose step size for the Newton update.  This resets
                # self.requireXXX.
                s, residual = self.choose_step_size(
                    data, values, update, res_norm,
                    precompute, compute_residual)
                # debug.fmsg("s=", s)
                # correct the soln with the Newton update
                values += s * update

                res_norm = residual.norm()
                if res_norm <= target_res:
                    break

                # update the linear system
                self.requireJacobian(True)
                self.requireResidual(True)
                # debug.fmsg("norm updated values=", values.norm())
                precompute(data, values, self)
                # compute the residual
                residual = compute_residual(data, values, self)
                res_norm = residual.norm()
                #debug.fmsg("Current residual: [%s] (%g)" %(residual, res_norm))
                # debug.fmsg("new residual =", res_norm)
                prog.setMessage("%g/%g" % (res_norm, target_res))
                prog.setFraction(res_norm)

                i += 1
                # end of Newton iterations

            if prog.stopped():
                prog.setMessage("Newton solver interrupted")
                #progress.finish()
                raise ooferror2.ErrInterrupted();
        finally:
            prog.finish()
        # raise error if Newton's method did not converge in maximum_iterations
        if i >= self.maximum_iterations and res_norm > target_res:
            raise ooferror2.ErrConvergenceFailure(
                'Nonlinear solver - Newton iterations', self.maximum_iterations)
        # debug.fmsg("final values=", values)
        # debug.fmsg("-------------------")
        return i, res_norm
Example #12
0
    def Solve(self):
        # Exercise the triangular-solve methods of the sparse matrix
        # class.  Each case solves A*result = rhs for one rhs, saves
        # the result, compares it to a reference file, and (when the
        # matrix has its diagonal stored explicitly) checks the
        # residual directly.  The repeated stanzas of the original are
        # factored into one helper.
        result = doublevec.DoubleVec(4)

        def check(solvefn, rhsname, outname, residmatrix=None):
            # Solve with 'solvefn', compare the saved result against
            # the reference file 'outname', and, if 'residmatrix' is
            # given, verify residmatrix*result == rhs to 8 places.
            rhs = loadVector(rhsname)
            solvefn(rhs, result)
            result.save('vector.out')
            self.assert_(fp_file_compare('vector.out',
                                         os.path.join(datadir, outname),
                                         1.e-8))
            if residmatrix is not None:
                resid = residmatrix*result - rhs
                self.assertAlmostEqual(resid.norm(), 0.0, 8)

        # Lower triangular matrix with implicit unit diagonal.  No
        # residual check: the diagonal entries aren't stored.
        lower = loadMatrix('lowertriunit')
        check(lower.solve_lower_triangle_unitd, 'rhs0', 'solve0.out')
        check(lower.solve_lower_triangle_unitd, 'rhs1', 'solve1.out')

        # Upper triangular matrix with explicit diagonal entries.
        upper = loadMatrix('uppertri')
        check(upper.solve_upper_triangle, 'rhs0', 'solve2.out', upper)
        check(upper.solve_upper_triangle, 'rhs1', 'solve3.out', upper)

        # Transpose of upper triangular matrix with explicit diagonal
        # entries.
        check(upper.solve_upper_triangle_trans, 'rhs0', 'solve4.out',
              upper.transpose())
        check(upper.solve_upper_triangle_trans, 'rhs1', 'solve5.out',
              upper.transpose())

        # Transpose of lower triangular matrix with explicit diagonal
        # entries.
        lower = loadMatrix('lowertri')
        check(lower.solve_lower_triangle_trans, 'rhs0', 'solve6.out',
              lower.transpose())
        check(lower.solve_lower_triangle_trans, 'rhs1', 'solve7.out',
              lower.transpose())

        # Lower triangular matrix with explicit unit diagonal entries.
        check(lower.solve_lower_triangle, 'rhs0', 'solve8.out', lower)
        check(lower.solve_lower_triangle, 'rhs1', 'solve9.out', lower)

        # Transpose of lower triangular matrix with implicit unit
        # diagonal.  The residual is checked against a copy with the
        # unit diagonal filled in explicitly.
        lower = loadMatrix('lowertriunit')
        lowerunit = lower.clone()
        for i in range(lowerunit.nrows()):
            lowerunit.insert(i, i, 1.0)
        check(lower.solve_lower_triangle_trans_unitd, 'rhs0', 'solve10.out',
              lowerunit.transpose())
        check(lower.solve_lower_triangle_trans_unitd, 'rhs1', 'solve11.out',
              lowerunit.transpose())

        file_utils.remove('vector.out')
Example #13
0
 def get_derivs_part(self, part, linsys, unknowns):
     # No time-derivative dofs are provided here; every part gets an
     # empty vector.  The commented-out call shows the computation
     # this stub replaces.
     # return linsys.get_derivs_part_MCK(part, unknowns)
     return doublevec.DoubleVec(0)
Example #14
0
 def rhs_ind_part(self, part, linsys):
     # Only the 'K' part has an independent right hand side; every
     # other part gets an empty vector.
     if part != 'K':
         return doublevec.DoubleVec(0)
     return linsys.rhs_MCK()
Example #15
0
 def get_unknowns_part(self, part, linsys, unknowns):
     # Only the 'K' part carries unknowns; return a copy so the caller
     # can't modify the originals.  Every other part gets an empty
     # vector.
     if part != 'K':
         return doublevec.DoubleVec(0)
     return unknowns.clone()
Example #16
0
 def SparseQR(self):
     # Solve self.matrix * x = self.rhs with the SparseQR direct
     # solver and compare the norm of the computed solution with the
     # reference norm to 9 places.
     solution = doublevec.DoubleVec(0)
     solver = matrixmethod.SparseQR()
     # Bug fix: the return value (named 'succ' in the original,
     # presumably a success flag) was assigned and ignored.  Assert it
     # so a solver failure is reported directly rather than only as a
     # wrong norm.
     success = solver.solveMatrix(self.matrix, self.rhs, solution)
     self.assert_(success)
     self.assertAlmostEqual(self.ref.norm(), solution.norm(), 9)