    def IsConverged(self):
        convergence_list = []
        for i, data_entry in enumerate(self.settings["data_list"]):
            solver = self.solvers[data_entry["solver"]]
            data_name = data_entry["data_name"]
            cs_tools.ImportArrayFromSolver(solver, data_name, self.new_data)

            residual = self.new_data - self.old_data[i]
            res_norm = la.norm(residual)
            norm_new_data = la.norm(self.new_data)
            if norm_new_data < 1e-15:
                norm_new_data = 1.0 # to avoid division by zero
            abs_norm = res_norm / np.sqrt(residual.size)
            rel_norm = res_norm / norm_new_data
            convergence_list.append(abs_norm < self.abs_tolerances[i] or rel_norm < self.rel_tolerances[i])
            if self.echo_level > 1:
                info_msg  = 'Convergence for "'+bold(data_name)+'": '
                if convergence_list[i]:
                    info_msg += green("ACHIEVED")
                else:
                    info_msg += red("NOT ACHIEVED")
                classprint(self.lvl, self._Name(), info_msg)
            if self.echo_level > 2:
                info_msg  = bold("abs_norm")+" = " + str(abs_norm) + " | "
                info_msg += bold("abs_tol")+" = " + str(self.abs_tolerances[i])
                info_msg += " || "+bold("rel_norm")+" = " + str(rel_norm) + " | "
                info_msg += bold("rel_tol") +" = " + str(self.rel_tolerances[i])
                classprint(self.lvl, self._Name(), info_msg)

        return all(convergence_list) # False if any data entry did not converge
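Aside: each entry passes if either the absolute or the relative criterion holds. A minimal standalone sketch of the same per-entry check (illustrative helper name; "la" above is assumed to alias numpy.linalg):

import numpy as np

def is_entry_converged(new_data, old_data, abs_tol, rel_tol):
    # residual between two successive coupling iterations
    residual = new_data - old_data
    res_norm = np.linalg.norm(residual)
    norm_new = np.linalg.norm(new_data)
    if norm_new < 1e-15:
        norm_new = 1.0  # avoid division by zero for (near-)zero solutions
    abs_norm = res_norm / np.sqrt(residual.size)  # RMS-style absolute measure
    rel_norm = res_norm / norm_new                # residual relative to solution size
    return abs_norm < abs_tol or rel_norm < rel_tol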
    def _ComputeUpdate(self, r, x):
        if self.echo_level > 3:
            classprint(self.lvl, self._Name(),
                       "Doing relaxation with factor = ",
                       "{0:.1g}".format(self.alpha))
        delta_x = self.alpha * r
        return delta_x
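This accelerator simply scales the residual by a constant factor. A self-contained toy loop showing the resulting fixed-point iteration (np.cos stands in for the coupled solvers and is not part of the framework):

import numpy as np

G = np.cos                 # toy fixed-point operator
alpha = 0.5                # constant relaxation factor
x = np.zeros(3)
for _ in range(100):
    r = G(x) - x           # residual r = x~ - x, as fed to _ComputeUpdate
    x = x + alpha * r      # apply delta_x = alpha * r
print(x)                   # approaches the fixed point of cos(x), ~0.739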
Code example #3
    def _UpdateData(self, updated_data):
        for data_entry, data_update in zip(self.settings["data_list"],
                                           updated_data):
            solver = self.solvers[data_entry["solver"]]
            data_name = data_entry["data_name"]
            cs_tools.ExportArrayToSolver(solver, data_name, data_update)

        if self.echo_level > 3:
            cs_tools.classprint(self.lvl, self._Name(), "Computed prediction")
    def FinalizeSolutionStep(self):
        if len(self.V_new) and len(self.W_new):
            self.v_old_matrices.appendleft(self.V_new)
            self.w_old_matrices.appendleft(self.W_new)
        if self.v_old_matrices and self.w_old_matrices:
            self.V_old = np.concatenate(self.v_old_matrices, axis=1)
            self.W_old = np.concatenate(self.w_old_matrices, axis=1)
        ## Clear the buffer
        if self.R and self.X:
            if self.echo_level > 3:
                classprint(self.lvl, self._Name(), "Cleaning")
            self.R.clear()
            self.X.clear()
        self.V_new = []
        self.W_new = []
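The appendleft calls and the truthiness checks above are consistent with v_old_matrices and w_old_matrices being collections.deque objects with a maxlen (an assumption, not shown in this excerpt); the pattern then keeps a bounded history of matrices and stacks them column-wise:

from collections import deque
import numpy as np

q = deque(maxlen=3)                        # matrices from at most 3 previous time steps
for step in range(5):
    q.appendleft(np.full((4, 2), step))    # newest first, oldest dropped automatically
V_old = np.concatenate(q, axis=1)          # columns ordered newest to oldest
print(V_old.shape)                         # (4, 6) once the deque is full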
    def FinalizeSolutionStep(self):
        if len(self.J) == 0:
            return

        ## Assign J = J_hat (element-wise copy into the existing array)
        np.copyto(self.J, self.J_hat)
        if self.echo_level > 3:
            classprint(self.lvl, self._Name(), "Jacobian matrix updated!")
        ## Clear the buffer
        if self.R and self.X:
            self.R.clear()
            self.X.clear()
Code example #6
    def _ComputeUpdate(self, r, x):

        self.V.appendleft(deepcopy(r))
        self.W.appendleft(deepcopy(x))
        row = len(r)
        col = len(self.V) - 1
        k = col
        if k == 0:
            ## For the first iteration, do relaxation only
            if self.echo_level > 3:
                classprint(
                    self.lvl, self._Name(),
                    "Doing relaxation in the first iteration with factor = ",
                    "{0:.1g}".format(self.alpha))
            return self.alpha * r
        else:
            self.F = np.empty(shape=(col, row))  # will be transposed later
            self.X = np.empty(shape=(col, row))  # will be transposed later
            for i in range(0, col):
                self.F[i] = self.V[i] - self.V[i + 1]
                self.X[i] = self.W[i] - self.W[i + 1]
            self.F = self.F.T
            self.X = self.X.T

            # Compute the Moore-Penrose pseudo-inverse of F^T F
            A = np.linalg.pinv(self.F.T @ self.F)

            # Every p-th iteration take an Anderson step, otherwise just relax
            switch = (self.iteration_counter + 1) / self.p

            if self.echo_level > 3:
                classprint(self.lvl, self._Name(),
                           magenta(str(self.iteration_counter)))

            if switch.is_integer():
                B = self.beta * np.identity(row) - (
                    self.X + self.beta * self.F) @ A @ self.F.T
                if self.echo_level > 3:
                    classprint(self.lvl, self._Name(),
                               blue("Compute B with Anderson"))
            else:
                B = self.alpha * np.identity(row)
                if self.echo_level > 3:
                    classprint(self.lvl, self._Name(),
                               red("Constant underrelaxtion"))

            delta_x = B @ r

            self.iteration_counter += 1

            return delta_x

    def FinalizeSolutionStep(self):
        self.V.clear()
        self.W.clear()

    def _Name(self):
        return self.__class__.__name__
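Note that B is a dense row-by-row matrix, which becomes expensive for large interfaces. The same Anderson step can be evaluated matrix-free by regrouping the products right-to-left; a sketch under the same definitions of F, X and A = pinv(F^T F) as above:

import numpy as np

def anderson_step_matrix_free(r, F, X, beta):
    # computes B @ r with B = beta*I - (X + beta*F) @ A @ F.T, without forming B
    A = np.linalg.pinv(F.T @ F)
    z = A @ (F.T @ r)
    return beta * r - (X + beta * F) @ z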
    def _ComputeUpdate(self, r, x):
        self.R.appendleft(deepcopy(r))
        self.X.appendleft(deepcopy(x))
        col = len(self.R) - 1
        row = len(r)
        k = col
        if self.echo_level > 3:
            classprint(self.lvl, self._Name(), "Number of new modes: ", col)

        ## For the first iteration
        if k == 0:
            if len(self.J) == 0:
                return self.alpha * r  # no Jacobian yet, do relaxation
            else:
                return np.linalg.solve(
                    self.J, -r)  # use the Jacobian from the previous step

        ## Let the initial Jacobian correspond to a constant relaxation
        if len(self.J) == 0:
            self.J = -np.identity(row) / self.alpha  # corresponds to constant relaxation

        ## Construct matrix V (differences of residuals)
        V = np.empty(shape=(col, row))  # will be transposed later
        for i in range(0, col):
            V[i] = self.R[i] - self.R[i + 1]
        V = V.T

        ## Construct matrix W (differences of intermediate solutions x)
        W = np.empty(shape=(col, row))  # will be transposed later
        for i in range(0, col):
            W[i] = self.X[i] - self.X[i + 1]
        W = W.T

        ## Solve the least-norm problem for the Jacobian update
        rhs = V - np.dot(self.J, W)
        b = np.identity(row)
        W_right_inverse = np.linalg.lstsq(W, b, rcond=None)[0]
        J_tilde = np.dot(rhs, W_right_inverse)
        self.J_hat = self.J + J_tilde
        delta_r = -self.R[0]
        delta_x = np.linalg.solve(self.J_hat, delta_r)

        return delta_x
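The least-norm step above realizes the multi-vector Jacobian update J_hat = J + (V - J*W) * W^+, with W^+ the pseudo-inverse delivered by the lstsq call. A quick numpy check with illustrative sizes and random data that the updated Jacobian satisfies the secant conditions J_hat * W = V whenever W has full column rank:

import numpy as np

row, col = 6, 2
rng = np.random.default_rng(0)
J = -np.identity(row) / 0.3                  # initial Jacobian (constant relaxation)
W = rng.standard_normal((row, col))          # differences of solutions
V = rng.standard_normal((row, col))          # differences of residuals
W_pinv = np.linalg.lstsq(W, np.identity(row), rcond=None)[0]
J_hat = J + (V - J @ W) @ W_pinv
print(np.allclose(J_hat @ W, V))             # True: secant conditions hold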
Code example #8
    def _ComputeUpdate(self, r, x):
        self.R.appendleft(deepcopy(r))
        k = len(self.R) - 1
        ## For the first iteration, do relaxation only
        if k == 0:
            alpha = min(self.alpha_old, self.init_alpha_max)
            if self.echo_level > 3:
                classprint(
                    self.lvl, self._Name(),
                    ": Doing relaxation in the first iteration with initial factor = "
                    + "{0:.1g}".format(alpha))
            return alpha * r
        else:
            r_diff = self.R[0] - self.R[1]  # change in residual between iterations
            numerator = np.inner(self.R[1], r_diff)
            denominator = np.inner(r_diff, r_diff)
            alpha = -self.alpha_old * numerator / denominator  # Aitken's delta^2 formula
            if self.echo_level > 3:
                classprint(
                    self.lvl, self._Name(),
                    ": Doing relaxation with factor = " +
                    "{0:.1g}".format(alpha))
            if alpha > 20:
                alpha = 20
                if self.echo_level > 0:
                    classprint(
                        self.lvl, self._Name(), ": " +
                        red("WARNING: dynamic relaxation factor reaches upper bound: 20"
                            ))
            elif alpha < -2:
                alpha = -2
                if self.echo_level > 0:
                    classprint(
                        self.lvl, self._Name(), ": " +
                        red("WARNING: dynamic relaxation factor reaches lower bound: -2"
                            ))
            delta_x = alpha * self.R[0]
        self.alpha_old = alpha

        return delta_x
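The dynamic factor above is Aitken's delta-squared recurrence, alpha_new = -alpha_old * (r_old . dr) / (dr . dr) with dr = r_new - r_old, clamped afterwards to [-2, 20]. The bare recurrence as a standalone helper (illustrative; clamping and logging omitted):

import numpy as np

def aitken_factor(alpha_old, r_new, r_old):
    dr = r_new - r_old    # change in residual between iterations
    return -alpha_old * np.inner(r_old, dr) / np.inner(dr, dr)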
    def PrintInfo(self):
        '''Function to print info about the object.
        Can be overridden in derived classes to print more information.
        '''
        cs_tools.classprint(self.lvl, "Convergence Accelerator",
                            cs_tools.bold(self._Name()))

    def PrintInfo(self):
        classprint(self.lvl, "Convergence Criteria", bold(self._Name()))
    def _ComputeUpdate(self, r, x):
        self.R.appendleft(deepcopy(r))
        self.X.appendleft(x + r)  # r = x~ - x
        row = len(r)
        col = len(self.R) - 1
        k = col
        num_old_matrices = len(self.v_old_matrices)

        if len(self.V_old) == 0 and len(self.W_old) == 0:  # No previous vectors to reuse
            if k == 0:
                ## For the first iteration in the first time step, do relaxation only
                if self.echo_level > 3:
                    classprint(
                        self.lvl, self._Name(),
                        "Doing relaxation in the first iteration with factor = ",
                        "{0:.1g}".format(self.alpha))
                return self.alpha * r
            else:
                if self.echo_level > 3:
                    classprint(self.lvl, self._Name(),
                               "Doing multi-vector extrapolation")
                    classprint(self.lvl, self._Name(), "Number of new modes: ",
                               col)
                self.V_new = np.empty(shape=(col, row))  # will be transposed later
                for i in range(0, col):
                    self.V_new[i] = self.R[i] - self.R[i + 1]
                self.V_new = self.V_new.T
                V = self.V_new

                ## Check the dimension of the newly constructed matrix
                if (V.shape[0] < V.shape[1]) and self.echo_level > 0:
                    classprint(
                        self.lvl, self._Name(), ": " +
                        red("WARNING: column number larger than row number!"))

                ## Construct matrix W (differences of predictions)
                self.W_new = np.empty(shape=(col, row))  # will be transposed later
                for i in range(0, col):
                    self.W_new[i] = self.X[i] - self.X[i + 1]
                self.W_new = self.W_new.T
                W = self.W_new

                ## Solve least-squares problem
                delta_r = -self.R[0]
                c = np.linalg.lstsq(V, delta_r, rcond=None)[0]

                ## Compute the update
                delta_x = np.dot(W, c) - delta_r

                return delta_x
        else:  # previous vectors can be reused
            if k == 0:  # first iteration
                if self.echo_level > 3:
                    classprint(self.lvl, self._Name(),
                               "Using matrices from previous time steps")
                    classprint(self.lvl, self._Name(),
                               "Number of previous matrices: ",
                               num_old_matrices)
                V = self.V_old
                W = self.W_old
                ## Solve least-squares problem
                delta_r = -self.R[0]
                c = np.linalg.lstsq(V, delta_r, rcond=None)[0]

                ## Compute the update
                delta_x = np.dot(W, c) - delta_r
                return delta_x
            else:
                ## For other iterations, construct new V and W matrices and combine them with old ones
                if self.echo_level > 3:
                    classprint(self.lvl, self._Name(),
                               "Doing multi-vector extrapolation")
                    classprint(self.lvl, self._Name(), "Number of new modes: ",
                               col)
                    classprint(self.lvl, self._Name(),
                               "Number of previous matrices: ",
                               num_old_matrices)
                ## Construct matrix V (differences of residuals)
                self.V_new = np.empty(shape=(col, row))  # will be transposed later
                for i in range(0, col):
                    self.V_new[i] = self.R[i] - self.R[i + 1]
                self.V_new = self.V_new.T
                V = np.hstack((self.V_new, self.V_old))
                ## Check the dimension of the newly constructed matrix
                if (V.shape[0] < V.shape[1]) and self.echo_level > 0:
                    classprint(
                        self.lvl, self._Name(), ": " +
                        red("WARNING: column number larger than row number!"))

                ## Construct matrix W (differences of predictions)
                self.W_new = np.empty(shape=(col, row))  # will be transposed later
                for i in range(0, col):
                    self.W_new[i] = self.X[i] - self.X[i + 1]
                self.W_new = self.W_new.T
                W = np.hstack((self.W_new, self.W_old))

                ## Solve least-squares problem
                delta_r = -self.R[0]
                c = np.linalg.lstsq(V, delta_r, rcond=None)[0]

                ## Compute the update
                delta_x = np.dot(W, c) - delta_r

                return delta_x
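All three branches above end in the same least-squares kernel: find coefficients c minimizing ||V c + R[0]|| and set delta_x = W c + R[0] (written as np.dot(W, c) - delta_r with delta_r = -R[0]). A compact standalone version of that step (illustrative name; rcond pinned to silence numpy's FutureWarning):

import numpy as np

def iqnils_step(r0, V, W):
    c = np.linalg.lstsq(V, -r0, rcond=None)[0]  # c minimizes ||V c + r0||
    return W @ c + r0                           # delta_x = W c - delta_r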