Example #1
def createParallelMatrixFromLocal(local_matrix):
    # The distributed matrix stores the data transposed, so the local
    # shape is swapped when building the distribution plan.
    n_rows = local_matrix.shape[1]
    n_columns = local_matrix.shape[0]

    plan = DistributionPlan(mpi.COMM_WORLD, n_columns=n_columns, n_rows=n_rows)
    parallel_matrix = ParallelMatrix(plan)
    # Rank 0's data is distributed to all ranks.
    parallel_matrix.broadcast(local_matrix.transpose(), root=0)

    return parallel_matrix
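A minimal usage sketch, assuming the surrounding module provides DistributionPlan, ParallelMatrix and mpi4py as mpi, and that every rank holds an array of the same shape (rank 0's copy is the one that gets distributed):

import numpy as np

local = np.random.random((100, 50))   # same shape on every rank
parallel = createParallelMatrixFromLocal(local)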
Example #2
    def eigenfunctions(self, matrix, number_modes):
        import sys, slepc4py

        slepc4py.init(sys.argv)

        from petsc4py import PETSc
        from slepc4py import SLEPc

        E = SLEPc.EPS()
        E.create()

        E.setOperators(matrix.petScMatrix())
        E.setProblemType(SLEPc.EPS.ProblemType.HEP)
        #E.setType(SLEPc.EPS.Type.ARNOLDI)
        E.setFromOptions()
        E.setTolerances(tol=1e-9, max_it=200)
        E.setDimensions(nev=number_modes)
        E.solve()

        iterations = E.getIterationNumber()
        self.log("Number of iterations of the method: %d" % iterations)

        eps_type = E.getType()
        self.log("Solution method: %s" % eps_type)

        nev, ncv, mpd = E.getDimensions()
        self.log("Number of requested eigenvalues: %d" % nev)

        tol, maxit = E.getTolerances()
        self.log("Stopping condition: tol=%.4g, maxit=%d" % (tol, maxit))

        nconv = E.getConverged()
        self.log("Number of converged eigenpairs: %d" % nconv)

        eigenvalues = np.zeros(nconv, dtype=np.complex128)
        result_vector = ParallelVector(matrix.distributionPlan())
        plan = DistributionPlan(mpi.COMM_WORLD,
                                n_columns=matrix.totalShape()[1],
                                n_rows=nconv)
        eigenvectors_parallel = ParallelMatrix(plan)

        # Create work vectors; getEigenpair() below fills vr with the real
        # part and vi with the imaginary part of each eigenvector.
        vr, wr = matrix.petScMatrix().getVecs()
        vi, wi = matrix.petScMatrix().getVecs()
        for i in range(nconv):
            k = E.getEigenpair(i, vr, vi)

            result_vector.setCollective(vr.getArray())
            eigenvalues[i] = k

            if i in eigenvectors_parallel.localRows():
                eigenvectors_parallel.setRow(i, result_vector.fullData())

        return eigenvalues, eigenvectors_parallel
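Because E.setFromOptions() is honoured, the solver type that is commented out above can also be chosen at run time through the options database; a minimal sketch, assuming options are injected before the first SLEPc call:

import sys
import slepc4py

# Equivalent to passing "-eps_type arnoldi" on the command line.
slepc4py.init(sys.argv + ["-eps_type", "arnoldi"])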
Example #3
    def testDotComplexNotSquared(self):
        n_rows = 300
        n_columns = 700

        parallel_vector_in = createVector(rows=n_rows, columns=n_columns)
        parallel_vector_out = createVector(rows=n_rows, columns=n_rows)
        matrix = ParallelMatrix(parallel_vector_in.distributionPlan())

        communicator = matrix.distributionPlan().communicator()

        if communicator.Get_rank() == 0:
            entire_vector = np.array(np.random.random(n_columns), dtype=np.complex128)
            entire_vector /= np.linalg.norm(entire_vector)

            entire_matrix = np.random.random((n_rows, n_columns)) + 1j*np.random.random((n_rows, n_columns))
            entire_matrix /= np.linalg.norm(entire_matrix)

            entire_result = entire_matrix.dot(entire_vector)
        else:
            entire_vector = None
            entire_matrix = None
            entire_result = None

        entire_result = communicator.bcast(entire_result, root=0)

        parallel_vector_in.broadcast(entire_vector, root=0)
        matrix.broadcast(entire_matrix, root=0)

        # Non-square product: the result is written into the separately
        # shaped output vector rather than updated in place.
        matrix.dot(parallel_vector_in, parallel_vector_out)

        self.assertLess(np.linalg.norm(parallel_vector_out.fullData()-entire_result), 1e-10)
Example #4
    def _createIterationMatrices(self, H, Q, A, number_eigenvectors):
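        # Presumably an Arnoldi-type setup (an inference from the names, not
        # documented here): Q holds the Krylov basis vectors as distributed
        # rows, H is the replicated projected matrix, and on restart the
        # existing Q and H are enlarged rather than rebuilt.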
        if Q is None or H is None:
            start_index = 1
            m = number_eigenvectors * 2 + 1
            q = self.randomVector(A.totalShape()[0])
            q /= norm(q)

            distribution_plan = DistributionPlan(mpi.COMM_WORLD, n_rows=m, n_columns=A.totalShape()[0])

            Q = ParallelMatrix(distribution_plan)

            H = np.zeros((m, m), dtype=np.complex128)
            if 0 in Q.localRows():
                Q.setRow(0, q)
        else:
            start_index = Q.totalShape()[0]
            m = start_index + number_eigenvectors + 1

            new_distribution_plan = DistributionPlan(mpi.COMM_WORLD, n_rows=m, n_columns=A.totalShape()[0])

            # if new_distribution_plan.totalShape()[0] <= Q.distributionPlan().totalShape()[0]:
            #     new_distribution_plan = Q.distributionPlan()

            Q = Q.enlargeTo(new_distribution_plan)
            H = self.resizeCopy(H, m, m)

        return H, Q, start_index, m
Example #5
    def testDot(self):
        n_rows = 700
        n_columns = n_rows

        parallel_vector = createVector(rows=n_rows, columns=n_columns)
        matrix = ParallelMatrix(parallel_vector.distributionPlan())

        communicator = matrix.distributionPlan().communicator()

        if communicator.Get_rank() == 0:
            entire_vector = np.array(np.random.random(n_columns),
                                     dtype=np.complex128)
            entire_vector /= np.linalg.norm(entire_vector)

            entire_matrix = np.random.random((n_rows, n_columns))
            entire_matrix /= np.linalg.norm(entire_matrix)

            entire_result = entire_matrix.dot(
                entire_matrix.dot(entire_matrix.dot(entire_vector)))
        else:
            entire_vector = None
            entire_matrix = None
            entire_result = None

        entire_result = communicator.bcast(entire_result, root=0)

        parallel_vector.broadcast(entire_vector, root=0)
        matrix.broadcast(entire_matrix, root=0)

        # Apply the matrix three times in place, matching the nested
        # entire_matrix.dot(...) reference above.
        for i in range(3):
            matrix.dot(parallel_vector)

        self.assertLess(
            np.linalg.norm(parallel_vector.fullData() - entire_result), 1e-10)
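Read together with testDotComplexNotSquared above, these tests suggest two call forms for dot (an inference from usage here, not a documented contract):

matrix.dot(vector)                   # square matrix: updates vector in place
matrix.dot(vector_in, vector_out)    # general shape: writes into vector_out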
Example #6
    def testScalarMultiplication(self):
        n_rows = 300
        n_columns = n_rows

        parallel_vector = createVector(rows=n_rows, columns=n_columns)
        matrix = ParallelMatrix(parallel_vector.distributionPlan())

        scalars = np.random.random(5)

        communicator = matrix.distributionPlan().communicator()

        if communicator.Get_rank() == 0:
            entire_matrix = np.random.random((n_rows, n_columns))
            entire_matrix /= np.linalg.norm(entire_matrix)
        else:
            entire_matrix = None

        matrix.broadcast(entire_matrix, root=0)
        entire_matrix = communicator.bcast(entire_matrix, root=0)

        for scalar in scalars:
            # Exercise all three overloads; each pass scales the matrix by
            # scalar**3, mirrored exactly on the reference matrix below.
            matrix = scalar * matrix
            matrix = matrix * scalar
            matrix *= scalar

            entire_matrix = scalar * entire_matrix
            entire_matrix = entire_matrix * scalar
            entire_matrix *= scalar

            for i_local_row, i_row in enumerate(matrix.localRows()):
                self.assertLess(
                    np.linalg.norm(matrix.localMatrix()[i_local_row, :] -
                                   entire_matrix[i_row, :]), 1e-10)
Example #7
    def _createParallelMatrix(self, f_gamma):
        log("Building matrix")
        return self._createParallelMatrixPETSc(f_gamma)

        # NOTE: the early return above makes the fallback implementation
        # below unreachable; it is kept for reference only.
        product_coordinates = self.productCoordinates()

        n_coordinates = product_coordinates.shape[0]

        distribution_plan = DistributionPlan(communicator=mpi.COMM_WORLD,
                                             n_rows=product_coordinates.shape[0],
                                             n_columns=product_coordinates.shape[0])

        matrix = ParallelMatrix(distribution_plan=distribution_plan)

        if self._mode_element_wise:
            for i_row in distribution_plan.localRows():

                self._printProgress(n_coordinates, i_row)

                r_i = product_coordinates[i_row, :]
                for i_column in range(n_coordinates):

                    r_j = product_coordinates[i_column, :]
                    value = f_gamma(r_i, r_j)

                    # TODO
                    raise NotImplementedError("Can only handle entire rows")
                    # matrix.setElement(i_row, i_column, value)

        else:
            for i_row in distribution_plan.localRows():
                self._printProgress(len(distribution_plan.localRows()), i_row)

                r_i = product_coordinates[i_row, :]
                value = f_gamma(r_i)
                value = value.reshape(value.size)

                matrix.setRow(global_index=i_row,
                              content=value)

        if distribution_plan.communicator().Get_rank() == 0:
            log("done")

        return matrix
Example #8
    def testParallelDot(self):
        n_rows = 20
        n_columns = n_rows
        vector = createVector(rows=n_rows, columns=n_columns)
        matrix = ParallelMatrix(vector.distributionPlan())

        communicator = vector.communicator()
        if communicator.Get_rank() == 0:
            entire_vector = np.array(np.random.random(n_columns),
                                     dtype=np.complex128)
            entire_vector /= np.linalg.norm(entire_vector)

            entire_matrix = np.zeros((n_rows, n_columns), dtype=np.complex128)
            entire_matrix[:, :] = np.random.random(
                (n_rows, n_columns)) + 1j * np.random.random(
                    (n_rows, n_columns))
            entire_matrix /= np.linalg.norm(entire_matrix)

            entire_result = entire_matrix.dot(
                entire_matrix.dot(entire_matrix.dot(entire_vector)))
        else:
            entire_vector = None
            entire_matrix = None
            entire_result = None

        vector.broadcast(entire_vector, root=0)
        #self.assertLess(np.linalg.norm(vector.fullData()-entire_vector), 1e-10)
        matrix.broadcast(entire_matrix, root=0)
        #self.assertLess(np.linalg.norm(matrix.localMatrix()-entire_matrix), 1e-10)
        entire_result = communicator.bcast(entire_result, root=0)

        operator = ParallelLinearOperator(matrix, vector)

        operator.listenIfSlave()

        if communicator.Get_rank() == 0:
            for i in range(3):
                operator.matvec(vector.fullData())

        operator.finishListen()

        self.assertLess(np.linalg.norm(vector.fullData() - entire_result),
                        1e-10)
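The listen/finish protocol lets rank 0 drive matvec calls (for example from a serial solver) while the remaining ranks serve them. Condensed, the pattern used above appears to be:

operator = ParallelLinearOperator(matrix, vector)
operator.listenIfSlave()        # non-root ranks apparently block here, serving matvecs
if communicator.Get_rank() == 0:
    result = operator.matvec(vector.fullData())  # root drives the computation
operator.finishListen()         # releases the listening ranks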
Example #9
    def testDotComplexConjugate(self):
        n_rows = 700
        n_columns = n_rows

        parallel_vector = createVector(rows=n_rows, columns=n_columns)
        matrix = ParallelMatrix(parallel_vector.distributionPlan())

        communicator = matrix.distributionPlan().communicator()

        if communicator.Get_rank() == 0:
            entire_vector = np.array(np.random.random(n_columns), dtype=np.complex128)
            entire_vector /= np.linalg.norm(entire_vector)

            entire_matrix = np.random.random((n_rows, n_columns)) + 1j*np.random.random((n_rows, n_columns))
            entire_matrix /= np.linalg.norm(entire_matrix)

            c_entire_matrix = entire_matrix.conjugate()
            entire_result = c_entire_matrix.dot(c_entire_matrix.dot(c_entire_matrix.dot(entire_vector)))
        else:
            entire_vector = None
            entire_matrix = None
            entire_result = None

        entire_result = communicator.bcast(entire_result, root=0)

        parallel_vector.broadcast(entire_vector, root=0)
        matrix.broadcast(entire_matrix, root=0)

        # Apply the conjugated matrix three times in place, matching the
        # c_entire_matrix reference above.
        for i in range(3):
            matrix.dot(parallel_vector, complex_conjugate=True)

        self.assertLess(np.linalg.norm(parallel_vector.fullData()-entire_result), 1e-10)
Example #10
    def testAddition(self):
        n_rows = 300
        n_columns = n_rows

        parallel_vector = createVector(rows=n_rows, columns=n_columns)
        matrix = [
            ParallelMatrix(parallel_vector.distributionPlan())
            for i in range(5)
        ]

        communicator = matrix[0].distributionPlan().communicator()

        total_matrix = None
        for i in range(len(matrix)):
            if communicator.Get_rank() == 0:
                entire_matrix = np.random.random((n_rows, n_columns))
                entire_matrix /= np.linalg.norm(entire_matrix)

                if total_matrix is None:
                    total_matrix = entire_matrix
                else:
                    total_matrix += entire_matrix
            else:
                entire_matrix = None

            matrix[i].broadcast(entire_matrix, root=0)

        total_matrix = communicator.bcast(total_matrix, root=0)

        for i in range(1, len(matrix)):
            matrix[0] += matrix[i]

        for i_local_row, i_row in enumerate(matrix[0].localRows()):
            self.assertLess(
                np.linalg.norm(matrix[0].localMatrix()[i_local_row, :] -
                               total_matrix[i_row, :]), 1e-10)
Example #11
def createMatrix(rows=10, columns=10):
    plan = createDistributionPlan(rows, columns)
    matrix = ParallelMatrix(distribution_plan=plan)

    return matrix
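A minimal usage sketch following the broadcast pattern from the tests above (createDistributionPlan and ParallelMatrix are assumed to come from the same package):

import numpy as np

matrix = createMatrix(rows=300, columns=300)
communicator = matrix.distributionPlan().communicator()

if communicator.Get_rank() == 0:
    data = np.random.random((300, 300))
else:
    data = None

# Rank 0's array is distributed according to the plan.
matrix.broadcast(data, root=0)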