    def testDotForTransposedNotSquared(self):
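        # Check the distributed transposed product A^T.x for a non-square
        # 300x700 matrix against the serial NumPy result computed on rank 0.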

        n_rows = 300
        n_columns = 700

        parallel_vector_in = createVector(rows=n_rows, columns=n_rows)
        parallel_vector_out = createVector(rows=n_columns, columns=n_columns)
        matrix = createMatrix(n_rows, n_columns)

        communicator = matrix.distributionPlan().communicator()

        if communicator.Get_rank() == 0:
            entire_vector = np.array(np.random.random(n_rows), dtype=np.complex128)
            entire_vector /= np.linalg.norm(entire_vector)

            entire_matrix = np.random.random((n_rows, n_columns))
            entire_matrix /= np.linalg.norm(entire_matrix)

            entire_result = entire_matrix.transpose().dot(entire_vector)
        else:
            entire_vector = None
            entire_matrix = None
            entire_result = None

        entire_result = communicator.bcast(entire_result, root=0)

        parallel_vector_in.broadcast(entire_vector, root=0)
        matrix.broadcast(entire_matrix, root=0)
        matrix.dotForTransposed(parallel_vector_in, parallel_vector_out)
        self.assertLess(np.linalg.norm(parallel_vector_out.fullData()-entire_result), 1e-10)
    def testDot(self):
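        # Apply the distributed matrix to the parallel vector three times and
        # compare with the serial reference A.(A.(A.x)) broadcast from rank 0.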

        n_rows = 700
        n_columns = n_rows

        parallel_vector = createVector(rows=n_rows, columns=n_columns)
        matrix = ParallelMatrix(parallel_vector.distributionPlan())

        communicator = matrix.distributionPlan().communicator()

        if communicator.Get_rank() == 0:
            entire_vector = np.array(np.random.random(n_columns), dtype=np.complex128)
            entire_vector /= np.linalg.norm(entire_vector)

            entire_matrix = np.random.random((n_rows, n_columns))
            entire_matrix /= np.linalg.norm(entire_matrix)

            entire_result = entire_matrix.dot(entire_matrix.dot(entire_matrix.dot(entire_vector)))
        else:
            entire_vector = None
            entire_matrix = None
            entire_result = None

        entire_result = communicator.bcast(entire_result, root=0)

        parallel_vector.broadcast(entire_vector, root=0)
        matrix.broadcast(entire_matrix, root=0)

        for i in range(3):
            matrix.dot(parallel_vector)

        self.assertLess(np.linalg.norm(parallel_vector.fullData()-entire_result), 1e-10)
    def testScalarMultiplication(self):
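        # Multiply the distributed matrix by random scalars (left, right and
        # in-place) and compare each local row block with the serial reference.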
        n_rows = 300
        n_columns = n_rows

        parallel_vector = createVector(rows=n_rows, columns=n_columns)
        matrix = ParallelMatrix(parallel_vector.distributionPlan())

        scalars = np.random.random(5)

        communicator = matrix.distributionPlan().communicator()

        if communicator.Get_rank() == 0:
            entire_matrix = np.random.random((n_rows, n_columns))
            entire_matrix /= np.linalg.norm(entire_matrix)
        else:
            entire_matrix = None

        matrix.broadcast(entire_matrix, root=0)
        entire_matrix = communicator.bcast(entire_matrix, root=0)

        for scalar in scalars:
            matrix = scalar * matrix
            matrix = matrix * scalar
            matrix *= scalar

            entire_matrix = scalar * entire_matrix
            entire_matrix = entire_matrix * scalar
            entire_matrix *= scalar

            for i_local_row, i_row in enumerate(matrix.localRows()):
                self.assertLess(np.linalg.norm(matrix.localMatrix()[i_local_row, :] - entire_matrix[i_row, :]), 1e-10)
    def testAddition(self):
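        # Sum five distributed matrices with += and compare the local row
        # blocks of the result against the serially accumulated reference.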
        n_rows = 300
        n_columns = n_rows

        parallel_vector = createVector(rows=n_rows, columns=n_columns)
        matrix = [ParallelMatrix(parallel_vector.distributionPlan()) for i in range(5)]

        communicator = matrix[0].distributionPlan().communicator()

        total_matrix = None
        for i in range(len(matrix)):
            if communicator.Get_rank() == 0:
                entire_matrix = np.random.random((n_rows, n_columns))
                entire_matrix /= np.linalg.norm(entire_matrix)

                if total_matrix is None:
                    total_matrix = entire_matrix
                else:
                    total_matrix += entire_matrix
            else:
                entire_matrix = None

            matrix[i].broadcast(entire_matrix, root=0)

        total_matrix = communicator.bcast(total_matrix, root=0)

        for i in range(1, len(matrix)):
            matrix[0] += matrix[i]

        for i_local_row, i_row in enumerate(matrix[0].localRows()):
            self.assertLess(np.linalg.norm(matrix[0].localMatrix()[i_local_row, :] - total_matrix[i_row, :]), 1e-10)
    def testEvaluateFredholmConvolutionVsConvolution(self):
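        # Apply the parallel Fredholm convolution to unit coordinate vectors
        # and compare with the direct convolution-based integral evaluation.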
        sigma_matrix = SigmaWaist(sigma_x=3e-6,
                                  sigma_y=1e-6,
                                  sigma_x_prime=5e-6,
                                  sigma_y_prime=5e-6)

        x_coordinates = np.linspace(-1e-6, 1e-6, 51)
        y_coordinates = np.linspace(-1e-6, 1e-6, 51)
        wavenumber = 1e+11

        density = PhaseSpaceDensity(sigma_matrix, wavenumber)

        e_field = createGaussian2D(sigma_x=1.0e-6,
                                   sigma_y=1.0e-6,
                                   x_coordinates=x_coordinates,
                                   y_coordinates=y_coordinates)
        e_field = e_field + 0j

        yy, xx = np.meshgrid(y_coordinates, x_coordinates)
        e_field = e_field + e_field * 1j * xx * yy

        strategy = BuilderStrategyPython(x_coordinates, y_coordinates, density,
                                         x_coordinates, y_coordinates, e_field[np.newaxis, :, :])
        strategy_convolution = BuilderStrategyConvolution(x_coordinates, y_coordinates, density,
                                                          x_coordinates, y_coordinates, e_field[np.newaxis, :, :])

        coordinate_vector = np.zeros((x_coordinates.size, y_coordinates.size), dtype=np.complex128)

        # Probe only every 4th x and every 11th y coordinate to keep the test
        # affordable; each probe applies the operator to a unit coordinate vector.
        for i_r_x, r_x in enumerate(x_coordinates):
            if i_r_x % 4 != 0:
                continue
            print("i_x %i/%i" % (i_r_x, x_coordinates.size))
            for i_r_y, r_y in enumerate(y_coordinates):
                if i_r_y % 11 != 0:
                    continue
#                if i_r_x != 26 and i_r_y != 26:
#                    continue

                r_1 = np.array([r_x, r_y])
                coordinate_vector[:, :] = 0.0
                coordinate_vector[i_r_x, i_r_y] = 1.0

                parallel_vector = createVector(coordinate_vector.size, coordinate_vector.size)
                parallel_vector.broadcast(coordinate_vector.flatten(), root=0)

                strategy.evaluateAllR_2_Fredholm_parallel_convolution(v_in=parallel_vector, v_out=parallel_vector)
                eval_fredholm = parallel_vector.fullData()
                eval_integral = strategy_convolution.evaluateAllR_2_Integral(r_1)

                diff = np.abs((eval_integral.flatten()-eval_fredholm) / eval_fredholm)
                self.assertLess(diff.max(), 1e-12)
    def testParallelDot(self):
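        # Run the matrix-vector product through ParallelLinearOperator: rank 0
        # calls matvec three times while the other ranks listen, then the
        # result is checked against the serial A.(A.(A.x)).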
        n_rows = 20
        n_columns = n_rows
        vector = createVector(rows=n_rows, columns=n_columns)
        matrix = ParallelMatrix(vector.distributionPlan())

        communicator = vector.communicator()
        if communicator.Get_rank() == 0:
            entire_vector = np.array(np.random.random(n_columns), dtype=np.complex128)
            entire_vector /= np.linalg.norm(entire_vector)

            entire_matrix = np.zeros((n_rows, n_columns), dtype=np.complex128)
            entire_matrix[:, :] = np.random.random((n_rows, n_columns)) + 1j * np.random.random((n_rows, n_columns))
            entire_matrix /= np.linalg.norm(entire_matrix)

            entire_result = entire_matrix.dot(entire_matrix.dot(entire_matrix.dot(entire_vector)))
        else:
            entire_vector = None
            entire_matrix = None
            entire_result = None

        vector.broadcast(entire_vector, root=0)
        #self.assertLess(np.linalg.norm(vector.fullData()-entire_vector), 1e-10)
        matrix.broadcast(entire_matrix, root=0)
        #self.assertLess(np.linalg.norm(matrix.localMatrix()-entire_matrix), 1e-10)
        entire_result = communicator.bcast(entire_result, root=0)

        operator = ParallelLinearOperator(matrix, vector)

        operator.listenIfSlave()

        if communicator.Get_rank() == 0:
            for i in range(3):
                operator.matvec(vector.fullData())

        operator.finishListen()

        self.assertLess(np.linalg.norm(vector.fullData()-entire_result), 1e-10)