Example #1
0
def test_distributed_matrix_local_index(na, nev, nblk):
    """Check that global->local->global index conversion round-trips.

    For every global entry that this rank owns, the local indices must lie
    inside the local block and mapping them back must recover the original
    global coordinates.
    """
    import numpy as np
    from pyelpa import ProcessorLayout, DistributedMatrix
    from mpi4py import MPI

    layout = ProcessorLayout(MPI.COMM_WORLD)

    for dtype in [np.float64, np.complex128]:
        matrix = DistributedMatrix(layout, na, nev, nblk, dtype=dtype)
        for g_row in range(matrix.na):
            for g_col in range(matrix.na):
                # only entries stored on this rank can be converted
                if matrix.is_local_index(g_row, g_col):
                    l_row, l_col = matrix.get_local_index(g_row, g_col)
                    back_row, back_col = matrix.get_global_index(l_row, l_col)
                    # local indices must fall within the local block extents
                    assert (0 <= l_row and l_row < matrix.na_rows)
                    assert (0 <= l_col and l_col < matrix.na_cols)
                    # round trip must restore the global coordinates
                    assert (back_row == g_row and back_col == g_col)
Example #2
0
def test_setting_global_matrix(na, nev, nblk):
    """Check that set_data_from_global_matrix distributes entries correctly.

    Builds the same random global matrix on every rank, scatters it into the
    distributed matrix, and verifies each locally stored entry against the
    reference.
    """
    import numpy as np
    from pyelpa import ProcessorLayout, DistributedMatrix
    from mpi4py import MPI

    layout = ProcessorLayout(MPI.COMM_WORLD)

    for dtype in [np.float64, np.complex128]:
        dist = DistributedMatrix(layout, na, nev, nblk, dtype=dtype)
        # reference global matrix, identical on all ranks
        reference = get_random_vector(na * na).reshape(na, na).astype(dtype)
        dist.set_data_from_global_matrix(reference)

        # every entry owned by this rank must equal the reference value
        for row in range(dist.na):
            for col in range(dist.na):
                if not dist.is_local_index(row, col):
                    continue
                i, j = dist.get_local_index(row, col)
                assert (dist.data[i, j] == reference[row, col])
Example #3
0
def test_distributed_matrix_indexing_loop(na, nev, nblk):
    """Check local/global indexing by writing and re-reading encoded values.

    Fills every locally stored entry with a value derived from its global
    coordinates, then walks the global index space and verifies each local
    entry decodes back to the expected value.
    """
    import numpy as np
    from pyelpa import ProcessorLayout, DistributedMatrix
    from mpi4py import MPI

    layout = ProcessorLayout(MPI.COMM_WORLD)

    for dtype in [np.float64, np.complex128]:
        dist = DistributedMatrix(layout, na, nev, nblk, dtype=dtype)

        # write phase: encode the global coordinates into each local entry
        for i in range(dist.na_rows):
            for j in range(dist.na_cols):
                g_row, g_col = dist.get_global_index(i, j)
                dist.data[i, j] = g_row * 10 + g_col

        # read phase: every locally owned global entry must decode correctly
        for g_row in range(dist.na):
            for g_col in range(dist.na):
                if not dist.is_local_index(g_row, g_col):
                    continue
                i, j = dist.get_local_index(g_row, g_col)
                assert (dist.data[i, j] == g_row * 10 + g_col)