Example #1
def test_processor_layout():
    from pyelpa import ProcessorLayout
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    layout_p = ProcessorLayout(comm)
    # the process grid must tile the whole communicator
    assert (comm.Get_size() == layout_p.np_cols * layout_p.np_rows)
    # this rank's grid coordinates are non-negative and bounded by the communicator size
    assert (layout_p.my_prow >= 0)
    assert (layout_p.my_pcol >= 0)
    assert (layout_p.my_prow <= comm.Get_size())
    assert (layout_p.my_pcol <= comm.Get_size())
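ProcessorLayout arranges the ranks of an MPI communicator into a 2D process grid; the test above checks that the grid covers all ranks and that each rank receives valid coordinates. The following is a minimal sketch for inspecting that grid, using only the attributes exercised above (np_rows, np_cols, my_prow, my_pcol); run it under mpiexec/mpirun with several processes.

from mpi4py import MPI
from pyelpa import ProcessorLayout

comm = MPI.COMM_WORLD
layout = ProcessorLayout(comm)

# every rank reports its position in the 2D process grid
print(f"rank {comm.Get_rank()}/{comm.Get_size()}: "
      f"grid {layout.np_rows} x {layout.np_cols}, "
      f"coordinates ({layout.my_prow}, {layout.my_pcol})")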
Example #2
def test_distributed_matrix_from_processor_layout(na, nev, nblk):
    import numpy as np
    from pyelpa import ProcessorLayout, DistributedMatrix
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    layout_p = ProcessorLayout(comm)

    for dtype in [np.float64, np.float32, np.complex64, np.complex128]:
        a = DistributedMatrix(layout_p, na, nev, nblk, dtype=dtype)
        assert (a.data.dtype == dtype)
        assert (a.data.shape == (a.na_rows, a.na_cols))
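In these tests, na is the global matrix dimension, nev the number of eigenpairs to be computed, and nblk the block size of the block-cyclic distribution (the usual ELPA parameter names, presumably supplied here as pytest fixtures); na_rows and na_cols are the dimensions of the locally stored block, so a.data holds only this rank's part of the matrix.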
Example #3
def test_distributed_matrix_like_other_matrix(na, nev, nblk):
    import numpy as np
    from pyelpa import ProcessorLayout, DistributedMatrix
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    layout_p = ProcessorLayout(comm)

    for dtype in [np.float64, np.float32, np.complex64, np.complex128]:
        a = DistributedMatrix(layout_p, na, nev, nblk, dtype=dtype)
        b = DistributedMatrix.like(a)
        assert (a.na == b.na)
        assert (a.nev == b.nev)
        assert (a.nblk == b.nblk)
        assert (a.data.dtype == b.data.dtype)
        assert (a.data.shape == b.data.shape)
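As the assertions suggest, DistributedMatrix.like(a) creates a new distributed matrix with the same global size, nev, block size, dtype and local shape as a; only this metadata is compared, which suggests the entries themselves are not copied (similar in spirit to numpy's empty_like).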
Example #4
def test_call_eigenvalues(na, nev, nblk):
    import numpy as np
    from pyelpa import ProcessorLayout, DistributedMatrix, Elpa
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    layout_p = ProcessorLayout(comm)

    for dtype in [np.float64, np.complex128]:
        # create arrays
        a = DistributedMatrix(layout_p, na, nev, nblk, dtype=dtype)
        a.data[:, :] = np.random.rand(a.na_rows, a.na_cols).astype(dtype)
        # output buffer for the eigenvalues (always real, even for complex matrices)
        ev = np.zeros(na, dtype=np.float64)

        e = Elpa.from_distributed_matrix(a)
        e.eigenvalues(a.data, ev)
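Example #4 only checks that the call goes through. A slightly fuller sketch, assuming the calls behave as in these examples (set_data_from_global_matrix from Example #6, Elpa.from_distributed_matrix and eigenvalues from above) and that the computed eigenvalues are returned on every rank, would verify the result against a serial NumPy reference for a symmetric matrix:

import numpy as np
from mpi4py import MPI
from pyelpa import ProcessorLayout, DistributedMatrix, Elpa

na, nev, nblk = 256, 10, 16
comm = MPI.COMM_WORLD
layout_p = ProcessorLayout(comm)

# build the same symmetric matrix on every rank (fixed seed, no communication needed)
rng = np.random.RandomState(42)
m = rng.rand(na, na)
matrix = 0.5 * (m + m.T)

a = DistributedMatrix(layout_p, na, nev, nblk, dtype=np.float64)
a.set_data_from_global_matrix(matrix)

ev = np.zeros(na, dtype=np.float64)
e = Elpa.from_distributed_matrix(a)
e.eigenvalues(a.data, ev)

# compare the lowest nev eigenvalues against NumPy
reference = np.linalg.eigvalsh(matrix)
assert np.allclose(ev[:nev], reference[:nev])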
Example #5
def test_distributed_matrix_global_index(na, nev, nblk):
    import numpy as np
    from pyelpa import ProcessorLayout, DistributedMatrix
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    layout_p = ProcessorLayout(comm)

    for dtype in [np.float64, np.complex128]:
        a = DistributedMatrix(layout_p, na, nev, nblk, dtype=dtype)
        # round trip: local -> global -> local must reproduce the local indices
        for local_row in range(a.na_rows):
            for local_col in range(a.na_cols):
                global_row, global_col = a.get_global_index(
                    local_row, local_col)
                l_row, l_col = a.get_local_index(global_row, global_col)
                assert (global_row >= 0 and global_row < a.na)
                assert (global_col >= 0 and global_col < a.na)
                assert (local_row == l_row and local_col == l_col)
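The translation between local and global indices follows a block-cyclic distribution over the process grid. As a reference for what get_global_index/get_local_index are expected to do, here is the standard 1D ScaLAPACK-style block-cyclic mapping, written as hypothetical helpers (not part of the pyelpa API); pyelpa presumably applies it to rows and columns independently.

def block_cyclic_local_to_global(local_index, nblk, my_proc, num_procs):
    # standard 1D block-cyclic mapping: local block k on process p is global block k*num_procs + p
    block, offset = divmod(local_index, nblk)
    return (block * num_procs + my_proc) * nblk + offset

def block_cyclic_global_to_local(global_index, nblk, num_procs):
    # inverse mapping: returns (owning process, local index)
    block, offset = divmod(global_index, nblk)
    return block % num_procs, (block // num_procs) * nblk + offset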
Example #6
def test_setting_global_matrix(na, nev, nblk):
    import numpy as np
    from pyelpa import ProcessorLayout, DistributedMatrix
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    layout_p = ProcessorLayout(comm)

    for dtype in [np.float64, np.complex128]:
        a = DistributedMatrix(layout_p, na, nev, nblk, dtype=dtype)
        # get global matrix that is equal on all cores
        matrix = get_random_vector(na * na).reshape(na, na).astype(dtype)
        a.set_data_from_global_matrix(matrix)

        # check data
        for global_row in range(a.na):
            for global_col in range(a.na):
                if not a.is_local_index(global_row, global_col):
                    continue
                local_row, local_col = a.get_local_index(
                    global_row, global_col)
                assert (a.data[local_row, local_col] == matrix[global_row,
                                                               global_col])
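get_random_vector is a helper from the test module that is not shown in these examples; its only job is to produce the same random data on every rank, so that set_data_from_global_matrix sees a consistent global matrix. A minimal sketch of such a helper, assuming it simply broadcasts rank 0's data:

import numpy as np
from mpi4py import MPI

def get_random_vector(size, comm=MPI.COMM_WORLD):
    # draw on rank 0 and broadcast so every rank holds identical values
    vector = np.empty(size, dtype=np.float64)
    if comm.Get_rank() == 0:
        vector[:] = np.random.rand(size)
    comm.Bcast(vector, root=0)
    return vector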
Example #7
def test_distributed_matrix_indexing_loop(na, nev, nblk):
    import numpy as np
    from pyelpa import ProcessorLayout, DistributedMatrix
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    layout_p = ProcessorLayout(comm)

    for dtype in [np.float64, np.complex128]:
        a = DistributedMatrix(layout_p, na, nev, nblk, dtype=dtype)
        # write an encoding of the global index into every locally stored element
        for local_row in range(a.na_rows):
            for local_col in range(a.na_cols):
                global_row, global_col = a.get_global_index(
                    local_row, local_col)
                a.data[local_row, local_col] = global_row * 10 + global_col

        # read the data back via global indices and verify the encoding
        for global_row in range(a.na):
            for global_col in range(a.na):
                if not a.is_local_index(global_row, global_col):
                    continue
                local_row, local_col = a.get_local_index(
                    global_row, global_col)
                assert (a.data[local_row,
                               local_col] == global_row * 10 + global_col)
Example #8
def test_compare_eigenvalues_to_those_from_eigenvectors(na, nev, nblk):
    import numpy as np
    from pyelpa import ProcessorLayout, DistributedMatrix, Elpa
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    layout_p = ProcessorLayout(comm)

    for dtype in [np.float64, np.complex128]:
        # create arrays
        a = DistributedMatrix(layout_p, na, nev, nblk, dtype=dtype)
        random_matrix = np.random.rand(a.na_rows, a.na_cols).astype(dtype)
        a.data[:, :] = random_matrix
        q = DistributedMatrix(layout_p, na, nev, nblk, dtype=dtype)
        ev = np.zeros(na, dtype=np.float64)
        ev2 = np.zeros(na, dtype=np.float64)

        e = Elpa.from_distributed_matrix(a)
        e.eigenvectors(a.data, ev, q.data)

        a.data[:, :] = random_matrix
        e.eigenvalues(a.data, ev2)

        assert (np.allclose(ev, ev2))
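Note that a.data is reassigned from random_matrix before the second solver call: the ELPA routines work on the matrix in place, so the eigenvectors call overwrites a.data and the original data has to be restored before eigenvalues is run on the same problem. The final check then asserts that both code paths produce the same spectrum.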