Code Example #1
File: test_core_pc.py  Project: vincentmr/scalapy
from mpi4py import MPI

from scalapy import core

comm = MPI.COMM_WORLD
rank = comm.rank

# Expected (row, col) grid positions for ranks 0-3 (row-major ordering assumed)
poslist = [(0, 0), (0, 1), (1, 0), (1, 1)]


def test_initmpi():

    core.initmpi([2, 2], block_shape=[5, 5])

    # Test grid shape is correct
    assert core._context.grid_shape == (2, 2)

    # Test we have the correct positions
    assert core._context.grid_position == poslist[rank]

    # Test the blockshape is set correctly
    assert core._block_shape == (5, 5)
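
The examples on this page all set up a 2 x 2 process grid (and most of them raise unless exactly four MPI processes are running), so they must be launched under MPI, e.g. mpiexec -n 4 python -m pytest test_core_pc.py (invocation assumed, not part of the listings).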
Code Example #2
File: test_core_dm.py  Project: jrs65/scalapy
import numpy as np

from mpi4py import MPI
from scalapy import core

comm = MPI.COMM_WORLD

rank = comm.rank
size = comm.size

if size != 4:
    raise Exception("Test needs 4 processes.")

core.initmpi([2, 2], block_shape=[3, 3])


def test_dm_init():
    dm = core.DistributedMatrix([5, 5])

    # Check global shape
    assert dm.global_shape == (5, 5)

    # Check local shape
    shapelist = [(3, 3), (3, 2), (2, 3), (2, 2)]
    assert dm.local_shape == shapelist[rank]


def test_dm_load_5x5():
    ## Test that a 5x5 DistributedMatrix is loaded correctly.

    # Generate matrix
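
The shapelist asserted in test_dm_init is just ScaLAPACK's block-cyclic bookkeeping: the 5 x 5 matrix is dealt out in 3 x 3 blocks round-robin over the 2 x 2 grid along each dimension. A minimal Python transcription of ScaLAPACK's NUMROC count (a sketch for illustration, not part of the file; row-major rank ordering assumed) reproduces the expected local shapes:

def numroc(n, nb, iproc, nprocs):
    # Elements of an n-length dimension, in nb-sized blocks dealt
    # cyclically over nprocs processes, that land on process iproc.
    nblocks = n // nb                   # complete blocks in total
    num = (nblocks // nprocs) * nb      # whole rounds of blocks
    extrablocks = nblocks % nprocs      # leftover complete blocks
    if iproc < extrablocks:
        num += nb                       # one extra complete block
    elif iproc == extrablocks:
        num += n % nb                   # the trailing partial block
    return num

# 5x5 global matrix, 3x3 blocks, 2x2 grid, ranks in row-major order:
for r in range(4):
    row, col = divmod(r, 2)
    print(r, (numroc(5, 3, row, 2), numroc(5, 3, col, 2)))
# -> (3, 3), (3, 2), (2, 3), (2, 2), matching shapelist above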
Code Example #3
File: test_npdm.py  Project: vincentmr/scalapy
import numpy as np

from mpi4py import MPI

from scalapy import core
import scalapy.routines as rt

comm = MPI.COMM_WORLD

rank = comm.rank
size = comm.size

if size != 4:
    raise Exception("Test needs 4 processes.")

core.initmpi([2, 2], block_shape=[3, 3])

allclose = lambda a, b: np.allclose(a, b, rtol=1e-4, atol=1e-6)


def test_np2self_D():
    ## Test copying a numpy array to a section of the distributed matrix and vice versa
    am, an = 13, 5
    Am, An = 39, 23
    srow, scol = 3, 12

    a = np.arange(am * an, dtype=np.float64).reshape(am, an)
    a = np.asfortranarray(a)

    gA = np.random.standard_normal((Am, An)).astype(np.float64)
    gA = np.asfortranarray(gA)
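
The listing is cut off by the source page at this point. For orientation, the pure-numpy operation that the distributed round-trip is being checked against is just a section assignment (sketch, not part of the file):

# Copying a into the (srow, scol) section of the global array:
gA[srow:srow + am, scol:scol + an] = a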
Code Example #4
import numpy as np

from mpi4py import MPI

from scalapy import core
import scalapy.routines as rt


comm = MPI.COMM_WORLD

rank = comm.rank
size = comm.size

if size != 4:
    raise Exception("Test needs 4 processes.")

core.initmpi([2, 2], block_shape=[16, 16])

allclose = lambda a, b: np.allclose(a, b, rtol=1e-4, atol=1e-6)


def test_cholesky_D():
    ## Test the Cholesky decomposition of a double precision matrix (use the
    ## default, upper half)
    ns = 317

    gA = np.random.standard_normal((ns, ns)).astype(np.float64)
    gA = np.dot(gA, gA.T)  # Make positive definite
    gA = np.asfortranarray(gA)

    dA = core.DistributedMatrix.from_global_array(gA, rank=0)
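
The listing stops after distributing the matrix. A hedged sketch of how such a test typically concludes, assuming scalapy's rt.cholesky returns the upper factor by default (as the comment above suggests) and using numpy's lower-triangular factor as the local reference:

dU = rt.cholesky(dA)                   # distributed Cholesky factor
gUd = dU.to_global_array(rank=0)

if rank == 0:
    gL = np.linalg.cholesky(gA)        # numpy returns the lower factor
    # The upper factor is the transpose of the lower one
    assert allclose(np.triu(gUd), gL.T)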
Code Example #5
File: decomp_corr.py  Project: zuoshifan/rpca_HI
import h5py

from r_pca import R_pca

from mpi4py import MPI

from scalapy import core
import scalapy.routines as rt

comm = MPI.COMM_WORLD

rank = comm.rank
size = comm.size

if size != 4:
    raise Exception("Test needs 4 processes.")

core.initmpi([2, 2], block_shape=[32, 32])

ps_name = 'sim_pointsource_256_700_800_256.hdf5'
ga_name = 'sim_galaxy_256_700_800_256.hdf5'
cm_name = 'sim_21cm_256_700_800_256.hdf5'
with h5py.File(ps_name, 'r') as f:
    ps_map = f['map'][:, 0, :]
with h5py.File(ga_name, 'r') as f:
    ga_map = f['map'][:, 0, :]
with h5py.File(cm_name, 'r') as f:
    cm_map = f['map'][:, 0, :]

fg_map = ps_map + ga_map
tt_map = fg_map + cm_map  # total signal

npix = ps_map.shape[-1]
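
The decomposition itself is truncated here. With the imported r_pca package, the robust-PCA step would plausibly look like the following (hypothetical continuation; corr is a stand-in name for whatever matrix the script actually decomposes):

rpca = R_pca(corr)    # hypothetical input built from the maps above
L, S = rpca.fit()     # low-rank and sparse components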
Code Example #6
File: test_pdgemm.py  Project: jrs65/scalapy
import numpy as np

from mpi4py import MPI

from scalapy import core
import scalapy.lowlevel.pblas as pblas_ll
import scalapy.lowlevel as ll


comm = MPI.COMM_WORLD

rank = comm.rank
size = comm.size

allclose = lambda a, b: np.allclose(a, b, rtol=1e-4, atol=1e-6)

if size != 4:
    raise Exception("Test needs 4 processes.")


core.initmpi([2, 2], block_shape=[8, 8])


def pdgemm_iter_NN(n, m, k):

    gA = np.asfortranarray(np.random.standard_normal((n, k)))
    gB = np.asfortranarray(np.random.standard_normal((k, m)))

    dA = core.DistributedMatrix.from_global_array(gA, rank=0)
    dB = core.DistributedMatrix.from_global_array(gB, rank=0)
    dC = core.DistributedMatrix([n, m], dtype=np.float64)

    ll.pdgemm('N', 'N', n, m, k, 1.0, dA, dB, 0.0, dC)

    gCd = dC.to_global_array(rank=0)
    gC = np.asfortranarray(np.dot(gA, gB))

    # The gathered result is only defined on rank 0; compare there
    if rank == 0:
        assert allclose(gC, gCd)
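
ll.pdgemm implements the PBLAS operation C ← α·op(A)·op(B) + β·C; with 'N', 'N', α = 1.0 and β = 0.0 the call above is a plain distributed matrix product, gathered back to rank 0 and checked against a local np.dot.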
Code Example #7
File: test_pdgemm.py  Project: vincentmr/scalapy
import numpy as np

from mpi4py import MPI

from scalapy import core
import scalapy.lowlevel.pblas as pblas_ll
import scalapy.lowlevel as ll

comm = MPI.COMM_WORLD

rank = comm.rank
size = comm.size

allclose = lambda a, b: np.allclose(a, b, rtol=1e-4, atol=1e-6)

if size != 4:
    raise Exception("Test needs 4 processes.")

core.initmpi([2, 2], block_shape=[8, 8])


def pdgemm_iter_NN(n, m, k):

    gA = np.asfortranarray(np.random.standard_normal((n, k)))
    gB = np.asfortranarray(np.random.standard_normal((k, m)))

    dA = core.DistributedMatrix.from_global_array(gA, rank=0)
    dB = core.DistributedMatrix.from_global_array(gB, rank=0)
    dC = core.DistributedMatrix([n, m], dtype=np.float64)

    ll.pdgemm('N', 'N', n, m, k, 1.0, dA, dB, 0.0, dC)

    gCd = dC.to_global_array(rank=0)
    gC = np.asfortranarray(np.dot(gA, gB))

    # The gathered result is only defined on rank 0; compare there
    if rank == 0:
        assert allclose(gC, gCd)
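
As in Example #6, pdgemm_iter_NN is a parameterized helper rather than a test itself; a driver would call it over a few shape triples, for instance (sizes hypothetical):

def test_pdgemm_NN():
    # Exercise a couple of non-square products
    pdgemm_iter_NN(353, 179, 301)
    pdgemm_iter_NN(23, 511, 88)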