Example #1
"""Use ga.access() to sum locally per SMP node."""

import mpi4py.MPI  # importing mpi4py.MPI initializes MPI, which GA requires
from ga4py import ga
import numpy as np

world_id = ga.nodeid()
world_nproc = ga.nnodes()
node_id = ga.cluster_nodeid()
node_nproc = ga.cluster_nprocs(node_id)
node_me = ga.cluster_procid(node_id, ga.nodeid())

g_a = ga.create(ga.C_DBL, (3, 4, 5, 6))
if world_id == 0:
    # process 0 fills the entire array with the values 0..359
    ga.put(g_a, np.arange(3 * 4 * 5 * 6))
ga.sync()

if node_me == 0:
    # The first process on each SMP node walks its on-node peers and
    # sums their local portions of g_a via direct (shared-memory) access.
    total = 0.0
    for i in range(node_nproc):
        smp_neighbor_world_id = ga.cluster_procid(node_id, i)
        local_data = ga.access(g_a, proc=smp_neighbor_world_id)
        total += np.sum(local_data)
    print(total)
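
Since the put above fills the whole array with 0..359, the totals printed by the node leaders should add up to the full arange sum; on a single SMP node the lone leader prints it directly. A small illustrative sanity check (not part of the original example):

# Illustrative check: the totals printed by all node leaders should
# add up to the sum of the values written by process 0 above.
expected_total = np.arange(3 * 4 * 5 * 6).sum()  # 0 + 1 + ... + 359 = 64620
# Run under MPI as usual, e.g.: mpiexec -np 4 python access_sum.py
# (hypothetical script name)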
Example #2
"""At first it was a duplicate of GA's test.x, but was made more modular since
so much could be reused.  It is more like unit tests since each piece has
build up and tear down."""

import random
import sys

from mpi4py import MPI
from ga4py import ga
import numpy as np

me = ga.nodeid()
nproc = ga.nnodes()
nnodes = ga.cluster_nnodes()
inode = ga.cluster_nodeid()
lprocs = ga.cluster_nprocs(inode)
iproc = me % lprocs

n = 256
m = 2*n
maxloop = 100
nloop = min(maxloop,n)
block_size = [32, 32]
proc_grid = [2, nproc//2]  # integer division; assumes nproc is even
MEM_INC = 1000
MIRROR = False
USE_RESTRICTED = False
NEW_API = False
NGA_GATSCAT = False
BLOCK_CYCLIC = False
USE_SCALAPACK_DISTR = False
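
These flags select among the distribution strategies the test exercises. As a hedged sketch of how block_size and proc_grid would come into play when BLOCK_CYCLIC is set (assuming the ga4py setters mirror GA's C API, GA_Set_block_cyclic and GA_Set_block_cyclic_proc_grid):

# Sketch only: create an n x m integer array with a block-cyclic
# distribution, as the BLOCK_CYCLIC/USE_SCALAPACK_DISTR flags suggest.
g_bc = ga.create_handle()
ga.set_data(g_bc, [n, m], ga.C_INT)
if USE_SCALAPACK_DISTR:
    # ScaLAPACK-style: blocks are cycled over a 2-D process grid
    ga.set_block_cyclic_proc_grid(g_bc, block_size, proc_grid)
else:
    ga.set_block_cyclic(g_bc, block_size)
ga.allocate(g_bc)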
Example #3
def main():
    if 0 == me:
        if MIRROR:
            print(' Performing tests on Mirrored Arrays')
        print(' GA initialized')

    # note that MA is not used, so no need to initialize it
    # "import ga" registers malloc/free as memory allocators internally

    #if nproc-1 == me:
    if 0 == me:
        print('using %d process(es) on %d cluster node(s)' % (
                nproc, ga.cluster_nnodes()))
        print('process %d is on node %d with %d processes' % (
                me, ga.cluster_nodeid(), ga.cluster_nprocs(-1)))

    # create array to force staggering of memory and uneven distribution
    # of pointers
    dim1 = MEM_INC
    mapc = [0]*nproc
    for i in range(nproc):
        mapc[i] = MEM_INC*i
        dim1 += MEM_INC*i
    g_s = ga.create_handle()
    ga.set_data(g_s, [dim1], ga.C_INT)
    ga.set_array_name(g_s, 's')
    ga.set_irreg_distr(g_s, mapc, [nproc])

    if MIRROR:
        if 0 == me:
            print('\nTESTING MIRRORED ARRAYS\n')

    # check support for single precision arrays
    if 0 == me:
        print('\nCHECKING SINGLE PRECISION\n')
    check_float()

    # check support for double precision arrays
    if 0 == me:
        print('\nCHECKING DOUBLE PRECISION\n')
    check_double()

    # check support for single precision complex arrays
    if 0 == me:
        print('\nCHECKING SINGLE COMPLEX\n')
    check_complex_float()

    # check support for double precision complex arrays
    if 0 == me:
        print('\nCHECKING DOUBLE COMPLEX\n')
    check_complex_double()

    # check support for integer arrays
    if 0 == me:
        print('\nCHECKING INT\n')
    check_int()

    # check support for long integer arrays
    if 0 == me:
        print('\nCHECKING LONG INT\n')
    check_long()

    if 0 == me:
        print('\nCHECKING Wrappers to Message Passing Collective ops\n')
    check_wrappers()

    # check if memory limits are enforced
    #check_mem(ma_heap*ga.nnodes())

    if 0 == me:
        ga.print_stats()
        print(' ')
        print('All tests successful')
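
The check_* routines themselves are not reproduced here. As an illustrative sketch of the round-trip pattern each one follows (a hypothetical check_int: create an array of the type under test, fill it, read it back, verify, and destroy it; this is not the test suite's actual code):

def check_int():
    """Sketch of the put/get round-trip check for C_INT arrays."""
    g_a = ga.create(ga.C_INT, [n, m])
    if 0 == me:
        # fill the whole array with a known pattern
        ga.put(g_a, np.arange(n * m))
    ga.sync()
    # every process reads the array back and verifies the pattern
    result = ga.get(g_a)
    if not np.all(result.flatten() == np.arange(n * m)):
        ga.error('check_int failed')
    ga.destroy(g_a)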