Code Example #1
File: srumma.answer.py  Project: dmlb2000/nwchem-cml
"""A way over-simplified SRUMMA matrix multiplication implementation.

Assumes square matrices whose dimension is a multiple of the block size.

"""
import mpi4py.MPI
import ga
import numpy as np

CHUNK_SIZE = 256
MULTIPLIER = 3
N = CHUNK_SIZE*MULTIPLIER

me = ga.nodeid()
nproc = ga.nnodes()

class Task(object):
    def __init__(self, alo, ahi, blo, bhi, clo, chi):
        self.alo = alo
        self.ahi = ahi
        self.blo = blo
        self.bhi = bhi
        self.clo = clo
        self.chi = chi
    def __repr__(self):
        return "Task(%s,%s,%s,%s,%s,%s)" % (
                self.alo, self.ahi, self.blo, self.bhi, self.clo, self.chi)

def get_task_list(chunk_size, multiplier):
    count = 0
    task_list = [None]*multiplier**3
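    # (Completion sketch: the original listing is truncated here. A
    #  plausible body builds one Task per (i, j, k) block triple, where
    #  the C[i,j] block accumulates A[i,k] * B[k,j].)
    for i in range(multiplier):
        ilo, ihi = i*chunk_size, (i+1)*chunk_size
        for j in range(multiplier):
            jlo, jhi = j*chunk_size, (j+1)*chunk_size
            for k in range(multiplier):
                klo, khi = k*chunk_size, (k+1)*chunk_size
                task_list[count] = Task(
                        (ilo, klo), (ihi, khi),   # block of A
                        (klo, jlo), (khi, jhi),   # block of B
                        (ilo, jlo), (ihi, jhi))   # block of C
                count += 1
    return task_list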
Code Example #2
import mpi4py.MPI
import ga
import numpy as np

me = ga.nodeid()
nproc = ga.nnodes()


def parallel_task():
    me = ga.pgroup_nodeid()
    nproc = ga.pgroup_nnodes()
    ### print a message from the master of the group
    g_a = ga.create(ga.C_DBL, (3, 4, 5))
    ga.randomize(g_a)
    ### sum the g_a and print the sum
    ###     -OR- do something else with g_a...


midproc = nproc // 2
### assign to 'proclist_first' the first half of the process range
### assign to 'proclist_last' the last half of the process range
### create the 'group_id_first' process group
### create the 'group_id_last' process group
if me in proclist_first:
    ### set the default group to 'group_id_first'
    parallel_task()
### reset the default group to the world group
### synchronize
if me in proclist_last:
    ### set the default group to 'group_id_last'
    parallel_task()
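
The ### lines above are exercise prompts, so proclist_first, proclist_last, group_id_first, and group_id_last are never defined and the snippet does not run as written. Below is a minimal sketch of one way to fill in the module-level prompts, assuming the ga process-group calls ga.pgroup_create, ga.pgroup_set_default, and ga.pgroup_get_world; the prompts inside parallel_task() could be completed along the same lines.

# hedged sketch, not part of the original listing
proclist_first = range(0, midproc)            # first half of the processes
proclist_last = range(midproc, nproc)         # last half of the processes
group_id_first = ga.pgroup_create(proclist_first)
group_id_last = ga.pgroup_create(proclist_last)
if me in proclist_first:
    ga.pgroup_set_default(group_id_first)     # run the task in the subgroup
    parallel_task()
ga.pgroup_set_default(ga.pgroup_get_world())  # reset to the world group
ga.sync()
if me in proclist_last:
    ga.pgroup_set_default(group_id_last)
    parallel_task()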
Code Example #3
File: access.answer.py  Project: dmlb2000/nwchem-cml
"""Use ga.access() to sum locally per SMP node."""

import mpi4py.MPI
import ga
import numpy as np

world_id = ga.nodeid()
world_nproc = ga.nnodes()
node_id = ga.cluster_nodeid()
node_nproc = ga.cluster_nprocs(node_id)
node_me = ga.cluster_procid(node_id, ga.nodeid())

g_a = ga.create(ga.C_DBL, (3, 4, 5, 6))
if world_id == 0:
    ga.put(g_a, np.arange(3 * 4 * 5 * 6))
ga.sync()

if node_me == 0:
    local_sum = 0  # renamed to avoid shadowing the builtin sum()
    for i in range(node_nproc):
        smp_neighbor_world_id = ga.cluster_procid(node_id, i)
        buffer = ga.access(g_a, proc=smp_neighbor_world_id)
        local_sum += np.sum(buffer)
    print local_sum
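
Note: ga.access() returns a direct view of data held in shared memory on the calling process's SMP node, so the loop above can read each same-node neighbor's patch and form the per-node sum without any explicit communication.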
Code Example #4
File: laplace.py  Project: dmlb2000/nwchem-cml
    def __init__(self):
        self.me = ga.nodeid()
        self.stdout = sys.stdout
Code Example #5
import sys
import mpi4py.MPI  # initialize the Message Passing Interface
import ga          # initialize Global Arrays

# (Reconstruction sketch: the listing is truncated above this point. The
#  names below are assumed from the calls made later in the snippet.)
PI = 3.141592653589793  # reference value used by prn_pi()

def get_n():
    # presumably prompts rank 0 for the number of intervals
    n = int(raw_input("Enter the number of intervals: (0 quits) "))
    return n

def comp_pi(n, myrank=0, nprocs=1):
    h = 1.0 / n
    s = 0.0
    for i in xrange(myrank + 1, n + 1, nprocs):
        x = h * (i - 0.5)
        s += 4.0 / (1.0 + x**2)
    return s * h

def prn_pi(pi, PI):
    message = "pi is approximately %.16f, error is %.16f"
    print message % (pi, abs(pi - PI))

nprocs = ga.nnodes()
myrank = ga.nodeid()

g_pi = ga.create(ga.C_DBL, [1])

one_time = False
if len(sys.argv) == 2:
    n = int(sys.argv[1])
    one_time = True

while True:
    if not one_time:
        if myrank == 0:
            n = get_n()
            n = ga.brdcst(n)
        else:
            n = ga.brdcst(0)
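    # (Completion sketch: the original listing is truncated here. A
    #  plausible continuation: quit on n <= 0, accumulate the partial
    #  sums into g_pi, and let rank 0 print the result.)
    if n <= 0:
        break
    ga.zero(g_pi)
    mypi = comp_pi(n, myrank, nprocs)
    ga.acc(g_pi, [mypi])  # add this rank's partial sum into the global array
    ga.sync()
    if myrank == 0:
        pi = ga.get(g_pi)[0]  # fetch the reduced value
        prn_pi(pi, PI)
    if one_time:
        break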
Code Example #6
import mpi4py.MPI # initialize Message Passing Interface
import ga # initialize Global Arrays

print "hello from %s out of %s" % (ga.nodeid(),ga.nnodes())