Example #1
def __init__(self):
    from ipyparallel import Client

    # connect to the running cluster and block on remote calls
    rc = Client()
    rc.block = True

    # DirectView over all available engines
    self.cpu = rc[:]
    print('{} cores ready'.format(len(self.cpu)))

    # pre-import the modules the engines will need
    self.cpu.execute('import numpy as np')
    self.cpu.execute('from sklearn.neighbors import KDTree, BallTree')
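A hedged usage sketch for the view prepared above: the method name, the data argument, and the k-nearest-neighbour query are illustrative assumptions, not part of the original class; only push/execute/pull are standard DirectView calls.

def query_neighbors(self, data, k=3):
    # hypothetical helper: ship the data, build a KDTree on each engine,
    # and pull the query results back (illustrative only)
    self.cpu.push({'data': data})
    self.cpu.execute('tree = KDTree(np.asarray(data))')
    self.cpu.execute('dist, ind = tree.query(np.asarray(data), k=%d)' % k)
    return self.cpu.pull(['dist', 'ind'])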
Example #2
import random
from optparse import OptionParser

from ipyparallel import Client


def main():
    parser = OptionParser()
    parser.set_defaults(n=100, tmin=1e-3, tmax=1, profile='default')

    parser.add_option("-n", type='int', dest='n',
        help='the number of tasks to run')
    parser.add_option("-t", type='float', dest='tmin',
        help='the minimum task length in seconds')
    parser.add_option("-T", type='float', dest='tmax',
        help='the maximum task length in seconds')
    parser.add_option("-p", '--profile', type='str', dest='profile',
        help="the cluster profile [default: 'default']")

    (opts, args) = parser.parse_args()
    assert opts.tmax >= opts.tmin, "tmax must not be smaller than tmin"

    # connect to the cluster for the requested profile
    rc = Client(profile=opts.profile)
    view = rc.load_balanced_view()
    print(view)
    rc.block = True
    nengines = len(rc.ids)
    with rc[:].sync_imports():
        from IPython.utils.timing import time

    # the jobs should take a random time within a range
    times = [random.random()*(opts.tmax-opts.tmin)+opts.tmin for i in range(opts.n)]
    stime = sum(times)

    print("executing %i tasks, totalling %.1f secs on %i engines"%(opts.n, stime, nengines))
    time.sleep(1)
    start = time.time()
    amr = view.map(time.sleep, times)
    amr.get()
    stop = time.time()

    ptime = stop-start
    scale = stime/ptime

    print("executed %.1f secs in %.1f secs"%(stime, ptime))
    print("%.3fx parallel performance on %i engines"%(scale, nengines))
    print("%.1f%% of theoretical max"%(100*scale/nengines))
Example #3
of megabytes you might saturate the network interface of a single node and 
potentially its memory buffers if the messages are not consumed in a streamed 
manner.

Note that the AllReduce scheme implemented with the spanning tree strategy
requires the aggregation function to be commutative and associative. This is
not required by the naive gather / reduce / broadcast strategy, where the
partial data can be reordered before the reduce is performed.
"""
from __future__ import print_function

from ipyparallel import Client, Reference

# connect client and create views
rc = Client()
rc.block = True
ids = rc.ids

root_id = ids[0]
root = rc[root_id]

view = rc[:]

# run bintree.py script defining bintree functions, etc.
exec(compile(open('bintree.py').read(), 'bintree.py', 'exec'))

# generate binary tree of parents
btree = bintree(ids)

print("setting up binary tree interconnect:")
print_bintree(btree)
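The following standalone sketch (pure Python, independent of bintree.py, and purely illustrative) shows why the tree-based reduction needs a commutative and associative reducer: it combines the same values in a tree order and in a flat left-to-right order, which agree for addition but not for a non-associative operation such as subtraction.

from functools import reduce
import operator

values = [1, 2, 3, 4]

# one order in which a binary tree might combine partial results
tree_order = operator.add(operator.add(1, 2), operator.add(3, 4))
# the flat left-to-right order a naive gather/reduce would use
flat_order = reduce(operator.add, values)
assert tree_order == flat_order  # addition: both orders agree

# subtraction is not associative, so the two orders disagree
assert (1 - 2) - (3 - 4) != ((1 - 2) - 3) - 4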