def visitLink(self, url):
    if url not in self.allLinks:
        self.allLinks.append(url)
        if url.startswith(self.site):
            print '    ', url
            # submit a crawl task for this in-site link; tc.run returns a task id
            self.linksWorking[url] = self.tc.run(
                kernel.Task('links = fetchAndParse(url)',
                            resultNames=['links'],
                            setupNS={'url': url}))
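
The crawl tasks submitted above run asynchronously. A minimal companion sketch (not part of the original snippet) that drains linksWorking and recurses into newly discovered links, assuming the same tc TaskController:

def collectLinks(self):
    # hypothetical companion method to visitLink above
    while self.linksWorking:
        url, taskId = self.linksWorking.popitem()
        result = self.tc.getTaskResult(taskId, block=True)
        for link in result.ns.links:
            self.visitLink(link)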
Example #2
    def submit(self, tasks):
        drs = []
        for task in tasks:
            # create an ipython1 task from the pebl task
            ipy1task = ipy1kernel.Task(
                "from pebl.pebl_script import runtask_picklestr; result = runtask_picklestr(task)",
                resultNames=['result'],
                setupNS={'task': cPickle.dumps(task)})

            task.ipy1_taskid = self.tc.run(ipy1task)
            drs.append(IPython1DeferredResult(self.tc, task.ipy1_taskid))
        return drs
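
IPython1DeferredResult is defined elsewhere in pebl; a rough sketch of what it plausibly does, given the TaskController API used throughout these examples:

class IPython1DeferredResult(object):
    # hypothetical sketch; the real pebl class may differ
    def __init__(self, tc, taskid):
        self.tc = tc
        self.taskid = taskid

    def getResult(self):
        # block until the task finishes, then return the 'result' name
        tr = self.tc.getTaskResult(self.taskid, block=True)
        return tr.ns.result

Example #3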
def main():
    parser = OptionParser()
    parser.set_defaults(n=100)
    parser.set_defaults(tmin=1)
    parser.set_defaults(tmax=60)
    parser.set_defaults(controller='localhost')
    parser.set_defaults(meport=10105)
    parser.set_defaults(tport=10113)
    
    parser.add_option("-n", type='int', dest='n',
        help='the number of tasks to run')
    parser.add_option("-t", type='float', dest='tmin', 
        help='the minimum task length in seconds')
    parser.add_option("-T", type='float', dest='tmax',
        help='the maximum task length in seconds')
    parser.add_option("-c", type='string', dest='controller',
        help='the address of the controller')
    parser.add_option("-p", type='int', dest='meport',
        help="the port on which the controller listens for the MultiEngine/RemoteController client")
    parser.add_option("-P", type='int', dest='tport',
        help="the port on which the controller listens for the TaskController client")
    
    (opts, args) = parser.parse_args()
    assert opts.tmax >= opts.tmin, "tmax must not be smaller than tmin"
    
    rc = kernel.RemoteController((opts.controller, opts.meport))
    tc = kernel.TaskController((opts.controller, opts.tport))
    
    rc.block = True
    nengines = len(rc.getIDs())
    rc.executeAll('from IPython.genutils import time')

    # each task sleeps for a uniformly random duration in [tmin, tmax]
    times = [random.random() * (opts.tmax - opts.tmin) + opts.tmin
             for i in range(opts.n)]
    tasks = [kernel.Task("time.sleep(%f)" % t) for t in times]
    stime = sum(times)
    
    print "executing %i tasks, totalling %.1f secs on %i engines"%(opts.n, stime, nengines)

    time.sleep(1)
    start = time.time()
    taskIDs = [tc.run(t) for t in tasks]
    tc.barrier(taskIDs)
    stop = time.time()

    ptime = stop-start
    scale = stime/ptime
    
    print "executed %.1f secs in %.1f secs"%(stime, ptime)
    print "%.3fx parallel performance on %i engines"%(scale, nengines)
    print "%.1f%% of theoretical max"%(100*scale/nengines)
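
The script presumably ends with the usual entry-point guard; a sketch, with a hypothetical invocation shown in the comment:

if __name__ == '__main__':
    # e.g. (script name and controller address are assumptions):
    #   python task_profiler.py -n 100 -t 0.5 -T 2.0 -c localhost
    main()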
Example #4
def run(cmd, NS_lst, resultNames=None, cmd_all='', blocked=True, clusterno=0):
    """Run one task per setup namespace in NS_lst on the given cluster."""

    resultNames = resultNames or []  # avoid a mutable default argument

    cl = start(clusterno)

    rc = cl.getRemoteControllerClient()
    rc.resetAll()

    if cmd_all:
        rc.executeAll(cmd_all)

    # create one task per setup namespace
    task_lst = [kernel.Task(cmd, resultNames=resultNames, setupNS=ns)
                for ns in NS_lst]

    tc = cl.getTaskControllerClient()
    t_start = time.time()
    tids = [tc.run(tk) for tk in task_lst]

    logger.info('Submitted %d tasks to cluster %d' % (len(tids), clusterno))

    if blocked:
        tc.barrier(tids)

        cl.checkError(tids)

        res = []
        for tid in tids:
            res.append(tc.getTaskResult(tid))

        t_end = time.time()
        logger.info('done, duration: %1.1f sec' % (t_end - t_start))

        return res
    else:
        return (cl, tids)
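
A hypothetical call of run(), distributing one setup namespace per task (the values are illustrative only):

# square eight numbers in parallel, one task per value
results = run('y = x ** 2', [{'x': i} for i in range(8)],
              resultNames=['y'])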
Example #5
def runDistributedSimulations(tc, netgenstr, params, indist, nS, tstr, Tsim=-1):
    import ipython1.kernel.api as kernel
    print 'running', nS, 'simulations with input distribution', indist.__class__.__name__

    stimuli = []
    states = []
    taskIds = dict()
    sys.stdout.write("Generating tasks:   0%")
    for i in range(nS):
        cmd="""
from area import Area
from area_spatial import AreaSpatial
from learning import response2states as r2s

stimulus = indist.generate(i)
if Tsim==-1:
    Tmax = stimulus.Tsim
else:
    Tmax = Tsim
net = eval(netgenstr)
net.generate()
response,aresp = net.simulate(stimulus=stimulus, Tsim=Tmax)
states = r2s.response2states([response], sampling=tstr)[0]
"""
        taskIds[i] = tc.run(kernel.Task(cmd, resultNames=['stimulus','states'], clearBefore=True,
            setupNS={'i':i,'netgenstr':netgenstr,'params':params,'indist':indist,'tstr':tstr,'Tsim':Tsim}))
        sys.stdout.write("\b\b\b\b%3d%%" % ((i+1)*100/nS))
        sys.stdout.flush()
    sys.stdout.write("\b\b\b\b100%\n")
    #tc.barrier(taskIds.values())
    for i in range(nS):
        sys.stdout.write('.')
        result = tc.getTaskResult(taskIds[i], block=True)
        stimuli.append(result.ns.stimulus)
        states.append(result.ns.states)
        if (i+1)%50==0 or i==nS-1:
            sys.stdout.write(" %d/%d\n" % (i+1,nS))
        sys.stdout.flush()
    return (stimuli, states)
Example #6
"""
A Distributed Hello world
Ken Kinder <*****@*****.**>
"""
import ipython1.kernel.api as kernel
import ipython1.kernel.multienginexmlrpc
import ipython1.kernel.taskxmlrpc

rc = kernel.TaskController(('127.0.0.1', 10113))
ipc = kernel.RemoteController(('127.0.0.1', 10105))
assert isinstance(rc, ipython1.kernel.taskxmlrpc.XMLRPCInteractiveTaskClient)
assert isinstance(
    ipc, ipython1.kernel.multienginexmlrpc.XMLRPCInteractiveMultiEngineClient)

ipc.execute('all', 'import time')
helloTaskId = rc.run(
    kernel.Task('time.sleep(3) ; word = "Hello,"', resultNames=['word']))
worldTaskId = rc.run(
    kernel.Task('time.sleep(3) ; word = "World!"', resultNames=['word']))

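# getTaskResult here appears to return a (taskid, results-dict) pair,
# hence the [1] indexing below.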
print rc.getTaskResult(helloTaskId)[1]['word'], \
    rc.getTaskResult(worldTaskId)[1]['word']
Example #7
task_string = """\
op = MCOptionPricer(S,K,sigma,r,days,paths)
op.run()
vp, ap, vc, ac = op.vanilla_put, op.asian_put, op.vanilla_call, op.asian_call
"""

# Create arrays of strike prices and volatilities
K_vals = N.arange(90.0, 110.0, 2.0)
sigma_vals = N.arange(0.02, 0.3, 0.02)

# Submit tasks
taskIDs = []
for K in K_vals:
    for sigma in sigma_vals:
        t = kernel.Task(task_string,
                        setupNS=dict(sigma=sigma, K=K),
                        resultNames=['vp', 'ap', 'vc', 'ac', 'sigma', 'K'])
        taskIDs.append(tc.run(t))

print "Submitted tasks: ", taskIDs

# Block until tasks are completed
tc.barrier(taskIDs)

# Get the results
results = [tc.getTaskResult(tid) for tid in taskIDs]

# Assemble the result
vc = N.empty(K_vals.shape[0] * sigma_vals.shape[0], dtype='float64')
vp = N.empty(K_vals.shape[0] * sigma_vals.shape[0], dtype='float64')
ac = N.empty(K_vals.shape[0] * sigma_vals.shape[0], dtype='float64')
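
The snippet is cut off here; a plausible completion of the assembly step, assuming the (taskid, results-dict) return convention used in the hello-world example above:

ap = N.empty(K_vals.shape[0] * sigma_vals.shape[0], dtype='float64')
for i, tr in enumerate(results):
    ns = tr[1]  # results-dict of the i-th task
    vp[i], ap[i], vc[i], ac[i] = ns['vp'], ns['ap'], ns['vc'], ns['ac']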
Example #8
    def generate(self, argTstim=-1):
        '''generates a Stimulus object'''

        if self.Tstim > -1:
            Tstim = self.Tstim
        else:
            Tstim = argTstim

        if Tstim <= 0.0:
            error('length of stimulus undefined!')

        # channel number in first dim and time in second dim of f
        if numpy.isscalar(self.f):
            nTimeSlots = 1
            nChannels = 1
        elif self.f.ndim < 2:
            nTimeSlots = 1
            nChannels = self.f.shape[0]
        else:
            (nChannels, nTimeSlots) = self.f.shape

        dt = Tstim / nTimeSlots

        reg = self.gamma_r
        regsw = self.gamma_freq_switch

        stimulus = Stimulus(Tstim)  # use the resolved Tstim, not the possibly-unset self.Tstim

        # get spikes for each channel separately (different rate per channel)
        if self.cluster is None:
            for i in range(nChannels):
                r = self.f[i]
                spikes = rate2spikes_sgp(r, dt, Tstim, numpy.atleast_1d(reg),
                                         numpy.atleast_1d(regsw))
                stimulus.appendChannel(Channel(spikes))
        else:
            import ipython1.kernel.api as kernel

            rc = self.cluster.getRemoteControllerClient()
            tc = self.cluster.getTaskControllerClient()
            nEngines = len(rc.getIDs())
            tids = []

            rc.resetAll()
            rc.executeAll('import inputs.gammaprocessrate as gpr\n')
            #rc.pushAll(dt=dt, Tstim=Tstim, reg=reg, regsw=regsw)

            cmd = '''
spikes = []
for r in rlist:
    spikes.append(gpr.rate2spikes_sgp(r, dt, Tstim, reg, regsw))
'''

            nTasks = nEngines
            cpe = int(numpy.ceil(float(nChannels) / nTasks))

            if cpe < self.mincpe:
                cpe = self.mincpe
                nTasks = int(numpy.ceil(float(nChannels) / cpe))

            for i in range(nTasks):
                endidx = i * cpe + cpe
                if endidx < nChannels:
                    r = self.f[i * cpe:endidx]
                else:
                    r = self.f[i * cpe:]
                setupNS = dict(rlist=r,
                               dt=dt,
                               Tstim=Tstim,
                               reg=reg,
                               regsw=regsw)
                tids.append(
                    tc.run(
                        kernel.Task(cmd,
                                    resultNames=['spikes'],
                                    setupNS=setupNS)))

            logger.info(' gammaprocessrate(): %d tasks started' % len(tids))
            tc.barrier(tids)
            for tid in range(len(tids)):
                res = tc.getTaskResult(tids[tid])
                for sp in res.results['spikes']:
                    stimulus.appendChannel(Channel(sp))

        stimulus.dt = dt

        return stimulus
Example #9
rc.runAll('mcpricer.py')

# Send the variables that won't change
rc.pushAll(S=100.0, K=100.0, sigma=0.25, r=0.05, days=260, paths=10000)

task_string = """op = MCOptionPricer(S,K,sigma,r,days,paths)
op.run()
vp, ap, vc, ac = op.vanilla_put, op.asian_put, op.vanilla_call, op.asian_call
"""

sigmas = N.arange(0.01, 0.3, 0.01)
print sigmas
taskIDs = []
for s in sigmas:
    t = kernel.Task(task_string,
                    setupNS={'sigma': s},
                    resultNames=['vp', 'ap', 'vc', 'ac', 'sigma'])
    taskIDs.append(tc.run(t))

tc.barrier(taskIDs)

for tid in taskIDs:
    print tc.getTaskResult(tid)

vp = N.zeros(len(sigmas), dtype='float64')
ap = N.zeros(len(sigmas), dtype='float64')
vc = N.zeros(len(sigmas), dtype='float64')
ac = N.zeros(len(sigmas), dtype='float64')
for i, tid in enumerate(taskIDs):
    tr = tc.getTaskResult(tid)[1]
    vp[i] = tr['vp']
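    # plausible continuation; the original snippet is truncated here
    ap[i] = tr['ap']
    vc[i] = tr['vc']
    ac[i] = tr['ac']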
Example #10
import ipython1.kernel.api as kernel

tc = kernel.TaskController(('127.0.0.1', 10113))
rc = kernel.RemoteController(('127.0.0.1', 10105))

rc.pushAll(d=30)

cmd1 = """\
a = 5
b = 10*d
"""

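# clearBefore/clearAfter presumably control whether the engine namespace is
# cleared before and after the task runs; note that 'c' is requested below
# but never assigned by cmd1.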
t1 = kernel.Task(cmd1,
                 clearBefore=False,
                 clearAfter=True,
                 resultNames=['a', 'b', 'c'])
tid1 = tc.run(t1)
tr1 = tc.getTaskResult(tid1, block=True)
tr1.raiseException()
print "a, b: ", tr1.ns.a, tr1.ns.b
Example #11
def connected(perspective):
    client = PBTaskClient(perspective)
    client.run(kernel.Task('x = "hello"', resultNames=['x'])).addCallbacks(success, failure)
    client.run(kernel.Task('raise ValueError, "pants"', resultNames=['x'])).addCallbacks(success, failure)
    print "connected."
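
The success and failure callbacks are not shown in this snippet; minimal hypothetical implementations:

def success(result):
    print "task succeeded:", result

def failure(reason):
    print "task failed:", reason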
Example #12
    def __processStimulus(self, stim, K):
        """ core routine for processing"""

        nTimeSteps = stim.shape[2]

        out = numpy.zeros((self.dims[1], self.dims[0], nTimeSteps))

        if numpy.sum(stim.flatten()):  # skip when the stimulus is all zeros (spontaneous activity only)
            if self.cluster is None:
                for t in range(nTimeSteps):
                    s = stim[:, :, t]

                    if s.shape[0] != self.dims[0] or s.shape[1] != self.dims[1]:
                        im = scipy.misc.toimage(s, mode='F')
                        im = im.resize(self.dims)
                        s = scipy.misc.fromimage(im)

                    out[:, :, t] = convolve2dsame(s, K)

            else:  # use the cluster

                import ipython1.kernel.api as kernel
                rc = self.cluster.getRemoteControllerClient()
                tc = self.cluster.getTaskControllerClient()
                nengines = len(rc.getIDs())
                tids = []
                rc.resetAll()

                smallamount = self.dims[0] < 40
                if smallamount:
                    # small stimulus: ship the arrays directly in each task's namespace
                    rc.executeAll('import scipy\nimport inputs.lgn as l\n')
                    rc.pushAll(k=K, dims=self.dims)

                    cmd = '''
if s.shape[0] != dims[0] or s.shape[1] != dims[1]:
    im = scipy.misc.toimage(s, mode='F')
    im = im.resize(dims)
    s = scipy.misc.fromimage(im)

o = l.convolve2dsame(s, k)
'''

                    for t in range(nTimeSteps):
                        setupNS = dict(s=stim[:, :, t])
                        tids.append(
                            tc.run(
                                kernel.Task(cmd,
                                            resultNames=['o'],
                                            setupNS=setupNS)))

                    logger.info('%d tasks started' % len(tids))
                    tc.barrier(tids)
                    for tid in range(len(tids)):
                        res = tc.getTaskResult(tids[tid])
                        out[:, :, tid] = res.results['o']

                else:
                    # large stimulus: exceeds the cluster submit size, so exchange data via files on a shared filesystem
                    tmpfname = os.environ['HOME'] + '/__tmp_retina_processing'
                    rc.executeAll(
                        'import scipy\nimport os\nimport inputs.lgn as l\nimport scipy.io'
                    )
                    rc.pushAll(dims=self.dims, s=[], k=[], o=[], v=[])

                    cmd = '''
v = scipy.io.loadmat(fname)
s = v['s']
k = v['k']
os.remove(fname + '.mat')

if s.shape[0] != dims[0] or s.shape[1] != dims[1]:
    im = scipy.misc.toimage(s, mode='F')
    im = im.resize(dims)
    s = scipy.misc.fromimage(im)

o = l.convolve2dsame(s, k)
scipy.io.savemat(fname, {'o': o})
'''

                    for t in range(nTimeSteps):
                        fname = tmpfname + str(t)
                        scipy.io.savemat(fname, {'s': stim[:, :, t], 'k': K})
                        setupNS = dict(fname=fname)
                        tids.append(
                            tc.run(
                                kernel.Task(cmd,
                                            resultNames=[],
                                            setupNS=setupNS)))

                    logger.info('%d tasks started' % len(tids))

                    tc.barrier(tids)
                    os.system('ls -l >/dev/null')  # force a directory listing, presumably to refresh NFS attribute caches

                    for tid in range(len(tids)):
                        res = tc.getTaskResult(tids[tid])

                        # if the task failed remotely, print and re-raise the failure
                        if res.failure is not None:
                            res.failure.printDetailedTraceback()
                            res.failure.raiseException()

                        fname = tmpfname + str(tid)
                        out[:, :, tid] = scipy.io.loadmat(fname)['o']
                        os.remove(fname + '.mat')

        return out