Code example #1
def client_proc(computation, coro=None):
    # use RemoteCoroScheduler to schedule/submit coroutines; scheduler must be
    # created before computation is scheduled (next step below)
    rcoro_scheduler = RemoteCoroScheduler(computation)
    # in discoro_client6.py, data is sent to each remote coroutine; here, data
    # is broadcast over channel and remote coroutines subscribe to it
    data_channel = asyncoro.Channel('data_channel')
    # not necessary to register channel in this case, as it is sent to remote
    # coroutines; if they were to 'locate' it, it should be registered
    # data_channel.register()

    trend_coro = asyncoro.Coro(trend_proc)

    rcoro_avg = yield rcoro_scheduler.schedule(rcoro_avg_proc, data_channel,
                                               0.4, trend_coro, 10)
    assert isinstance(rcoro_avg, asyncoro.Coro)
    rcoro_save = yield rcoro_scheduler.schedule(rcoro_save_proc, data_channel)
    assert isinstance(rcoro_save, asyncoro.Coro)

    # make sure both remote coroutines have subscribed to channel ('deliver'
    # should return 2 if they both are)
    assert (yield data_channel.deliver('start', n=2)) == 2

    # if data is sent frequently (say, many times a second), enable
    # streaming data to remote peer; this is more efficient as
    # connections are kept open (so the cost of opening and closing
    # connections is avoided), but keeping too many connections open
    # consumes system resources
    yield asyncoro.AsynCoro.instance().peer(rcoro_avg.location,
                                            stream_send=True)
    yield asyncoro.AsynCoro.instance().peer(rcoro_save.location,
                                            stream_send=True)

    # send 1000 items of random data to remote coroutines
    for i in range(1000):
        n = random.uniform(-1, 1)
        item = (i, n)
        # data can be sent to remote coroutines either with 'send' or
        # 'deliver'; 'send' is more efficient but no guarantee data
        # has been sent successfully whereas 'deliver' indicates
        # errors right away
        data_channel.send(item)
        yield coro.sleep(0.02)
    item = (i, None)
    data_channel.send(item)

    yield rcoro_scheduler.finish(close=True)
    data_channel.close()
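
The remote coroutines used above (rcoro_avg_proc, rcoro_save_proc) and the local trend_proc are defined elsewhere in the example and not listed here. As a rough illustration of the receiving side (not the original code), a channel subscriber that ignores the initial 'start' message and treats (i, None) as end of data might look like:

def rcoro_save_proc(channel, coro=None):
    # sketch only: subscribe to the broadcast channel so items sent with
    # 'send'/'deliver' on it are queued for this coroutine
    yield channel.subscribe(coro)
    with open('rcoro_save.dat', 'w') as fd:
        while True:
            msg = yield coro.receive()
            if msg == 'start':
                # the initial 'deliver' is only used to count subscribers
                continue
            i, n = msg
            if n is None:
                # (i, None) marks end of data
                break
            fd.write('%s: %s\n' % (i, n))
    yield channel.unsubscribe(coro)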
Code example #2
def client_proc(computation, njobs, coro=None):
    # use RemoteCoroScheduler to start coroutines at servers (should be done
    # before scheduling computation)
    rcoro_scheduler = RemoteCoroScheduler(computation)
    # send 5 requests to remote process (compute_coro)
    def send_requests(rcoro, coro=None):
        # first send this local coroutine (to whom rcoro sends result)
        rcoro.send(coro)
        for i in range(5):
            # even if the recipient doesn't use "yield" (e.g., it is running a
            # long computation, or a thread-blocking function such as
            # 'time.sleep' as in this case), the message is accepted by another
            # scheduler (_ReactAsynCoro_) at the receiver and put in the
            # recipient's message queue
            rcoro.send(random.uniform(10, 20))
            # assume delay in input availability
            yield coro.sleep(random.uniform(2, 5))
        # end of input is indicated with None
        rcoro.send(None)
        result = yield coro.receive() # get result
        print('    %s computed result: %.4f' % (rcoro.location, result))

    for i in range(njobs):
        rcoro = yield rcoro_scheduler.schedule(compute_coro)
        if isinstance(rcoro, asyncoro.Coro):
            print('  job %d processed by %s' % (i, rcoro.location))
            asyncoro.Coro(send_requests, rcoro)

    yield rcoro_scheduler.finish(close=True)
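
compute_coro itself is not shown here. A minimal sketch that follows the protocol used by send_requests above (first message is the client coroutine to reply to, subsequent messages are numbers, None marks end of input, and a thread-blocking call such as time.sleep is tolerated) could be:

def compute_coro(coro=None):
    import time
    client = yield coro.receive()   # first message: coroutine to send result to
    result = 0.0
    while True:
        n = yield coro.receive()
        if n is None:               # end of input
            break
        time.sleep(n / 10.0)        # stand-in for thread-blocking computation
        result += n
    client.send(result)             # reply to the client coroutine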
Code example #3
def client_proc(computation, njobs, coro=None):
    # use RemoteCoroScheduler to schedule/submit coroutines; scheduler must be
    # created before computation is scheduled (next step below)
    rcoro_scheduler = RemoteCoroScheduler(computation)

    # create a separate coroutine to receive results, so they can be processed
    # as soon as received
    def recv_results(coro=None):
        for i in range(njobs):
            msg = yield coro.receive()
            print('    result for job %d: %s' % (i, msg))

    # remote coroutines send replies as messages to this coro
    results_coro = asyncoro.Coro(recv_results)

    # submit njobs; each job will be executed by one discoro server
    for i in range(njobs):
        cobj = C(i)
        cobj.n = random.uniform(5, 10)
        # as noted in 'discoro_client1.py', 'schedule' method is used to run
        # jobs sequentially; use 'submit' to run multiple jobs on one server
        # concurrently
        print('  request %d: %s' % (i, cobj.n))
        rcoro = yield rcoro_scheduler.schedule(compute, cobj, results_coro)
        if not isinstance(rcoro, asyncoro.Coro):
            print('failed to create rcoro %s: %s' % (i, rcoro))

    # wait for all results and close computation
    yield rcoro_scheduler.finish(close=True)
Code example #4
File: discomp2.py Project: sunmoonone/asyncoro
def client_proc(computation, coro=None):

    def status_proc(coro=None):
        coro.set_daemon()
        while True:
            msg = yield coro.receive()
            # send message to RemoteCoroScheduler's status_proc:
            job_scheduler.status_coro.send(msg)
            # and to httpd's status_coro:
            httpd.status_coro.send(msg)
            if isinstance(msg, asyncoro.MonitorException):
                if msg.args[1][0] == StopIteration:
                    print('result from %s: %s' % (msg.args[0].location, msg.args[1][1]))
                else:
                    # if computation is reentrant, resubmit this job
                    # (keep track of submitted rcoro, args and kwargs)
                    print('%s failed: %s' % (msg.args[0], msg.args[1][1]))

    job_scheduler = RemoteCoroScheduler(computation)
    computation.status_coro = asyncoro.Coro(status_proc)

    if (yield computation.schedule()):
        raise Exception('schedule failed')

    # submit jobs
    for i in range(3):
        rcoro = yield job_scheduler.schedule(compute, random.uniform(10, 20))

    # wait for all jobs to be done and close computation
    yield job_scheduler.finish(close=True)
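
The remote compute is not listed here. For the MonitorException handling in status_proc above to print a meaningful result, compute would end with a StopIteration value; a sketch (not the original code) could be:

def compute(n, coro=None):
    yield coro.sleep(n)        # stand-in for real work
    raise StopIteration(n)     # exit value; shows up as msg.args[1][1] above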
Code example #5
def client_proc(computation, data_file, njobs, coro=None):
    proc_setup_coros = set()  # keep track of proc_setup coroutines so the servers used can be cleaned up when done

    # coroutine receives status messages from rcoro_scheduler. If the status is
    # ServerInitialized, send the file to server and wait for initialization
    def status_proc(status, info, coro=None):
        if status != discoro.Scheduler.ServerInitialized:
            raise StopIteration(0)
        if (yield asyncoro.AsynCoro().send_file(info, data_file, timeout=10)) < 0:
            print('Could not send data file "%s" to %s' % (data_file, info))
            raise StopIteration
        rcoro = yield rcoro_scheduler.submit_at(info, proc_setup, data_file, coro)
        if isinstance(rcoro, asyncoro.Coro):
            msg = yield coro.receive()
            if msg == "ready":
                proc_setup_coros.add(rcoro)
                raise StopIteration(0)  # success indicated with 0
            else:
                raise StopIteration(-1)  # scheduler won't use this server
        else:
            print("Setup of %s failed" % where)
            raise StopIteration(-1)  # scheduler won't use this server

    rcoro_scheduler = RemoteCoroScheduler(computation, status_proc)

    if (yield computation.schedule()):
        raise Exception("Failed to schedule computation")

    # remote coroutines send results to this coroutine
    def results_proc(coro=None):
        done = 0
        while done < njobs:
            msg = yield coro.receive()
            if isinstance(msg, tuple) and len(msg) == 2:
                print("%s checksum: %s" % (msg[0], msg[1]))
                done += 1

    results_coro = asyncoro.Coro(results_proc)

    algs = ["md5", "sha1", "sha224", "sha256", "sha384", "sha512"]
    submitted = 0
    while submitted < njobs:
        alg = algs[submitted % len(algs)]
        rcoro = yield rcoro_scheduler.schedule(rcoro_proc, alg, random.uniform(1, 5), results_coro)
        if isinstance(rcoro, asyncoro.Coro):
            submitted += 1

    # wait for results to be received
    yield results_coro.finish()
    # cleanup processes
    for proc_setup_coro in proc_setup_coros:
        yield proc_setup_coro.deliver("cleanup", timeout=5)
    yield rcoro_scheduler.finish(close=True)
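
proc_setup and rcoro_proc are defined elsewhere in the example. A plausible sketch of proc_setup, assuming it loads the transferred data file into a global used by rcoro_proc and releases it on 'cleanup', is:

def proc_setup(data_file, client, coro=None):
    global data                 # shared with rcoro_proc running on this server
    with open(data_file, 'rb') as fd:
        data = fd.read()
    client.send('ready')        # tell status_proc that setup succeeded
    msg = yield coro.receive()  # wait for 'cleanup' delivered by client_proc
    if msg == 'cleanup':
        data = None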
Code example #6
def client_proc(computation, coro=None):
    # use RemoteCoroScheduler to schedule/submit coroutines; scheduler must be
    # created before computation is scheduled (next step below)
    rcoro_scheduler = RemoteCoroScheduler(computation)
    trend_coro = asyncoro.Coro(trend_proc)

    rcoro_avg = yield rcoro_scheduler.schedule(rcoro_avg_proc, 0.4, trend_coro,
                                               10)
    assert isinstance(rcoro_avg, asyncoro.Coro)
    rcoro_save = yield rcoro_scheduler.schedule(rcoro_save_proc)
    assert isinstance(rcoro_save, asyncoro.Coro)

    # if data is sent frequently (say, many times a second), enable
    # streaming data to remote peer; this is more efficient as
    # connections are kept open (so the cost of opening and closing
    # connections is avoided), but keeping too many connections open
    # consumes system resources
    yield asyncoro.AsynCoro.instance().peer(rcoro_avg.location,
                                            stream_send=True)
    yield asyncoro.AsynCoro.instance().peer(rcoro_save.location,
                                            stream_send=True)

    # send 1000 items of random data to remote coroutines
    for i in range(1000):
        n = random.uniform(-1, 1)
        item = (i, n)
        # data can be sent to remote coroutines either with 'send' or
        # 'deliver'; 'send' is more efficient but no guarantee data
        # has been sent successfully whereas 'deliver' indicates
        # errors right away; alternately, messages can be sent with a
        # channel, which is more convenient if there are multiple
        # (unknown) recipients
        rcoro_avg.send(item)
        rcoro_save.send(item)
        yield coro.sleep(0.01)
    item = (i, None)
    rcoro_avg.send(item)
    rcoro_save.send(item)

    yield rcoro_scheduler.finish(close=True)
Code example #7
File: discoro_client4.py Project: vsajip/asyncoro
def submit_jobs_proc(computation, njobs, coro=None):
    # use RemoteCoroScheduler to schedule/submit coroutines; scheduler must be
    # created before computation is scheduled (next step below)
    rcoro_scheduler = RemoteCoroScheduler(computation)

    for i in range(njobs):
        # create remote coroutine
        rcoro = yield rcoro_scheduler.schedule(rcoro_proc)
        if isinstance(rcoro, asyncoro.Coro):
            # create local coroutine to send input file and data to rcoro
            asyncoro.Coro(client_proc, i, rcoro)

    yield rcoro_scheduler.finish(close=True)
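
The per-job client_proc created in the loop is not shown here. A rough sketch, assuming each job has a local input file that is transferred with send_file (the same call as in code example #5) to the server where rcoro runs, could be (file name and message format are illustrative):

def client_proc(i, rcoro, coro=None):
    data_file = 'input%s.dat' % i   # hypothetical per-job input file
    # transfer the file to the server running rcoro
    if (yield asyncoro.AsynCoro().send_file(rcoro.location, data_file,
                                            timeout=10)) < 0:
        print('Could not send "%s" to %s' % (data_file, rcoro.location))
        raise StopIteration(-1)
    # tell rcoro which file to process and where to send the result
    rcoro.send((data_file, coro))
    result = yield coro.receive()
    print('  job %d result: %s' % (i, result))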
Code example #8
File: discoro_client2.py Project: vsajip/asyncoro
def client_proc(computation, njobs, coro=None):
    # create rcoro scheduler; this replaces computation's current status_coro (in
    # this case httpd's status_coro) with a coro that chains messages
    rcoro_scheduler = RemoteCoroScheduler(computation)

    # submit jobs
    for i in range(njobs):
        rcoro = yield rcoro_scheduler.schedule(compute, random.uniform(5, 10))
        if isinstance(rcoro, asyncoro.Coro):
            print('  job %s processed by %s' % (i, rcoro.location))
        else:
            print('rcoro %s failed: %s' % (i, rcoro))

    # wait for all jobs to be done and close computation
    yield rcoro_scheduler.finish(close=True)
Code example #9
def client_proc(computation, njobs, coro=None):
    # use RemoteCoroScheduler to schedule/submit coroutines; scheduler must be
    # created before computation is scheduled (next step below)
    rcoro_scheduler = RemoteCoroScheduler(computation)
    # schedule computation (if scheduler is shared, this waits until
    # prior computations are finished)
    if (yield computation.schedule()):
        raise Exception('Failed to schedule computation')

    # create njobs remote coroutines
    for n in range(njobs):
        rcoro = yield rcoro_scheduler.schedule(rcoro_proc, random.uniform(5, 10))

    # scheduler will wait until all remote coroutines finish
    yield rcoro_scheduler.finish(close=True)
Code example #10
File: discomp3.py Project: sunmoonone/asyncoro
def client_proc(computation, njobs, coro=None):
    proc_setup_coros = set() # keep track of proc_setup coroutines so the servers used can be cleaned up when done

    # this coroutine receives status messages from rcoro_scheduler; if the status
    # is ServerInitialized, run proc_setup at that server and wait until it is ready
    def status_proc(status, info, coro=None):
        if status != discoro.Scheduler.ServerInitialized:
            raise StopIteration(0)
        rcoro = yield rcoro_scheduler.submit_at(info, proc_setup, coro)
        if isinstance(rcoro, asyncoro.Coro):
            msg = yield coro.receive()
            if msg == 'ready':
                proc_setup_coros.add(rcoro)
                raise StopIteration(0) # success indicated with 0
            else:
                raise StopIteration(-1) # scheduler won't use this server
        else:
            print('Setup of %s failed' % info)
            raise StopIteration(-1) # scheduler won't use this server

    # use RemoteCoroScheduler to start coroutines at servers
    rcoro_scheduler = RemoteCoroScheduler(computation, status_proc)
    if (yield computation.schedule()):
        raise Exception('Failed to schedule computation')

    # remote coroutines send results to this coroutine
    def results_proc(coro=None):
        done = 0
        while done < njobs:
            msg = yield coro.receive()
            if isinstance(msg, tuple) and len(msg) == 2:
                print('result from %s: %s' % (msg[0], msg[1]))
                done += 1
    results_coro = asyncoro.Coro(results_proc)

    submitted = 0
    while submitted < njobs:
        rcoro = yield rcoro_scheduler.schedule(compute_proc, random.uniform(5, 10), results_coro)
        if isinstance(rcoro, asyncoro.Coro):
            submitted += 1

    # wait for results to be received
    yield results_coro.finish()
    # cleanup processes
    for proc_setup_coro in proc_setup_coros:
        yield proc_setup_coro.deliver('cleanup', timeout=5)
    yield rcoro_scheduler.finish(close=True)
Code example #11
File: discomp1.py Project: sunmoonone/asyncoro
def client_proc(computation, coro=None):

    def results_proc(coro=None):
        coro.set_daemon()
        while True:
            result = yield coro.receive()
            print('result: %s' % result)

    job_scheduler = RemoteCoroScheduler(computation)
    results_coro = asyncoro.Coro(results_proc)

    if (yield computation.schedule()):
        raise Exception('schedule failed')

    # submit jobs
    for i in range(5):
        rcoro = yield job_scheduler.schedule(compute, random.uniform(3, 10), results_coro)

    # wait for all results
    yield job_scheduler.finish()
    yield computation.close()