Example 1
def client_proc(computation, n, coro=None):
    # pair EC2 node with this client with:
    yield asyncoro.AsynCoro().peer(asyncoro.Location('54.204.242.185', 51347))
    # if multiple nodes are used, 'broadcast' option can be used to pair with
    # all nodes with just one statement as:
    # yield asyncoro.AsynCoro().peer(asyncoro.Location('54.204.242.185', 51347), broadcast=True)

    # coroutine to call RemoteCoroScheduler's "execute" method
    def exec_proc(gen, *args, **kwargs):
        # execute computation; result of computation is result of
        # 'yield' which is also result of this coroutine (obtained
        # with 'finish' method below)
        yield job_scheduler.execute(gen, *args, **kwargs)
        # results can be processed here (as they become available), or
        # awaited in sequence as done below

    # Use RemoteCoroScheduler to run at most one coroutine at a server process
    # This should be created before scheduling computation
    job_scheduler = RemoteCoroScheduler(computation)

    if (yield computation.schedule()):
        raise Exception('schedule failed')

    # execute n jobs (coroutines) and get their results. Note that
    # number of jobs created can be more than number of server
    # processes available; the scheduler will use as many processes as
    # necessary/available, running one job at a server process
    jobs = [asyncoro.Coro(exec_proc, compute, random.uniform(3, 10)) for _ in range(n)]
    for job in jobs:
        print('result: %s' % (yield job.finish()))

    yield job_scheduler.finish(close=True)
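The 'compute' generator run by these jobs is not shown; below is a minimal sketch, assuming it only simulates work for n seconds. The value raised with StopIteration becomes the remote coroutine's result, obtained with 'finish' above.

def compute(n, coro=None):
    # runs at a discoro server process
    yield coro.sleep(n)
    # StopIteration value is the result of the remote coroutine
    raise StopIteration(n)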
Example 2
def client_proc(computation, njobs, coro=None):
    # use RemoteCoroScheduler to start coroutines at servers (should be done
    # before scheduling computation)
    rcoro_scheduler = RemoteCoroScheduler(computation)
    # send 5 requests to remote process (compute_coro)
    def send_requests(rcoro, coro=None):
        # first send this local coroutine (to which rcoro sends the result)
        rcoro.send(coro)
        for i in range(5):
            # even if the recipient doesn't use "yield" (e.g., when executing a
            # long-running computation, or a thread-blocking function such as
            # 'time.sleep' as in this case), the message is accepted by another
            # scheduler (_ReactAsynCoro_) at the receiver and put in the
            # recipient's message queue
            rcoro.send(random.uniform(10, 20))
            # assume delay in input availability
            yield coro.sleep(random.uniform(2, 5))
        # end of input is indicated with None
        rcoro.send(None)
        result = yield coro.receive() # get result
        print('    %s computed result: %.4f' % (rcoro.location, result))

    for i in range(njobs):
        rcoro = yield rcoro_scheduler.schedule(compute_coro)
        if isinstance(rcoro, asyncoro.Coro):
            print('  job %d processed by %s' % (i, rcoro.location))
            asyncoro.Coro(send_requests, rcoro)

    yield rcoro_scheduler.finish(close=True)
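The 'compute_coro' worker is not shown; here is a plausible sketch under the assumptions implied by the comments above: the first message is the client coroutine, subsequent messages are numbers ending with None, and a thread-blocking 'time.sleep' models a long-running computation.

def compute_coro(coro=None):
    import time
    client = yield coro.receive()  # first message is the client coroutine
    result = 0.0
    while True:
        n = yield coro.receive()
        if n is None:  # end of input
            break
        # thread-blocking call; messages arriving meanwhile are queued by the
        # scheduler at this server
        time.sleep(n)
        result += n
    client.send(result)  # send result back to the client coroutine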
Example 3
def client_proc(computation, n, coro=None):

    # coroutine to (concurrently) execute computations
    def exec_proc(gen, *args, **kwargs):
        # execute computation; result of computation is result of
        # 'yield' which is also result of this coroutine (obtained
        # with 'finish' method below)
        yield job_scheduler.execute(gen, *args, **kwargs)
        # results can be processed here (as they become available), or
        # awaited in sequence as done below

    # Use RemoteCoroScheduler to run at most one coroutine at a server process
    # This should be created before scheduling computation
    job_scheduler = RemoteCoroScheduler(computation)

    if (yield computation.schedule()):
        raise Exception('schedule failed')

    # execute n jobs (coroutines) and get their results. Note that
    # number of jobs created can be more than number of server
    # processes available; the scheduler will use as many processes as
    # necessary/available, running one job at a server process
    jobs = [asyncoro.Coro(exec_proc, compute, random.uniform(3, 10)) for _ in range(n)]
    for job in jobs:
        print('result: %s' % (yield job.finish()))

    yield job_scheduler.finish(close=True)
Example 4
def client_proc(computation, coro=None):

    def status_proc(coro=None):
        coro.set_daemon()
        while True:
            msg = yield coro.receive()
            # send message to RemoteCoroScheduler's status_proc:
            job_scheduler.status_coro.send(msg)
            # and to httpd's status_coro:
            httpd.status_coro.send(msg)
            if isinstance(msg, asyncoro.MonitorException):
                if msg.args[1][0] == StopIteration:
                    print('result from %s: %s' % (msg.args[0].location, msg.args[1][1]))
                else:
                    # if computation is reentrant, resubmit this job
                    # (keep track of submitted rcoro, args and kwargs)
                    print('%s failed: %s' % (msg.args[0], msg.args[1][1]))

    job_scheduler = RemoteCoroScheduler(computation)
    computation.status_coro = asyncoro.Coro(status_proc)

    if (yield computation.schedule()):
        raise Exception('schedule failed')

    # submit jobs
    for i in range(3):
        rcoro = yield job_scheduler.schedule(compute, random.uniform(10, 20))

    # wait for all jobs to be done and close computation
    yield job_scheduler.finish(close=True)
Example 5
def client_proc(computation, n, coro=None):
    # pair EC2 node with this client with:
    yield asyncoro.AsynCoro().peer(asyncoro.Location('54.204.242.185', 51347))

    # if multiple nodes are used, 'broadcast' option can be used to pair with
    # all nodes with just one statement as:
    # yield asyncoro.AsynCoro().peer(asyncoro.Location('54.204.242.185', 51347), broadcast=True)

    # coroutine to call RemoteCoroScheduler's "execute" method
    def exec_proc(gen, *args, **kwargs):
        # execute computation; result of computation is result of
        # 'yield' which is also result of this coroutine (obtained
        # with 'finish' method below)
        yield job_scheduler.execute(gen, *args, **kwargs)
        # results can be processed here (as they become available), or
        # awaited in sequence as done below

    # Use RemoteCoroScheduler to run at most one coroutine at a server process
    # This should be created before scheduling computation
    job_scheduler = RemoteCoroScheduler(computation)

    # execute n jobs (coroutines) and get their results. Note that
    # number of jobs created can be more than number of server
    # processes available; the scheduler will use as many processes as
    # necessary/available, running one job at a server process
    jobs = [
        asyncoro.Coro(exec_proc, compute, random.uniform(3, 10))
        for _ in range(n)
    ]
    for job in jobs:
        print('result: %s' % (yield job.finish()))

    yield job_scheduler.finish(close=True)
Example 6
def client_proc(computation, njobs, coro=None):
    # use RemoteCoroScheduler to schedule/submit coroutines; scheduler must be
    # created before computation is scheduled (next step below)
    rcoro_scheduler = RemoteCoroScheduler(computation)

    # create a separate coroutine to receive results, so they can be processed
    # as soon as received
    def recv_results(coro=None):
        for i in range(njobs):
            msg = yield coro.receive()
            print('    result for job %d: %s' % (i, msg))

    # remote coroutines send replies as messages to this coro
    results_coro = asyncoro.Coro(recv_results)

    # submit njobs; each job will be executed by one discoro server
    for i in range(njobs):
        cobj = C(i)
        cobj.n = random.uniform(5, 10)
        # as noted in 'discoro_client1.py', 'schedule' method is used to run
        # jobs sequentially; use 'submit' to run multiple jobs on one server
        # concurrently
        print('  request %d: %s' % (i, cobj.n))
        rcoro = yield rcoro_scheduler.schedule(compute, cobj, results_coro)
        if not isinstance(rcoro, asyncoro.Coro):
            print('failed to create rcoro %s: %s' % (i, rcoro))

    # wait for all results and close computation
    yield rcoro_scheduler.finish(close=True)
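Neither 'C' nor 'compute' is shown above; a minimal sketch, assuming 'compute' simulates work for 'obj.n' seconds and replies to 'results_coro' (the exact reply format is an assumption):

class C(object):
    # instances are serialized and sent to servers as job arguments
    def __init__(self, i):
        self.i = i
        self.n = None

def compute(obj, client, coro=None):
    yield coro.sleep(obj.n)  # simulate computation
    client.send((obj.i, obj.n))  # reply to the client's results coroutine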
Example 7
def client_proc(computation, data_file, njobs, coro=None):
    proc_setup_coros = set()  # keep track of server processes used, to clean them up when done

    # coroutine receives status messages from rcoro_scheduler. If the status is
    # ServerInitialized, send the file to server and wait for initialization
    def status_proc(status, info, coro=None):
        if status != discoro.Scheduler.ServerInitialized:
            raise StopIteration(0)
        if (yield asyncoro.AsynCoro().send_file(info, data_file, timeout=10)) < 0:
            print('Could not send data file "%s" to %s' % (data_file, info))
            raise StopIteration
        rcoro = yield rcoro_scheduler.submit_at(info, proc_setup, data_file, coro)
        if isinstance(rcoro, asyncoro.Coro):
            msg = yield coro.receive()
            if msg == "ready":
                proc_setup_coros.add(rcoro)
                raise StopIteration(0)  # success indicated with 0
            else:
                raise StopIteration(-1)  # scheduler won't use this server
        else:
            print("Setup of %s failed" % info)
            raise StopIteration(-1)  # scheduler won't use this server

    rcoro_scheduler = RemoteCoroScheduler(computation, status_proc)

    if (yield computation.schedule()):
        raise Exception("Failed to schedule computation")

    # remote coroutines send results to this coroutine
    def results_proc(coro=None):
        done = 0
        while done < njobs:
            msg = yield coro.receive()
            if isinstance(msg, tuple) and len(msg) == 2:
                print("%s checksum: %s" % (msg[0], msg[1]))
                done += 1

    results_coro = asyncoro.Coro(results_proc)

    algs = ["md5", "sha1", "sha224", "sha256", "sha384", "sha512"]
    submitted = 0
    while submitted < njobs:
        alg = algs[submitted % len(algs)]
        rcoro = yield rcoro_scheduler.schedule(rcoro_proc, alg, random.uniform(1, 5), results_coro)
        if isinstance(rcoro, asyncoro.Coro):
            submitted += 1

    # wait for results to be received
    yield results_coro.finish()
    # cleanup processes
    for proc_setup_coro in proc_setup_coros:
        yield proc_setup_coro.deliver("cleanup", timeout=5)
    yield rcoro_scheduler.finish(close=True)
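'proc_setup' and 'rcoro_proc' are defined elsewhere; a sketch of what they might look like, assuming 'proc_setup' loads the transferred file into a global that the checksum jobs at that server reuse (names and details are assumptions):

def proc_setup(data_file, client, coro=None):
    # executed once per server: read the transferred file into a global so
    # all jobs at this server can reuse it
    global data, hashlib
    import hashlib, os
    with open(data_file, 'rb') as fd:
        data = fd.read()
    os.remove(data_file)
    client.send('ready')  # tell status_proc this server is initialized
    msg = yield coro.receive()  # stay alive until client sends 'cleanup'
    del data

def rcoro_proc(alg, n, client, coro=None):
    # 'data' and 'hashlib' are globals set up by proc_setup at this server
    yield coro.sleep(n)  # simulate additional work
    csum = getattr(hashlib, alg)()
    csum.update(data)
    client.send((alg, csum.hexdigest()))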
Example 8
def client_proc(computation, data_file, coro=None):
    used_servers = {} # keep track of which servers to clean up

    # coroutine to (concurrently) execute computations
    def exec_proc(gen, *args, **kwargs):
        # execute computation; result of computation is result of
        # 'yield' which is also result of this coroutine (obtained
        # with 'finish' method below)
        yield job_scheduler.execute(gen, *args, **kwargs)
        # results can be processed here (as they become available), or
        # awaited in sequence as done below

    def status_proc(status, info, coro=None):
        # this coroutine is executed with status (either
        # ServerInitialized or ServerClosed) and location of server
        if status != discoro.Scheduler.ServerInitialized:
            raise StopIteration(0)
        # send data file to server
        if (yield asyncoro.AsynCoro().send_file(info, data_file, timeout=10)) < 0:
            print('Could not send data file "%s" to %s' % (data_file, info))
            raise StopIteration(-1)
        # run 'proc_setup' to read the data file into memory
        if (yield job_scheduler.execute_at(info, proc_setup, data_file)) == 0:
            used_servers[info] = info
            raise StopIteration(0) # indicate server initialized with exit value 0
        raise StopIteration(-1)

    # Use RemoteCoroScheduler to run at most one coroutine at a server process
    # This should be created before scheduling computation
    job_scheduler = RemoteCoroScheduler(computation, status_proc)

    if (yield computation.schedule()):
        raise Exception('schedule failed')

    # execute 10 jobs (coroutines) and get their results. Note that
    # number of jobs created can be more than number of server
    # processes available; the scheduler will use as many processes as
    # necessary/available, running one job at a server process at a time
    algs = ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']
    jobs = [asyncoro.Coro(exec_proc, compute, algs[i % len(algs)], random.uniform(1, 3))
            for i in range(10)]
    for job in jobs:
        result = yield job.finish()
        if isinstance(result, tuple) and len(result) == 2:
            print('%ssum: %s' % (result[0], result[1]))
        else:
            print('rcoro %s failed: %s' % (job, result))

    # run cleanup coroutine at each server that was used
    jobs = [asyncoro.Coro(job_scheduler.execute_at, info, proc_cleanup)
            for info in used_servers.values()]

    yield job_scheduler.finish(close=True)
Example 9
def submit_jobs_proc(computation, njobs, coro=None):
    # use RemoteCoroScheduler to schedule/submit coroutines; scheduler must be
    # created before computation is scheduled (next step below)
    rcoro_scheduler = RemoteCoroScheduler(computation)

    for i in range(njobs):
        # create remote coroutine
        rcoro = yield rcoro_scheduler.schedule(rcoro_proc)
        if isinstance(rcoro, asyncoro.Coro):
            # create local coroutine to send input file and data to rcoro
            asyncoro.Coro(client_proc, i, rcoro)

    yield rcoro_scheduler.finish(close=True)
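The local 'client_proc' spawned for each rcoro is defined elsewhere; the following is a purely hypothetical sketch, assuming each job has an input file to transfer (the file name and the message format are made up for illustration):

def client_proc(i, rcoro, coro=None):
    input_file = 'input%d.dat' % i  # hypothetical per-job input file
    # transfer the file to the server where rcoro runs
    if (yield asyncoro.AsynCoro().send_file(rcoro.location, input_file,
                                            timeout=30)) < 0:
        print('could not send %s to %s' % (input_file, rcoro.location))
        raise StopIteration(-1)
    # tell rcoro which file to process and where to send the result
    rcoro.send({'file': input_file, 'client': coro})
    result = yield coro.receive()
    print('  job %d result: %s' % (i, result))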
Example 10
def client_proc(computation, njobs, coro=None):
    # create rcoro scheduler; this replaces computation's current status_coro
    # (in this case httpd's status_coro) with a coro that chains messages
    rcoro_scheduler = RemoteCoroScheduler(computation)

    # submit jobs
    for i in range(njobs):
        rcoro = yield rcoro_scheduler.schedule(compute, random.uniform(5, 10))
        if isinstance(rcoro, asyncoro.Coro):
            print('  job %s processed by %s' % (i, rcoro.location))
        else:
            print('rcoro %s failed: %s' % (i, rcoro))

    # wait for all jobs to be done and close computation
    yield rcoro_scheduler.finish(close=True)
Example 11
def client_proc(computation, njobs, coro=None):
    # use RemoteCoroScheduler to schedule/submit coroutines; scheduler must be
    # created before computation is scheduled (next step below)
    rcoro_scheduler = RemoteCoroScheduler(computation)
    # schedule computation (if scheduler is shared, this waits until
    # prior computations are finished)
    if (yield computation.schedule()):
        raise Exception('Failed to schedule computation')

    # create njobs remote coroutines
    for n in range(njobs):
        rcoro = yield rcoro_scheduler.schedule(rcoro_proc, random.uniform(5, 10))

    # scheduler will wait until all remote coroutines finish
    yield rcoro_scheduler.finish(close=True)
Example 12
def client_proc(computation, coro=None):
    # use RemoteCoroScheduler to schedule/submit coroutines; scheduler must be
    # created before computation is scheduled (next step below)
    rcoro_scheduler = RemoteCoroScheduler(computation)
    # in discoro_client6.py, data is sent to each remote coroutine; here, data
    # is broadcast over channel and remote coroutines subscribe to it
    data_channel = asyncoro.Channel('data_channel')
    # not necessary to register channel in this case, as it is sent to remote
    # coroutines; if they were to 'locate' it, it should be registered
    # data_channel.register()

    trend_coro = asyncoro.Coro(trend_proc)

    rcoro_avg = yield rcoro_scheduler.schedule(rcoro_avg_proc, data_channel,
                                               0.4, trend_coro, 10)
    assert isinstance(rcoro_avg, asyncoro.Coro)
    rcoro_save = yield rcoro_scheduler.schedule(rcoro_save_proc, data_channel)
    assert isinstance(rcoro_save, asyncoro.Coro)

    # make sure both remote coroutines have subscribed to channel ('deliver'
    # should return 2 if they both are)
    assert (yield data_channel.deliver('start', n=2)) == 2

    # if data is sent frequently (say, many times a second), enable
    # streaming data to remote peer; this is more efficient as
    # connections are kept open (so the cost of opening and closing
    # connections is avoided), but keeping too many connections open
    # consumes system resources
    yield asyncoro.AsynCoro.instance().peer(rcoro_avg.location,
                                            stream_send=True)
    yield asyncoro.AsynCoro.instance().peer(rcoro_save.location,
                                            stream_send=True)

    # send 1000 items of random data to remote coroutines
    for i in range(1000):
        n = random.uniform(-1, 1)
        item = (i, n)
        # data can be sent to remote coroutines either with 'send' or
        # 'deliver'; 'send' is more efficient but no guarantee data
        # has been sent successfully whereas 'deliver' indicates
        # errors right away
        data_channel.send(item)
        yield coro.sleep(0.02)
    item = (i, None)
    data_channel.send(item)

    yield rcoro_scheduler.finish(close=True)
    data_channel.close()
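'rcoro_save_proc' (and similarly 'rcoro_avg_proc') must subscribe to the channel before the client's 'deliver' counts it; a sketch of the simpler of the two, with the saving behavior itself an assumption:

def rcoro_save_proc(channel, coro=None):
    yield channel.subscribe(coro)  # counted by the client's deliver(..., n=2)
    assert (yield coro.receive()) == 'start'  # 'start' sent over the channel
    with open('data.txt', 'w') as fd:  # file local to the server
        while True:
            i, n = yield coro.receive()
            if n is None:  # end of data
                break
            fd.write('%s: %s\n' % (i, n))
    yield channel.unsubscribe(coro)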
Example 13
def client_proc(computation, njobs, coro=None):
    proc_setup_coros = set() # keep track of server processes used, to clean them up when done

    # coroutine receives status messages from rcoro_scheduler. If the status is
    # ServerInitialized, send the file to server and wait for initialization
    def status_proc(status, info, coro=None):
        if status != discoro.Scheduler.ServerInitialized:
            raise StopIteration(0)
        rcoro = yield rcoro_scheduler.submit_at(info, proc_setup, coro)
        if isinstance(rcoro, asyncoro.Coro):
            msg = yield coro.receive()
            if msg == 'ready':
                proc_setup_coros.add(rcoro)
                raise StopIteration(0) # success indicated with 0
            else:
                raise StopIteration(-1) # scheduler won't use this server
        else:
            print('Setup of %s failed' % info)
            raise StopIteration(-1) # scheduler won't use this server

    # use RemoteCoroScheduler to start coroutines at servers
    rcoro_scheduler = RemoteCoroScheduler(computation, status_proc)
    if (yield computation.schedule()):
        raise Exception('Failed to schedule computation')

    # remote coroutines send results to this coroutine
    def results_proc(coro=None):
        done = 0
        while done < njobs:
            msg = yield coro.receive()
            if isinstance(msg, tuple) and len(msg) == 2:
                print('result from %s: %s' % (msg[0], msg[1]))
                done += 1
    results_coro = asyncoro.Coro(results_proc)

    submitted = 0
    while submitted < njobs:
        rcoro = yield rcoro_scheduler.schedule(compute_proc, random.uniform(5, 10), results_coro)
        if isinstance(rcoro, asyncoro.Coro):
            submitted += 1

    # wait for results to be received
    yield results_coro.finish()
    # cleanup processes
    for proc_setup_coro in proc_setup_coros:
        yield proc_setup_coro.deliver('cleanup', timeout=5)
    yield rcoro_scheduler.finish(close=True)
Example 14
def client_proc(computation, njobs, coro=None):
    # use RemoteCoroScheduler to start coroutines at servers (should be done
    # before scheduling computation)
    rcoro_scheduler = RemoteCoroScheduler(computation)
    # execute njobs jobs (coroutines) and get their results. The number of jobs
    # created can be more than the number of server processes available; the
    # scheduler will use as many processes as necessary/available, running one
    # job at a server process

    # arguments must correspond to the arguments of the computation; multiple
    # arguments (as in this case) can be given as tuples
    args = [(i, random.uniform(2, 5)) for i in range(njobs)]
    results = yield rcoro_scheduler.map_results(compute, args)
    # Coroutines may not be executed in the order of the given args, but
    # results are returned in the same order as the given args
    for result in results:
        print('    result for %d from %s: %s' % result)

    yield rcoro_scheduler.finish(close=True)
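The print above unpacks each result as a 3-tuple, so 'compute' presumably ends with one; a minimal sketch consistent with that (an assumption):

def compute(i, n, coro=None):
    yield coro.sleep(n)  # simulate work for n seconds
    # StopIteration value is what map_results returns, in the order of args
    raise StopIteration((i, coro.location, n))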
Example 15
def client_proc(computation, coro=None):
    # Use RemoteCoroScheduler to run at most one coroutine at a server process
    # This should be created before scheduling computation
    rcoro_scheduler = RemoteCoroScheduler(computation)

    # execute 20 jobs (coroutines) and get their results. Note that number of
    # jobs created can be more than number of server processes available; the
    # scheduler will use as many processes as necessary/available, running one
    # job at a server process
    algorithms = ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']
    args = [(algorithms[i % len(algorithms)], random.uniform(1, 3)) for i in range(20)]
    results = yield rcoro_scheduler.map_results(compute, args)
    for i, result in enumerate(results):
        if isinstance(result, tuple) and len(result) == 2:
            print('    %ssum: %s' % (result[0], result[1]))
        else:
            print('  rcoro failed for %s: %s' % (args[i][0], str(result)))

    yield rcoro_scheduler.finish(close=True)
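A sketch of the checksum 'compute' this example assumes, with 'data' (and 'hashlib') presumed to have been loaded as globals at the server by a setup step not shown here:

def compute(alg, n, coro=None):
    # 'data' and 'hashlib' are assumed to be globals set up at this server
    yield coro.sleep(n)
    csum = getattr(hashlib, alg)()
    csum.update(data)
    raise StopIteration((alg, csum.hexdigest()))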
Example 16
def client_proc(computation, coro=None):

    def results_proc(coro=None):
        coro.set_daemon()
        while True:
            result = yield coro.receive()
            print('result: %s' % result)

    job_scheduler = RemoteCoroScheduler(computation)
    results_coro = asyncoro.Coro(results_proc)

    if (yield computation.schedule()):
        raise Exception('schedule failed')

    # submit jobs
    for i in range(5):
        rcoro = yield job_scheduler.schedule(compute, random.uniform(3, 10), results_coro)

    # wait for all results
    yield job_scheduler.finish()
    yield computation.close()
Example 17
def client_proc(computation, coro=None):
    # use RemoteCoroScheduler to schedule/submit coroutines; scheduler must be
    # created before computation is scheduled (next step below)
    rcoro_scheduler = RemoteCoroScheduler(computation)
    trend_coro = asyncoro.Coro(trend_proc)

    rcoro_avg = yield rcoro_scheduler.schedule(rcoro_avg_proc, 0.4, trend_coro,
                                               10)
    assert isinstance(rcoro_avg, asyncoro.Coro)
    rcoro_save = yield rcoro_scheduler.schedule(rcoro_save_proc)
    assert isinstance(rcoro_save, asyncoro.Coro)

    # if data is sent frequently (say, many times a second), enable
    # streaming data to remote peer; this is more efficient as
    # connections are kept open (so the cost of opening and closing
    # connections is avoided), but keeping too many connections open
    # consumes system resources
    yield asyncoro.AsynCoro.instance().peer(rcoro_avg.location,
                                            stream_send=True)
    yield asyncoro.AsynCoro.instance().peer(rcoro_save.location,
                                            stream_send=True)

    # send 1000 items of random data to remote coroutines
    for i in range(1000):
        n = random.uniform(-1, 1)
        item = (i, n)
        # data can be sent to remote coroutines either with 'send' or
        # 'deliver'; 'send' is more efficient but no guarantee data
        # has been sent successfully whereas 'deliver' indicates
        # errors right away; alternately, messages can be sent with a
        # channel, which is more convenient if there are multiple
        # (unknown) recipients
        rcoro_avg.send(item)
        rcoro_save.send(item)
        yield coro.sleep(0.01)
    item = (i, None)
    rcoro_avg.send(item)
    rcoro_save.send(item)

    yield rcoro_scheduler.finish(close=True)
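A sketch of the moving-average worker 'rcoro_avg_proc' used above, assuming a simple sliding window that alerts 'trend_coro' when the average crosses the threshold (the alert format is an assumption):

def rcoro_avg_proc(threshold, trend_coro, window_size, coro=None):
    window = [0.0] * window_size
    cumsum = 0.0
    while True:
        i, n = yield coro.receive()
        if n is None:  # end of data
            break
        cumsum += n - window[0]
        window.append(n)
        window.pop(0)
        avg = cumsum / window_size
        if avg > threshold:
            trend_coro.send((i, 'high', avg))
        elif avg < -threshold:
            trend_coro.send((i, 'low', avg))
    raise StopIteration(0)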
Example 18
def client_proc(computation, coro=None):
    # use RemoteCoroScheduler to start coroutines at servers (should be done
    # before scheduling computation)
    rcoro_scheduler = RemoteCoroScheduler(computation)
    # Creating httpd sets computation's "status_coro" to process status messages
    # from discoro scheduler. RemoteCoroScheduler will reset computation's
    # "status_coro" to itself, but chains messages to existing "status_coro", so
    # both RemoteCoroScheduler and httpd will process messages. See
    # discoro_httpd2.py where messages from discoro are chained explicitly and
    # processed by the client.

    # computation must be scheduled before jobs are submitted
    if (yield computation.schedule()):
        raise Exception('schedule failed')

    i = 0
    while True:
        cmd = yield coro.receive()
        if cmd is None:
            break
        i += 1
        c = C(i)
        if cmd == 'servers':
            c.n = random.uniform(10, 20)
            yield computation.run_servers(compute, c, coro)
        elif cmd == 'nodes':
            c.n = random.uniform(20, 50)
            yield computation.run_nodes(compute, c, coro)
        else:
            try:
                c.n = float(cmd)
            except:
                print('  "%s" is ignored' % cmd)
                continue
            yield computation.run(compute, c, coro)

    yield computation.close()
Example 19
    algorithms = ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']
    args = [(algorithms[i % len(algorithms)], random.uniform(1, 3))
            for i in range(10)]
    results = yield rcoro_scheduler.map_results(compute, args)
    for i, result in enumerate(results):
        if isinstance(result, tuple) and len(result) == 2:
            print('    %ssum: %s' % (result[0], result[1]))
        else:
            print('  rcoro failed for %s: %s' % (args[i][0], str(result)))

    yield rcoro_scheduler.finish(close=True)


if __name__ == '__main__':
    import random, functools, sys
    # asyncoro.logger.setLevel(asyncoro.Logger.DEBUG)
    # if scheduler is not already running (on a node as a program),
    # start private scheduler:
    # Scheduler()
    data_file = sys.argv[0] if len(sys.argv) == 1 else sys.argv[1]
    # send 'compute' generator function; data_file can also be sent with
    # 'depends', but in this case, the client sends it separately when server is
    # initialized (to illustrate how client can transfer files).
    computation = Computation([compute])
    # Use RemoteCoroScheduler to run at most one coroutine at a server process
    # This should be created before scheduling computation
    rcoro_scheduler = RemoteCoroScheduler(computation,
                                          proc_available=proc_available,
                                          proc_close=proc_close)
    asyncoro.Coro(client_proc, computation)
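'proc_available' and 'proc_close' are user-supplied callbacks not shown here; a hypothetical sketch, assuming each is run as a coroutine with the server's location and that 'proc_available' is where the client transfers data_file (the exact calling convention depends on the RemoteCoroScheduler version):

def proc_available(location, coro=None):
    # hypothetical: send data_file to the newly initialized server
    if (yield asyncoro.AsynCoro().send_file(location, data_file,
                                            timeout=10)) < 0:
        print('Could not send "%s" to %s' % (data_file, location))
        raise StopIteration(-1)
    raise StopIteration(0)

def proc_close(location, coro=None):
    # hypothetical: a place to remove transferred files / release resources
    yield coro.sleep(0)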
Example 20
            print('  rcoro failed for %s: %s' % (args[i][0], str(result)))

    yield rcoro_scheduler.finish(close=True)


if __name__ == '__main__':
    import logging, random, functools, sys
    # asyncoro.logger.setLevel(logging.DEBUG)
    # if scheduler is not already running (on a node as a program),
    # start private scheduler:
    Scheduler()
    data_file = sys.argv[0] if len(sys.argv) == 1 else sys.argv[1]

    # send 'compute' generator function; data_file can also be sent with
    # 'depends', but in this case, the client sends it separately when node is
    # available (to illustrate how client can transfer files).

    # Since this example doesn't work with Windows, 'node_allocations' feature
    # is used to filter out nodes running Windows.
    node_allocations = [
        DiscoroNodeAllocate(node='*', platform='Windows', cpus=0)
    ]
    computation = Computation([compute],
                              node_available=node_available,
                              node_setup=node_setup,
                              node_allocations=node_allocations)
    # Use RemoteCoroScheduler to run at most one coroutine at a server process
    # This should be created before scheduling computation
    rcoro_scheduler = RemoteCoroScheduler(computation)
    asyncoro.Coro(client_proc, computation)
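'node_available' and 'node_setup' are defined elsewhere; a sketch under these assumptions: 'node_available' runs at the client when a node is discovered (data_file can be bound with functools.partial, which would explain the functools import) and transfers the file, while 'node_setup' runs on the node itself before its servers start and loads the file into a global for 'compute' to use.

def node_available(avail_info, data_file, coro=None):
    # executed at the client; send the data file to the available node
    if (yield asyncoro.AsynCoro().send_file(avail_info.location, data_file,
                                            timeout=10)) < 0:
        print('Could not send "%s" to %s' % (data_file, avail_info.location))
        raise StopIteration(-1)
    raise StopIteration(0)

def node_setup(data_file, coro=None):
    # executed on the node; load the file into a global before servers start
    global data, hashlib
    import hashlib, os
    with open(data_file, 'rb') as fd:
        data = fd.read()
    os.remove(data_file)
    yield 0  # exit value 0 indicates successful initialization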