Example #1
def custom_feeder(input, task=None):
    def write_proc(fin, pipe, task=None):
        while True:
            data = yield os.read(fin.fileno(), 8 * 1024)
            if not data:
                break
            n = yield pipe.write(data, full=True)
            assert n == len(data)
        fin.close()
        pipe.stdin.close()

    def read_proc(pipe, task=None):
        # output from sha1sum is small, so read until EOF
        data = yield pipe.stdout.read()
        pipe.stdout.close()
        raise StopIteration(data)

    if platform.system() == 'Windows':
        # asyncfile.Popen must be used instead of subprocess.Popen
        pipe = pycos.asyncfile.Popen([r'\cygwin64\bin\sha1sum.exe'],
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE)
    else:
        pipe = subprocess.Popen(['sha1sum'],
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE)

    async_pipe = pycos.asyncfile.AsyncPipe(pipe)
    reader = pycos.Task(read_proc, async_pipe)
    writer = pycos.Task(write_proc, open(input), async_pipe)
    stdout = yield reader.finish()
    print('     feeder sha1sum: %s' % stdout)
Example #2
def peer_status(task=None):
    client = pycos.Task(rps_client)
    rpss = {}
    # 'servers' (used in 'discover_rps' below) is assumed to be a module-level
    # dict mapping peer locations to RPS server tasks
    while 1:
        status = yield task.receive()
        if not isinstance(status, pycos.PeerStatus):
            if status == 'quit':
                break
            pycos.logger.warning('Invalid peer status %s ignored',
                                 type(status))
            continue
        if status.status == pycos.PeerStatus.Online:
            # if peer has rps_log_monitor, run RPS there
            def discover_rps(location, task=None):
                rps = yield pycos.RPS.locate('rps_log_monitor',
                                             location=location,
                                             timeout=5)
                if isinstance(rps, pycos.RPS):
                    rpss[rps] = rps
                    server = yield rps(client)
                    if isinstance(server, pycos.Task):
                        servers[location] = server

            pycos.Task(discover_rps, status.location)
        else:  # status.status == pycos.PeerStatus.Offline
            servers.pop(status.location, None)

    for rps in rpss.values():
        rps.close()
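
A minimal sketch (names assumed, not from the original file) of how this status task is typically hooked up so it receives PeerStatus messages:

servers = {}   # assumed module-level dict used by peer_status above
status_task = pycos.Task(peer_status)
# ask the (networked) scheduler to notify status_task of peer status changes;
# this assumes 'import pycos.netpycos' so networking is enabled
pycos.Pycos.instance().peer_status(status_task)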
Example #3
def create_job(i, task=None):
    # create reader and send to rtask so it can send messages to reader
    client_reader = pycos.Task(get_output, i)
    # schedule rtask on (available) remote server
    rtask = yield computation.run(rtask_proc, client_reader, program_path)
    if isinstance(rtask, pycos.Task):
        print('  job %s processed by %s' % (i, rtask.location))
        # sender sends input data to rtask
        pycos.Task(send_input, rtask)
        # wait for all data to be received
        yield client_reader.finish()
        print('  job %s done' % i)
    else:  # failed to schedule
        print('  job %s failed: %s' % (i, rtask))
        client_reader.terminate()
Example #4
def client_proc(computation, njobs, task=None):
    # schedule computation with the scheduler; scheduler accepts one computation
    # at a time, so if scheduler is shared, the computation is queued until it
    # is done with already scheduled computations
    if (yield computation.schedule()):
        raise Exception('Could not schedule computation')

    # send 5 requests to remote process (compute_task)
    def send_requests(rtask, task=None):
        # first send this local task (to whom rtask sends result)
        rtask.send(task)
        for i in range(5):
            # even if recipient doesn't use "yield" (such as executing long-run
            # computation, or thread-blocking function such as 'time.sleep' as
            # in this case), the message is accepted by another scheduler
            # (netpycos.Pycos) at the receiver and put in recipient's message
            # queue
            rtask.send(random.uniform(10, 20))
            # assume delay in input availability
            yield task.sleep(random.uniform(2, 5))
        # end of input is indicated with None
        rtask.send(None)
        result = yield task.receive()  # get result
        print('    %s computed result: %.4f' % (rtask.location, result))

    for i in range(njobs):
        rtask = yield computation.run(compute_task)
        if isinstance(rtask, pycos.Task):
            print('  job %d processed by %s' % (i, rtask.location))
            pycos.Task(send_requests, rtask)

    yield computation.close()
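
A minimal sketch (assumed, not from the original file) of the remote 'compute_task' matching the protocol used by 'send_requests' above: it first receives the client task, then numbers until None, and finally sends back a result.

def compute_task(task=None):
    import time
    client = yield task.receive()       # task to send the result to
    result = 0.0
    while True:
        n = yield task.receive()
        if n is None:                   # end of input
            break
        time.sleep(n / 10.0)            # simulate thread-blocking computation
        result += n
    client.send(result)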
Example #5
def send_proc(task=None):
    # if the server is in a remote network, add it explicitly with 'peer'
    # (optionally enabling streaming for efficiency):
    # yield pycos.Pycos.instance().peer(pycos.Location('server node/ip', port))
    server = yield pycos.Task.locate('chat_server', timeout=5)
    if not server:
        print('Could not locate server')
        raise StopIteration
    server.send(('join', task))
    client_id = yield task.receive()

    # channel is at same location as server task
    channel = yield pycos.Channel.locate('chat_channel', server.location)
    recv_task = pycos.Task(recv_proc, client_id)
    yield channel.subscribe(recv_task)
    # since reading input is a synchronous (blocking) call, use an async thread
    async_threads = pycos.AsyncThreadPool(1)
    if sys.version_info.major > 2:
        read_input = input
    else:
        read_input = raw_input
    while True:
        try:
            line = yield async_threads.async_task(read_input)
            line = line.strip()
            if line.lower() in ('quit', 'exit'):
                break
        except:
            break
        # send message to channel
        channel.send((line, client_id))
    server.send(('quit', client_id))
    yield channel.unsubscribe(recv_task)
Example #6
def client_proc(computation, task=None):
    # schedule computation with the scheduler; if remote scheduler is not
    # automatically discovered (e.g., if it is on remote network, or UDP is
    # lossy), use 'peer' method to discover, e.g., with
    # yield pycos.Pycos.instance().peer(pycos.Location('hostname_or_ip_of_scheduler', 9706))
    if (yield computation.schedule()):
        raise Exception('Could not schedule computation')

    # remote tasks send results to this process
    def reply_proc(task=None):
        task.set_daemon()
        while 1:
            msg = yield task.receive()
            print('      Received reply for %s from %s: %s' %
                  (msg[0], msg[1], msg[2]))

    reply_task = pycos.Task(reply_proc)

    i = 0
    while True:
        n = yield task.receive()
        if n is None:
            break
        i += 1
        rtask = yield computation.run(compute, i, n, reply_task)
        if isinstance(rtask, pycos.Task):
            print('  Task %s created for %s at %s' % (i, n, rtask.location))

    # wait for all jobs to be done and close computation
    yield computation.close()
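
A minimal sketch (assumed) of the remote 'compute' task used above; it sends a reply tuple in the format printed by 'reply_proc':

def compute(i, n, reply_task, task=None):
    # simulate a computation taking 'n' seconds, then report back
    yield task.sleep(n)
    reply_task.send((i, task.location, n))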
Example #7
def client_proc(computation, njobs, task=None):
    # schedule computation with the scheduler; scheduler accepts one computation
    # at a time, so if scheduler is shared, the computation is queued until it
    # is done with already scheduled computations
    if (yield computation.schedule()):
        raise Exception('Could not schedule computation')

    # create a separate task to receive results, so they can be processed
    # as soon as received
    def recv_results(task=None):
        for i in range(njobs):
            msg = yield task.receive()
            print('    result for job %d: %s' % (i, msg))

    # remote tasks send replies as messages to this task
    results_task = pycos.Task(recv_results)

    # run njobs; each job will be executed by one dispycos server
    for i in range(njobs):
        cobj = C(i)
        cobj.n = random.uniform(5, 10)
        # as noted in 'dispycos_client2.py', 'run' method is used to run jobs
        # sequentially; use 'run_async' to run multiple jobs on one server
        # concurrently
        print('  request %d: %s' % (i, cobj.n))
        rtask = yield computation.run(compute, cobj, results_task)
        if not isinstance(rtask, pycos.Task):
            print('failed to create rtask %s: %s' % (i, rtask))

    # wait for all results and close computation
    yield computation.close()
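
A minimal sketch (assumed) of the remote 'compute' task used above; it sleeps for 'obj.n' seconds and sends a message to 'results_task':

def compute(obj, client, task=None):
    # simulate a computation taking obj.n seconds
    yield task.sleep(obj.n)
    client.send('%.3f computed at %s' % (obj.n, task.location))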
Example #8
def server_task(task=None):
    task.set_daemon()
    task.register('server_task')
    while True:
        msg = yield task.receive()
        # create task to process message
        pycos.Task(process, msg)
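
A minimal client sketch (assumed, not part of the original example) that locates the registered 'server_task' and sends it messages; with 'pycos.netpycos' imported, this works from a remote peer as well:

def client_proc(task=None):
    server = yield pycos.Task.locate('server_task', timeout=5)
    if isinstance(server, pycos.Task):
        for i in range(3):
            server.send('message %d' % i)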
Example #9
File: channel.py  Project: pgiri/pycos
def client_proc(task=None):
    # create channel
    channel = pycos.Channel('sum_prod')
    # create tasks to compute sum and product of numbers sent
    sum_task = pycos.Task(seqsum)
    prod_task = pycos.Task(seqprod)
    # subscribe tasks to channel so they receive messages
    yield channel.subscribe(sum_task)
    yield channel.subscribe(prod_task)
    # send 4 numbers to channel
    for _ in range(4):
        r = random.uniform(0.5, 3)
        channel.send(r)
        print('sent %f' % r)
    # send None to indicate end of data
    channel.send(None)
    yield channel.unsubscribe(sum_task)
    yield channel.unsubscribe(prod_task)
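
A minimal sketch (assumed) of the 'seqsum' task subscribed above; 'seqprod' would be analogous with multiplication:

def seqsum(task=None):
    # sum numbers received over the channel until None signals end of data
    result = 0
    while True:
        msg = yield task.receive()
        if msg is None:
            break
        result += msg
    print('sum: %f' % result)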
Example #10
def status_proc(task=None):
    task.set_daemon()
    i = 0
    while 1:
        msg = yield task.receive()
        if not isinstance(msg, DispycosStatus):
            continue
        if msg.status == Scheduler.ServerDiscovered:
            pycos.Task(server_available, msg.info, data_files[i])
            i += 1
Example #11
File: webserver.py  Project: pgiri/pycos
def server(host, port, task=None):
    task.set_daemon()
    sock = pycos.AsyncSocket(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind((host, port))
    sock.listen(5000)

    while True:
        conn, addr = yield sock.accept()
        pycos.Task(process, conn)
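
A minimal sketch (assumed, not from webserver.py) of the 'process' task created for each connection; it reads the request and replies with a fixed response:

def process(conn, task=None):
    request = yield conn.recv(4096)
    response = b'HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 2\r\n\r\nOK'
    yield conn.sendall(response)
    conn.close()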
Example #12
def hcwst(host,
          port,
          repeater_ws,
          proxy_host,
          proxy_port,
          proxy_username,
          proxy_password,
          ssl_verify,
          task=None):

    task.set_daemon()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    #sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    sock = pycos.AsyncSocket(sock)
    sock.bind((host, int(port)))
    sock.listen(1)

    print('Tunnel listening at %s' % str(sock.getsockname()))
    if ssl_verify:
        ws = websocket.WebSocket()
    else:
        ws = websocket.WebSocket(sslopt={"cert_reqs": ssl.CERT_NONE})

    if not proxy_host:
        ws.connect(repeater_ws,
                   subprotocols=["binary"],
                   sockopt=(socket.IPPROTO_TCP, socket.TCP_NODELAY))
    else:
        # 'proxy_auth' was not defined in this snippet; build it from the given
        # credentials as websocket-client expects (username, password)
        proxy_auth = (proxy_username, proxy_password) if proxy_username else None
        ws.connect(repeater_ws,
                   http_proxy_host=proxy_host,
                   http_proxy_port=proxy_port,
                   http_proxy_auth=proxy_auth,
                   subprotocols=["binary"],
                   sockopt=(socket.IPPROTO_TCP, socket.TCP_NODELAY))

    print('Tunnel connected to %s' % repeater_ws)

    conn, _ = yield sock.accept()
    pycos.Task(client_send, conn, ws)
    pycos.Task(ws_send, conn, ws)
Example #13
def server(host, port, task=None):
    task.set_daemon()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # setup socket for asynchronous I/O with pycos
    sock = pycos.AsyncSocket(sock)
    sock.bind((host, port))
    sock.listen(128)

    while True:
        conn, addr = yield sock.accept()
        # create a task to process connection
        pycos.Task(process, conn)
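
A minimal sketch (assumed) of the 'process' task created for each connection; it receives one message and closes the connection:

def process(conn, task=None):
    data = yield conn.recv(1024)
    if data:
        print('received: %s' % data.decode())
    conn.close()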
Example #14
def client_proc(computation, program_path, n, task=None):
    # schedule computation with the scheduler; scheduler accepts one computation
    # at a time, so if scheduler is shared, the computation is queued until it
    # is done with already scheduled computations
    if (yield computation.schedule()):
        raise Exception('Could not schedule computation')

    # send 10 random numbers to remote process (rtask_proc)
    def send_input(rtask, task=None):
        for i in range(10):
            # encode strings so this works with both Python 2.7 and 3
            rtask.send(('%.2f' % random.uniform(0, 5)).encode())
            # assume delay in input availability
            yield task.sleep(random.uniform(0, 2))
        # end of input is indicated with None
        rtask.send(None)

    # read output (messages sent by 'reader_proc' on remote process)
    def get_output(i, task=None):
        while True:
            line = yield task.receive()
            if not line:  # end of output
                break
            print('      job %s output: %s' % (i, line.strip().decode()))

    def create_job(i, task=None):
        # create reader and send to rtask so it can send messages to reader
        client_reader = pycos.Task(get_output, i)
        # schedule rtask on (available) remote server
        rtask = yield computation.run(rtask_proc, client_reader, program_path)
        if isinstance(rtask, pycos.Task):
            print('  job %s processed by %s' % (i, rtask.location))
            # sender sends input data to rtask
            pycos.Task(send_input, rtask)
            # wait for all data to be received
            yield client_reader.finish()
            print('  job %s done' % i)
        else:  # failed to schedule
            print('  job %s failed: %s' % (i, rtask))
            client_reader.terminate()

    # create n jobs (that run concurrently)
    job_tasks = []
    for i in range(1, n + 1):
        job_tasks.append(pycos.Task(create_job, i))
    # wait for jobs to finish
    for job_task in job_tasks:
        yield job_task.finish()

    yield computation.close()
Example #15
def client_proc(computation, task=None):
    # schedule computation with the scheduler; scheduler accepts one computation
    # at a time, so if scheduler is shared, the computation is queued until it
    # is done with already scheduled computations
    if (yield computation.schedule()):
        raise Exception('Could not schedule computation')

    # in dispycos_client6.py, data is sent to each remote task; here, data
    # is broadcast over channel and remote tasks subscribe to it
    data_channel = pycos.Channel('data_channel')
    # not necessary to register channel in this case, as it is sent to remote
    # tasks; if they were to 'locate' it, it should be registered
    # data_channel.register()

    trend_task = pycos.Task(trend_proc)

    rtask_avg = yield computation.run(rtask_avg_proc, data_channel, 0.4,
                                      trend_task, 10)
    assert isinstance(rtask_avg, pycos.Task)
    rtask_save = yield computation.run(rtask_save_proc, data_channel)
    assert isinstance(rtask_save, pycos.Task)

    # make sure both remote tasks have subscribed to channel ('deliver'
    # should return 2 if they both are)
    assert (yield data_channel.deliver('start', n=2)) == 2

    # if data is sent frequently (say, many times a second), enable
    # streaming data to remote peer; this is more efficient as
    # connections are kept open (so the cost of opening and closing
    # connections is avoided), but keeping too many connections open
    # consumes system resources
    yield pycos.Pycos.instance().peer(rtask_avg.location, stream_send=True)
    yield pycos.Pycos.instance().peer(rtask_save.location, stream_send=True)

    # send 1000 items of random data to remote tasks
    for i in range(1000):
        n = random.uniform(-1, 1)
        item = (i, n)
        # data can be sent to remote tasks either with 'send' or
        # 'deliver'; 'send' is more efficient but no guarantee data
        # has been sent successfully whereas 'deliver' indicates
        # errors right away
        data_channel.send(item)
        yield task.sleep(0.02)
    item = (i, None)
    data_channel.send(item)

    yield computation.close()
    data_channel.close()
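
A minimal sketch (assumed) of a remote task such as 'rtask_save_proc' showing how it would subscribe to the broadcast channel and consume items until the (i, None) sentinel:

def rtask_save_proc(channel, task=None):
    yield channel.subscribe(task)
    msg = yield task.receive()          # 'start' message delivered by client
    while True:
        i, n = yield task.receive()
        if n is None:                   # sentinel marks end of data
            break
        # ... save / process (i, n) here ...
    yield channel.unsubscribe(task)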
Example #16
def rtask_proc(client, program, task=None):
    import sys
    import os
    import subprocess
    import pycos.asyncfile

    if program.endswith('.py'):
        # Computation dependencies are saved in parent directory
        program = [sys.executable, os.path.join('..', program)]
    # start program as a subprocess (to read from and write to pipe)
    if os.name == 'nt':  # create pipe with asyncfile under Windows
        pipe = pycos.asyncfile.Popen(program,
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE)
    else:
        pipe = subprocess.Popen(program,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE)
    # convert to asynchronous pipe; see 'pipe_csum.py' and 'pipe_grep.py' for
    # chaining pipes
    pipe = pycos.asyncfile.AsyncPipe(pipe)

    # reader reads (output) from pipe and sends to client as messages
    def reader_proc(task=None):
        while True:
            line = yield pipe.readline()
            if not line:
                break
            # send output to client
            client.send(line)
        pipe.stdout.close()
        if os.name == 'nt':
            pipe.close()
        client.send(None)

    reader = pycos.Task(reader_proc)

    # writer gets messages from client and writes them (input) to pipe
    while True:
        data = yield task.receive()
        if not data:
            break
        # write data as lines to program
        yield pipe.write(data + '\n'.encode(), full=True)
    pipe.stdin.close()
    # wait for all data to be read (subprocess to end)
    yield reader.finish()
    raise StopIteration(pipe.poll())
Example #17
def server_proc(host, port, task=None):
    task.set_daemon()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # convert sock to asynchronous
    sock = pycos.AsyncSocket(sock)
    sock.bind((host, port))
    sock.listen(128)
    print('server at %s' % str(sock.getsockname()))

    try:
        while True:
            conn, addr = yield sock.accept()
            pycos.Task(client_conn_proc, conn)
    except:
        msg_bcast_task.terminate()
Example #18
def run_jobs_proc(computation, data_files, task=None):
    # schedule computation with the scheduler; scheduler accepts one computation
    # at a time, so if scheduler is shared, the computation is queued until it
    # is done with already scheduled computations
    if (yield computation.schedule()):
        raise Exception('Could not schedule computation')

    for i in range(len(data_files)):
        data_file = data_files[i]
        # create remote task
        rtask = yield computation.run(rtask_proc)
        if isinstance(rtask, pycos.Task):
            # create local task to send input file and data to rtask
            pycos.Task(client_proc, i, data_file, rtask)
        else:
            print('  job %s failed: %s' % (i, rtask))

    yield computation.close()
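
A minimal sketch (assumed, not from the original file) of the local 'client_proc' created above; it transfers the data file to the peer running 'rtask' and tells it which file to process (the exact messages expected by 'rtask_proc' may differ):

def client_proc(i, data_file, rtask, task=None):
    # transfer the data file to the remote task's location
    if (yield pycos.Pycos.instance().send_file(rtask.location, data_file,
                                               timeout=30)) < 0:
        print('  could not send %s to %s' % (data_file, rtask.location))
        raise StopIteration(-1)
    # tell the remote task which file to process and where to reply
    rtask.send((os.path.basename(data_file), task))
    result = yield task.receive()
    print('  job %s (%s): %s' % (i, data_file, result))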
Example #19
def receiver_proc2(task=None):
    # if server is in remote network, add it explicitly
    # scheduler = pycos.Pycos.instance()
    # yield scheduler.peer(pycos.Location('remote.ip', tcp_port))
    rchannel = yield pycos.Channel.locate('2clients', timeout=5)
    if not rchannel:
        print('Could not locate server!')
        raise StopIteration
    # this task subscribes to the channel to receive messages sent to it
    print('server is at %s' % rchannel.location)
    if (yield rchannel.subscribe(task)) != 0:
        raise Exception('subscription failed')
    sender = pycos.Task(sender_proc, rchannel)
    while True:
        msg = yield task.receive()
        print('Received "%s" from %s at %s' %
              (msg['msg'], msg['sender'].name, msg['sender'].location))
        if msg['msg'] is None and msg['sender'] == sender:
            break
    yield rchannel.unsubscribe(task)
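
A minimal sketch (assumed) of 'sender_proc'; it publishes messages in the dict format expected by the receiver above, ending with None:

def sender_proc(rchannel, task=None):
    for i in range(5):
        rchannel.send({'msg': 'message %d' % i, 'sender': task})
        yield task.sleep(0.5)
    # None tells receiver_proc2 to stop
    rchannel.send({'msg': None, 'sender': task})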
Example #20
def rti_test(task=None):
    # if server is on remote network, automatic discovery won't work,
    # so add it explicitly
    # yield scheduler.peer(pycos.Location('192.168.21.5', 9705))

    # get reference to RTI at server
    rti1 = yield pycos.RTI.locate('rti_1')
    print('RTI is at %s' % rti1.location)

    # 5 (remote) tasks are created with rti1
    n = 5
    # set monitor (monitor_proc task) for tasks created for this RTI
    yield rti1.monitor(pycos.Task(monitor_proc, n))

    for i in range(n):
        rtask = yield rti1('test%s' % i, b=i)
        pycos.logger.debug('RTI %s created' % rtask)
        # if necessary, a (different) monitor can also be set for each rtask
        rtask.send('msg:%s' % i)
        yield task.sleep(random.uniform(0, 1))
Example #21
def chat(host, port, task=None):
    task.set_daemon()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock = pycos.AsyncSocket(sock)
    sock.bind((host, port))
    sock.listen(128)
    print('server at %s' % str(sock.getsockname()))

    clients = set()

    try:
        while True:
            conn, addr = yield sock.accept()
            clients.add(conn)
            pycos.Task(client_send, clients, conn)
    except:
        for client in clients:
            client.close()
        raise
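
A minimal sketch (assumed) of the 'client_send' task created for each connection; it relays whatever this client sends to all other connected clients:

def client_send(clients, conn, task=None):
    while True:
        line = yield conn.recv(1024)
        if not line:
            break
        for client in clients:
            if client != conn:
                yield client.sendall(line)
    clients.discard(conn)
    conn.close()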
Example #22
def client_proc(computation, task=None):
    # schedule computation with the scheduler; scheduler accepts one computation
    # at a time, so if scheduler is shared, the computation is queued until it
    # is done with already scheduled computations
    if (yield computation.schedule()):
        raise Exception('Could not schedule computation')

    trend_task = pycos.Task(trend_proc)

    # run average and save tasks at two different servers
    rtask_avg = yield computation.run(rtask_avg_proc, 0.4, trend_task, 10)
    assert isinstance(rtask_avg, pycos.Task)
    rtask_save = yield computation.run(rtask_save_proc)
    assert isinstance(rtask_save, pycos.Task)

    # if data is sent frequently (say, many times a second), enable streaming
    # data to remote peer; this is more efficient as connections are kept open
    # (so the cost of opening and closing connections is avoided), but keeping
    # too many connections open consumes system resources
    yield pycos.Pycos.instance().peer(rtask_avg.location, stream_send=True)
    yield pycos.Pycos.instance().peer(rtask_save.location, stream_send=True)

    # send 1000 items of random data to remote tasks
    for i in range(1000):
        n = random.uniform(-1, 1)
        item = (i, n)
        # data can be sent to remote tasks either with 'send' or 'deliver';
        # 'send' is more efficient but no guarantee data has been sent
        # successfully whereas 'deliver' indicates errors right away;
        # alternately, messages can be sent with a channel, which is more
        # convenient if there are multiple (unknown) recipients
        rtask_avg.send(item)
        rtask_save.send(item)
        yield task.sleep(0.01)
    item = (i, None)
    rtask_avg.send(item)
    rtask_save.send(item)

    yield computation.close()
Example #23
if __name__ == '__main__':
    # pycos.logger.setLevel(pycos.logger.DEBUG)
    # optional arg 1 is host IP address and arg 2 is port to use
    host, port = '', 3456
    if len(sys.argv) > 1:
        host = sys.argv[1]
    if len(sys.argv) > 2:
        port = int(sys.argv[2])

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    sock = pycos.AsyncSocket(sock)
    # same connection is used to receive messages in one task and to send
    # messages in another task
    pycos.Task(client_recv, sock)
    sender = pycos.Task(client_send, sock)

    if sys.version_info.major > 2:
        read_input = input
    else:
        read_input = raw_input
    while True:
        try:
            line = read_input().strip()
            if line.lower() in ('quit', 'exit'):
                break
            if not line:
                continue
        except:
            break
        # hand the line to the sender task, which writes it to the server socket
        sender.send(line)
Example #24
    # (tail of the truncated 'process' task: count each message received)
    recvd += 1

# server accepts connections and creates tasks to deal with them
def server(host, port, task=None):
    task.set_daemon()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # setup socket for asynchronous I/O with pycos
    sock = pycos.AsyncSocket(sock)
    sock.bind((host, port))
    sock.listen(128)

    while True:
        conn, addr = yield sock.accept()
        # create a task to process connection
        pycos.Task(process, conn)

# pycos.logger.setLevel(pycos.Logger.DEBUG)
recvd = 0
pycos.Task(server, '', 8010)

if sys.version_info.major > 2:
    read_input = input
else:
    read_input = raw_input
while True:
    cmd = read_input('Enter "quit" or "exit" to terminate: ').strip().lower()
    if cmd == 'exit' or cmd == 'quit':
        break
print('Received %d messages' % recvd)
Example #25
        # (tail of the truncated 'run_jobs_proc' loop; see Example #18 for the
        # full function)
        rtask = yield computation.run(rtask_proc)
        if isinstance(rtask, pycos.Task):
            # create local task to send input file and data to rtask
            pycos.Task(client_proc, i, data_file, rtask)
        else:
            print('  job %s failed: %s' % (i, rtask))

    yield computation.close()


if __name__ == '__main__':
    import random, os, sys, glob
    # pycos.logger.setLevel(pycos.Logger.DEBUG)
    if os.path.dirname(sys.argv[0]):
        os.chdir(os.path.dirname(sys.argv[0]))
    data_files = glob.glob('dispycos_client*.py')
    if not data_files:
        raise Exception('No data files to process')
    if len(sys.argv) > 1:
        data_files = data_files[:int(sys.argv[1])]

    # if scheduler is not already running (on a node as a program), start it
    # (private scheduler):
    Scheduler()
    # unlike in earlier examples, rtask_proc is not sent with the computation
    # (it is not included in 'components'); instead, it is sent each time a job
    # is submitted, which is a bit inefficient
    computation = Computation([C])

    pycos.Task(run_jobs_proc, computation, data_files)
Example #26
        # (tail of the truncated 'client_proc' loop) 'run' executes one job per
        # server at a time; use 'run_async' to run more than one computation at
        # a server at the same time.
        rtask = yield computation.run(compute, random.uniform(5, 10))
        if isinstance(rtask, pycos.Task):
            print('  job %s processed by %s' % (i, rtask.location))
        else:
            print('rtask %s failed: %s' % (i, rtask))

    # wait for all jobs to be done and close computation
    yield computation.close()


if __name__ == '__main__':
    import random, sys, pycos.dispycos
    # pycos.logger.setLevel(pycos.Logger.DEBUG)
    # if scheduler is not already running (on a node as a program), start
    # private scheduler:
    Scheduler()
    # send 'compute' generator function; use MinPulseInterval so node status
    # updates are sent more frequently (instead of default 2*MinPulseInterval)
    computation = Computation([compute], pulse_interval=pycos.dispycos.MinPulseInterval)

    # to illustrate relaying of status messages to multiple tasks, httpd is
    # also used in this example; this sets computation's status_task to httpd's status_task
    httpd = pycos.httpd.HTTPServer(computation)
    # run 10 (or given number of) jobs
    pycos.Task(client_proc, computation, 10 if len(sys.argv) < 2 else int(sys.argv[1])).value()
    # shutdown httpd only after computation is closed; alternately, close it in
    # 'client_proc' after the computation is closed.
    httpd.shutdown()
Example #27
            # (tail of the truncated 'server_proc' accept loop; see Example #17)
            conn, addr = yield sock.accept()
            pycos.Task(client_conn_proc, conn)
    except:
        msg_bcast_task.terminate()


if __name__ == '__main__':
    # optional arg 1 is host IP address and arg 2 is port to use
    host, port = '', 3456
    if len(sys.argv) > 1:
        host = sys.argv[1]
    if len(sys.argv) > 2:
        port = int(sys.argv[2])
    if sys.version_info.major > 2:
        read_input = input
    else:
        read_input = raw_input

    msg_bcast_task = pycos.Task(msg_bcast_proc)
    pycos.Task(server_proc, host, port)

    while True:
        try:
            cmd = read_input(
                'Enter "quit" or "exit" to terminate: ').strip().lower()
            if cmd in ('quit', 'exit'):
                break
        except:
            break
    msg_bcast_task.terminate()
Example #28
    # (tail of the truncated 'client_proc'; see Example #15 for the full function)
    for i in range(1000):
        n = random.uniform(-1, 1)
        item = (i, n)
        # data can be sent to remote tasks either with 'send' or
        # 'deliver'; 'send' is more efficient but no guarantee data
        # has been sent successfully whereas 'deliver' indicates
        # errors right away
        data_channel.send(item)
        yield task.sleep(0.02)
    item = (i, None)
    data_channel.send(item)

    yield computation.close()
    data_channel.close()


if __name__ == '__main__':
    import sys, random
    # pycos.logger.setLevel(pycos.Logger.DEBUG)
    # this copy of the example is not suitable for Python 3.7+ (the version
    # installed by pip is adjusted for it), hence the assertion below
    if sys.version_info.major == 3:
        assert sys.version_info.minor < 7, \
            ('"%s" is not suitable for Python version %s.%s; use file installed by pip instead' %
             (__file__, sys.version_info.major, sys.version_info.minor))
    # if scheduler is shared (i.e., running as program), nothing needs
    # to be done (its location can optionally be given to 'schedule');
    # otherwise, start private scheduler:
    Scheduler()
    computation = Computation([])
    pycos.Task(client_proc, computation)
Example #29
File: udp.py  Project: pgiri/pycos
import sys, socket
import pycos


def server_proc(n, sock, task=None):
    for i in range(n):
        msg, addr = yield sock.recvfrom(1024)
        print('Received "%s" from %s:%s' % (msg, addr[0], addr[1]))
    sock.close()


def client_proc(host, port, task=None):
    sock = pycos.AsyncSocket(socket.socket(socket.AF_INET, socket.SOCK_DGRAM))
    msg = 'client socket: %s' % (sock.fileno())
    if sys.version_info.major >= 3:
        msg = bytes(msg, 'ascii')
    yield sock.sendto(msg, (host, port))
    sock.close()


if __name__ == '__main__':
    sock = pycos.AsyncSocket(socket.socket(socket.AF_INET, socket.SOCK_DGRAM))
    sock.bind(('127.0.0.1', 0))
    host, port = sock.getsockname()

    n = 50
    server_task = pycos.Task(server_proc, n, sock)
    for i in range(n):
        pycos.Task(client_proc, host, port)
    server_task.value()
Example #30
        # (tail of the truncated 'custom_feeder'; see Example #1 for the full
        # function)
        # output from sha1sum is small, so read until EOF
        data = yield pipe.stdout.read()
        pipe.stdout.close()
        raise StopIteration(data)

    if platform.system() == 'Windows':
        # asyncfile.Popen must be used instead of subprocess.Popen
        pipe = pycos.asyncfile.Popen([r'\cygwin64\bin\sha1sum.exe'],
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE)
    else:
        pipe = subprocess.Popen(['sha1sum'],
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE)

    async_pipe = pycos.asyncfile.AsyncPipe(pipe)
    reader = pycos.Task(read_proc, async_pipe)
    writer = pycos.Task(write_proc, open(input), async_pipe)
    stdout = yield reader.finish()
    print('     feeder sha1sum: %s' % stdout)


# pycos.logger.setLevel(pycos.Logger.DEBUG)
# simpler version using 'communicate'
task = pycos.Task(communicate,
                  sys.argv[1] if len(sys.argv) > 1 else sys.argv[0])
task.value()  # wait for it to finish

# alternate version with custom read and write processes
pycos.Task(custom_feeder, sys.argv[1] if len(sys.argv) > 1 else sys.argv[0])