Example #1
def client_proc(job_id, data_file, rtask, task=None):
    # send the input file to rtask.location; it will be saved in the dispycos
    # process's working directory
    if (yield pycos.Pycos().send_file(rtask.location, data_file,
                                      timeout=10)) < 0:
        print('Could not send input data to %s' % rtask.location)
        # terminate remote task
        rtask.send(None)
        raise StopIteration(-1)
    # send info about input
    obj = C(job_id, data_file, random.uniform(5, 8), task)
    if (yield rtask.deliver(obj)) != 1:
        print('Could not send input to %s' % rtask.location)
        raise StopIteration(-1)
    # rtask sends result to this task as message
    result = yield task.receive()
    if not result.result_file:
        print('Processing %s failed' % obj.i)
        raise StopIteration(-1)
    # rtask sends the result file back to this client; it is saved under
    # pycos's 'dest_path', not the current working directory!
    result_file = os.path.join(pycos.Pycos().dest_path, result.result_file)
    # move file to cwd
    target = os.path.join(os.getcwd(), os.path.basename(result_file))
    os.rename(result_file, target)
    print('    job %s output is in %s' % (obj.i, target))
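The class C used above is not defined in this excerpt. Below is a minimal sketch of what it might look like, inferred from the attributes these tasks access (i, data_file, n, client, result_file); it is an assumption, not the original definition.

class C(object):
    # hypothetical container for one job's input, passed between client and rtask
    def __init__(self, i, data_file, n, client):
        self.i = i                  # job identifier (printed as 'obj.i' above)
        self.data_file = data_file  # name of the input file sent to the server
        self.n = n                  # seconds the rtask sleeps to simulate work
        self.client = client        # client task that receives the result
        self.result_file = None     # set by the rtask to the result file name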
Example #2
    def shutdown(self, wait=True):
        """This method should be called by user program to close the
        HTTP server. If 'wait' is True, the server waits for 'poll_sec'
        seconds so HTTP clients get all the updates before the server is
        closed.
        """
        if wait:
            pycos.logger.info(
                'HTTP server waiting for %s seconds for client updates '
                'before quitting', self._poll_sec)
            if pycos.Pycos().cur_task():

                def _shutdown(task=None):
                    yield task.sleep(self._poll_sec + 0.5)
                    self._server.shutdown()
                    self._server.server_close()

                pycos.Task(_shutdown)
            else:
                time.sleep(self._poll_sec + 0.5)
                self._server.shutdown()
                self._server.server_close()
        else:
            self._server.shutdown()
            self._server.server_close()
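A hedged usage sketch: this shutdown method appears to belong to pycos's HTTP status server (pycos.httpd.HTTPServer), which dispycos clients typically create for a computation and shut down before exiting. The wiring below is illustrative, not taken from this excerpt.

import pycos.httpd

def monitor_with_http(computation):
    # publish the computation's status over HTTP while jobs run
    httpd = pycos.httpd.HTTPServer(computation)
    try:
        pass  # ... schedule and run jobs with 'computation' here ...
    finally:
        # wait one poll interval so browsers get final updates, then close
        httpd.shutdown()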
Example #3
def rtask_proc(task=None):
    import os
    # receive object from client_proc task
    cobj = yield task.receive()
    if not cobj:
        raise StopIteration
    # The input file has already been copied (by the client) to where this
    # rtask runs. For the given input file, create an output file in which
    # each line is the length of the corresponding line in the input file
    cobj.result_file = 'result-%s' % cobj.data_file
    with open(cobj.data_file, 'r') as data_fd:
        with open(cobj.result_file, 'w') as result_fd:
            for lineno, line in enumerate(data_fd, start=1):
                result_fd.write('%d: %d\n' % (lineno, len(line) - 1))
    # 'sleep' to simulate computing
    yield task.sleep(cobj.n)
    # transfer the result file to client
    status = yield pycos.Pycos().send_file(cobj.client.location,
                                           cobj.result_file,
                                           overwrite=True,
                                           timeout=30)
    if status:
        print('Could not send %s to %s' %
              (cobj.result_file, cobj.client.location))
        cobj.result_file = None
    # send the result object back to the client task
    cobj.client.send(cobj)
    # remove local copies; guard against 'result_file' being None if the
    # transfer above failed
    os.remove(cobj.data_file)
    if cobj.result_file:
        os.remove(cobj.result_file)
def server_available(location, data_file, task=None):
    import os
    # 'server_available' is executed locally (at the client) when a server
    # process becomes available. 'location' is the Location instance of that
    # server. When this task is executed, the computation's 'depends' have
    # already been transferred. data_file could have been sent with 'depends';
    # however, to illustrate how files can be sent separately (e.g., to
    # distribute data fragments among servers), files are transferred to
    # individual servers in this example

    print('  Sending %s to %s' % (data_file, location))
    if (yield pycos.Pycos().send_file(
            location, data_file, timeout=5, overwrite=True)) < 0:
        print('Could not send data file "%s" to %s' % (data_file, location))
        raise StopIteration(-1)

    # 'setup_server' is executed on remote server at 'location' with argument
    # data_file
    yield computation.enable_server(location, data_file)
    raise StopIteration(0)
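The 'setup_server' function mentioned in the comment is not part of this excerpt. A minimal sketch, assuming the dispycos convention that a server setup function is a generator which yields 0 to signal success; the body and the global name 'data' are illustrative.

def setup_server(data_file, task=None):
    # runs on the remote server process after the file has been transferred;
    # load it once so every job executed on this server can reuse it
    global data
    with open(data_file, 'rb') as fd:
        data = fd.read()
    yield 0  # 0 indicates successful setup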
Example #5
def client_proc(computation, njobs, task=None):
    # schedule computation with the scheduler; scheduler accepts one computation
    # at a time, so if scheduler is shared, the computation is queued until it
    # is done with already scheduled computations
    if (yield computation.schedule()):
        raise Exception('Could not schedule computation')

    # pair the EC2 node with this client:
    yield pycos.Pycos().peer(pycos.Location('54.204.242.185', 9706))
    # if multiple nodes are used, the 'broadcast' option can be used to pair
    # with all of them in one statement:
    # yield pycos.Pycos().peer(pycos.Location('54.204.242.185', 9706), broadcast=True)

    # execute njobs jobs (tasks) and get their results. Note that more jobs can
    # be created than there are server processes available; the scheduler will
    # use as many processes as necessary/available, running one job per server
    # process at a time
    args = [(i, random.uniform(3, 10)) for i in range(njobs)]
    results = yield computation.run_results(compute, args)
    for result in results:
        print('job %s result: %s' % (result[0], result[1]))

    yield computation.close()
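The 'compute' generator passed to run_results is not shown in this excerpt. Below is a sketch consistent with the (i, n) argument pairs built above and the (result[0], result[1]) unpacking; it is an assumption, not the original function.

def compute(i, n, task=None):
    # runs on a remote dispycos server: simulate 'n' seconds of work, then
    # return the job id and the simulated duration as this task's result
    yield task.sleep(n)
    raise StopIteration((i, n))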
Example #6
def node_available(avail_info, data_file, task=None):
    import os
    # 'node_available' is executed locally (at the client) when a node becomes
    # available. 'avail_info.location' is the Location instance of that node.
    # When this task is executed, the computation's 'depends' have already been
    # transferred.

    # data_file could have been sent with the computation's 'depends'; however,
    # to illustrate how files can be sent separately (e.g., to transfer
    # different files to different nodes), the file is transferred in
    # 'node_available'.

    print('  Sending %s to %s' % (data_file, avail_info.location.addr))
    sent = yield pycos.Pycos().send_file(avail_info.location,
                                         data_file,
                                         overwrite=True,
                                         timeout=5)
    if (sent < 0):
        print('Could not send data file "%s" to %s' %
              (data_file, avail_info.location))
        raise StopIteration(-1)

    # value of task (last value yield'ed or value of 'raise StopIteration') will
    # be passed to node_setup as argument(s).
    yield computation.enable_node(avail_info.location.addr, data_file)
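The 'node_setup' referred to in the comment is not shown here. A hedged sketch, assuming it is a generator executed once on the enabled node and that it yields 0 on success; the global name is illustrative.

def node_setup(data_file, task=None):
    # runs once on each enabled node; remember where the transferred file is
    # so that servers on this node can open it
    global node_data_file
    node_data_file = data_file
    yield 0  # 0 indicates successful node initialization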
Example #7
    # pair the EC2 node with this client:
    yield pycos.Pycos().peer(pycos.Location('54.204.242.185', 9706))
    # if multiple nodes are used, the 'broadcast' option can be used to pair
    # with all of them in one statement:
    # yield pycos.Pycos().peer(pycos.Location('54.204.242.185', 9706), broadcast=True)

    # execute njobs jobs (tasks) and get their results. Note that more jobs can
    # be created than there are server processes available; the scheduler will
    # use as many processes as necessary/available, running one job per server
    # process at a time
    args = [(i, random.uniform(3, 10)) for i in range(njobs)]
    results = yield computation.run_results(compute, args)
    for result in results:
        print('job %s result: %s' % (result[0], result[1]))

    yield computation.close()


if __name__ == '__main__':
    import sys, random
    # pycos.logger.setLevel(pycos.Logger.DEBUG)
    pycos.Pycos(node='127.0.0.1', tcp_port=9705, udp_port=9705)
    njobs = 10 if len(sys.argv) == 1 else int(sys.argv[1])
    # if scheduler is not already running (on a node as a program),
    # start private scheduler:
    Scheduler()
    # use 'compute' for computation jobs
    computation = Computation([compute])
    pycos.Task(client_proc, computation, njobs)
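This fragment omits the top of its file. The module-level names it relies on (pycos, Scheduler, Computation) presumably come from imports along these lines; the exact form is an assumption based on the other examples here. The 'compute' and 'client_proc' functions would be defined in the same file, much as in Example #5.

import pycos
import pycos.netpycos         # enables distributed (networked) pycos
from pycos.dispycos import *  # provides Computation and Scheduler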
Example #8
import sys
import pycos
import pycos.netpycos  # enables distributed (networked) pycos


def rti_1(a, b=1, task=None):
    pycos.logger.debug('running %s/%s with %s, %s', task.name, id(task), a, b)
    msg = yield task.receive()
    if b % 2 == 0:
        yield task.sleep(b)
        pycos.logger.debug('%s/%s done', task.name, id(task))
        # (remote) monitor (if any) gets this exception (to be
        # interpreted as normal termination)
        raise StopIteration(msg)
    else:
        # (remote) monitor (if any) gets this exception, too
        raise Exception('invalid invocation: %s' % b)

pycos.logger.setLevel(pycos.Logger.DEBUG)
# 'secret' is set so only peers that use same secret can communicate
scheduler = pycos.Pycos(name='server', secret='test')
# register rti_1 so remote clients can request execution
rti1 = pycos.RTI(rti_1)
rti1.register()

if sys.version_info.major > 2:
    read_input = input
else:
    read_input = raw_input
while True:
    try:
        line = read_input().strip().lower()
        if line in ('quit', 'exit'):
            break
    except:
        break
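Once the input loop exits, the script can optionally wait for pending tasks and close the scheduler; a hedged one-liner, assuming Pycos.finish waits for remaining tasks and then shuts pycos down.

scheduler.finish()  # assumption: waits for remaining tasks, then closes the scheduler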
Example #9
        # (apparently the tail of 'monitor_proc', the monitor task used in
        # 'rti_test' below; its earlier lines are not part of this excerpt)
        else:
            pycos.logger.warning('ignoring invalid message')


def rti_test(task=None):
    # if server is on remote network, automatic discovery won't work,
    # so add it explicitly
    # yield scheduler.peer('192.168.21.5')

    # get reference to RTI at server
    rti1 = yield pycos.RTI.locate('rti_1')
    print('RTI is at %s' % rti1.location)

    # 5 (remote) tasks are created with rti1
    n = 5
    # set monitor (monitor_proc task) for tasks created for this RTI
    yield rti1.monitor(pycos.Task(monitor_proc, n))

    for i in range(n):
        rtask = yield rti1('test%s' % i, b=i)
        pycos.logger.debug('RTI %s created', rtask)
        # if necessary, a (different) monitor can be set for each rtask
        rtask.send('msg:%s' % i)
        yield task.sleep(random.uniform(0, 1))


pycos.logger.setLevel(pycos.Logger.DEBUG)
# use 'test' secret so peers that use same secret are recognized
scheduler = pycos.Pycos(name='client', secret='test')
pycos.Task(rti_test)
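Only the tail of 'monitor_proc' appears at the top of this example. Below is a hedged reconstruction of its likely shape; the message type is an assumption (newer pycos releases deliver monitor notifications as pycos.MonitorStatus, older ones used MonitorException) and the counting logic is illustrative.

def monitor_proc(n, task=None):
    # receives one notification for each of the n remote tasks made in rti_test
    done = 0
    while done < n:
        msg = yield task.receive()
        if isinstance(msg, pycos.MonitorStatus):  # assumption; see note above
            done += 1
        else:
            pycos.logger.warning('ignoring invalid message')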
Example #10
    # pair the EC2 node with this client:
    yield pycos.Pycos().peer(pycos.Location('54.204.242.185', 51347))
    # if multiple nodes are used, the 'broadcast' option can be used to pair
    # with all of them in one statement:
    # yield pycos.Pycos().peer(pycos.Location('54.204.242.185', 51347), broadcast=True)

    # execute njobs jobs (tasks) and get their results. Note that more jobs can
    # be created than there are server processes available; the scheduler will
    # use as many processes as necessary/available, running one job per server
    # process at a time
    args = [random.uniform(3, 10) for _ in range(njobs)]
    results = yield computation.run_results(compute, args)
    for result in results:
        print('result: %s' % result)

    yield computation.close()


if __name__ == '__main__':
    import sys, random
    # pycos.logger.setLevel(pycos.Logger.DEBUG)
    pycos.Pycos(node='127.0.0.1', tcp_port=4567)
    njobs = 10 if len(sys.argv) == 1 else int(sys.argv[1])
    # if scheduler is not already running (on a node as a program),
    # start private scheduler:
    Scheduler()
    # send 'compute' generator function
    computation = Computation([compute])
    pycos.Task(client_proc, computation, njobs)