Example #1
import os
import sys
import threading
import time

import torcpy as torc


def main():
    global files

    # SPMD execution: torc_py and MPI initialization
    torc.init()

    # SPMD execution: common global initialization takes place here
    if len(sys.argv) == 1:
        if torc.node_id() == 0:
            print("usage: python3 {} <path>".format(
                os.path.basename(__file__)))
        torc.shutdown()
        return

    # get_files() (defined elsewhere in this example) collects the input
    # files under <path>
    files = get_files(sys.argv[1])

    # Switch from SPMD execution to master-worker mode
    torc.launch(None)

    t0 = time.time()
    _ = torc.map(work, range(len(files)))
    t1 = time.time()
    print("t1-t0=", t1 - t0)

    torc.shutdown()


def work(x):
    time.sleep(1)
    y = x**2
    print("work inp={:.3f}, out={:.3f} ...on node {:d} worker {} thread {}".
          format(x, y, torc.node_id(), torc.worker_id(),
                 threading.get_ident()),
          flush=True)
    return y
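
Since torcpy runs on top of MPI, this example is launched like any other MPI
program; a typical invocation (assuming the script is saved as example1.py,
with an arbitrary process count) would be:

    mpirun -n 4 python3 example1.py /path/to/files

All ranks execute main() up to torc.launch(None); from that point rank 0
continues as the primary task while the remaining ranks serve as workers for
torc.map().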
Example #3
import threading
import time

import torcpy as torc


def work(x):
    tid = threading.get_ident()
    time.sleep(1)
    y = x * x
    print("thread {}: work inp={}, out={} ... on node {}".format(
        tid, x, y, torc.node_id()),
          flush=True)
    return y
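
For context, a minimal driver that dispatches this task function might look
as follows; this is a sketch assuming torcpy's start() entry point, which
initializes MPI, runs main() on the first rank and turns the remaining ranks
into workers:

import torcpy as torc

def main():
    # fan the inputs out to the available workers and gather the results
    results = torc.map(work, range(8))
    print("results:", results)

if __name__ == "__main__":
    torc.start(main)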
Example #4
import torcpy as torc

# N (the problem size), A (the shared array), bcast_task() and work()
# are defined at module level elsewhere in this example


def main():
    global A

    # primary task initializes array A on rank 0
    for i in range(0, N):
        A[i] = 100 * i

    # broadcast A to all nodes; the 2nd argument (the broadcast root) is
    # torc.node_id() == 0 on the primary task and can be omitted
    torc.spmd(bcast_task, torc.node_id())

    # execute work() once on every node
    torc.spmd(work)
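
For illustration, bcast_task() could be implemented with a plain MPI
broadcast, since torcpy processes are MPI ranks; this is a hypothetical
sketch assuming mpi4py and a module-level A that exists on every rank:

from mpi4py import MPI

def bcast_task(root=0):
    # hypothetical implementation: replace the local copy of A with the
    # root rank's copy (the actual example may broadcast differently)
    global A
    A = MPI.COMM_WORLD.bcast(A, root=root)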
Example #5
import time
import torcpy as torc


def work(x):
    time.sleep(1)
    y = x * x
    print("taskfun inp={}, out={} ...on node {:d}".format(
        x, y, torc.node_id()), flush=True)
    return y
Example #6
import torcpy as torc

def work():
    # A holds the array broadcast to every node in Example #4
    global A
    print("node:{} -> A={}".format(torc.node_id(), A))