Example #1
    def test_stop_terminates_the_process(self, manager, abort_script):
        """
        Verify that ProcessManager stops a script execution
        """
        helper = PubSubHelper()
        with Manager() as mgr:
            q = mgr.Queue()
            is_running = multiprocessing.Barrier(2)
            pid = manager.create(abort_script,
                                 init_args=ProcedureInput(q, is_running))
            wait_for_state(manager, pid, ProcedureState.READY)
            manager.run(pid, call="main", run_args=ProcedureInput())

            is_running.wait(0.1)
            helper.wait_for_lifecycle(ProcedureState.RUNNING)
            manager.stop(pid)
            helper.wait_for_lifecycle(ProcedureState.STOPPED)
            assert manager.running is None
            assert q.empty()
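The abort_script fixture itself is not shown. A minimal standalone sketch of the same rendezvous-then-terminate pattern with plain multiprocessing (the worker and names below are illustrative, not the fixture):

import multiprocessing

def worker(q, is_running):
    # Signal the parent that we are running, then block until terminated.
    is_running.wait()
    q.get()  # blocks forever; the parent terminates the process

if __name__ == '__main__':
    q = multiprocessing.Queue()
    is_running = multiprocessing.Barrier(2)
    p = multiprocessing.Process(target=worker, args=(q, is_running))
    p.start()
    is_running.wait(timeout=1)  # rendezvous: the worker is now running
    p.terminate()
    p.join()
    assert not p.is_alive() and q.empty()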
Example #2
def test_server_multicall(socket_dir):
    def run_server():
        async def coro():
            class MockServer(Server):
                @rpc_method
                async def mock_method(self):
                    logging.info('Calling mock method - sleep...')
                    await asyncio.sleep(0.1)
                    logging.info('Calling mock method - reply...')
                    return {'success': True}

            await MockServer(socket_dir=socket_dir).run()

        asyncio.run(coro())

    server_process = multiprocessing.Process(target=run_server, daemon=True)
    server_process.start()

    number_of_clients = 10
    barrier = multiprocessing.Barrier(number_of_clients + 1)

    def run_client():
        client = Client(socket_dir=socket_dir)
        barrier.wait(timeout=1)

        response = client.call(server='mock_server',
                               method='mock_method',
                               timeout=0.1)
        assert response['success']

        barrier.wait(timeout=1)

    client_processes = [
        multiprocessing.Process(target=run_client, daemon=True)
        for _ in range(number_of_clients)
    ]
    for process in client_processes:
        process.start()
    barrier.wait(timeout=1)
    barrier.wait(timeout=1)
    for process in client_processes:
        process.join(timeout=1)

    server_process.terminate()
    server_process.join()
Example #3
def _main(unused_argv):
    # create distribute tf cluster
    start_port = PORT_NUM
    SERVER_DICT["ps"].append("localhost:%d" % start_port)
    for i in range(PARALLEL):
        SERVER_DICT["worker"].append("localhost:%d" % (start_port + 1 + i))

    Cluster = tf.train.ClusterSpec(SERVER_DICT)

    now = datetime.now()
    model_path = "./model/" + now.strftime("%Y%m%d-%H%M%S") + "_source/"
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    log_path = "./logs/" + now.strftime("%Y%m%d-%H%M%S") + "_source/"

    UPDATE_GAME_NUM = NUM_FOR_UPDATE
    per_update_num = np.ceil(UPDATE_GAME_NUM / PARALLEL)

    Synchronizer = mp.Barrier(PARALLEL + 1)
    # Run parallel process

    procs = []
    for index in range(PARALLEL):
        p = mp.Process(name="Worker_%d" % index,
                       target=Worker,
                       args=(index, per_update_num, Synchronizer, Cluster,
                             model_path, log_path))
        procs.append(p)
        p.daemon = True
        p.start()
        time.sleep(1)

    max_win_rate, latest_win_rate = Parameter_Server(Synchronizer, Cluster,
                                                     log_path, model_path,
                                                     procs)
    print('#######################')
    print('Best Win_rate:', max_win_rate)
    print('Latest Win_rate:', latest_win_rate)
    print('#######################')

    for p in procs:
        p.join()
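Worker and Parameter_Server are not shown; the barrier is sized PARALLEL + 1 so that all workers plus the parameter server can rendezvous. A hedged sketch of what that handshake could look like (play_games and update_model are hypothetical placeholders):

def Worker(index, per_update_num, synchronizer, *args):
    while True:
        play_games(per_update_num)   # hypothetical: collect this round's rollouts
        synchronizer.wait()          # round done: this worker's data is ready
        synchronizer.wait()          # wait until the parameter server has updated

def Parameter_Server(synchronizer, *args):
    while True:
        synchronizer.wait()          # all PARALLEL workers finished the round
        update_model()               # hypothetical: apply the collected updates
        synchronizer.wait()          # release the workers into the next round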
Example #4
    def execute(self, nodes, num_threads=1):
        """Perform the translation with multiprocessing."""
        if num_threads > len(nodes):
            num_threads = len(nodes)

        if sys.version_info[0] == 2:
            barrier = mooseutils.parallel.Barrier(num_threads)
        else:
            barrier = multiprocessing.Barrier(num_threads)

        jobs = []
        for chunk in mooseutils.make_chunks(nodes, num_threads):
            p = multiprocessing.Process(target=self.__target,
                                        args=(chunk, barrier))
            jobs.append(p)
            p.start()

        for job in jobs:
            job.join()
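self.__target is not shown; a plausible sketch, assuming each worker translates the nodes in its chunk and then waits at the barrier so that no process moves past the phase boundary before the others:

    def __target(self, chunk, barrier):
        """Hypothetical worker body for the processes above."""
        for node in chunk:
            self.translate(node)  # assumed per-node translation step
        barrier.wait()            # synchronize: all chunks finished this phase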
Example #5
    def benchmark(self, settings):
        self.set_sim_settings(settings)
        nprocs = settings["num_processes"]

        barrier = multiprocessing.Barrier(nprocs)
        with multiprocessing.Pool(nprocs,
                                  initializer=self._pool_init,
                                  initargs=(barrier, )) as pool:
            perfs = pool.map(self._bench_target, range(nprocs))

        res = {k: [] for k in perfs[0].keys()}
        for p in perfs:
            for k, v in p.items():
                res[k] += [v]

        return dict(
            frame_time=sum(res["frame_time"]),
            fps=sum(res["fps"]),
            total_time=sum(res["total_time"]) / nprocs,
        )
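The initializer/initargs pair is the standard way to hand a Barrier to Pool workers, since synchronization primitives cannot be pickled through pool.map. A minimal sketch of the two helpers this benchmark assumes (module-level here for clarity; the metric names are illustrative):

_barrier = None

def _pool_init(barrier):
    # Runs once per worker process; stash the barrier in a module global.
    global _barrier
    _barrier = barrier

def _bench_target(rank):
    _barrier.wait()  # all workers enter the timed section together
    # ... run the benchmark for this rank (illustrative metrics below) ...
    return {"frame_time": 0.0, "fps": 0.0, "total_time": 0.0}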
Example #6
def run():
    ns = []
    dm = DurationModel.createFromFile(g_compfile)
    qs = [ [ mp.Queue() for _ in range(dm.P) ] for _ in range(dm.P) ]
    next_qs = qs
    prev_qs = [qs[-1]] + qs[:-1]
 
    man = mp.Manager()
    arrivals = man.list(range(dm.P))
    idsMap = man.list(range(dm.P))
    arrivalsBarrier = mp.Barrier(dm.P)

    for it in range(dm.P):
        n = Node(it, qs, dm, arrivals, arrivalsBarrier, idsMap, g_idsfile)
        n.proc = mp.Process(target=n.run)
        n.proc.start()
        ns.append(n)

    for n in ns:
        n.proc.join()
Example #7
def run(command,
        tthread_path=default_tthread_path(),
        perf_command="perf",
        perf_log="perf.data",
        user=None,
        group=None,
        processor_trace=True,
        snapshot_mode=False,
        additional_cgroups=None,
        perf_event_cgroup=None,
        env=None):

    # Guard against mutable default arguments: additional_cgroups is
    # appended to below, which would otherwise leak state between calls.
    if additional_cgroups is None:
        additional_cgroups = []
    if env is None:
        env = {}

    cgroup_name = "inspector-%d" % os.getpid()

    if perf_event_cgroup is None:
        perf_event_cgroup = cgroups.perf_event(cgroup_name)
        perf_event_cgroup.create()
        remove_cgroup = True
    else:
        remove_cgroup = False

    additional_cgroups.append(perf_event_cgroup)

    barrier = mp.Barrier(2)
    tthread_cmd = tthread.Command(tthread_path=tthread_path,
                                  user=user,
                                  group=group,
                                  cgroups=additional_cgroups,
                                  env=env)
    process = mp.Process(target=tthread_cmd.exec,
                         args=(command, barrier,))
    process.start()

    return perf.run(perf_command,
                    perf_log,
                    barrier,
                    process,
                    perf_event_cgroup,
                    processor_trace=processor_trace,
                    snapshot_mode=snapshot_mode,
                    remove_cgroup=remove_cgroup)
Example #8
    def test_reader_blocks_writer(self):
        with self.lock.for_read():
            before_write = multiprocessing.Barrier(2)

            def test():
                self.assert_readers(1)

                before_write.wait()

                with self.lock.for_write():
                    self.assert_writer()
                    return 'written'

            writer = self.async_(test)  # helper that runs `test` in the background; assumed renamed from "async", now a reserved word

            # Wait until we can confirm that all writers are locked out.
            before_write.wait()
            self.assert_readers(1)

        self.assertEqual('written', self.get_result(writer))
        self.assert_unlocked()
Example #9
 def init_par_objs(self, n_parallel):
     """
     These objects will be inherited by forked subprocesses.
     (Also possible to return these and attach them explicitly within
     subprocesses -- needed on Windows.)
     """
     self.rank = None
     shareds = SimpleContainer(
         feat_mat=np.reshape(
             np.frombuffer(mp.RawArray('d',
                                       (self._vec_dim**2) * n_parallel)),
             (self._vec_dim, self._vec_dim, n_parallel)),
         target_vec=np.reshape(
             np.frombuffer(mp.RawArray('d', self._vec_dim * n_parallel)),
             (self._vec_dim, n_parallel)),
         coeffs=np.frombuffer(mp.RawArray('d', self._vec_dim)),
     )
     barriers = SimpleContainer(
         fit=[mp.Barrier(n_parallel) for _ in range(2)], )
     self._coeffs = shareds.coeffs  # (compatible with inherited predict() method)
     self._par_objs = (shareds, barriers)
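A minimal demonstration of the RawArray + np.frombuffer pattern used above: the shared buffer is created before forking, each worker writes its own column, and a barrier marks when every column is in place (fork start method assumed, as the docstring notes; all names are illustrative):

import multiprocessing as mp
import numpy as np

def demo_shared_rawarray(n_parallel=2, dim=3):
    shared = np.frombuffer(mp.RawArray('d', dim * n_parallel)).reshape(dim, n_parallel)
    barrier = mp.Barrier(n_parallel)

    def worker(rank):
        shared[:, rank] = rank + 1  # write only into our own column
        barrier.wait()              # every column has been written
        if rank == 0:
            print(shared.sum(axis=1))  # reduce across all processes

    procs = [mp.Process(target=worker, args=(r,)) for r in range(n_parallel)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()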
Example #10
    def test_rdmacm_sync_traffic(self):
        syncer = mp.Barrier(2, timeout=5)
        notifier = mp.Queue()
        passive = mp.Process(target=passive_side, args=[self.ip_addr, syncer,
                                                        notifier])
        active = mp.Process(target=active_side, args=[self.ip_addr, syncer,
                                                      notifier])
        passive.start()
        active.start()
        while notifier.empty():
            pass

        for _ in range(2):
            res = notifier.get()
            if res is not None:
                passive.terminate()
                active.terminate()
                raise PyverbsError(res)

        passive.join()
        active.join()
Example #11
def _main(unused_argv):
    """Run agents"""
    maps.get(FLAGS.map)  # Assert the map exists.

    # create distribute tf cluster
    start_port = FLAGS.port_num
    SERVER_DICT["ps"].append("localhost:%d" % start_port)
    for i in range(1):
        SERVER_DICT["worker"].append("localhost:%d" % (start_port + 1 + i))

    Cluster = tf.train.ClusterSpec(SERVER_DICT)

    now = datetime.now()
    model_path = "./model/" + now.strftime("%Y%m%d-%H%M%S") + "_source/"
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    log_path = "./logs/" + now.strftime("%Y%m%d-%H%M%S") + "_source/"

    if FLAGS.restore_model:
        C._LOAD_MODEL_PATH = FLAGS.restore_model_path

    Synchronizer = mp.Barrier(1 + 1)
    # Run parallel process
    procs = []
    for index in range(1):
        p = mp.Process(name="Worker_%d" % index,
                       target=Worker,
                       args=(index, 0, Synchronizer, Cluster, model_path))
        procs.append(p)
        p.daemon = True
        p.start()
        time.sleep(1)

    Parameter_Server(Synchronizer, Cluster, log_path, model_path)

    for p in procs:
        p.join()

    if FLAGS.profile:
        print(stopwatch.sw)
Example #12
def main():
    if len(sys.argv) < 2:
        print("Missing rom name !")
        return
    
    romname = sys.argv[1].encode('ascii')
    ale = ALEInterface()
    ale.loadROM(romname)
    nb_actions = len(ale.getMinimalActionSet()) 

    dqn        = DeepQNet(nb_actions, "mainDQN",   True)
    dqn_critic = DeepQNet(nb_actions, "criticDQN", False)
    
    rwlock = RWLock()
    
    agentpool = []
    
    T = mp.RawValue(ctypes.c_uint)
    T.value = 0
    TLock = mp.Lock()

    learning_rate = 10**-3
    
    barrier = mp.Barrier(constants.nb_agent) 

    if 0:  # disabled: set to 1 to run each agent in its own process
        for i in range(0, constants.nb_agent):
            agentpool.append(mp.Process(target = AgentProcess, args=[rwlock, dqn, dqn_critic, T, TLock, romname, i, learning_rate, barrier]))
        for t in agentpool:
            t.start()

        for t in agentpool:
            t.join()
    else:
        for i in range(0, constants.nb_agent):
            AgentProcess(*[rwlock, dqn, dqn_critic, T, TLock, romname, i, learning_rate, barrier])
    

    dqn.save('network')
Example #13
 def _init_par_objs_batchpolopt(self):
     """
     Any init_par_objs() method in a derived class must call this method,
     and, following that, may append() the SimpleContainer objects as needed.
     """
     n = self.n_parallel
     self.rank = None
     shareds = SimpleContainer(
         sum_discounted_return=mp.RawArray('d', n),
         num_traj=mp.RawArray('i', n),
         sum_return=mp.RawArray('d', n),
         max_return=mp.RawArray('d', n),
         min_return=mp.RawArray('d', n),
         sum_raw_return=mp.RawArray('d', n),
         max_raw_return=mp.RawArray('d', n),
         min_raw_return=mp.RawArray('d', n),
         max_bonus=mp.RawArray('d', n),
         min_bonus=mp.RawArray('d', n),
         sum_bonus=mp.RawArray('d', n),
         sum_path_len=mp.RawArray('i', n),
         max_path_len=mp.RawArray('i', n),
         min_path_len=mp.RawArray('i', n),
         num_steps=mp.RawArray('i', n),
         num_valids=mp.RawArray('d', n),
         sum_ent=mp.RawArray('d', n),
     )
     ##HT: for explained variance (yeah I know it's clumsy)
     shareds.append(baseline_stats=SimpleContainer(
         y_sum_vec=mp.RawArray('d', n),
         y_square_sum_vec=mp.RawArray('d', n),
         y_pred_error_sum_vec=mp.RawArray('d', n),
         y_pred_error_square_sum_vec=mp.RawArray('d', n),
     ))
     barriers = SimpleContainer(dgnstc=mp.Barrier(n), )
     self._par_objs = (shareds, barriers)
     self.baseline.init_par_objs(n_parallel=n)
     if self.exemplar is not None:
         self.exemplar.init_par_objs(n_parallel=n)
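As the docstring prescribes, a derived class's init_par_objs() would call this method first and then append its own shared objects; a hedged sketch (the extra names are illustrative):

    def init_par_objs(self):
        # Derived-class pattern assumed by the docstring above.
        self._init_par_objs_batchpolopt()       # base shared objects and barriers
        shareds, barriers = self._par_objs
        n = self.n_parallel
        shareds.append(extra_stat=mp.RawArray('d', n))  # hypothetical extra shared array
        barriers.append(extra=mp.Barrier(n))            # hypothetical extra barrier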
Example #14
def test_complete_from_multiple_child_processes(capsys):
    logger.add(lambda _: None, enqueue=True, catch=False)
    num = 100

    barrier = multiprocessing.Barrier(num)

    def worker(barrier):
        barrier.wait()
        logger.complete()

    processes = []

    for _ in range(num):
        process = multiprocessing.Process(target=worker, args=(barrier, ))
        process.start()
        processes.append(process)

    for process in processes:
        process.join(5)
        assert process.exitcode == 0

    out, err = capsys.readouterr()
    assert out == err == ""
Example #15
    def benchmark(self, settings, group_id=ABTestGroup.CONTROL):
        self.set_sim_settings(settings)
        nprocs = settings["num_processes"]
        # set it anyway, but only be used in AB_TEST mode
        self._group_id = group_id

        barrier = multiprocessing.Barrier(nprocs)
        with multiprocessing.Pool(nprocs,
                                  initializer=self._pool_init,
                                  initargs=(barrier, )) as pool:
            perfs = pool.map(self._bench_target, range(nprocs))

        res = {k: [] for k in perfs[0].keys()}
        for p in perfs:
            for k, v in p.items():
                res[k] += [v]

        return dict(
            frame_time=sum(res["frame_time"]),
            fps=sum(res["fps"]),
            total_time=sum(res["total_time"]) / nprocs,
            avg_sim_step_time=sum(res["avg_sim_step_time"]) / nprocs,
        )
Example #16
    def test_read_multi_processes(self):
        barrier = multiprocessing.Barrier(2)
        with local.open_zip(os.path.abspath(self.zip_file_path)) as z:
            with z.open(self.testfile_name) as f:
                f.read()

            def func():
                # accessing the shared container isn't supported in v2
                with self.assertRaises(RuntimeError):
                    with z.open(self.testfile_name) as f:
                        barrier.wait()
                        f.read()

            p1 = multiprocessing.Process(target=func)
            p2 = multiprocessing.Process(target=func)
            p1.start()
            p2.start()

            p1.join(timeout=1)
            p2.join(timeout=1)

            self.assertEqual(p1.exitcode, 0)
            self.assertEqual(p2.exitcode, 0)
Example #17
def main():

    barrier = mp.Barrier(4)  # all 4 processes must arrive before any proceeds

    # Create 4 processes, passing a "process_id" to each
    processes = []
    processes.append(
        mp.Process(target=process_function, args=(1, barrier, 1, 1000000)))
    processes.append(
        mp.Process(target=process_function,
                   args=(2, barrier, 1000000, 2000000)))
    processes.append(
        mp.Process(target=process_function,
                   args=(3, barrier, 2000000, 3000000)))
    processes.append(
        mp.Process(target=process_function,
                   args=(4, barrier, 3000000, 4000000)))

    for p in processes:
        p.start()

    for p in processes:
        p.join()
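process_function is not defined in this snippet; a plausible sketch in which each process sums its half-open range and the barrier makes all four start the work at the same time (the summing workload is an assumption):

def process_function(process_id, barrier, start, end):
    barrier.wait()  # wait until all four processes are ready, then start together
    total = sum(range(start, end))
    print("process %d: sum of [%d, %d) = %d" % (process_id, start, end, total))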
Example #18
    def test_read_multi_processes(self):
        barrier = multiprocessing.Barrier(2)
        with self.fs_handler.open_as_container(
                os.path.abspath(self.zip_file_path)) as handler:
            with handler.open(self.testfile_name) as f:
                f.read()

            def func():
                # accessing the shared container
                with handler.open(self.testfile_name) as f:
                    barrier.wait()
                    f.read()

            p1 = multiprocessing.Process(target=func)
            p2 = multiprocessing.Process(target=func)
            p1.start()
            p2.start()

            p1.join(timeout=1)
            p2.join(timeout=1)

            self.assertEqual(p1.exitcode, 0)
            self.assertEqual(p2.exitcode, 0)
Example #19
def test_server_client():
    prepare_dist()
    g = create_random_graph(10000)

    # Partition the graph
    num_parts = 1
    graph_name = 'dist_graph_test_2'
    g.ndata['features'] = F.unsqueeze(F.arange(0, g.number_of_nodes()), 1)
    g.edata['features'] = F.unsqueeze(F.arange(0, g.number_of_edges()), 1)
    partition_graph(g, graph_name, num_parts, '/tmp/dist_graph')

    # let's just test on one partition for now.
    # We cannot run multiple servers and clients on the same machine.
    barrier = mp.Barrier(2)
    serv_ps = []
    ctx = mp.get_context('spawn')
    for serv_id in range(1):
        p = ctx.Process(target=run_server, args=(graph_name, serv_id, 1, barrier))
        serv_ps.append(p)
        p.start()

    cli_ps = []
    for cli_id in range(1):
        print('start client', cli_id)
        p = ctx.Process(target=run_client, args=(graph_name, barrier,
                                                 g.number_of_nodes(),
                                                 g.number_of_edges()))
        p.start()
        cli_ps.append(p)

    for p in cli_ps:
        p.join()

    for p in serv_ps:
        p.join()

    print('clients have terminated')
Example #20
def test_recreate():

    with tempfile.TemporaryDirectory() as d:
        zipfilename = os.path.join(d, "test.zip")
        z = ZipForTest(zipfilename)
        barrier = mp.Barrier(1)

        with lazify(lambda: from_url(zipfilename)) as f:
            with f.open('file', 'rb') as fp:
                content = fp.read()
                assert content
                assert z.content('file') == content

            def func():
                # accessing the shared container
                with f.open('file', 'rb') as fp:
                    barrier.wait()
                    assert content == fp.read()

            p = mp.Process(target=func)
            p.start()

            p.join(timeout=1)
            assert p.exitcode == 0
Example #21
def run(args, logger):
    init_time_start = time.time()
    # load dataset and samplers
    dataset = get_dataset(args.data_path, args.dataset, args.format,
                          args.data_files)

    if args.neg_sample_size_eval < 0:
        args.neg_sample_size_eval = dataset.n_entities
    args.batch_size = get_compatible_batch_size(args.batch_size,
                                                args.neg_sample_size)
    args.batch_size_eval = get_compatible_batch_size(args.batch_size_eval,
                                                     args.neg_sample_size_eval)

    args.eval_filter = not args.no_eval_filter
    if args.neg_deg_sample_eval:
        assert not args.eval_filter, "if negative sampling based on degree, we can't filter positive edges."

    train_data = TrainDataset(dataset, args, ranks=args.num_proc)
    # if there is no cross-partition relation, we fall back to strict_rel_part
    args.strict_rel_part = args.mix_cpu_gpu and (train_data.cross_part
                                                 == False)
    args.soft_rel_part = args.mix_cpu_gpu and args.soft_rel_part and train_data.cross_part
    args.num_workers = 8  # fix num_worker to 8

    if args.num_proc > 1:
        train_samplers = []
        for i in range(args.num_proc):
            train_sampler_head = train_data.create_sampler(
                args.batch_size,
                args.neg_sample_size,
                args.neg_sample_size,
                mode='head',
                num_workers=args.num_workers,
                shuffle=True,
                exclude_positive=False,
                rank=i)
            train_sampler_tail = train_data.create_sampler(
                args.batch_size,
                args.neg_sample_size,
                args.neg_sample_size,
                mode='tail',
                num_workers=args.num_workers,
                shuffle=True,
                exclude_positive=False,
                rank=i)
            train_samplers.append(
                NewBidirectionalOneShotIterator(train_sampler_head,
                                                train_sampler_tail,
                                                args.neg_sample_size,
                                                args.neg_sample_size, True,
                                                dataset.n_entities))

        train_sampler = NewBidirectionalOneShotIterator(
            train_sampler_head, train_sampler_tail, args.neg_sample_size,
            args.neg_sample_size, True, dataset.n_entities)
    else:  # This is used for debug
        train_sampler_head = train_data.create_sampler(
            args.batch_size,
            args.neg_sample_size,
            args.neg_sample_size,
            mode='head',
            num_workers=args.num_workers,
            shuffle=True,
            exclude_positive=False)
        train_sampler_tail = train_data.create_sampler(
            args.batch_size,
            args.neg_sample_size,
            args.neg_sample_size,
            mode='tail',
            num_workers=args.num_workers,
            shuffle=True,
            exclude_positive=False)
        train_sampler = NewBidirectionalOneShotIterator(
            train_sampler_head, train_sampler_tail, args.neg_sample_size,
            args.neg_sample_size, True, dataset.n_entities)

    if args.valid or args.test:
        if len(args.gpu) > 1:
            args.num_test_proc = args.num_proc if args.num_proc < len(
                args.gpu) else len(args.gpu)
        else:
            args.num_test_proc = args.num_proc
        eval_dataset = EvalDataset(dataset, args)

    if args.valid:
        if args.num_proc > 1:
            valid_sampler_heads = []
            valid_sampler_tails = []
            for i in range(args.num_proc):
                valid_sampler_head = eval_dataset.create_sampler(
                    'valid',
                    args.batch_size_eval,
                    args.neg_sample_size_eval,
                    args.neg_sample_size_eval,
                    args.eval_filter,
                    mode='chunk-head',
                    num_workers=args.num_workers,
                    rank=i,
                    ranks=args.num_proc)
                valid_sampler_tail = eval_dataset.create_sampler(
                    'valid',
                    args.batch_size_eval,
                    args.neg_sample_size_eval,
                    args.neg_sample_size_eval,
                    args.eval_filter,
                    mode='chunk-tail',
                    num_workers=args.num_workers,
                    rank=i,
                    ranks=args.num_proc)
                valid_sampler_heads.append(valid_sampler_head)
                valid_sampler_tails.append(valid_sampler_tail)
        else:  # This is used for debug
            valid_sampler_head = eval_dataset.create_sampler(
                'valid',
                args.batch_size_eval,
                args.neg_sample_size_eval,
                args.neg_sample_size_eval,
                args.eval_filter,
                mode='chunk-head',
                num_workers=args.num_workers,
                rank=0,
                ranks=1)
            valid_sampler_tail = eval_dataset.create_sampler(
                'valid',
                args.batch_size_eval,
                args.neg_sample_size_eval,
                args.neg_sample_size_eval,
                args.eval_filter,
                mode='chunk-tail',
                num_workers=args.num_workers,
                rank=0,
                ranks=1)
    if args.test:
        if args.num_test_proc > 1:
            test_sampler_tails = []
            test_sampler_heads = []
            for i in range(args.num_test_proc):
                test_sampler_head = eval_dataset.create_sampler(
                    'test',
                    args.batch_size_eval,
                    args.neg_sample_size_eval,
                    args.neg_sample_size_eval,
                    args.eval_filter,
                    mode='chunk-head',
                    num_workers=args.num_workers,
                    rank=i,
                    ranks=args.num_test_proc)
                test_sampler_tail = eval_dataset.create_sampler(
                    'test',
                    args.batch_size_eval,
                    args.neg_sample_size_eval,
                    args.neg_sample_size_eval,
                    args.eval_filter,
                    mode='chunk-tail',
                    num_workers=args.num_workers,
                    rank=i,
                    ranks=args.num_test_proc)
                test_sampler_heads.append(test_sampler_head)
                test_sampler_tails.append(test_sampler_tail)
        else:
            test_sampler_head = eval_dataset.create_sampler(
                'test',
                args.batch_size_eval,
                args.neg_sample_size_eval,
                args.neg_sample_size_eval,
                args.eval_filter,
                mode='chunk-head',
                num_workers=args.num_workers,
                rank=0,
                ranks=1)
            test_sampler_tail = eval_dataset.create_sampler(
                'test',
                args.batch_size_eval,
                args.neg_sample_size_eval,
                args.neg_sample_size_eval,
                args.eval_filter,
                mode='chunk-tail',
                num_workers=args.num_workers,
                rank=0,
                ranks=1)

    # load model
    model = load_model(logger, args, dataset.n_entities, dataset.n_relations)
    if args.num_proc > 1 or args.async_update:
        model.share_memory()

    # We need to free all memory referenced by dataset.
    eval_dataset = None
    dataset = None

    print('Total initialize time {:.3f} seconds'.format(time.time() -
                                                        init_time_start))

    # train
    start = time.time()
    rel_parts = train_data.rel_parts if args.strict_rel_part or args.soft_rel_part else None
    cross_rels = train_data.cross_rels if args.soft_rel_part else None
    if args.num_proc > 1:
        procs = []
        barrier = mp.Barrier(args.num_proc)
        for i in range(args.num_proc):
            valid_sampler = [valid_sampler_heads[i], valid_sampler_tails[i]
                             ] if args.valid else None
            proc = mp.Process(target=train_mp,
                              args=(args, model, train_samplers[i],
                                    valid_sampler, i, rel_parts, cross_rels,
                                    barrier))
            procs.append(proc)
            proc.start()
        for proc in procs:
            proc.join()
    else:
        valid_samplers = [valid_sampler_head, valid_sampler_tail
                          ] if args.valid else None
        train(args, model, train_sampler, valid_samplers, rel_parts=rel_parts)

    print('training takes {} seconds'.format(time.time() - start))

    if args.save_emb is not None:
        if not os.path.exists(args.save_emb):
            os.mkdir(args.save_emb)
        model.save_emb(args.save_emb, args.dataset)

        # We need to save the model configurations as well.
        conf_file = os.path.join(args.save_emb, 'config.json')
        with open(conf_file, 'w') as outfile:
            json.dump(
                {
                    'dataset': args.dataset,
                    'model': args.model_name,
                    'emb_size': args.hidden_dim,
                    'max_train_step': args.max_step,
                    'batch_size': args.batch_size,
                    'neg_sample_size': args.neg_sample_size,
                    'lr': args.lr,
                    'gamma': args.gamma,
                    'double_ent': args.double_ent,
                    'double_rel': args.double_rel,
                    'neg_adversarial_sampling': args.neg_adversarial_sampling,
                    'adversarial_temperature': args.adversarial_temperature,
                    'regularization_coef': args.regularization_coef,
                    'regularization_norm': args.regularization_norm
                },
                outfile,
                indent=4)

    # test
    if args.test:
        start = time.time()
        if args.num_test_proc > 1:
            queue = mp.Queue(args.num_test_proc)
            procs = []
            for i in range(args.num_test_proc):
                proc = mp.Process(target=test_mp,
                                  args=(args, model, [
                                      test_sampler_heads[i],
                                      test_sampler_tails[i]
                                  ], i, 'Test', queue))
                procs.append(proc)
                proc.start()

            total_metrics = {}
            metrics = {}
            logs = []
            for i in range(args.num_test_proc):
                log = queue.get()
                logs = logs + log

            for metric in logs[0].keys():
                metrics[metric] = sum([log[metric]
                                       for log in logs]) / len(logs)
            for k, v in metrics.items():
                print('Test average {} : {}'.format(k, v))

            for proc in procs:
                proc.join()
        else:
            test(args, model, [test_sampler_head, test_sampler_tail])
        print('testing takes {:.3f} seconds'.format(time.time() - start))
Example #22
def run(args):
    startTime = time.time()

    print("Main: Main process.")
    
    # Generate name lists.
    poseFileExpNum, nameList0, nameList1, poseParams = generate_name_lists(
        args.infile0, args.flow, args.test_n )

    # Prepare the filenames.
    files0, files1 = prepare_filenames(
        nameList0, nameList1, 
        args.idx, args.skip, args.outdir)

    nFiles = len(files0)

    # Get the Azure container client.
    cClient = get_azure_container_client( 
        args.download_env, args.download_c )

    # Check if we will perform uploading at the same time.
    cClientUpload = None
    if ( args.upload ):
        cClientUpload = get_azure_container_client( 
            args.upload_env, args.upload_c )

    # Prepare for the job queue.
    jobQ    = multiprocessing.JoinableQueue()
    resultQ = multiprocessing.Queue()

    # Barriers.
    barrierDownload = multiprocessing.Barrier(args.np)
    barrierUpload   = multiprocessing.Barrier(args.np)

    nJobs, bJobs = ceil_integer( nFiles, args.np ) # bJobs is the number of padding jobs for the barriers.
    print("Main: nJobs: %d, bJobs: %d. " % ( nJobs, bJobs ))

    # Create all the worker processes.
    processes = []
    pipes     = []
    print("Main: Create %d processes." % (args.np))
    for i in range(int(args.np)):
        [conn1, conn2] = multiprocessing.Pipe(False)
        processes.append( 
            multiprocessing.Process( 
                target=worker, 
                args=["P%03d" % (i), jobQ, conn1, resultQ, 
                    barrierDownload, barrierUpload, 
                    cClient, args.upload, cClientUpload, 
                    args.npy_force_convert, not args.disable_child_silent] ) )
        pipes.append(conn2)

    for p in processes:
        p.start()

    print("Main: All processes started.")

    # Submit all actual jobs.
    for i in range(nFiles):
        jobQ.put([ files0[i], files1[i], args.outdir, "null" ])

    # Submit the padding jobs.
    for i in range(bJobs):
        jobQ.put( [ "null", "null", "null", "Barrier" ] )

    # Note that nJobs = nFiles + bJobs.

    print("Main: All jobs submitted.")

    # Main process starts to handle the messages in the result queue.
    resultList = []
    resultCount = 0
    while resultCount < nJobs:
        try:
            r = resultQ.get(block=True, timeout=1)
            resultList.append(r)
            resultCount += 1
        except Empty as exp:
            if ( resultCount == nJobs ):
                print("Main: Last element of the result queue is reached.")
                break
            else:
                print("%sMain: Wait on rq-index %d. " % (args.main_prefix, resultCount))
                time.sleep(0.5)

    # Break all barriers.
    barrierUpload.abort()
    barrierDownload.abort()

    # Main process waits until all workers are done. The queue should always join without blocking for long.
    jobQ.join()

    print("Main: Queue joined.")

    # Send commands to terminate all worker processes.
    for p in pipes:
        p.send("exit")

    print("Main: Exit command sent to all processes.")

    for p in processes:
        p.join()

    print("Main: All processes joined.")

    print("Main: Starts process the result.")
    resFn = "%s/RQ_%s.txt" % ( args.outdir, args.zipname )
    main_write_result_queue_2_file( resFn, resultList )

    # Get the pose files from the server.
    print("Main: Get pose files...........")
    poses, totalPosesSize = get_pose_objects(cClient, poseParams)

    # Write the pose files to local filesystem.
    write_poses(poses, args.outdir)

    if ( poseFileExpNum != totalPosesSize ):
        raise Exception("poseFileExpNumiles != totalPosesSize. poseFileExpNum = %d, totalPosesSize = %d. " % 
            ( poseFileExpNum, totalPosesSize ) )

    # Upload the pose files.
    if ( args.upload ):
        print("Main: Uploading the pose files.")
        upload_pose_files(cClientUpload, poses, args.outdir)

    if ( args.zip ):
        # Zip.
        print("Main: Start zipping...")
        zipFn = "%s/%s.zip" % (args.outdir, args.zipname)
        test_directory_by_filename(zipFn)
        z = zipfile.ZipFile(zipFn, "w")
        for f in files1:
            z.write( "%s/%s" % (args.outdir, f) )
        
        # Pose files.
        for pose in poses:
            z.write( "%s/%s" % ( args.outdir, pose["targetLeftFile"] ) )
            z.write( "%s/%s" % ( args.outdir, pose["targetRightFile"] ) )

        z.close()

        # Uploading the zip file.
        if ( args.upload ):
            bZipFn = "%s.zip" % (args.zipname) # Blob name.
            print("Main: Uploading %s. " % (bZipFn))
            upload_file_2_blob( cClientUpload, bZipFn, zipFn, args.upload_zip_overwrite )

        if ( args.remove_zip ):
            print("Main: Removing the zip file... ")
            if ( os.path.isfile(zipFn) ):
                os.remove(zipFn)
            else:
                print("Main: %s dose not exist. " % (zipFn))

    if ( args.remove_temporary_files ):
        print("Main: Removing the temporary files... ")

        # Remove the pose files.
        for pose in poses:
            os.remove( "%s/%s" % ( args.outdir, pose["targetLeftFile"] ) )
            os.remove( "%s/%s" % ( args.outdir, pose["targetRightFile"] ) )

        # Delete the temporary files.
        prefix = get_leading_directory(files1[0])
        removeDir = "%s/%s" % ( args.outdir, prefix )
        removeDir = removeDir.strip()

        # Check if removeDir is /.
        if ( "/" == removeDir ):
            raise Exception("Remove /. ")

        shutil.rmtree(removeDir)

    endTime = time.time()

    print("Main: Download-upload job done. Total time is %ds." % (endTime - startTime))
Example #23
    for i in range(PARALLEL):
        SERVER_DICT["worker"].append("localhost:%d" % (start_port + 1 + i))

    Cluster = tf.train.ClusterSpec(SERVER_DICT)

    now = datetime.now()
    model_path = "./model/" + now.strftime(
        "%Y%m%d-%H%M%S") + "_" + FLAGS.type + "/"
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    LOG = "./logs/" + now.strftime("%Y%m%d-%H%M%S") + "_" + FLAGS.type + "/"

    UPDATE_GAME_NUM = NUM_FOR_UPDATE
    per_update_num = np.ceil(UPDATE_GAME_NUM / PARALLEL)

    Synchronizer = mp.Barrier(PARALLEL + 1)
    # Run parallel process
    procs = []
    for index in range(PARALLEL):
        p = mp.Process(name="Worker_%d" % index,
                       target=Worker,
                       args=(index, per_update_num, Synchronizer, Cluster,
                             model_path))
        procs.append(p)
        p.daemon = True
        p.start()
        time.sleep(1)

    Parameter_Server(Synchronizer, Cluster, LOG, model_path, procs)

    # for p in procs:
Example #24
def main():
    args = ArgParser().parse_args()
    prepare_save_path(args)

    init_time_start = time.time()
    # load dataset and samplers
    dataset = get_dataset(args.data_path, args.dataset, args.format,
                          args.delimiter, args.data_files,
                          args.has_edge_importance)

    if args.neg_sample_size_eval < 0:
        args.neg_sample_size_eval = dataset.n_entities
    args.batch_size = get_compatible_batch_size(args.batch_size,
                                                args.neg_sample_size)
    args.batch_size_eval = get_compatible_batch_size(args.batch_size_eval,
                                                     args.neg_sample_size_eval)
    # We should turn on mix CPU-GPU training for multi-GPU training.
    if len(args.gpu) > 1:
        args.mix_cpu_gpu = True
        if args.num_proc < len(args.gpu):
            args.num_proc = len(args.gpu)
    # We need to ensure that the number of processes should match the number of GPUs.
    if len(args.gpu) > 1 and args.num_proc > 1:
        assert args.num_proc % len(args.gpu) == 0, \
                'The number of processes needs to be divisible by the number of GPUs'
    # For multiprocessing training, we need to ensure that training processes are synchronized periodically.
    if args.num_proc > 1:
        args.force_sync_interval = 1000

    args.eval_filter = not args.no_eval_filter
    if args.neg_deg_sample_eval:
        assert not args.eval_filter, "if negative sampling based on degree, we can't filter positive edges."

    args.soft_rel_part = args.mix_cpu_gpu and args.rel_part
    train_data = TrainDataset(dataset,
                              args,
                              ranks=args.num_proc,
                              has_importance=args.has_edge_importance)
    # if there is no cross-partition relation, we fall back to strict_rel_part
    args.strict_rel_part = args.mix_cpu_gpu and (train_data.cross_part
                                                 == False)
    args.num_workers = 8  # fix num_worker to 8

    if args.num_proc > 1:
        train_samplers = []
        for i in range(args.num_proc):
            # for each GPU, allocate num_proc // num_GPU processes
            train_sampler_head = train_data.create_sampler(
                args.batch_size,
                args.neg_sample_size,
                args.neg_sample_size,
                mode='head',
                num_workers=args.num_workers,
                shuffle=True,
                exclude_positive=False,
                rank=i)
            train_sampler_tail = train_data.create_sampler(
                args.batch_size,
                args.neg_sample_size,
                args.neg_sample_size,
                mode='tail',
                num_workers=args.num_workers,
                shuffle=True,
                exclude_positive=False,
                rank=i)
            train_samplers.append(
                NewBidirectionalOneShotIterator(train_sampler_head,
                                                train_sampler_tail,
                                                args.neg_sample_size,
                                                args.neg_sample_size, True,
                                                dataset.n_entities,
                                                args.has_edge_importance))

        train_sampler = NewBidirectionalOneShotIterator(
            train_sampler_head, train_sampler_tail, args.neg_sample_size,
            args.neg_sample_size, True, dataset.n_entities,
            args.has_edge_importance)
    else:  # This is used for debug
        train_sampler_head = train_data.create_sampler(
            args.batch_size,
            args.neg_sample_size,
            args.neg_sample_size,
            mode='head',
            num_workers=args.num_workers,
            shuffle=True,
            exclude_positive=False)
        train_sampler_tail = train_data.create_sampler(
            args.batch_size,
            args.neg_sample_size,
            args.neg_sample_size,
            mode='tail',
            num_workers=args.num_workers,
            shuffle=True,
            exclude_positive=False)
        train_sampler = NewBidirectionalOneShotIterator(
            train_sampler_head, train_sampler_tail, args.neg_sample_size,
            args.neg_sample_size, True, dataset.n_entities,
            args.has_edge_importance)

    if args.valid or args.test:
        if len(args.gpu) > 1:
            args.num_test_proc = args.num_proc if args.num_proc < len(
                args.gpu) else len(args.gpu)
        else:
            args.num_test_proc = args.num_proc
        if args.valid:
            assert dataset.valid is not None, 'validation set is not provided'
        if args.test:
            assert dataset.test is not None, 'test set is not provided'
        eval_dataset = EvalDataset(dataset, args)

    if args.valid:
        if args.num_proc > 1:
            valid_sampler_heads = []
            valid_sampler_tails = []
            for i in range(args.num_proc):
                valid_sampler_head = eval_dataset.create_sampler(
                    'valid',
                    args.batch_size_eval,
                    args.neg_sample_size_eval,
                    args.neg_sample_size_eval,
                    args.eval_filter,
                    mode='chunk-head',
                    num_workers=args.num_workers,
                    rank=i,
                    ranks=args.num_proc)
                valid_sampler_tail = eval_dataset.create_sampler(
                    'valid',
                    args.batch_size_eval,
                    args.neg_sample_size_eval,
                    args.neg_sample_size_eval,
                    args.eval_filter,
                    mode='chunk-tail',
                    num_workers=args.num_workers,
                    rank=i,
                    ranks=args.num_proc)
                valid_sampler_heads.append(valid_sampler_head)
                valid_sampler_tails.append(valid_sampler_tail)
        else:  # This is used for debug
            valid_sampler_head = eval_dataset.create_sampler(
                'valid',
                args.batch_size_eval,
                args.neg_sample_size_eval,
                args.neg_sample_size_eval,
                args.eval_filter,
                mode='chunk-head',
                num_workers=args.num_workers,
                rank=0,
                ranks=1)
            valid_sampler_tail = eval_dataset.create_sampler(
                'valid',
                args.batch_size_eval,
                args.neg_sample_size_eval,
                args.neg_sample_size_eval,
                args.eval_filter,
                mode='chunk-tail',
                num_workers=args.num_workers,
                rank=0,
                ranks=1)
    if args.test:
        if args.num_test_proc > 1:
            test_sampler_tails = []
            test_sampler_heads = []
            for i in range(args.num_test_proc):
                test_sampler_head = eval_dataset.create_sampler(
                    'test',
                    args.batch_size_eval,
                    args.neg_sample_size_eval,
                    args.neg_sample_size_eval,
                    args.eval_filter,
                    mode='chunk-head',
                    num_workers=args.num_workers,
                    rank=i,
                    ranks=args.num_test_proc)
                test_sampler_tail = eval_dataset.create_sampler(
                    'test',
                    args.batch_size_eval,
                    args.neg_sample_size_eval,
                    args.neg_sample_size_eval,
                    args.eval_filter,
                    mode='chunk-tail',
                    num_workers=args.num_workers,
                    rank=i,
                    ranks=args.num_test_proc)
                test_sampler_heads.append(test_sampler_head)
                test_sampler_tails.append(test_sampler_tail)
        else:
            test_sampler_head = eval_dataset.create_sampler(
                'test',
                args.batch_size_eval,
                args.neg_sample_size_eval,
                args.neg_sample_size_eval,
                args.eval_filter,
                mode='chunk-head',
                num_workers=args.num_workers,
                rank=0,
                ranks=1)
            test_sampler_tail = eval_dataset.create_sampler(
                'test',
                args.batch_size_eval,
                args.neg_sample_size_eval,
                args.neg_sample_size_eval,
                args.eval_filter,
                mode='chunk-tail',
                num_workers=args.num_workers,
                rank=0,
                ranks=1)

    # load model
    model = load_model(args, dataset.n_entities, dataset.n_relations)
    if args.num_proc > 1 or args.async_update:
        model.share_memory()

    emap_file = dataset.emap_fname
    rmap_file = dataset.rmap_fname
    # We need to free all memory referenced by dataset.
    eval_dataset = None
    dataset = None

    print('Total initialize time {:.3f} seconds'.format(time.time() -
                                                        init_time_start))

    # train
    start = time.time()
    rel_parts = train_data.rel_parts if args.strict_rel_part or args.soft_rel_part else None
    cross_rels = train_data.cross_rels if args.soft_rel_part else None
    if args.num_proc > 1:
        procs = []
        barrier = mp.Barrier(args.num_proc)
        for i in range(args.num_proc):
            valid_sampler = [valid_sampler_heads[i], valid_sampler_tails[i]
                             ] if args.valid else None
            proc = mp.Process(target=train_mp,
                              args=(args, model, train_samplers[i],
                                    valid_sampler, i, rel_parts, cross_rels,
                                    barrier))
            procs.append(proc)
            proc.start()
        for proc in procs:
            proc.join()
    else:
        valid_samplers = [valid_sampler_head, valid_sampler_tail
                          ] if args.valid else None
        train(args, model, train_sampler, valid_samplers, rel_parts=rel_parts)

    print('training takes {} seconds'.format(time.time() - start))

    if not args.no_save_emb:
        save_model(args, model, emap_file, rmap_file)

    # test
    if args.test:
        start = time.time()
        if args.num_test_proc > 1:
            queue = mp.Queue(args.num_test_proc)
            procs = []
            for i in range(args.num_test_proc):
                proc = mp.Process(target=test_mp,
                                  args=(args, model, [
                                      test_sampler_heads[i],
                                      test_sampler_tails[i]
                                  ], i, 'Test', queue))
                procs.append(proc)
                proc.start()

            total_metrics = {}
            metrics = {}
            logs = []
            for i in range(args.num_test_proc):
                log = queue.get()
                logs = logs + log

            for metric in logs[0].keys():
                metrics[metric] = sum([log[metric]
                                       for log in logs]) / len(logs)
            print("-------------- Test result --------------")
            for k, v in metrics.items():
                print('Test average {} : {}'.format(k, v))
            print("-----------------------------------------")

            for proc in procs:
                proc.join()
        else:
            test(args, model, [test_sampler_head, test_sampler_tail])
        print('testing takes {:.3f} seconds'.format(time.time() - start))
Example #25
    n = 4  # Number of processes in the group

    # Check for command line parameters m, n.
    if len(sys.argv) > 2:
        m = int(sys.argv[1])
        n = int(sys.argv[2])

    # Flush communication channel
    chan = lab_channel.Channel()
    chan.channel.flushall()

    # We need to spawn processes to support Windows.
    mp.set_start_method('spawn')

    # Create barriers to synchronize bootstrapping
    bar1 = mp.Barrier(n)  # Wait for channel population to complete
    bar2 = mp.Barrier(n)  # Wait for process-group init to complete

    # start n competing peers in separate processes
    children = []
    for i in range(n):
        peer_proc = mp.Process(target=create_and_run,
                               name="Peer-" + str(i),
                               args=(m, Process, bar1, bar2))
        children.append(peer_proc)
        peer_proc.start()

    # terminate a random process after some time (10 seconds)
    time.sleep(10)
    proc_id = random.randint(0, len(children) - 1)
    proc_to_crash = children[proc_id]
Example #26
                           spi=SPI.SpiDev(port=0,
                                          device=0,
                                          max_speed_hz=64 * 1000000))
        R_eye = TFT.ST7735(rst=6,
                           dc=12,
                           x_offset=2,
                           y_offset=3,
                           rotate=180,
                           spi=SPI.SpiDev(port=0,
                                          device=1,
                                          max_speed_hz=64 * 1000000))
        # Pipes for inter-process communication
        Lpup, Lsup = mp.Pipe()
        Rpup, Rsup = mp.Pipe()

        barrier1 = mp.Barrier(2, timeout=2)
        barrier2 = mp.Barrier(3, timeout=2)

        left = mp.Process(target=lcd, args=(L_eye, Lsup))
        right = mp.Process(target=lcd, args=(R_eye, Rsup))

        left.start()
        right.start()
        # Main process: read input and forward it to the eye processes
        print("change function on")
        while True:
            for n, name in enumerate(imgl):
                print(n, name)
            try:
                i = int(input())
            except:
Example #27
    def floyd_warshall_predecessor_and_distance(self):
        """

        Parallel Floyd Warshall's APSP algorithm. The predecessors
        and distance matrices are evaluated, together with the nested
        dictionaries for shortest-path, length of the paths and
        efficiency attributes.

        .. note:: Edge weights are taken into account in the distance matrix.
            Edge weight attributes must be numerical. Distances are calculated
            as sums of the weights of the edges traversed.

        :return: a nested dictionary keyed by source, whose values are
            dictionaries keyed by target and valued by the source-target
            shortest path; and a second nested dictionary of the same shape
            valued by the source-target shortest path length.
        :rtype: dict, dict
        """

        dist, pred = self.floyd_warshall_initialization()

        shared_d = mp.sharedctypes.RawArray(ctypes.c_double, dist.shape[0]**2)
        dist_shared = np.frombuffer(shared_d, 'float64').reshape(dist.shape)
        dist_shared[:] = dist

        shared_p = mp.sharedctypes.RawArray(ctypes.c_double, pred.shape[0]**2)
        pred_shared = np.frombuffer(shared_p, 'float64').reshape(pred.shape)
        pred_shared[:] = pred

        n = len(self.nodes())
        chunk = [(0, int(n / self.num))]
        node_chunks = chunk_it(list(self.nodes()), self.num)

        for i in range(1, self.num):
            chunk.append((chunk[i - 1][1],
                          chunk[i - 1][1] + len(node_chunks[i])))

        barrier = mp.Barrier(self.num)
        processes = [
            mp.Process( target=self.floyd_warshall_kernel,
            args=(dist_shared, pred_shared, chunk[p][0], chunk[p][1], barrier))
            for p in range(self.num) ]

        for proc in processes:
            proc.start()

        for proc in processes:
            proc.join()

        all_shortest_path = self.manager.dict()

        processes = [
            mp.Process( target=self.measure_iteration,
            args=(list(map(self.ids_reversed.get, node_chunks[p])),
                all_shortest_path, self.construct_path_kernel, pred_shared) )
            for p in range(self.num) ]

        for proc in processes:
            proc.start()

        for proc in processes:
            proc.join()

        nonempty_shortest_path = {}
        for k in all_shortest_path.keys():
            nonempty_shortest_path[k] = {
                key: value
                for key, value in all_shortest_path[k].items() if value
            }

        shortest_path_length = {}
        for i in list(self.H):

            shortest_path_length[self.ids[i]] = {}

            for key, value in nonempty_shortest_path[self.ids[i]].items():
                length_path = dist_shared[self.ids_reversed[value[0]],
                                          self.ids_reversed[value[-1]]]
                shortest_path_length[self.ids[i]][key] = length_path

        return nonempty_shortest_path, shortest_path_length
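floyd_warshall_kernel is referenced but not shown. As a sketch of how such a kernel can use the barrier: each worker updates its band of rows for intermediate node k, and the barrier ensures no worker starts round k + 1 while another is still reading round-k distances (a sketch under those assumptions, not necessarily the class's actual implementation):

    def floyd_warshall_kernel(self, dist, pred, start, stop, barrier):
        # dist and pred are the shared (n x n) matrices built above;
        # this worker owns rows [start, stop).
        n = dist.shape[0]
        for k in range(n):
            for i in range(start, stop):
                for j in range(n):
                    if dist[i, k] + dist[k, j] < dist[i, j]:
                        dist[i, j] = dist[i, k] + dist[k, j]
                        pred[i, j] = pred[k, j]
            barrier.wait()  # round k finished everywhere before round k + 1 begins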
Example #28
    try:
        mqmarket = sysv_ipc.MessageQueue(keyMarket)
    except sysv_ipc.ExistentialError:
        print("Cannot connect to MQ", keyMarket)
        sys.exit(1)

    try:
        mqhome = sysv_ipc.MessageQueue(keyHome)
    except sysv_ipc.ExistentialError:
        print("Cannot connect to MQ", keyHome)
        sys.exit(1)

    # Retrieve the home number from the command line
    nMaison = int(sys.argv[1])
    # Create the synchronization barrier
    b = multiprocessing.Barrier(nMaison)
    # Home PID table
    pidProcesses = []
    # Lock and shared variable nbEchange to count the donations
    lock = multiprocessing.Lock()
    nbEchange = 0
    homeExchange = multiprocessing.Value('d', nbEchange)

    # Launch the houses
    for x in range(nMaison):
        InitProd = random.randrange(100, 1000, 100)
        ConsoRate = random.randrange(100, 1000, 100)
        # 0 to Always give away, 1 to Always sell, 2 to Sell if no takers
        SalePol = random.randrange(0, 3, 1)
        p = multiprocessing.Process(target=maison,
                                    args=(InitProd, ConsoRate, SalePol, mqhome,
Example #29
    def run(self):
        modules = len(self.script_info)
        barrierWait = modules + 1
        self.flags = {}
        self.flags["simulation_active"] = mp.Event()
        self.flags["simulation_next"] = mp.Barrier(barrierWait)
        self.flags["simulation_stop"] = mp.Event()
        self.flags["Module_done"] = mp.Barrier(barrierWait)
        self.flags["simulation_result"] = mp.Barrier(barrierWait)

        self.flags["simulation_active"].set()
        self.flags["simulation_stop"].clear()

        self.simData["scriptNames"] = self.script_info

        self.create_sim_data()

        connectivityMatrix = self.connectivity_matrix
        process = []

        info("Creating processes...")
        # Create module processes
        for idx, script in enumerate(self.scripts):
            p = script.Module(name=self.script_info[idx][0], \
                              args=(self.flags, self.simData, connectivityMatrix,))
            process.append(p)
        info("Processes created!")

        # Start module Processes
        info("Starting processes...")
        for p in process:
            p.start()
        info("Processes started!")

        self.flags["simulation_next"].wait()
        t1 = time.time()
        ## Simulation Loop
        while True:
            #Wait for all modules to complete
            self.flags["Module_done"].wait()

            ## Process Simulation Results
            t2 = time.time()
            if t2 - t1 > 0:
                ips = 1.0 / (t2 - t1)
                self.simData["/simulation/iterations_per_second"].value = ips
                debug("ITERATION RATE {:.3f}s delay,  {:.2f} IPS".format(
                    t2 - t1, ips))

            self.keypress()

            if self.flags["simulation_stop"].is_set():
                self.flags["simulation_active"].clear()

            #Wait for all results to be processed
            self.flags["simulation_result"].wait()
            if not self.flags["simulation_active"].is_set():
                break
            ## Prepare Next Simulation Step

            # Increase iteration step
            self.simData["/simulation/iteration"].value += 1

            # Switch simulation buffer
            # Note: The simulation buffer indicates the buffer where outputs
            #       will be stored. Thus, at the beginning of a simulation
            #       iteration, the inputs use the previous buffer's value.
            if self.simData['/simulation/buffer_current'].value == 0:
                self.simData['/simulation/buffer_current'].value = 1
            else:
                self.simData['/simulation/buffer_current'].value = 0

            # Clear the keypress placed as input to the simulation iteration that just finished.
            self.simData["/control/outputs/keypress"] \
                        [int(self.simData['/simulation/buffer_current'].value)].value = 0

            # The longestDelay is introduced to help interactive modules
            # re-execute and to prevent "lockup" of rendering calls such as
            # cv2.waitKey. waitKey can now use a shorter wait time, allowing
            # the full simulation to execute at maximum speed while the
            # windows keep rendering. The longest delay is used to estimate
            # module re-execution and needs to be reset after every iteration
            # to prevent the iteration time from increasing monotonically.
            self.simData["/simulation/longestDelay"].value = 0.0

            # Wait for all modules to synchronise and update simulation variables
            self.flags["simulation_next"].wait()
            ## Start Simulation Step
            info("Iteration {}".format(
                self.simData["/simulation/iteration"].value))
            t1 = time.time()
            if self.flags["simulation_stop"].is_set():
                break
            ## While loop END

        self.flags["simulation_active"].clear()
        self.flags["simulation_next"].abort()
        self.flags["Module_done"].abort()
        self.flags["simulation_result"].abort()

        info("Waiting for Modules to exit!")

        # Wait for all module processes to exit
        for p in process:
            p.join()
        info("Simulation Complete.")
Example #30
import sys
import multiprocessing as mp

from concurrent.futures import ProcessPoolExecutor

from time import time

from life_game_core import (
    init_generation, next_generation, next_generation_pool, Pool)


size = (100, 100)

running = True

pixels, old_shared, new_shared = init_generation(size)
# pixels = init_generation(size, shared=False)

cores = 4
generation = 1
barrier = mp.Barrier(cores+1)
kwargs = {'barrier': barrier}
with Pool(cores, initargs=(old_shared, new_shared, barrier)) as executor:
# with ProcessPoolExecutor(cores) as executor:
    s = time()
    while running:
        pixels = next_generation_pool(pixels, executor, cores, **kwargs)
        # pixels = next_generation(pixels, executor, cores, **kwargs)
        sys.stdout.flush()
        sys.stdout.write('FPS: {:.3f} Generation: {}\r'.format(
            1 / (time() - s), generation))
        generation += 1
        s = time()