Example #1
# Imports assumed for this snippet: Process comes from
# machin.parallel.process (see Example #5), dumps/loads are machin's
# torch-aware serialization helpers, Pipe and get_context come from
# multiprocessing, and t is torch.
def test_dumps_local_func():
    # a duplex pipe: the child writes to pipe_0, the parent reads pipe_1
    pipe_0, pipe_1 = Pipe(duplex=True)
    ctx = get_context("fork")
    process_0 = Process(target=subproc_test_dumps_local_func,
                        args=(pipe_0, ),
                        ctx=ctx)
    process_0.start()
    # watch() re-raises in the parent any exception thrown in the child
    while process_0.is_alive():
        process_0.watch()
    # the child sends back a serialized local function; deserialize it,
    # call it, and check that it returns a zero tensor of shape [10]
    assert t.all(loads(pipe_1.recv())() == t.zeros([10]))
    process_0.join()
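The helper subproc_test_dumps_local_func is not shown in the snippet. A minimal sketch of what it could look like, assuming the child serializes a local function returning t.zeros([10]) (so the assertion above holds) with machin's dumps and recurse=True, the same flag Example #4 uses for local-function creators:

def subproc_test_dumps_local_func(pipe):
    # hypothetical helper: define a local (nested) function and send a
    # serialized copy of it back to the parent over the pipe
    def local_func():
        return t.zeros([10])
    pipe.send(dumps(local_func, recurse=True))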
Example #2
    def _spawn(self):
        # Method of a trainer-plugin class: self.new_process,
        # self.mp_queue and self.mp_spawn_kwargs are attributes of that
        # class. One machin Process is launched per worker, using the
        # "spawn" start method.
        ctx = mp.get_context("spawn")
        processes = [
            Process(
                target=self.new_process,
                ctx=ctx,
                args=(i, self.lightning_module.trainer, self.mp_queue),
            )
            for i in range(self.mp_spawn_kwargs["nprocs"])
        ]
        for p in processes:
            p.start()
        # Monitor the workers: watch() re-raises in this (parent)
        # process any exception thrown inside a sub-process.
        while True:
            should_exit = False
            for p in processes:
                try:
                    p.watch()
                except Exception:
                    traceback.print_exc()
                    should_exit = True
            if should_exit:
                # a worker failed: tear down every worker and re-raise
                for p in processes:
                    p.terminate()
                    p.join()
                raise RuntimeError("One or more exceptions raised in sub-processes.")
            elif not all([p.is_alive() for p in processes]):
                # at least one worker finished normally; stop polling
                break
            sleep(0.1)
        for p in processes:
            p.join()
Example #3
def processes():
    # Pytest-style fixture: spawns three worker processes running
    # process_main and routes their log records back to the parent
    # through a manager queue drained by a QueueListener.
    ctx = mp.get_context("spawn")
    pipes = [mp.Pipe(duplex=True) for _ in [0, 1, 2]]
    man = ctx.Manager()
    queue = man.Queue()
    processes = [Process(target=process_main,
                         args=(pipes[i][0], queue), ctx=ctx)
                 for i in [0, 1, 2]]

    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(
        "[%(asctime)s] <%(levelname)s>:%(name)s:%(message)s"))

    ql = QueueListener(queue, handler)
    ql.start()
    default_logger.addHandler(handler)

    for p, i in zip(processes, [0, 1, 2]):
        default_logger.info("processes {} started".format(i))
        p.start()
    yield processes, [pi[1] for pi in pipes]
    for p, pi, i in zip(processes, pipes, [0, 1, 2]):
        # try graceful shutdown first
        pi[1].send(dill.dumps((exit, 0, {})))
        p.join(timeout=1)
        if p.is_alive():
            # fall back to ungraceful shutdown
            default_logger.info("process {} shut down ungracefully".format(i))
            p.terminate()
            p.join()
    default_logger.removeHandler(handler)
    ql.stop()
    man.shutdown()
    man.join()
    default_logger.info("processes stopped")
Example #4
    def __init__(self, env_creators: List[Callable[[int], gym.Env]]) -> None:
        """
        Args:
            env_creators: List of gym environment creators, used to create
                environments on sub process workers, accepts a index as your
                environment id.
        """
        super().__init__()
        self.workers = []

        # Some environments hang or crash when created under the fork
        # context, e.g. "CarRacing-v0": the pyglet backend used by gym
        # has rendering problems after a fork, so use "spawn" instead.

        # In case users want to pass tensors to the environments,
        # always copy tensors to avoid errors.
        ctx = get_context("spawn")
        self.cmd_queues = [
            SimpleQueue(ctx=ctx, copy_tensor=True)
            for _ in range(len(env_creators))
        ]
        self.result_queue = SimpleQueue(ctx=ctx, copy_tensor=True)
        for cmd_queue, ec, env_idx in zip(self.cmd_queues, env_creators,
                                          range(len(env_creators))):
            # enable recursive serialization to support
            # lambda & local function creators.
            self.workers.append(
                Process(
                    target=self._worker,
                    args=(
                        cmd_queue,
                        self.result_queue,
                        dumps(ec, recurse=True, copy_tensor=True),
                        env_idx,
                    ),
                    ctx=ctx,
                ))

        for worker in self.workers:
            worker.daemon = True
            worker.start()

        self.env_size = env_size = len(env_creators)
        self._cmd_lock = Lock()
        self._closed = False
        tmp_env = env_creators[0](0)
        self._action_space = tmp_env.action_space
        self._obsrv_space = tmp_env.observation_space
        tmp_env.close()
        self._terminal = np.zeros([env_size], dtype=bool)
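A hypothetical usage sketch of the wrapper whose __init__ is shown above. The class name ParallelEnvWrapper is only a placeholder for illustration, and gym is assumed to be installed; each creator receives an index and must return a fresh environment:

# four CartPole environments, each built in its own worker process
envs = ParallelEnvWrapper(
    [lambda idx: gym.make("CartPole-v1") for _ in range(4)]
)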
Example #5
from machin.parallel.process import Process, ProcessException
import time


def test1():
    time.sleep(1)
    print("Exception occurred at {}".format(time.time()))
    raise RuntimeError("Error")


if __name__ == "__main__":
    t1 = Process(target=test1)
    t1.start()
    # poll watch() until it re-raises the child's error as a
    # ProcessException in this (parent) process
    while True:
        try:
            t1.watch()
        except ProcessException as e:
            print("Exception caught at {}".format(time.time()))
            print("Exception is: {}".format(e))
            break
    t1.join()
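The loop above polls watch() as fast as it can. A variant of the same pattern that also exits when the child finishes without an error and yields the CPU between polls, mirroring the sleep(0.1) used in Example #2:

while t1.is_alive():
    try:
        t1.watch()
    except ProcessException as e:
        print("Exception is: {}".format(e))
        break
    time.sleep(0.1)
t1.join()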
Example #6
def exec_sub_proc(func):
    # deserialize a dumped callable and call it
    loads(func)()


if __name__ == "__main__":
    spawn_ctx = get_context("spawn")
    fork_ctx = get_context("fork")
    # CPU tensor, not in shared memory.
    # To pass this tensor to a sub-process, set copy_tensor to True;
    # otherwise only a pointer to the tensor's memory is serialized,
    # which is not safe to dereference from the sub-process.
    # Deserializing it in the same process works either way, because
    # the memory map is the same.
    tensor = t.ones([10])
    p = Process(target=print_tensor_sub_proc,
                args=(dumps(tensor, copy_tensor=True), ),
                ctx=fork_ctx)
    p.start()
    p.join()
    # CPU tensor, in shared memory.

    # For a tensor that lives in shared memory, setting copy_tensor to
    # False is more efficient: only a handle to the shared memory is
    # passed to the sub-process, not the tensor's data.
    tensor.share_memory_()
    p = Process(target=print_tensor_sub_proc,
                args=(dumps(tensor, copy_tensor=False), ),
                ctx=fork_ctx)
    p.start()
    p.join()
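print_tensor_sub_proc is not defined in the snippet. A minimal sketch, assuming it simply deserializes the payload with machin's loads and prints the resulting tensor inside the child process:

def print_tensor_sub_proc(dumped_tensor):
    # hypothetical helper: rebuild the tensor from the dumps() payload
    # and print it from inside the sub-process
    print(loads(dumped_tensor))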