class MultiProcessRunner(BaseRunner):
    JOIN_TIMEOUT = 60

    def __init__(self, target, **kwargs):
        super(MultiProcessRunner, self).__init__(target, **kwargs)
        self.process = None  # type: Process

    @capture_monitor_exception
    def start(self):
        self.process = Process(target=self.target, kwargs=self.kwargs)
        self.process.start()

    @capture_monitor_exception
    def stop(self):
        if self.process and self.is_alive():
            self.process.terminate()
            self.process.join(MultiProcessRunner.JOIN_TIMEOUT)
            if self.process.is_alive():
                self.process.kill()

    @capture_monitor_exception
    def heartbeat(self):
        # do we want to do something here?
        pass

    @capture_monitor_exception
    def is_alive(self):
        return self.process is not None and self.process.is_alive()

    def __str__(self):
        s = super(MultiProcessRunner, self).__str__()
        return f"{s}({self.process})"
Example #2
 def func(*args, **kwargs):
     # `iterator`, `length`, and `function` come from the enclosing scope
     deltas, before, after = [], 0, 0
     sys.stdout.write(" " * 147)  # pad the line so the progress bar can overwrite it
     for item in iterator:
         if isinstance(item, tuple):
             args = list(item) + list(args)
             item = item[0]
         else:
             args = [item] + list(args)  # args is still a tuple on the first pass
         delta = after - before
         eta = 0
         if delta > 0:
             deltas.append(delta)
             eta = (length - item) * (sum(deltas) / len(deltas))
         percentage = 100 * item / length
         p = Process(target=draw_progress_bar,
                     args=(eta, percentage,
                           f"{item + 1} of {length}..."))
         before = time.time()
         p.start()
         function(*args, **kwargs)
         p.terminate()
         after = time.time()
     p = Process(target=draw_progress_bar, args=(0, 100, "Ready!"))
     p.start()
     time.sleep(0.1)
     p.terminate()
     print()
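# `draw_progress_bar`, `iterator`, `length`, and `function` above come from an
# enclosing scope that is not shown. A minimal sketch of what the bar drawer
# could look like (an assumption, not the original implementation):
def draw_progress_bar(eta, percentage, label, width=40):
    filled = int(width * percentage / 100)  # characters of completed work
    bar = "#" * filled + "-" * (width - filled)
    sys.stdout.write(f"\r[{bar}] {percentage:5.1f}%  ETA {eta:6.1f}s  {label}")
    sys.stdout.flush()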
Example #3
class AIOProcess:
    """ Execute a coroutine on a separate process """
    def __init__(self,
                 coroutine: Callable = None,
                 *args,
                 daemon: bool = False,
                 target_override: Callable = None,
                 **kwargs):
        if not asyncio.iscoroutinefunction(coroutine):
            raise ValueError("target must be a coroutine function")

        self.aio_process = Process(target=target_override
                                   or partial(AIOProcess.run_async, coroutine),
                                   args=args,
                                   kwargs=kwargs,
                                   daemon=daemon)

    @staticmethod
    def run_async(coroutine: Callable, *args, **kwargs):
        try:
            loop = uvloop.new_event_loop()
            asyncio.set_event_loop(loop)
            result = loop.run_until_complete(coroutine(*args, **kwargs))

            return result
        except BaseException:
            log.exception(f"aio process {os.getpid()} failed")
            raise

    def start(self):
        self.aio_process.start()

    async def join(self, timeout=None):
        if not self.is_alive() and self.exit_code is None:
            raise ValueError("must start process before joining")

        if timeout is not None:
            return await asyncio.wait_for(self.join(), timeout)

        while self.exit_code is None:
            await asyncio.sleep(0.005)

    @property
    def pid(self):
        return self.aio_process.pid

    @property
    def daemon(self):
        return self.aio_process.daemon

    @property
    def exit_code(self):
        return self.aio_process.exitcode

    def is_alive(self):
        return self.aio_process.is_alive()

    def terminate(self):
        self.aio_process.terminate()
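# A minimal usage sketch for AIOProcess; the coroutine name and values here
# are illustrative assumptions, not part of the original source:
async def crunch(x):
    await asyncio.sleep(0.1)
    print(f"crunched {x} in pid {os.getpid()}")

async def main():
    proc = AIOProcess(crunch, 42)
    proc.start()
    await proc.join(timeout=5)  # poll exitcode without blocking the event loop
    print(f"exit code: {proc.exit_code}")

if __name__ == "__main__":
    asyncio.run(main())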
def process_file(file_path_input, file_path_output, i):
    if os.path.exists(file_path_output):
        return print('{}    {}  Already done'.format(now(), file_path_output))

    input_file = open(file_path_input, 'r')
    output_file = open(file_path_output, 'a')

    # every worker creates a separate output file for its input file
    writer = csv.writer(output_file, delimiter='\t')

    # for every input file we create a separate driver (100 URLs)
    driver = webdriver.PhantomJS(executable_path=path_to_phantomjs)

    for line in input_file:
        splited = line.split('\t')
        # property_type = splited[0]
        url = splited[1]
        print('{}   Process={}  Current url: {}'.format(now(), i, url))

        # start process for getting microformat properties
        temp_queue = Queue()
        # p = Process(target=get_microformat_properties_by_type, args=(url, property_type, temp_queue, i))
        p = Process(target=get_element_features,
                    args=(url, driver, temp_queue, i))
        print("{}   {}  Process={}  {}  {}".format(now(), i, "Started: ",
                                                   "feature extraction", url))
        p.start()
        try:
            event_features = temp_queue.get(timeout=TIME_OUT_FEATURE)
        except Empty:
            event_features = None
            print("{}   Process={}  Timed out on {}  {}".format(
                now(), i, "feature extraction", url))

        if p.is_alive():
            p.terminate()

        print("Event features:" + str(event_features))

        if event_features is not None:
            print("{}   Process={}  Got properties for  {}".format(
                now(), i, url))

            # start process for feature extraction and writing to separate file
            p_event_features = Process(target=write_element_features,
                                       args=(event_features, writer, i,
                                             output_file))
            p_event_features.start()
            p_event_features.join(TIME_OUT_LOAD)  # wait, with timeout, for the writer
            if p_event_features.is_alive():
                p_event_features.terminate()

    driver.service.process.send_signal(signal.SIGTERM)
    driver.quit()
    input_file.close()
    output_file.close()
    return 'done'
Example #5
@pytest.fixture
def dashboard():
    # This fixture guarantees the proper termination of all spawned subprocesses
    # after the tests.
    dashboard = Process(target=run_detection)
    dashboard.start()
    yield
    for child in psutil.Process(dashboard.pid).children(recursive=True):
        child.kill()
    dashboard.terminate()
    dashboard.join()
 def serve(routing: dict, open_browser=False, timeout=None, filename=''):
     p = Process(target=WebServer.serve_and_browse,
                 args=(
                     routing,
                     open_browser,
                     filename,
                 ))
     p.start()
     wait_for_server_seconds = timeout if timeout is not None else 0
     logging.info("Waiting for server %d seconds" % wait_for_server_seconds)
     time.sleep(wait_for_server_seconds)
     p.terminate()
Example #7
def main():
    logging.info("Main thread started")
    maxValue = 3
    worker = Process(target=work,
                     args=("working", maxValue),
                     daemon=True,
                     name="Worker")
    worker.start()
    time.sleep(5)
    # if the process is still running, stop it
    if worker.is_alive():
        worker.terminate()  # pretty dangerous
    worker.join()

    logging.info(f"Main thread finished {worker.exitcode}")
def main():
    print('Main process started')
    p = Process(target=test, args=(1, ))
    p.start()
    print(p.is_alive())
    print('Main process finished')
    print(p.is_alive())
    print('Child process name:', p.name)
    print('Child process pid:', p.pid)
    p.terminate()
    p.join(20)
Example #9
@contextlib.contextmanager
def setup_and_teardown_flask_app(app: Flask, host: str, port: int):
    """
    Manages setup of provided flask app on given `host` and `port` and its teardown.

    As for setup process following things are done:
        * `/health` endpoint is added to provided flask app,
        * app is launched in separate process,
        * the function waits for the flask app to fully launch: it repeatedly
            polls the `/health` endpoint until it returns status code 200.

    Example use of this function in fixture:

    >>> with setup_and_teardown_flask_app(Flask(__name__), "localhost", 10000):
    ...     yield

    :param app: app to launch
    :param host: host on which to launch app
    :param port: port on which to launch app
    """
    def wait_for_flask_app_to_be_accessible():
        timeout = 1
        end_time = datetime.now() + timedelta(seconds=timeout)
        response = requests.Response()
        response.status_code = HTTP_404_NOT_FOUND

        while response.status_code != HTTP_200_OK and datetime.now() < end_time:
            with contextlib.suppress(requests.exceptions.ConnectionError):
                response = requests.request(
                    "POST", "http://{}:{}/health".format(host, port))
            time.sleep(0.01)

        fail_message = "Timeout expired: failed to start mock REST API in {} seconds".format(
            timeout)
        assert response.status_code == HTTP_200_OK, fail_message

    app.route("/health", methods=["POST"])(lambda: "OK")

    process = Process(target=app.run, args=(host, port))
    process.start()

    wait_for_flask_app_to_be_accessible()
    yield

    process.terminate()
    process.join()
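# A sketch of wiring the helper above into a pytest fixture; the fixture name
# is an assumption:
import pytest

@pytest.fixture
def flask_app():
    with setup_and_teardown_flask_app(Flask(__name__), "localhost", 10000):
        yield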
Example #10
def main():
    logging.info('App started')

    max = 2
    worker = Process(target=work, args=['Working', max], daemon=True, name='Worker')
    worker.start()

    time.sleep(5)

    # if the process is running, stop it
    if worker.is_alive():
        worker.terminate()   # kill the process with SIGTERM
    worker.join()

    # exitcode == 0 is good
    # anything else is an error
    logging.info(f'App finished: {worker.exitcode}')
Example #11
@contextmanager
def run_server(launcher, wait_before_entering=0, verbose=False, **kwargs):
    """Context manager to launch server on entry, and shut it down on exit"""
    clog = conditional_logger(verbose)
    server = None
    try:
        server = Process(target=launcher, kwargs=kwargs)
        clog(f'Starting server...')
        server.start()
        clog(f'... server started.')
        sleep(wait_before_entering)
        yield server
    finally:
        if server is not None and server.is_alive():
            clog(f'Terminating server...')
            server.terminate()
        clog(f'... server terminated')
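# Usage sketch for run_server; `my_launcher` is a hypothetical blocking server
# entry point, not part of the original source:
def my_launcher(port=8080):
    while True:  # stand-in for a real serve-forever loop
        sleep(1)

if __name__ == "__main__":
    with run_server(my_launcher, wait_before_entering=1, port=8080) as server:
        print(f"server pid: {server.pid}")  # exercise the server here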
def test_gunicorn_uvicorn():

    options = {
        "bind": "%s:%s" % ("127.0.0.1", "8000"),
        "workers": 1,
        "worker_class": "uvicorn.workers.UvicornWorker",
        "log_level": "debug",
    }
    gunicorn_uvicorn_server = StandaloneApplication(handler_app, options)
    process = Process(target=gunicorn_uvicorn_server.run)
    process.start()
    time.sleep(0.1)
    response = requests.get("http://127.0.0.1:8000")
    assert response.status_code == 200
    assert response.content == b"Hello"
    process.terminate()
    process.join()
    # needed timeout for travis or the port won't get released fast enough
    # and will "block" subsequent tests
    time.sleep(1)
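# `StandaloneApplication` is not shown above; gunicorn's documented
# custom-application pattern is roughly this (a sketch, assuming gunicorn is
# installed and `handler_app` is the ASGI app under test):
import gunicorn.app.base

class StandaloneApplication(gunicorn.app.base.BaseApplication):
    def __init__(self, app, options=None):
        self.options = options or {}
        self.application = app
        super().__init__()

    def load_config(self):
        for key, value in self.options.items():
            if key in self.cfg.settings and value is not None:
                self.cfg.set(key.lower(), value)

    def load(self):
        return self.application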
Example #13
def process_file(file_path_input, file_path_output, i):
    if os.path.exists(file_path_output):
        return print('{}    {}  Already done'.format(now(), file_path_output))

    input_file = open(file_path_input, 'r')
    output_file = open(file_path_output, 'a')

    # every worker creates a separate output file for its input file
    writer = csv.writer(output_file, delimiter='\t')

    # for every input file we create a separate driver (100 URLs)
    driver = webdriver.PhantomJS(executable_path=path_to_phantomjs)

    for line in input_file:
        splited = line.split('\t')
        property_type = splited[0]
        url = splited[1]
        print('{}   Process={}  Current url: {}'.format(now(), i, url))

        # start process for getting microformat properties
        temp_queue = Queue()
        p = Process(target=get_microformat_properties_by_type,
                    args=(url, property_type, temp_queue, i))
        start_with_timeout(p, TIME_OUT_LOAD, "loading", url, i)

        event_properties = temp_queue.get() if not temp_queue.empty() else None
        if p.is_alive():
            p.terminate()
        if event_properties is not None:
            print("{}   Process={}  Got properties for  {}".format(
                now(), i, url))

            # start process for feature extraction and writing to separate file
            p_event_features = Process(target=get_event_features_and_write,
                                       args=(event_properties, driver, writer,
                                             i, output_file))
            start_with_timeout(p_event_features, TIME_OUT_FEATURE,
                               "feature extraction", url, i)
            if p_event_features.is_alive():
                p_event_features.terminate()

    driver.service.process.send_signal(signal.SIGTERM)
    driver.quit()
    input_file.close()
    output_file.close()
    return 'done'
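# `start_with_timeout` is not defined in these snippets; a plausible sketch
# matching the call sites above (the message format is an assumption).
# Termination is left to the caller, which checks is_alive() afterwards:
def start_with_timeout(process, timeout, stage, url, worker_id):
    process.start()
    process.join(timeout)  # wait at most `timeout` seconds
    if process.is_alive():
        print("{}   Process={}  Timed out on {}  {}".format(
            now(), worker_id, stage, url))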
Example #14
def main():
    logging.info('Started')

    max = 2
    worker = Process(target=work,
                     args=['Working', max],
                     daemon=True,
                     name='Super Mario')
    worker.start()

    time.sleep(5)

    #if the process is running, stop it
    if worker.is_alive():
        worker.terminate()
    worker.join()

    #exitcode == 0
    #Anything else is an error
    logging.info(f'Finished: {worker.exitcode}')
Example #15
def main():
    name = process.current_process().name
    logging.info(f'{name} started')

    #Setup the process
    address = 'localhost'  #127.0.0.1
    port = 2823  # above 1024
    password = b'password'

    p = Process(target=proc,
                args=[address, port, password],
                daemon=True,
                name="Worker")
    p.start()

    logging.info(f'{name} waiting on the worker...')
    time.sleep(1)

    #Connect to the process
    dest = (address, port)
    conn = Client(dest, authkey=password)

    #Command loop
    while True:
        command = input('\r\nEnter a command or type quit:\r\n').strip()
        logging.info(f'{name} command: {command}')
        conn.send(command)
        if command == 'quit':
            break

    #Cleanup and shutdown
    if p.is_alive():
        logging.info(f'{name} terminating worker')
        conn.close()
        time.sleep(1)
        p.terminate()
    p.join()

    logging.info(f'{name} finished')
Example #16
class CreateProcess:
    """A context manager to launch a parallel process and close it on exit.
    """

    def __init__(
        self,
        proc_func: Callable,
        process_name=None,
        wait_before_entering=2,
        verbose=False,
        args=(),
        **kwargs,
    ):
        """
        Essentially, this context manager will call
        ```
            proc_func(*args, **kwargs)
        ```
        in an independent process.

        :param proc_func: A function that will be launched in the process
        :param process_name: The name of the process.
        :param wait_before_entering: A pause (in seconds) before returning from the enter phase.
            (in case the outside should wait before assuming everything is ready)
        :param verbose: If True, will print some info on the starting/stopping of the process
        :param args: args that will be given as arguments to the proc_func call
        :param kwargs: The kwargs that will be given as arguments to the proc_func call

        The following should print 'Hello console!' in the console.
        >>> with CreateProcess(print, verbose=True, args=('Hello console!',)) as p:
        ...     print("-------> Hello module!")  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
        Starting process: print...
        ... print process started.
        -------> Hello module!
        ... print process terminated
        """
        self.proc_func = proc_func
        self.process_name = process_name or getattr(proc_func, '__name__', '')
        self.wait_before_entering = float(wait_before_entering)
        self.verbose = verbose
        self.args = args
        self.kwargs = kwargs
        self.clog = conditional_logger(verbose)
        self.process = None
        self.exception_info = None

    def process_is_running(self):
        return self.process is not None and self.process.is_alive()

    def __enter__(self):
        self.process = Process(
            target=self.proc_func,
            args=self.args,
            kwargs=self.kwargs,
            name=self.process_name,
        )
        self.clog(f'Starting process: {self.process_name}...')
        try:
            self.process.start()
            if self.process_is_running():
                self.clog(f'... {self.process_name} process started.')
                sleep(self.wait_before_entering)
                return self
            else:
                raise RuntimeError('Process is not running')
        except Exception as error:
            raise RuntimeError(
                f'Something went wrong when trying to launch process {self.process_name}'
            ) from error

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.process is not None and self.process.is_alive():
            self.clog(f'Terminating process: {self.process_name}...')
            self.process.terminate()
        self.clog(f'... {self.process_name} process terminated')
        if exc_type is not None:
            self.exception_info = dict(
                exc_type=exc_type, exc_val=exc_val, exc_tb=exc_tb
            )
Example #17
class SubprocEnvWorker(EnvWorker):
    """Subprocess worker used in SubprocVectorEnv and ShmemVectorEnv."""

    def __init__(
        self, env_fn: Callable[[], gym.Env], share_memory: bool = False
    ) -> None:
        super().__init__(env_fn)
        self.parent_remote, self.child_remote = Pipe()
        self.share_memory = share_memory
        self.buffer: Optional[Union[dict, tuple, ShArray]] = None
        if self.share_memory:
            dummy = env_fn()
            obs_space = dummy.observation_space
            dummy.close()
            del dummy
            self.buffer = _setup_buf(obs_space)
        args = (
            self.parent_remote,
            self.child_remote,
            CloudpickleWrapper(env_fn),
            self.buffer,
        )
        self.process = Process(target=_worker, args=args, daemon=True)
        self.process.start()
        self.child_remote.close()

    def __getattr__(self, key: str) -> Any:
        self.parent_remote.send(["getattr", key])
        return self.parent_remote.recv()

    def _decode_obs(self) -> Union[dict, tuple, np.ndarray]:
        def decode_obs(
            buffer: Optional[Union[dict, tuple, ShArray]]
        ) -> Union[dict, tuple, np.ndarray]:
            if isinstance(buffer, ShArray):
                return buffer.get()
            elif isinstance(buffer, tuple):
                return tuple([decode_obs(b) for b in buffer])
            elif isinstance(buffer, dict):
                return {k: decode_obs(v) for k, v in buffer.items()}
            else:
                raise NotImplementedError

        return decode_obs(self.buffer)

    def reset(self) -> Any:
        self.parent_remote.send(["reset", None])
        obs = self.parent_remote.recv()
        if self.share_memory:
            obs = self._decode_obs()
        return obs

    @staticmethod
    def wait(  # type: ignore
        workers: List["SubprocEnvWorker"],
        wait_num: int,
        timeout: Optional[float] = None,
    ) -> List["SubprocEnvWorker"]:
        remain_conns = conns = [x.parent_remote for x in workers]
        ready_conns: List[connection.Connection] = []
        remain_time, t1 = timeout, time.time()
        while len(remain_conns) > 0 and len(ready_conns) < wait_num:
            if timeout:
                remain_time = timeout - (time.time() - t1)
                if remain_time <= 0:
                    break
            # connection.wait hangs if the list is empty
            new_ready_conns = connection.wait(
                remain_conns, timeout=remain_time)
            ready_conns.extend(new_ready_conns)  # type: ignore
            remain_conns = [
                conn for conn in remain_conns if conn not in ready_conns]
        return [workers[conns.index(con)] for con in ready_conns]

    def send_action(self, action: np.ndarray) -> None:
        self.parent_remote.send(["step", action])

    def get_result(
        self,
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        obs, rew, done, info = self.parent_remote.recv()
        if self.share_memory:
            obs = self._decode_obs()
        return obs, rew, done, info

    def seed(self, seed: Optional[int] = None) -> Optional[List[int]]:
        self.parent_remote.send(["seed", seed])
        return self.parent_remote.recv()

    def render(self, **kwargs: Any) -> Any:
        self.parent_remote.send(["render", kwargs])
        return self.parent_remote.recv()

    def close_env(self) -> None:
        try:
            self.parent_remote.send(["close", None])
            # mp may be deleted so it may raise AttributeError
            self.parent_remote.recv()
            self.process.join()
        except (BrokenPipeError, EOFError, AttributeError):
            pass
        # ensure the subproc is terminated
        self.process.terminate()
Example #18
class AsyncMCTS(MoveChooser):
    """
    Implementation of Monte Carlo Tree Search that uses the other player's time to continue thinking.
    This is achieved using multiprocessing, and a Pipe for transferring data to and from the worker process.
    """
    def __init__(self,
                 GameClass,
                 starting_position,
                 time_limit=3,
                 network=None,
                 c=np.sqrt(2),
                 d=1,
                 threads=1):
        """
        Either:
        If network is provided, threads must be 1.
        If network is not provided, then threads will be used for leaf parallelization
        """
        super().__init__(GameClass, starting_position)
        if network is not None and threads != 1:
            raise ValueError('threads must be 1 when a network is provided')

        self.parent_pipe, worker_pipe = Pipe()
        self.worker_process = Process(target=self.loop_func,
                                      args=(GameClass, starting_position,
                                            time_limit, network, c, d, threads,
                                            worker_pipe))

    def start(self):
        self.worker_process.start()

    def report_user_move(self, user_chosen_move):
        """
        Reports the given user-chosen move to the worker process.
        This allows the search tree to be narrowed.

        :param user_chosen_move:
        """
        self.parent_pipe.send(user_chosen_move)
        self.position = user_chosen_move

    def choose_move(self, return_distribution=False):
        """
        Instructs the worker process to decide on an optimal move.
        The worker process will then continue thinking for time_limit, and then return a list of its chosen moves.
        If multiple states are passed through before the ai's turn is completed,
        then they will be the contents of the list. Otherwise the list will have a single state.

        :return: The moves chosen by monte carlo tree search.
        """
        self.parent_pipe.send(None)
        chosen_positions = self.parent_pipe.recv()
        self.position = chosen_positions[-1][0]
        return chosen_positions if return_distribution else [
            position for position, _ in chosen_positions
        ]

    def terminate(self):
        self.worker_process.terminate()
        self.worker_process.join()

    @staticmethod
    def loop_func(GameClass, position, time_limit, network, c, d, threads,
                  worker_pipe):
        if network is None:
            pool = Pool(threads) if threads > 1 else None
            root = RolloutNode(position,
                               parent=None,
                               GameClass=GameClass,
                               c=c,
                               rollout_batch_size=threads,
                               pool=pool,
                               verbose=True)
        else:
            network.initialize()
            root = HeuristicNode(position,
                                 None,
                                 GameClass,
                                 network,
                                 c,
                                 d,
                                 verbose=True)

        while True:
            best_node = root.choose_expansion_node()

            if best_node is not None:
                best_node.expand()

            if root.children is not None and worker_pipe.poll():
                user_chosen_position = worker_pipe.recv()

                if user_chosen_position is not None:
                    # an updated position has been received so we can truncate the tree
                    for child in root.children:
                        if np.all(child.position == user_chosen_position):
                            root = child
                            root.parent = None
                            break
                    else:
                        print(user_chosen_position)
                        raise Exception('Invalid user chosen move!')

                    if GameClass.is_over(root.position):
                        print('Game Over in Async MCTS: ',
                              GameClass.get_winner(root.position))
                        return
                else:
                    # this move chooser has been requested to decide on a move via the choose_move function
                    start_time = time()
                    while time() - start_time < time_limit:
                        best_node = root.choose_expansion_node()

                        # best_node will be None if the tree is fully expanded
                        if best_node is None:
                            break

                        best_node.expand()

                    is_ai_player_1 = GameClass.is_player_1_turn(root.position)
                    chosen_positions = []
                    print(
                        f'MCTS choosing move based on {root.count_expansions()} expansions!'
                    )

                    # choose moves as long as it is still the ai's turn
                    while GameClass.is_player_1_turn(
                            root.position) == is_ai_player_1:
                        if root.children is None:
                            best_node = root.choose_expansion_node()
                            if best_node is not None:
                                best_node.expand()
                        root, distribution = root.choose_best_node(
                            return_probability_distribution=True, optimal=True)
                        chosen_positions.append((root.position, distribution))

                    print('Expected outcome: ', root.get_evaluation())
                    root.parent = None  # delete references to the parent and siblings
                    worker_pipe.send(chosen_positions)
                    if GameClass.is_over(root.position):
                        print('Game Over in Async MCTS: ',
                              GameClass.get_winner(root.position))
                        return

    def reset(self):
        raise NotImplementedError
class TestOrchestrator(BaseTestCase):

    def setUp(self):
        self.test_directory = os.path.dirname(os.path.realpath(__file__))
        self.test_config = os.path.join(self.test_directory,
                                        "packager_config.cfg")
        self.packager_config = PackagerConfig(self.test_config)
        """Spin up a server in a seperate process"""
        def start_test_server():
            app = Orchestrator(self.packager_config)
            from werkzeug.serving import run_simple
            run_simple('127.0.0.1', 5000, app)

        self.p = Process(target=start_test_server, daemon=True)
        self.p.start()

    def tearDown(self):
        """Make sure we tear down the process properly at the end"""
        self.p.terminate()

    def testOrchestratorAcceptingInputCorrectly(self):
        @exponential_back_off
        def wait_for_process():
            return requests.get("http://127.0.0.1:5000/health")

        wait_seconds = 1
        print("await process spin up for %d seconds" % wait_seconds)
        time.sleep(wait_seconds)
        wait_for_process()

        with open(self.get_file('code'), 'r') as code, \
                open(self.get_file('tests'), 'r') as test:
            language = 'python'
            question_name = 'foo'
            code_payload = code.read()
            test_payload = test.read()
            payload = {
                'question_name': question_name,
                'language': language,
                'code': code_payload,
                'test': test_payload
            }

            # http:// required,
            # requests library needs protocol scheme to connect
            got_resp_raw = requests.post('http://127.0.0.1:5000/submit',
                                         data=payload)
            got_resp = json.loads(got_resp_raw.text)
            exp_data = {
                'language': language,
                'code': code_payload,
                'test': test_payload,
                'question_name': question_name
            }
            exp_response_message = 'not tested for in this testcase'
            exp_resp = {
                'execution_result': exp_response_message,
                'got_data': exp_data
            }

            self.assertEqual(exp_resp['got_data'], got_resp['got_data'])

            # we just need to check if the result contains success
            for testcase, test_result in got_resp['execution_result'].items():
                self.assertTrue(test_result['success'],
                                'testcase {} failure with message {}'
                                .format(testcase, test_result['message']))
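# `exponential_back_off` is not shown above; a plausible sketch of such a
# retry decorator (the name matches the usage, the body is an assumption):
def exponential_back_off(func, retries=5, base_delay=0.5):
    def wrapper(*args, **kwargs):
        for attempt in range(retries):
            try:
                return func(*args, **kwargs)
            except requests.exceptions.ConnectionError:
                time.sleep(base_delay * 2 ** attempt)  # 0.5s, 1s, 2s, ...
        return func(*args, **kwargs)  # final attempt propagates the error
    return wrapper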
class AsyncIterativeDeepening(MoveChooser):
    def __init__(self, GameClass, starting_position=None, time_limit=3):
        super(AsyncIterativeDeepening, self).__init__(GameClass, starting_position)
        self.root = DeepeningNode(GameClass, starting_position)
        self.time_limit = time_limit

        self.parent_pipe, worker_pipe = Pipe()
        self.worker_process = Process(target=self.loop_func,
                                      args=(GameClass, starting_position, worker_pipe))
        self.receipt_backlog = 0

    def start(self):
        self.worker_process.start()

    def terminate(self):
        self.worker_process.terminate()
        self.worker_process.join()

    @staticmethod
    def loop_func(GameClass, starting_position, worker_pipe):
        root = DeepeningNode(GameClass, starting_position)
        while True:
            root.deepen()
            print(root.get_depth())
            worker_pipe.send(root.children[0].state)

            while worker_pipe.poll():
                chosen_position = worker_pipe.recv()
                if root.children is None:
                    root.deepen()

                for child in root.children:
                    if np.all(chosen_position == child.state):
                        root = child
                        break
                else:
                    raise ValueError('Invalid move!')
                worker_pipe.send(None)  # send acknowledgement receipt

    def report_user_move(self, user_chosen_position):
        # notify process that position was chosen
        self.parent_pipe.send(user_chosen_position)
        self.receipt_backlog += 1
        self.position = user_chosen_position

    def reset(self):
        raise NotImplementedError

    def choose_move(self, return_distribution=False):
        if return_distribution:
            raise NotImplementedError

        start_time = time()
        while self.receipt_backlog > 0:
            while self.parent_pipe.recv() is not None:
                pass
            self.receipt_backlog -= 1

        remaining_time = self.time_limit - (time() - start_time)
        if remaining_time > 0:
            sleep(remaining_time)

        self.position = self.parent_pipe.recv()
        while self.parent_pipe.poll():
            self.position = self.parent_pipe.recv()

        # notify process that position was chosen
        self.parent_pipe.send(self.position)
        self.receipt_backlog += 1

        return [self.position]
Example #21
from multiprocessing import Process, Queue
import os, time, random

# Code executed by the writer process:
def write(q):
    print('Process to write: %s' % os.getpid())
    for value in ['A', 'B', 'C']:
        print('Put %s to queue...' % value)
        q.put(value)
        time.sleep(random.random())

# Code executed by the reader process:
def read(q):
    print('Process to read: %s' % os.getpid())
    while True:
        value = q.get(True)
        print('Get %s from queue.' % value)

if __name__ == '__main__':
    # The parent process creates the Queue and passes it to each child process:
    q = Queue()
    pw = Process(target=write, args=(q,))
    pr = Process(target=read, args=(q,))
    # Start child process pw (writer):
    pw.start()
    # Start child process pr (reader):
    pr.start()
    # Wait for pw to finish:
    pw.join()
    # pr loops forever and cannot be joined; terminate it forcibly:
    pr.terminate()
def run_in_parallel(
    target: Callable[[Any], Any],
    target_name: str,
    data_set: Dict[str, Any],
    table_name: str,
):

    return_values = []

    processes = []
    parent_connections = []

    chunked_kwargs = _get_chunked_list(dataset=data_set, tablename=table_name)

    logger.info(
        f"Starting up {len(chunked_kwargs)} process(es) for [{target_name}]")

    try:
        for chunk_index, target_kwargs in enumerate(chunked_kwargs):
            parent_connection, child_connection = Pipe()
            parent_connections.append(parent_connection)

            process = Process(
                target=__chunk_wrapper,
                kwargs=dict(
                    target=target,
                    target_kwargs=target_kwargs,
                    target_name=target_name,
                    connection=child_connection,
                    chunk_index=chunk_index,
                    chunk_count=len(chunked_kwargs),
                ),
                daemon=True,
            )

            process.start()
            processes.append(process)

        for connection in parent_connections:
            response_data = connection.recv()

            if isinstance(response_data, Exception):
                raise response_data

            return_values.append(response_data)

        for process in processes:
            process.join()

    except Exception as error:
        logger.exception(f"Error when running {len(processes)} process(es)"
                         f" for {target_name}")

        raise error

    finally:
        for process in processes:
            process.terminate()

    logger.info(
        f"Successfully executed {len(chunked_kwargs)} process(es) for [{target_name}]"
    )

    return return_values
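# `__chunk_wrapper` and `_get_chunked_list` are not shown above; a plausible
# sketch of the wrapper, which runs the target on one chunk and ships the
# result (or the exception) back through the pipe. The signature matches the
# call above; the body is an assumption:
def __chunk_wrapper(target, target_kwargs, target_name, connection,
                    chunk_index, chunk_count):
    try:
        logger.info(f"[{target_name}] chunk {chunk_index + 1}/{chunk_count} started")
        connection.send(target(**target_kwargs))
    except Exception as error:
        connection.send(error)  # the parent re-raises exceptions it receives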
Example #23
class ProcessExecution(OutputExecution):
    def __init__(self, target, args):
        self.target = target
        self.args = args
        self.output_queue = Queue(maxsize=2048)
        self._process: Union[Process, None] = None
        self._status = None
        self._stopped: bool = False
        self._interrupted: bool = False
        self._output_observers = []

    @property
    def is_async(self) -> bool:
        return False

    def execute(self) -> ExecutionState:
        if not self._stopped and not self._interrupted:
            self._process = Process(target=self._run)
            self._process.start()
            output_reader = Thread(target=self._read_output,
                                   name='Output-Reader',
                                   daemon=True)
            output_reader.start()
            self._process.join()
            self.output_queue.put_nowait(_QueueStop())
            output_reader.join(timeout=1)
            self.output_queue.close()
            if self._process.exitcode == 0:
                return ExecutionState.COMPLETED
        if self._stopped:
            return ExecutionState.STOPPED
        if self._interrupted:
            return ExecutionState.INTERRUPTED
        raise ExecutionError(
            "Process returned non-zero code " + str(self._process.exitcode),
            ExecutionState.FAILED)

    def _run(self):
        with self._capture_stdout():
            try:
                self.target(*self.args)
            except BaseException:
                for line in traceback.format_exception(*sys.exc_info()):
                    self.output_queue.put_nowait(line)
                raise

    @contextmanager
    def _capture_stdout(self):
        import sys
        original_stdout = sys.stdout
        original_stderr = sys.stderr
        stdout_writer = _CapturingWriter(original_stdout, self.output_queue)
        stderr_writer = _CapturingWriter(original_stderr, self.output_queue)
        sys.stdout = stdout_writer
        sys.stderr = stderr_writer

        try:
            yield
        finally:
            sys.stdout = original_stdout
            sys.stderr = original_stderr

    @property
    def status(self):
        return self._status

    def stop(self):
        self._stopped = True
        if self._process:
            self._process.terminate()

    def interrupt(self):
        self._interrupted = True
        if self._process:
            self._process.terminate()

    def add_output_observer(self, observer):
        self._output_observers.append(observer)

    def remove_output_observer(self, observer):
        self._output_observers.remove(observer)

    def _read_output(self):
        while True:
            output_text = self.output_queue.get()
            if isinstance(output_text, _QueueStop):
                break
            self._status = output_text
            self._notify_output_observers(output_text)

    def _notify_output_observers(self, output):
        for observer in self._output_observers:
            # noinspection PyBroadException
            try:
                if isinstance(observer, ExecutionOutputObserver):
                    observer.output_update(output)
                elif callable(observer):
                    observer(output)
                else:
                    log.warning(
                        "event=[unsupported_output_observer] observer=[%s]",
                        observer)
            except BaseException:
                log.exception("event=[state_observer_exception]")
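# `_CapturingWriter` is not shown above; a minimal sketch of a stream proxy
# that mirrors every write into the output queue (an assumption, not the
# original implementation):
class _CapturingWriter:
    def __init__(self, stream, queue):
        self.stream = stream
        self.queue = queue

    def write(self, text):
        self.queue.put_nowait(text)  # mirror output to the reader thread
        self.stream.write(text)

    def flush(self):
        self.stream.flush()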