Example #1
def prepare_env_and_recheck(input_dir, out_dir, target, num_jobs, config_file):
    if not common.check_if_dir_exists(input_dir):
        common.print_and_exit("Can't use input directory")
    common.check_dir_and_create(out_dir)

    run_gen.gen_test_makefile_and_copy(out_dir, config_file)
    run_gen.dump_testing_sets(target)
    run_gen.print_compilers_version(target)

    lock = multiprocessing.Lock()

    task_queue = multiprocessing.JoinableQueue()
    process_dir(input_dir, task_queue)
    failed_queue = multiprocessing.SimpleQueue()
    passed_queue = multiprocessing.SimpleQueue()

    task_threads = []
    for num in range(num_jobs):
        task_thread = multiprocessing.Process(
            target=recheck,
            args=(num, lock, task_queue, failed_queue, passed_queue, target, out_dir))
        task_thread.start()
        task_threads.append(task_thread)

    task_queue.join()
    task_queue.close()

    for task_thread in task_threads:
        task_thread.join()
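The recheck worker that Example #1 dispatches to is not part of the listing. Below is a minimal, hypothetical sketch of such a worker, only to illustrate the JoinableQueue contract the example relies on: every get() must be answered with task_done(), otherwise task_queue.join() in the parent never returns. The pass/fail decision is a stand-in and out_dir is unused in this sketch.

import multiprocessing
import queue  # queue.Empty is raised by JoinableQueue.get_nowait()


def recheck_sketch(num, lock, task_queue, failed_queue, passed_queue,
                   target, out_dir):
    # Hypothetical worker body with the same argument list as recheck() above.
    # The task queue is fully populated before the workers start, so draining
    # it with get_nowait() until queue.Empty is a safe exit condition here.
    while True:
        try:
            test_dir = task_queue.get_nowait()
        except queue.Empty:
            break
        try:
            with lock:
                print("worker", num, "rechecking", test_dir, "for target", target)
            passed = bool(test_dir)  # stand-in for the real rebuild-and-compare step
            (passed_queue if passed else failed_queue).put(test_dir)
        finally:
            # Without this, task_queue.join() in prepare_env_and_recheck() blocks forever.
            task_queue.task_done()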
Example #2
    def __init__(self):
        self.queue = multiprocessing.SimpleQueue()
        self.result_queue = multiprocessing.SimpleQueue()
        self.process = multiprocessing.Process(
            target=_start_and_run_window,
            args=(self.queue, self.result_queue))
        self.process.start()
Example #3
def _simulate_in_parallel(env, game, nn_att, nn_def, n_processes: int,
                          n_episodes: int, save_dir: str = None,
                          summary_writer=None):
    """ Run simulations in parallel processes. """
    worker_processes = []
    simulation_request_queue = multiprocessing.SimpleQueue()
    attacker_reward_queue = multiprocessing.SimpleQueue()
    defender_reward_queue = multiprocessing.SimpleQueue()
    # Set-up all the processes.
    for _ in range(n_processes):
        worker = SimulationWorker(
            simulation_request_queue,
            defender_reward_queue,
            attacker_reward_queue,
            nn_att,
            nn_def,
            settings.get_attacker_strategy_dir(),
            settings.get_defender_strategy_dir())
        worker.start()
        worker_processes.append(worker)
    # Request all simulations.
    for _ in range(n_episodes):
        simulation_request_queue.put(CloudpickleWrapper(game))
    # Send sentinel values telling the worker processes to shut down cleanly (one per worker).
    for _ in range(n_processes):
        simulation_request_queue.put(None)
    for process in worker_processes:
        process.join()

    # Aggregate results.
    attacker_rewards = np.zeros([n_episodes])
    defender_rewards = np.zeros([n_episodes])
    for episode_i in range(n_episodes):
        attacker_rewards[episode_i] = attacker_reward_queue.get()
        defender_rewards[episode_i] = defender_reward_queue.get()

    return attacker_rewards, defender_rewards
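The None sentinels above (one per worker) are a common SimpleQueue shutdown idiom. The self-contained sketch below shows the same idiom with a trivial worker function rather than the SimulationWorker class from the example; it also drains the result queue before join(), since a SimpleQueue is a plain pipe and a child still holding large unread results could otherwise block in put() while the parent waits in join().

import multiprocessing


def sentinel_worker(request_queue, result_queue):
    # Keep pulling requests until the None sentinel arrives, then exit cleanly.
    while True:
        request = request_queue.get()
        if request is None:
            break
        result_queue.put(request * 2)  # stand-in for running one simulation


if __name__ == "__main__":
    requests = multiprocessing.SimpleQueue()
    results = multiprocessing.SimpleQueue()
    workers = []
    for _ in range(2):
        worker = multiprocessing.Process(target=sentinel_worker,
                                         args=(requests, results))
        worker.start()
        workers.append(worker)
    for episode in range(10):
        requests.put(episode)
    for _ in workers:  # one sentinel per worker
        requests.put(None)
    rewards = [results.get() for _ in range(10)]  # drain before joining
    for worker in workers:
        worker.join()
    print(sorted(rewards))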
Example #4
def spinup_server(manager_config):
    reg_queue = multiprocessing.SimpleQueue()
    worker_msg_queue = multiprocessing.SimpleQueue()
    trainer_in_queue = multiprocessing.SimpleQueue()
    trainer_out_queue = multiprocessing.SimpleQueue()

    manager_client = Server(
        manager_config, {
            "register": reg_queue,
            "worker": worker_msg_queue,
            "trainer": trainer_in_queue
        })

    p1 = multiprocessing.Process(target=mqtt_process, args=(manager_client, ))

    p2 = multiprocessing.Process(target=manager_process,
                                 args=(manager_client, worker_msg_queue,
                                       reg_queue, trainer_in_queue,
                                       trainer_out_queue))

    p3 = multiprocessing.Process(
        target=trainer_process,
        args=(
            # config, queue for new sessions, model, environment
            manager_config,
            trainer_out_queue,
            trainer_in_queue,
            None,
            None))

    jobs = [p1, p2, p3]
    for j in jobs:
        j.start()
Example #5
    def __init__(self, loader):
        self.loader = loader
        self.dataset = loader.dataset
        self.batch_size = loader.batch_size
        self.collate_fn = loader.collate_fn
        self.sampler = loader.sampler
        self.num_workers = loader.num_workers
        self.done_event = threading.Event()

        self.samples_remaining = len(self.sampler)
        self.sample_iter = iter(self.sampler)

        if self.num_workers > 0:
            self.index_queue = multiprocessing.SimpleQueue()
            self.data_queue = multiprocessing.SimpleQueue()
            self.batches_outstanding = 0
            self.shutdown = False
            self.send_idx = 0
            self.rcvd_idx = 0
            self.reorder_dict = {}

            self.workers = [
                multiprocessing.Process(
                    target=_worker_loop,
                    args=(self.dataset, self.index_queue, self.data_queue, self.collate_fn))
                for _ in range(self.num_workers)]

            for w in self.workers:
                w.daemon = True  # ensure that the worker exits on process exit
                w.start()

            # prime the prefetch loop
            for _ in range(2 * self.num_workers):
                self._put_indices()
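The _worker_loop target in Example #5 is not shown. In this style of loader, each worker pulls (idx, batch_indices) jobs from index_queue and sends (idx, batch) back on data_queue so the parent can park out-of-order results in reorder_dict and yield them in order. The sketch below is a simplified, hypothetical stand-in for that loop, not the actual implementation.

import multiprocessing


def worker_loop_sketch(dataset, index_queue, data_queue, collate_fn):
    # Hypothetical stand-in for _worker_loop: fetch a batch of indices, collate
    # the samples, and ship (idx, batch) back so the parent can reorder by idx.
    while True:
        job = index_queue.get()
        if job is None:  # sentinel: shut down
            break
        idx, batch_indices = job
        samples = [dataset[i] for i in batch_indices]
        data_queue.put((idx, collate_fn(samples)))


if __name__ == "__main__":
    index_q = multiprocessing.SimpleQueue()
    data_q = multiprocessing.SimpleQueue()
    dataset = list(range(100))
    worker = multiprocessing.Process(target=worker_loop_sketch,
                                     args=(dataset, index_q, data_q, list))
    worker.start()
    index_q.put((0, [3, 1, 4]))
    index_q.put(None)
    print(data_q.get())  # (0, [3, 1, 4])
    worker.join()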
Example #6
    def run(self, result):
        # We use SimpleQueues because they are more predictable.
        # They do the necessary IO directly, without using a
        # helper thread.
        result_queue = multiprocessing.SimpleQueue()
        status_queue = multiprocessing.SimpleQueue()
        worker_param_queue = multiprocessing.SimpleQueue()

        # Prepopulate the worker param queue with server connection
        # information.
        for _ in range(self.num_workers):
            worker_param_queue.put((self.server_conn, self.postgres_dsn))

        result_thread = threading.Thread(name='test-monitor',
                                         target=monitor_thread,
                                         args=(result_queue, result),
                                         daemon=True)
        result_thread.start()

        initargs = (status_queue, worker_param_queue, result_queue)

        pool = multiprocessing.Pool(self.num_workers,
                                    initializer=mproc_fixes.WorkerScope(
                                        init_worker, shutdown_worker),
                                    initargs=initargs)

        # Wait for all workers to initialize.
        for _ in range(self.num_workers):
            status_queue.get()

        with pool:
            ar = pool.map_async(_run_test, iter(self.tests), chunksize=1)

            while True:
                try:
                    ar.get(timeout=0.1)
                except multiprocessing.TimeoutError:
                    if self.stop_requested:
                        break
                    else:
                        continue
                else:
                    break

            # Post the terminal message to the queue so that
            # test-monitor can stop.
            result_queue.put((None, None, None))

            # Give the test-monitor thread some time to
            # process the queue messages.  If something
            # goes wrong, the thread will be forcibly
            # joined by a timeout.
            result_thread.join(timeout=3)

        # Wait for pool to shutdown, this includes test teardowns.
        pool.join()

        return result
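Examples #6 and #16 pre-seed a SimpleQueue with one parameter entry per pool worker and let each worker's initializer pop its own entry, with #6 additionally reporting readiness on a status queue. A minimal self-contained sketch of that handshake follows; the function names and the 'dsn-N' payloads are illustrative, not taken from the example.

import multiprocessing

_conn_info = None


def init_worker_sketch(status_queue, worker_param_queue):
    # Each pool worker pops exactly one pre-seeded parameter entry, stashes it
    # in a module-level global, and reports readiness to the parent.
    global _conn_info
    _conn_info = worker_param_queue.get()
    status_queue.put('ready')


def use_conn(item):
    return _conn_info, item


if __name__ == "__main__":
    num_workers = 2
    status_q = multiprocessing.SimpleQueue()
    param_q = multiprocessing.SimpleQueue()
    for i in range(num_workers):
        param_q.put('dsn-%d' % i)  # stand-in for server connection information
    with multiprocessing.Pool(num_workers,
                              initializer=init_worker_sketch,
                              initargs=(status_q, param_q)) as pool:
        for _ in range(num_workers):  # wait for every worker to initialize
            status_q.get()
        print(pool.map(use_conn, range(4)))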
Example #7
    def __init__(self, port=DEFAULT_PORT, authkey=DEFAULT_AUTHKEY,
                 timeout=DEFAULT_TIMEOUT, output=DEFAULT_OUTPUT):
        self.port = port
        self.timeout = timeout
        self.output = output
        self.authkey = authkey
        self.job_q = multiprocessing.SimpleQueue()
        self.result_q = multiprocessing.SimpleQueue()
        self._manager = self._create_manager()
        self.commands = []
Example #8
    def run(self, nprocs):

        # create the output directory for file-based runner output
        if self.runner_output == "file":
            pathlib.Path(self.runner_dir).mkdir(parents=True, exist_ok=True)

        # properties of this blockrunner
        hosttags = {"hostname": socket.gethostname(),
                    "multirunnerid": self.blockrunnerid,
                    "nprocs": nprocs}

        # inter-block shared objects
        job_q = multiprocessing.SimpleQueue()
        result_q = multiprocessing.SimpleQueue()
        idle_q = multiprocessing.SimpleQueue()
        job_count = multiprocessing.Value('i', 0)
        result_count = multiprocessing.Value('i', 0)
        job_outofstock = multiprocessing.Value('i', False)

        # start the retriever
        workers = []
        p_retriever = multiprocessing.Process(
            target=self._commandretriever,
            args=(job_q, idle_q, job_count, job_outofstock, nprocs))
        try:
            p_retriever.start()

            # start the workers
            for i in range(nprocs):
                p = multiprocessing.Process(
                    target=self._runner,
                    args=(job_q, result_q, idle_q, i))
                p.start()
                workers.append(p)

            # communicate to the master that we are starting
            self.remote_result_q.put({"tag": "start"})

            # send back results
            while True:
                # save this now because it could change while the condition below is evaluated
                oos = job_outofstock.value
                if result_count.value < job_count.value:
                    result = result_q.get()
                    result.update(hosttags)
                    self.remote_result_q.put(result)
                    result_count.value += 1  # I'm the only one to increment this
                elif oos:
                    break
                else:
                    time.sleep(1)

        except Exception:
            pass

        finally:
            # just in case
            for p in workers:
                p.terminate()
            p_retriever.terminate()
Example #9
    def __init__(self, loader):
        self.dataset = loader.dataset
        self.collate_fn = loader.collate_fn
        self.batch_sampler = loader.batch_sampler
        self.num_workers = loader.num_workers
        self.pin_memory = loader.pin_memory
        self.done_event = threading.Event()

        self.worker_init_fn = loader.worker_init_fn
        self.worker_init_args = loader.worker_init_args
        self.worker_init_kwargs = loader.worker_init_kwargs

        self.sample_iter = iter(self.batch_sampler)

        if self.num_workers > 0:
            self.index_queue = multiprocessing.SimpleQueue()
            self.data_queue = multiprocessing.SimpleQueue()
            self.batches_outstanding = 0
            self.shutdown = False
            self.send_idx = 0
            self.rcvd_idx = 0
            self.reorder_dict = {}

            self.seeds = loader.gen_seeds()
            self.workers = [
                multiprocessing.Process(
                    target=_worker_loop_seed,
                    args=(i, self.dataset, self.index_queue, self.data_queue,
                          self.collate_fn, self.seeds[i], self.worker_init_fn,
                          self.worker_init_args[i],
                          self.worker_init_kwargs[i]))
                for i in range(self.num_workers)
            ]

            for w in self.workers:
                w.daemon = True  # ensure that the worker exits on process exit
                w.start()

            if self.pin_memory:
                in_data = self.data_queue
                self.data_queue = queue.Queue()
                self.pin_thread = threading.Thread(target=_pin_memory_loop,
                                                   args=(in_data,
                                                         self.data_queue,
                                                         self.done_event))
                self.pin_thread.daemon = True
                self.pin_thread.start()

            # prime the prefetch loop
            for _ in range(2 * self.num_workers):
                self._put_indices()
        else:
            if self.worker_init_fn is not None:
                self.worker_init_fn(-1, *self.worker_init_args,
                                    **self.worker_init_kwargs)
Example #10
    def _after_prelude(self):
        inputQ = multiprocessing.SimpleQueue()
        outputQ = multiprocessing.SimpleQueue()
        inputP = multiprocessing.Process(target=_queue_read_json_lines,
                                         args=(inputQ, sys.stdin))
        inputP.start()
        outputP = multiprocessing.Process(target=_queue_dump_json_lines,
                                          args=(outputQ, sys.stdout))
        outputP.start()
        for jsIn in _queue_iterate(inputQ):
            jsOut = self.each(jsIn)
            outputQ.put(jsOut)
Example #11
    def mpAPrimes(self, limit):
        limit += 1
        cutoffs = [1]
        for i in range(1, self.threads):
            cutoffs.append(int(sqrt(i * (limit**2) / self.threads)))
        cutoffs.append(limit)

        resultQueue = multiprocessing.SimpleQueue()
        printQueue = multiprocessing.SimpleQueue()

        printThread = threading.Thread(target=self.printThread,
                                       args=(printQueue, ),
                                       name='printThread')
        printThread.start()

        for i in range(1, self.threads):
            printQueue.put('cutoff ' + str(i) + ': ' + str(cutoffs[i]))

        jobs = []
        for i in range(self.threads):
            jobs.append(
                multiprocessing.Process(target=self.aprime_process,
                                        args=(cutoffs[i], cutoffs[i + 1],
                                              resultQueue, printQueue),
                                        name='p' + str(i)))

        for job in jobs:
            job.start()

        aPrimeList = []
        for i in range(self.threads):
            aPrimeList += resultQueue.get()
            printQueue.put('got result ' + str(i))
        for job in jobs:
            job.join()

        printQueue.put(aPrimeList)
        aPrimeList.sort()
        antiprimes = []
        most = 0
        for num in aPrimeList:
            factors = self.num_factors(num)
            if factors > most:
                most = factors
                antiprimes.append(num)
                printQueue.put(num)
        printQueue.put(antiprimes)

        printQueue.put('stop')
        printThread.join()

        return antiprimes
Example #12
    def start_training(self, feeding_q, gpu_device):
        """ gpu_device = -1 for CPU """
        if self._verbose:
            print("start AE on device:", gpu_device)

        self._exit_msg = self.__hash__()  # not super-safe but safe enough

        self._q = feeding_q
        self._serialized_net_file = mp.SimpleQueue()
        self._eval_score = mp.SimpleQueue()
        self._train_process = mp.Process(target=self._train,
                                         args=(gpu_device, ))
        self._train_process.start()
Example #13
    def __init__(self, env, n_actors, extras):
        self.env = env
        self.n_actors = n_actors

        self.proc_objs = []
        for proc_index in range(n_actors):
            in_q = mp.SimpleQueue()  #mp.Queue()
            out_q = mp.SimpleQueue()  #mp.Queue()
            args = (proc_index, env, in_q, out_q, extras)
            proc = mp.Process(target=actor_process_main, args=args)
            self.proc_objs.append((proc, in_q, out_q))
        self._started = False
        self._joined = False
Example #14
def main(argv=None):
    log_queue = mp.Queue()
    with launch_logging_thread(log_queue), set_up_logging(log_queue):
        prog_args = parse_args(argv)
        access_queue = mp.SimpleQueue()
        scandir_queue = mp.SimpleQueue()
        access_process_args = prog_args, log_queue, access_queue, scandir_queue
        access_process = mp.Process(target=access_main,
                                    args=access_process_args)
        access_queue.put([prog_args.root_dir])
        access_process.start()
        scandir_main(access_queue, scandir_queue)
        access_process.join()
Example #15
    def __init__(self,
                 worker_class,
                 initargs=(),
                 worker_count=None,
                 batch_size=None,
                 wrap_exception=False):
        worker_count = worker_count or os.cpu_count() or 1

        use_fastqueue = True
        if use_fastqueue:
            self._task_queue = FastQueue()
            self._result_queue = FastQueue()
            self._quick_put = self._task_queue.put
            self._quick_get = self._result_queue.get
        else:
            self._task_queue = multiprocessing.SimpleQueue()
            self._result_queue = multiprocessing.SimpleQueue()
            self._quick_put = self._task_queue._writer.send
            self._quick_get = self._result_queue._reader.recv

        self._batch_size = batch_size
        self._callback = None
        self._error_callback = None
        self._listener = None

        main_module = sys.modules['__main__']
        is_profiling = os.path.basename(
            main_module.__file__) in ['profile.py', 'cProfile.py']

        self._pool = []
        for i in range(worker_count):
            worker_params = _WorkerParams(i,
                                          self._task_queue,
                                          self._result_queue,
                                          worker_class,
                                          initargs,
                                          wrap_exception=wrap_exception,
                                          is_profiling=is_profiling)
            w = multiprocessing.Process(target=worker_func,
                                        args=(worker_params, ))
            w.name = w.name.replace('Process', 'PoolWorker')
            w.daemon = True
            w.start()
            self._pool.append(w)

        self._result_handler = threading.Thread(
            target=WorkerPool._handleResults, args=(self, ))
        self._result_handler.daemon = True
        self._result_handler.start()

        self._closed = False
Example #16
    def run(self, result):
        # We use SimpleQueues because they are more predictable.
        # They do the necessary IO directly, without using a
        # helper thread.
        result_queue = multiprocessing.SimpleQueue()
        worker_param_queue = multiprocessing.SimpleQueue()

        # Prepopulate the worker param queue with server connection
        # information.
        for server_conn in self.server_conns:
            worker_param_queue.put(server_conn)

        result_thread = threading.Thread(name='test-monitor',
                                         target=monitor_thread,
                                         args=(result_queue, result),
                                         daemon=True)
        result_thread.start()

        initargs = (worker_param_queue, result_queue)

        pool = multiprocessing.Pool(self.num_workers,
                                    initializer=init_worker,
                                    initargs=initargs)

        with pool:
            ar = pool.map_async(_run_test, iter(self.tests), chunksize=1)

            while True:
                try:
                    ar.get(timeout=0.1)
                except multiprocessing.TimeoutError:
                    if self.stop_requested:
                        pool.terminate()
                        break
                    continue
                else:
                    break

            # Post the terminal message to the queue so that
            # test-monitor can stop.
            result_queue.put((None, None, None))

            # Give the test-monitor thread some time to
            # process the queue messages.  If something
            # goes wrong, the thread will be forcibly
            # joined by a timeout.
            result_thread.join(timeout=3)

        return result
Example #17
def main(args):
    # Build the pipeline.
    procs = []
    # Read from input.
    q_games = mp.SimpleQueue()
    if args:
        prefix = args.pop(0)
        print("Reading from chunkfiles {}".format(prefix))
        procs.append(
            mp.Process(target=disk_fetch_games, args=(q_games, prefix)))
    else:
        print("Reading from MongoDB")
        #procs.append(mp.Process(target=fake_fetch_games, args=(q_games, 20)))
        procs.append(
            mp.Process(target=mongo_fetch_games, args=(q_games, 275000)))
    # Split into train/test
    q_test = mp.SimpleQueue()
    q_train = mp.SimpleQueue()
    procs.append(
        mp.Process(target=split_train_test, args=(q_games, q_train, q_test)))
    # Convert v1 to v2 format and shuffle, writing 8192 moves per chunk.
    q_write_train = mp.SimpleQueue()
    q_write_test = mp.SimpleQueue()
    # The shuffle buffer is ~2.2GB of RAM with 2^20 (~1e6) entries. A game is ~500 moves, so
    # there are ~2000 games in the shuffle buffer. Selecting 8k moves gives an expected
    # number of ~4 moves from the same game in a given chunk file.
    #
    # The output files are passed through another 1e6-sized shuffle buffer in parse.py. At 8192
    # moves per chunk, there are ~128 chunks in the shuffle buffer. With a batch size of 4096,
    # the expected max number of moves from the same game in the batch is < 1.14.
    procs.append(
        mp.Process(target=chunk_parser,
                   args=(q_train, q_write_train, 1 << 20, 8192)))
    procs.append(
        mp.Process(target=chunk_parser,
                   args=(q_test, q_write_test, 1 << 16, 8192)))
    # Write to output files
    procs.append(
        mp.Process(target=chunk_writer,
                   args=(q_write_train, NameSrc('train_'))))
    procs.append(
        mp.Process(target=chunk_writer, args=(q_write_test, NameSrc('test_'))))

    # Start all the child processes running.
    for p in procs:
        p.start()
    # Wait for everything to finish.
    for p in procs:
        p.join()
Example #18
    def run(self):
        yield messages.StartedMessage.get()
        if self.runnable.config.get("nrunner.max_parallel_tasks", 1) != 1:
            yield messages.FinishedMessage.get(
                'cancel',
                fail_reason="parallel run is not"
                " allowed for vt tests")
        else:
            try:
                queue = multiprocessing.SimpleQueue()
                vt_test = VirtTest(queue, self.runnable)
                process = multiprocessing.Process(target=vt_test.runTest)
                process.start()
                while True:
                    time.sleep(nrunner.RUNNER_RUN_CHECK_INTERVAL)
                    if queue.empty():
                        yield messages.RunningMessage.get()
                    else:
                        message = queue.get()
                        yield message
                        if message.get('status') == 'finished':
                            break
            except Exception:
                yield messages.StderrMessage.get(traceback.format_exc())
                yield messages.FinishedMessage.get('error')
Example #19
    def train(self, episodes=1000):
        """
        Lower level training orchestration method. Written in the generator style.
        Intended to be used with "for update in train(...):"
        """

        # create the requested number of background agents and runners:
        worker_agents = self.agent.clone(num=self.workers)
        runners = [Runner(agent=agent, ix=(ix + 1), train=True)
                   for ix, agent in enumerate(worker_agents)]

        # the workers communicate their updates back to us via this process-safe queue:
        queue = mp.SimpleQueue()

        # if no number of episodes was given, assume training will run until the user interrupts it with a keyboard interrupt:
        if episodes is None:
            print('Starting out an infinite training process')

        # create the actual background processes, each using the train_one method as its entry point:
        processes = [mp.Process(target=self.train_one,
                                args=(runners[ix - 1], queue, episodes, ix))
                     for ix in range(1, self.workers + 1)]

        # run those processes:
        for process in processes:
            process.start()

        try:
            # what follows is a rather naive way of listening for worker updates; it works well enough for our purposes:
            while any(process.is_alive() for process in processes):
                results = queue.get()
                yield results
        except Exception as e:
            logger.error(str(e))
Example #20
    def run(self):
        if not self.runnable.uri:
            yield {'status': 'error',
                   'output': 'uri is required but was not given'}
            return

        queue = multiprocessing.SimpleQueue()
        process = multiprocessing.Process(target=self._run,
                                          args=(self.runnable.uri, queue))
        time_start = time.time()
        time_start_sent = False
        process.start()

        last_status = None
        while queue.empty():
            time.sleep(RUNNER_RUN_CHECK_INTERVAL)
            now = time.time()
            if last_status is None or now > last_status + RUNNER_RUN_STATUS_INTERVAL:
                last_status = now
                if not time_start_sent:
                    time_start_sent = True
                    yield {'status': 'running',
                           'time_start': time_start}
                yield {'status': 'running'}

        yield queue.get()
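Several of the runner examples here (#20, #21, #25, #26, #27, #29) share the same shape: the child process does the real work and puts a single final status dict on a SimpleQueue, while the parent polls queue.empty() and yields periodic 'running' updates in between. The sketch below is a hedged guess at what the child-side _run target could look like, with a simulated payload rather than the real runner logic.

import multiprocessing
import time


def run_payload_sketch(uri, queue):
    # Hypothetical stand-in for self._run: do the work in the child process and
    # put exactly one final status dict on the SimpleQueue for the parent to yield.
    try:
        time.sleep(0.2)  # pretend to execute whatever `uri` points at
        queue.put({'status': 'finished', 'result': 'pass', 'uri': uri})
    except Exception as exc:
        queue.put({'status': 'finished', 'result': 'error', 'output': str(exc)})


if __name__ == "__main__":
    status_queue = multiprocessing.SimpleQueue()
    process = multiprocessing.Process(target=run_payload_sketch,
                                      args=('example://uri', status_queue))
    process.start()
    while status_queue.empty():  # same polling idea as the runners above
        time.sleep(0.01)
    print(status_queue.get())
    process.join()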
Example #21
    def run(self):
        if not self.runnable.uri:
            yield {
                'status': 'finished',
                'result': 'error',
                'output': 'uri is required but was not given'
            }
            return

        queue = multiprocessing.SimpleQueue()
        process = multiprocessing.Process(target=self._run,
                                          args=(self.runnable.uri, queue))
        time_start = time.time()
        time_start_sent = False
        process.start()

        most_current_execution_state_time = None
        while queue.empty():
            time.sleep(nrunner.RUNNER_RUN_CHECK_INTERVAL)
            now = time.time()
            if most_current_execution_state_time is not None:
                next_execution_state_mark = (
                    most_current_execution_state_time +
                    nrunner.RUNNER_RUN_STATUS_INTERVAL)
            if (most_current_execution_state_time is None
                    or now > next_execution_state_mark):
                most_current_execution_state_time = now
                if not time_start_sent:
                    time_start_sent = True
                    yield {'status': 'running', 'time_start': time_start}
                yield {'status': 'running'}

        yield queue.get()
Example #22
def multi_proc_run(num_proc, fun, fun_args=(), fun_kwargs=None):
    """Runs a function in a multi-proc setting (unless num_proc == 1)."""

    # There is no need for multi-proc in the single-proc case
    fun_kwargs = fun_kwargs if fun_kwargs else {}
    if num_proc == 1:
        fun(*fun_args, **fun_kwargs)
        return

    # Handle errors from training subprocesses
    error_queue = multiprocessing.SimpleQueue()
    error_handler = ErrorHandler(error_queue)

    # Run each training subprocess
    ps = []
    for i in range(num_proc):
        p_i = multiprocessing.Process(target=run,
                                      args=(i, num_proc, error_queue, fun,
                                            fun_args, fun_kwargs))
        ps.append(p_i)
        p_i.start()
        error_handler.add_child(p_i.pid)

    # Wait for each subprocess to finish
    for p in ps:
        p.join()
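The run wrapper and ErrorHandler used by Example #22 are not shown. A plausible, hypothetical sketch of the wrapper side: the child executes the target function and, instead of failing silently, forwards any traceback to the parent through the error SimpleQueue.

import multiprocessing
import traceback


def run_sketch(proc_rank, num_proc, error_queue, fun, fun_args, fun_kwargs):
    # Hypothetical stand-in for `run`: execute the target and report failures
    # to the parent via the error queue rather than just exiting.
    try:
        fun(*fun_args, **fun_kwargs)
    except KeyboardInterrupt:
        pass  # interrupted by the parent; not an error
    except Exception:
        error_queue.put((proc_rank, traceback.format_exc()))


if __name__ == "__main__":
    errors = multiprocessing.SimpleQueue()
    child = multiprocessing.Process(target=run_sketch,
                                    args=(0, 1, errors, int, ('not a number',), {}))
    child.start()
    child.join()
    if not errors.empty():
        rank, tb = errors.get()
        print('process %d failed:\n%s' % (rank, tb))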
Example #23
def run_amplifiers(filename, phase_settings):
    num_amplifiers = len(phase_settings)
    amplifiers = []
    # initialise buffers with phase settings
    buffers = []
    for idx in range(num_amplifiers):
        buf = multiprocessing.SimpleQueue()
        buf.put(phase_settings[idx])
        buffers.append(buf)

    for idx in range(num_amplifiers):
        amplifier_name = chr(ord('A') + idx)
        input_buf = buffers[idx]
        if idx == num_amplifiers - 1:
            # output of last amplifier is fed back to first amplifier
            output_buf = buffers[0]
        else:
            output_buf = buffers[idx + 1]
        amplifiers.append(run_amplifier(filename, amplifier_name, input_buf, output_buf))
    # start input
    buffers[0].put(0)
    for a in amplifiers:
        a.join()
    #print("Amplifiers done")
    return buffers[0].get()
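run_amplifier in Example #23 is not included here. The toy below keeps only the queue wiring: each stage reads its phase setting and one signal from its input SimpleQueue, combines them, and forwards the result to the next stage's input, with the last stage wrapping around to the first, like the buffer ring above. It is a self-contained illustration, not the real amplifier code.

import multiprocessing


def toy_amplifier(name, input_buf, output_buf):
    # Toy stand-in for run_amplifier: read the phase setting and one signal,
    # add them, and forward the result to the next amplifier's input queue.
    phase = input_buf.get()
    signal = input_buf.get()
    output_buf.put(signal + phase)


if __name__ == "__main__":
    phase_settings = [1, 2, 3]
    buffers = [multiprocessing.SimpleQueue() for _ in phase_settings]
    for buf, phase in zip(buffers, phase_settings):
        buf.put(phase)  # seed each input with its phase setting
    amplifiers = []
    for idx, in_buf in enumerate(buffers):
        out_buf = buffers[(idx + 1) % len(buffers)]  # last output feeds the first input
        proc = multiprocessing.Process(target=toy_amplifier,
                                       args=(chr(ord('A') + idx), in_buf, out_buf))
        proc.start()
        amplifiers.append(proc)
    buffers[0].put(0)  # start the signal through the chain
    for proc in amplifiers:
        proc.join()
    print(buffers[0].get())  # 0 + 1 + 2 + 3 = 6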
Example #24
    def test_wrap(self):
        none = ""
        stdout_log = os.path.join(self.test_dir, "stdout.log")
        stderr_log = os.path.join(self.test_dir, "stderr.log")
        redirs = [
            (none, none),
            (none, stderr_log),
            (stdout_log, none),
            (stdout_log, stderr_log),
        ]

        for stdout_redir, stderr_redir in redirs:
            queue = multiprocessing.SimpleQueue()
            _wrap(
                local_rank=0,
                fn=echo1,
                args={0: ("hello", )},
                envs={0: {
                    "RANK": "0"
                }},
                stdout_redirects={0: stdout_redir},
                stderr_redirects={0: stderr_redir},
                ret_vals={0: queue},
            )
            self.assertEqual("hello_0", queue.get())
            if stdout_redir:
                self.assert_in_file(["hello stdout from 0"], stdout_log)
            if stderr_redir:
                self.assert_in_file(["hello stderr from 0"], stderr_log)
Example #25
    def run(self):
        if not self.runnable.uri:
            yield messages.FinishedMessage.get('error',
                                               fail_reason='uri is required '
                                                           'but was not given')
            return

        queue = multiprocessing.SimpleQueue()
        process = multiprocessing.Process(target=self._run,
                                          args=(self.runnable.uri, queue))
        process.start()
        yield messages.StartedMessage.get()

        most_current_execution_state_time = None
        while queue.empty():
            time.sleep(nrunner.RUNNER_RUN_CHECK_INTERVAL)
            now = time.monotonic()
            if most_current_execution_state_time is not None:
                next_execution_state_mark = (most_current_execution_state_time +
                                             nrunner.RUNNER_RUN_STATUS_INTERVAL)
            if (most_current_execution_state_time is None or
                    now > next_execution_state_mark):
                most_current_execution_state_time = now
                yield messages.RunningMessage.get()

        status = queue.get()
        yield messages.StdoutMessage.get(status['stdout'])
        yield messages.StderrMessage.get(status['stderr'])
        yield messages.FinishedMessage.get(status['result'])
Example #26
    def run(self, runnable):
        # pylint: disable=W0201
        self.runnable = runnable
        file_name, suite_name, test_name = self._uri_to_file_suite_test()
        if not all([file_name, suite_name, test_name]):

            yield messages.FinishedMessage.get('error',
                                               fail_reason='Invalid URI given')
            return

        queue = multiprocessing.SimpleQueue()
        process = multiprocessing.Process(target=self._run,
                                          args=(file_name, suite_name,
                                                test_name, queue))
        process.start()
        yield messages.StartedMessage.get()

        most_current_execution_state_time = None
        while queue.empty():
            time.sleep(RUNNER_RUN_CHECK_INTERVAL)
            now = time.monotonic()
            if most_current_execution_state_time is not None:
                next_execution_state_mark = (
                    most_current_execution_state_time +
                    RUNNER_RUN_STATUS_INTERVAL)
            if (most_current_execution_state_time is None
                    or now > next_execution_state_mark):
                most_current_execution_state_time = now
                yield messages.RunningMessage.get()

        status = queue.get()
        yield messages.StdoutMessage.get(status['stdout'])
        yield messages.StderrMessage.get(status['stderr'])
        yield messages.FinishedMessage.get(status['result'])
Example #27
    def run(self):
        if not self.module_class_method:
            error_msg = ("Invalid URI: could not be converted to an unittest "
                         "dotted name.")
            yield self.prepare_status('finished', {'result': 'error',
                                                   'output': error_msg})
            return

        queue = multiprocessing.SimpleQueue()
        process = multiprocessing.Process(target=self._run_unittest,
                                          args=(self.module_path,
                                                self.module_class_method,
                                                queue))
        process.start()
        yield self.prepare_status('started')

        most_current_execution_state_time = None
        while queue.empty():
            time.sleep(RUNNER_RUN_CHECK_INTERVAL)
            now = time.monotonic()
            if most_current_execution_state_time is not None:
                next_execution_state_mark = (most_current_execution_state_time +
                                             RUNNER_RUN_STATUS_INTERVAL)
            if (most_current_execution_state_time is None or
                    now > next_execution_state_mark):
                most_current_execution_state_time = now
                yield self.prepare_status('running')

        status = queue.get()
        yield self.prepare_status('running',
                                  {'type': 'stdout',
                                   'log': status.pop('output').encode()})
        status['time'] = time.monotonic()
        yield status
Example #28
        def test_wrap_bad(self):
            none = ""
            stdout_log = os.path.join(self.test_dir, "stdout.log")
            stderr_log = os.path.join(self.test_dir, "stderr.log")
            redirs = [
                (none, none),
                (none, stderr_log),
                (stdout_log, none),
                (stdout_log, stderr_log),
            ]

            for stdout_redir, stderr_redir in redirs:
                queue = multiprocessing.SimpleQueue()
                worker_finished_event_mock = mock.Mock()
                _wrap(
                    local_rank=0,
                    fn=echo1,
                    args={0: ("hello",)},
                    envs={0: {"RANK": "0"}},
                    stdout_redirects={0: stdout_redir},
                    stderr_redirects={0: stderr_redir},
                    ret_vals={0: queue},
                    queue_finished_reading_event=worker_finished_event_mock,
                )
                self.assertEqual("hello_0", queue.get())
                if stdout_redir:
                    self.assert_in_file(["hello stdout from 0"], stdout_log)
                if stderr_redir:
                    self.assert_in_file(["hello stderr from 0"], stderr_log)
                worker_finished_event_mock.wait.assert_called_once()
Example #29
    def run(self):
        if not self.runnable.uri:
            error_msg = 'uri is required but was not given'
            yield self.prepare_status('finished', {'result': 'error',
                                                   'output': error_msg})
            return

        queue = multiprocessing.SimpleQueue()
        process = multiprocessing.Process(target=self._run_unittest,
                                          args=(self.runnable.uri, queue))
        process.start()
        yield self.prepare_status('started')

        most_current_execution_state_time = None
        while queue.empty():
            time.sleep(RUNNER_RUN_CHECK_INTERVAL)
            now = time.monotonic()
            if most_current_execution_state_time is not None:
                next_execution_state_mark = (most_current_execution_state_time +
                                             RUNNER_RUN_STATUS_INTERVAL)
            if (most_current_execution_state_time is None or
                    now > next_execution_state_mark):
                most_current_execution_state_time = now
                yield self.prepare_status('running')

        status = queue.get()
        status['time'] = time.monotonic()
        yield status
Example #30
    def __init__(self, data_structure, processes, scan_function, init_args,
                 _mp_init_function):
        """ Init the scanner """
        assert isinstance(data_structure, world.DataSet)
        self.data_structure = data_structure
        self.list_files_to_scan = data_structure._get_list()
        self.processes = processes
        self.scan_function = scan_function

        # Queue used by processes to pass results
        self.queue = multiprocessing.SimpleQueue()
        init_args.update({'queue': self.queue})
        # NOTE TO SELF: initargs doesn't handle kwargs, only args!
        # Pass a dict with all the args
        self.pool = multiprocessing.Pool(processes=processes,
                                         initializer=_mp_init_function,
                                         initargs=(init_args, ))

        # Recommended time to sleep between polls for results
        self.SCAN_START_SLEEP_TIME = 0.001
        self.SCAN_MIN_SLEEP_TIME = 1e-6
        self.SCAN_MAX_SLEEP_TIME = 0.1
        self.scan_sleep_time = self.SCAN_START_SLEEP_TIME
        self.queries_without_results = 0
        self.last_time = time()
        self.MIN_QUERY_NUM = 1
        self.MAX_QUERY_NUM = 5

        # Holds a friendly string with the name of the last file scanned
        self._str_last_scanned = None