Example #1
def test_context():
    """Test whether context mode works."""
    total = 100
    with jabbar(total=total - 20) as bar:
        for _ in range(total):
            bar.inc()
        assert bar.n_done == total
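The samplers in the later examples drive this same context-manager API with the enable and keep flags. A minimal standalone sketch (the flag values here are illustrative):

from jabbar import jabbar

# context mode: the bar is finished automatically when the block exits;
# enable toggles rendering, keep=False clears the bar line afterwards
with jabbar(total=100, enable=True, keep=False) as bar:
    for _ in range(100):
        bar.inc()  # advance the count by one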
Example #2
    def sample_until_n_accepted(self,
                                n,
                                simulate_one,
                                t,
                                *,
                                max_eval=np.inf,
                                all_accepted=False,
                                ana_vars=None):
        nr_simulations = 0
        sample = self._create_empty_sample()

        for _ in jabbar(range(n), enable=self.show_progress, keep=False):
            while True:
                if self.check_max_eval and nr_simulations >= max_eval:
                    break
                new_sim = simulate_one()
                sample.append(new_sim)
                nr_simulations += 1
                if new_sim.accepted:
                    break
        self.nr_evaluations_ = nr_simulations

        if sample.n_accepted < n:
            sample.ok = False

        return sample
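The inner loop above simulates repeatedly until one accepted result is obtained, so the outer loop yields exactly n acceptances. A self-contained sketch of the same accept/reject pattern, with simulate_one_stub as a hypothetical stand-in:

import random

def simulate_one_stub():
    # hypothetical stand-in for simulate_one: accept with probability 0.3
    class Sim:
        accepted = random.random() < 0.3
    return Sim()

n, n_simulations, n_accepted = 5, 0, 0
while n_accepted < n:  # sample until n acceptances
    sim = simulate_one_stub()
    n_simulations += 1
    if sim.accepted:
        n_accepted += 1
print(n_simulations, "simulations for", n_accepted, "acceptances")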
Example #3
def test_manual():
    """Test whether manual setup mode works."""
    total = 50
    bar = jabbar(total=total)
    for _ in range(total):
        bar.inc()
    bar.finish()
    assert bar.n_done == total
Example #4
    def sample_until_n_accepted(
            self, n, simulate_one, max_eval=np.inf, all_accepted=False):
        n_eval = Value(c_longlong)
        n_eval.value = 0

        n_acc = Value(c_longlong)
        n_acc.value = 0

        queue = Queue()

        # wrap arguments
        if self.pickle:
            simulate_one = pickle.dumps(simulate_one)
        args = (simulate_one, queue,
                n_eval, n_acc, n,
                self.check_max_eval, max_eval, all_accepted,
                self._create_empty_sample)

        processes = [Process(target=work, args=args, daemon=self.daemon)
                     for _ in range(self.n_procs)]

        for proc in processes:
            proc.start()

        id_results = []

        # make sure all results are collected
        # and the queue is emptied to prevent deadlocks
        n_done = 0
        with jabbar(total=n, enable=self.show_progress, keep=False) as bar:
            while n_done < len(processes):
                val = get_if_worker_healthy(processes, queue)
                if val == DONE:
                    n_done += 1
                else:
                    id_results.append(val)
                    bar.inc()

        for proc in processes:
            proc.join()

        # avoid bias toward short-running evaluations
        id_results.sort(key=lambda x: x[0])
        id_results = id_results[:min(len(id_results), n)]

        self.nr_evaluations_ = n_eval.value

        results = [res[1] for res in id_results]

        # create 1 to-be-returned sample from results
        sample = self._create_empty_sample()
        for result in results:
            sample += result

        if sample.n_accepted < n:
            sample.ok = False

        return sample
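The example relies on a shared queue plus a DONE sentinel: the parent drains results until every worker has signaled completion, and only then joins the processes, so no child blocks on a full queue. A minimal sketch of that pattern with hypothetical worker logic:

from multiprocessing import Process, Queue

DONE = "DONE"  # sentinel, standing in for the module-level constant above

def worker(queue, n_items):
    # hypothetical workload: push (index, result) pairs, then signal DONE
    for i in range(n_items):
        queue.put((i, i * i))
    queue.put(DONE)

if __name__ == "__main__":
    queue = Queue()
    processes = [Process(target=worker, args=(queue, 3)) for _ in range(2)]
    for proc in processes:
        proc.start()
    results, n_done = [], 0
    # drain the queue until all workers signaled DONE, then join
    while n_done < len(processes):
        val = queue.get()
        if val == DONE:
            n_done += 1
        else:
            results.append(val)
    for proc in processes:
        proc.join()
    print(sorted(results))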
Example #5
    def sample_until_n_accepted(
            self, n, simulate_one, t, *,
            max_eval=np.inf, all_accepted=False, ana_vars=None):
        # starting more than n jobs
        # does not help in this parallelization scheme
        n_procs = min(n, self.n_procs)
        logger.debug("Start sampling on {} cores ({} requested)"
                     .format(n_procs, self.n_procs))
        feed_q = Queue()
        result_q = Queue()

        feed_process = Process(target=feed, args=(feed_q, n,
                                                  n_procs))

        single_core_sampler = SingleCoreSampler(
            check_max_eval=self.check_max_eval)
        # the max_eval handling in this sampler is certainly not optimal
        single_core_sampler.sample_factory = self.sample_factory

        # wrap arguments
        if self.pickle:
            simulate_one = pickle.dumps(simulate_one)
        args = (feed_q, result_q, simulate_one, max_eval, single_core_sampler)

        worker_processes = [Process(target=work, args=args)
                            for _ in range(n_procs)]

        for proc in worker_processes:
            proc.start()

        feed_process.start()

        collected_results = []

        for _ in jabbar(range(n), enable=self.show_progress, keep=False):
            res = get_if_worker_healthy(worker_processes, result_q)
            collected_results.append(res)

        feed_process.join()

        for proc in worker_processes:
            proc.join()

        # Queues get closed automatically on garbage collection
        # No explicit closing necessary.

        results, evaluations = zip(*collected_results)
        self.nr_evaluations_ = sum(evaluations)

        # create 1 to-be-returned sample from results
        sample = self._create_empty_sample()
        for result in results:
            sample += result

        if sample.n_accepted < n:
            sample.ok = False

        return sample
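Here a separate feeder process posts exactly n jobs plus one stop signal per worker, and the parent collects exactly n results, so all processes terminate cleanly. A self-contained sketch of this feed/work scheme with hypothetical job logic:

from multiprocessing import Process, Queue

STOP = None  # hypothetical end-of-work sentinel

def feed(feed_q, n, n_procs):
    # post n jobs, then one stop signal per worker
    for i in range(n):
        feed_q.put(i)
    for _ in range(n_procs):
        feed_q.put(STOP)

def work(feed_q, result_q):
    # consume jobs until the stop signal arrives
    while True:
        job = feed_q.get()
        if job is STOP:
            break
        result_q.put(job * job)

if __name__ == "__main__":
    feed_q, result_q = Queue(), Queue()
    n, n_procs = 6, 2
    feeder = Process(target=feed, args=(feed_q, n, n_procs))
    workers = [Process(target=work, args=(feed_q, result_q))
               for _ in range(n_procs)]
    for proc in workers:
        proc.start()
    feeder.start()
    results = [result_q.get() for _ in range(n)]  # collect exactly n results
    feeder.join()
    for proc in workers:
        proc.join()
    print(sorted(results))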
Example #6
def test_output():
    """Test the actual output."""
    total = 50
    bar = jabbar(total=total)
    for _ in range(20):
        bar.inc()
    out = bar.get_line()
    assert out == "\r 40% |█████████▋              | 20/50 "
    assert bar.n_done == 20
    bar.finish()
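The asserted line can be reconstructed by hand: the bar between the pipes is 24 cells wide (inferred from the string itself), and 20/50 = 40% of 24 cells is 9.6, i.e. nine full blocks plus a fractional glyph for the remaining 0.6 of a cell:

n_done, total, width = 20, 50, 24  # width inferred from the asserted string
filled = n_done / total * width
print(filled)       # 9.6
print(int(filled))  # 9 full blocks; the 0.6 remainder renders as "▋"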
Example #7
def test_output_custom_symbols():
    """Test the output with custom symbols."""
    total = 50
    bar = jabbar(total=total, symbols="xo")
    for _ in range(17):
        bar.inc()
    out = bar.get_line()
    assert out == "\r 34% |xoxoxoxo                | 17/50 "
    assert bar.n_done == 17
    bar.finish()
Example #8
    def sample_until_n_accepted(
        self,
        n,
        simulate_one,
        t,
        *,
        max_eval=np.inf,
        all_accepted=False,
        ana_vars=None,
    ):
        # get the analysis id
        ana_id = self.analysis_id

        # tell workers to start
        self.start_generation_t(n=n, t=t, simulate_one=simulate_one)

        # collect samples
        samples = []
        with jabbar(total=n, enable=self.show_progress, keep=False) as bar:
            while len(samples) < n:
                dump = self.redis.blpop(idfy(QUEUE, ana_id, t))[1]
                sample = pickle.loads(dump)
                if len(sample.accepted_particles) != 1:
                    # this should never happen
                    raise AssertionError(
                        "Expected exactly one accepted particle in sample.")
                samples.append(sample)
                bar.inc()

        # wait for all workers to join
        #  this is necessary for clear intermediate states
        while int(self.redis.get(idfy(N_WORKER, ana_id, t)).decode()) > 0:
            sleep(SLEEP_TIME)

        # set total number of evaluations
        self.nr_evaluations_ = int(
            self.redis.get(idfy(N_EVAL, ana_id, t)).decode())

        # remove all time-specific variables
        self.clear_generation_t(t)

        # create a single sample result, with start time correction
        sample = self.create_sample(samples, n)

        return sample
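redis.blpop blocks until an element is available on the list and returns a (key, value) pair, so [1] extracts the pickled payload. A minimal consumer sketch (assumes a reachable redis server; the key name is illustrative):

import pickle
import redis

r = redis.Redis()
r.rpush("queue", pickle.dumps({"particle": 1}))

# blpop blocks until an element is available and returns (key, value)
dump = r.blpop("queue")[1]
sample = pickle.loads(dump)
print(sample)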
Example #9
    def sample_until_n_accepted(self,
                                n,
                                simulate_one,
                                max_eval=np.inf,
                                all_accepted=False):
        # open pipeline
        pipeline = self.redis.pipeline()

        # write initial values to pipeline
        self.redis.set(SSA,
                       cloudpickle.dumps((simulate_one, self.sample_factory)))
        pipeline.set(N_EVAL, 0)
        pipeline.set(N_ACC, 0)
        pipeline.set(N_REQ, n)
        pipeline.set(ALL_ACCEPTED, int(all_accepted))  # encode as int
        pipeline.set(N_WORKER, 0)
        pipeline.set(BATCH_SIZE, self.batch_size)
        # delete previous results
        pipeline.delete(QUEUE)
        # execute all commands
        pipeline.execute()

        id_results = []

        # publish start message
        self.redis.publish(MSG, START)

        # wait until n acceptances
        with jabbar(total=n, enable=self.show_progress, keep=False) as bar:
            while len(id_results) < n:
                # pop result from queue, block until one is available
                dump = self.redis.blpop(QUEUE)[1]
                # extract pickled object
                particle_with_id = pickle.loads(dump)
                # append to collected results
                id_results.append(particle_with_id)
                bar.inc()

        # wait until all workers done
        while int(self.redis.get(N_WORKER).decode()) > 0:
            sleep(SLEEP_TIME)

        # make sure all results are collected
        while self.redis.llen(QUEUE) > 0:
            id_results.append(pickle.loads(self.redis.blpop(QUEUE)[1]))

        # set total number of evaluations
        self.nr_evaluations_ = int(self.redis.get(N_EVAL).decode())

        # delete keys from pipeline
        pipeline = self.redis.pipeline()
        pipeline.delete(SSA)
        pipeline.delete(N_EVAL)
        pipeline.delete(N_ACC)
        pipeline.delete(N_REQ)
        pipeline.delete(ALL_ACCEPTED)
        pipeline.delete(BATCH_SIZE)
        pipeline.execute()

        # avoid bias toward short-running evaluations (for dynamic scheduling)
        id_results.sort(key=lambda x: x[0])
        id_results = id_results[:n]

        results = [res[1] for res in id_results]

        # create 1 to-be-returned sample from results
        sample = self._create_empty_sample()
        for result in results:
            sample += result

        return sample
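With dynamic scheduling, fast evaluations return first, so keeping results in arrival order would over-represent them; sorting by start index and truncating to n removes that bias. The correction on plain data:

# (start index, result) pairs in arrival order
id_results = [(3, "c"), (1, "a"), (4, "d"), (2, "b")]
n = 3
id_results.sort(key=lambda x: x[0])           # order by start index
results = [res[1] for res in id_results[:n]]  # keep the first n started
print(results)  # ['a', 'b', 'c']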
Example #10
def test_no_iterable():
    """Test failure when using iterable mode without iterable."""
    with pytest.raises(ValueError):
        for _ in jabbar(total=10):
            pass
Example #11
    def sample_until_n_accepted(self,
                                n,
                                simulate_one,
                                t,
                                *,
                                max_eval=np.inf,
                                all_accepted=False,
                                ana_vars=None):
        # get the analysis id
        ana_id = self.analysis_id

        def get_int(var: str):
            """Convenience function to read an int variable."""
            return int(self.redis.get(idfy(var, ana_id, t)).decode())

        if self.generation_t_was_started(t):
            # update the SSA function
            self.redis.set(idfy(SSA, ana_id, t),
                           pickle.dumps((simulate_one, self.sample_factory)))
            # update the required population size
            self.redis.set(idfy(N_REQ, ana_id, t), n)
            # let the workers know they should update their ssa
            self.redis.set(idfy(IS_LOOK_AHEAD, ana_id, t), int(False))
            # it can happen that the population size increased, but the workers
            #  believe they are done already
            if get_int(N_WORKER) == 0 and get_int(N_ACC) < get_int(N_REQ):
                # send the start signal again
                self.redis.publish(MSG, START)
        else:
            # set up all variables for the new generation
            self.start_generation_t(n=n,
                                    t=t,
                                    simulate_one=simulate_one,
                                    all_accepted=all_accepted,
                                    is_look_ahead=False)

        # for the results
        id_results = []
        # reset logging counters
        self.logger.reset_counters()

        # wait until n acceptances
        with jabbar(total=n, enable=self.show_progress, keep=False) as bar:
            while len(id_results) < n:
                # pop result from queue, block until one is available
                dump = self.redis.blpop(idfy(QUEUE, ana_id, t))[1]
                # extract pickled object
                sample_with_id = pickle.loads(dump)

                # check whether the sample is really acceptable
                sample_with_id, any_particle_accepted = \
                    post_check_acceptance(
                        sample_with_id, ana_id=ana_id, t=t, redis=self.redis,
                        ana_vars=ana_vars, logger=self.logger)

                if any_particle_accepted:
                    # append to collected results
                    id_results.append(sample_with_id)
                    bar.update(len(id_results))

        # log active set
        _log_active_set(redis=self.redis,
                        ana_id=ana_id,
                        t=t,
                        id_results=id_results,
                        batch_size=self.batch_size)

        # maybe head-start the next generation already
        self.maybe_start_next_generation(t=t,
                                         n=n,
                                         id_results=id_results,
                                         all_accepted=all_accepted,
                                         ana_vars=ana_vars)

        # wait until all relevant simulations done
        if self.wait_for_all_samples:
            while get_int(N_WORKER) > 0:
                sleep(SLEEP_TIME)
        else:
            max_ix = max(id_result[0] for id_result in id_results)
            while (
                    # check whether any active evaluation was started earlier
                    any(ix <= max_ix for ix in get_active_set(
                        redis=self.redis, ana_id=ana_id, t=t))
                    # also stop if no worker is active, useful for server resets
                    and get_int(N_WORKER) > 0):
                sleep(SLEEP_TIME)

        # collect all remaining results in queue at this point
        while self.redis.llen(idfy(QUEUE, ana_id, t)) > 0:
            # pop result from queue, block until one is available
            dump = self.redis.blpop(idfy(QUEUE, ana_id, t))[1]
            # extract pickled object
            sample_with_id = pickle.loads(dump)

            # check whether the sample is really acceptable
            sample_with_id, any_particle_accepted = \
                post_check_acceptance(
                    sample_with_id, ana_id=ana_id, t=t, redis=self.redis,
                    ana_vars=ana_vars, logger=self.logger)

            if any_particle_accepted:
                # append to collected results
                id_results.append(sample_with_id)

        # set total number of evaluations
        self.nr_evaluations_ = get_int(N_EVAL)
        n_lookahead_eval = get_int(N_LOOKAHEAD_EVAL)

        # remove all time-specific variables if no more active workers,
        #  also for previous generations
        for _t in range(-1, t + 1):
            n_worker_b = self.redis.get(idfy(N_WORKER, ana_id, _t))
            if n_worker_b is not None and int(n_worker_b.decode()) == 0:
                self.clear_generation_t(t=_t)

        # create a single sample result, with start time correction
        sample = self.create_sample(id_results, n)

        # logging
        self.logger.add_row(t=t,
                            n_evaluated=self.nr_evaluations_,
                            n_lookahead=n_lookahead_eval)
        self.logger.write()

        # weight samples correctly
        sample = self_normalize_within_subpopulations(sample, n)

        return sample
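Unlike the earlier examples, this sampler drives the bar with bar.update(len(id_results)) rather than bar.inc(); update appears to set the absolute progress count, which stays correct even when results arrive in bursts. A sketch under that assumption:

from jabbar import jabbar

results = []
with jabbar(total=5) as bar:
    for i in range(5):
        results.append(i)
        bar.update(len(results))  # set absolute progress, not an increment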
Example #12
    def sample_until_n_accepted(
        self,
        n,
        simulate_one,
        t,
        *,
        max_eval=np.inf,
        all_accepted=False,
        ana_vars=None,
    ) -> Sample:
        # get the analysis id
        ana_id = self.analysis_id

        def get_int(var: str):
            """Convenience function to read an int variable."""
            return int(self.redis.get(idfy(var, ana_id, t)).decode())

        if self.generation_t_was_started(t):
            # update the SSA function
            self.redis.set(
                idfy(SSA, ana_id, t),
                pickle.dumps((simulate_one, self.sample_factory)),
            )
            # update the required population size
            self.redis.set(idfy(N_REQ, ana_id, t), n)
            # let the workers know they should update their ssa
            self.redis.set(idfy(IS_LOOK_AHEAD, ana_id, t), int(False))
            # it can happen that the population size increased, but the workers
            #  believe they are done already
            if get_int(N_WORKER) == 0 and get_int(N_ACC) < get_int(N_REQ):
                # send the start signal again
                self.redis.publish(MSG, START)
        else:
            # set up all variables for the new generation
            self.start_generation_t(
                n=n,
                t=t,
                simulate_one=simulate_one,
                all_accepted=all_accepted,
                is_look_ahead=False,
            )

        # for the results
        id_results = []
        # reset logging counters
        self.logger.reset_counters()

        # wait until n acceptances
        with jabbar(total=n, enable=self.show_progress, keep=False) as bar:
            while len(id_results) < n:
                # pop result from queue, block until one is available
                dump = self.redis.blpop(idfy(QUEUE, ana_id, t))[1]
                # extract pickled object
                sample_with_id = pickle.loads(dump)

                # check whether the sample is really acceptable
                sample_with_id, any_particle_accepted = post_check_acceptance(
                    sample_with_id,
                    ana_id=ana_id,
                    t=t,
                    redis=self.redis,
                    ana_vars=ana_vars,
                    logger=self.logger,
                )

                if any_particle_accepted:
                    # append to collected results
                    id_results.append(sample_with_id)
                    bar.update(len(id_results))

        # maybe head-start the next generation already
        self.maybe_start_next_generation(
            t=t,
            n=n,
            id_results=id_results,
            all_accepted=all_accepted,
            ana_vars=ana_vars,
        )

        # wait until all relevant simulations done
        if self.wait_for_all_samples:
            while get_int(N_WORKER) > 0:
                sleep(SLEEP_TIME)
        else:
            # we only need to wait for simulations that were started
            #  before the last started one among the first n accepted ones,
            #  as later ones would be discarded anyway
            max_ix = sorted(id_result[0] for id_result in id_results)[n - 1]
            # first time index is 1
            missing_ixs = set(range(1, max_ix + 1))
            while (
                    # check whether any active evaluation was started earlier
                    missing_ixs
                    # also stop if no worker is active, useful for server resets
                    and get_int(N_WORKER) > 0):
                # extract done indices
                # use a pipeline for efficient retrieval
                # transactions are atomic
                _var = idfy(DONE_IXS, ana_id, t)
                with self.redis.pipeline(transaction=True) as p:
                    p.lrange(_var, 0, -1).delete(_var)
                    vals = p.execute()[0]

                # check if missing list can be reduced
                for val in vals:
                    done_ix = int(val.decode())
                    # discard is a no-op if the index is not in the set
                    missing_ixs.discard(done_ix)

                sleep(SLEEP_TIME)

        # collect all remaining results in queue at this point
        while self.redis.llen(idfy(QUEUE, ana_id, t)) > 0:
            # pop result from queue, block until one is available
            dump = self.redis.blpop(idfy(QUEUE, ana_id, t))[1]
            # extract pickled object
            sample_with_id = pickle.loads(dump)

            # check whether the sample is really acceptable
            sample_with_id, any_particle_accepted = post_check_acceptance(
                sample_with_id,
                ana_id=ana_id,
                t=t,
                redis=self.redis,
                ana_vars=ana_vars,
                logger=self.logger,
            )

            if any_particle_accepted:
                # append to collected results
                id_results.append(sample_with_id)

        # set total number of evaluations
        self.nr_evaluations_ = get_int(N_EVAL)
        n_lookahead_eval = get_int(N_LOOKAHEAD_EVAL)

        # remove all time-specific variables if no more active workers,
        #  also for previous generations
        if self.wait_for_all_samples:
            self.clear_generation_t(t=t)
        else:
            for _t in range(-1, t + 1):
                n_worker_b = self.redis.get(idfy(N_WORKER, ana_id, _t))
                if n_worker_b is not None and int(n_worker_b.decode()) == 0:
                    # TODO For fast-running models, communication does not
                    #  always work.
                    # Until that is fixed, simply do not clear up.
                    # self.clear_generation_t(t=_t)
                    pass

        # create a single sample result, with start time correction
        sample = self.create_sample(id_results, n)

        # logging
        self.logger.add_row(t=t,
                            n_evaluated=self.nr_evaluations_,
                            n_lookahead=n_lookahead_eval)
        self.logger.write()

        # weight sub-populations suitably
        sample = self_normalize_within_subpopulations(sample, n)

        return sample
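The DONE_IXS handling above drains a redis list and deletes it in one atomic step: inside a transactional pipeline, LRANGE and DEL are queued together and run under MULTI/EXEC, and execute()[0] returns the LRANGE result. A standalone sketch (assumes a reachable redis server; the key name is illustrative):

import redis

r = redis.Redis()
r.rpush("done_ixs", 1, 2, 3)

# atomically read and clear the list; pipeline commands chain,
# and execute() returns one result per queued command
with r.pipeline(transaction=True) as p:
    vals = p.lrange("done_ixs", 0, -1).delete("done_ixs").execute()[0]
print([int(v) for v in vals])  # [1, 2, 3]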
Example #13
def test_no_total():
    """Test failure when no total is available."""
    with pytest.raises(ValueError):
        jabbar()
Example #14
def test_custom_symbols():
    """Test passing custom bar symbols."""
    for _ in jabbar(range(100), symbols="yo"):
        pass
Example #15
def test_keep():
    """Test the keep functionality."""
    for _ in jabbar(range(100), keep=False):
        pass
Example #16
def test_enable():
    """Test enabling functionality."""
    for _ in jabbar(range(100), enable=False):
        pass
Example #17
def test_file():
    """Test output to file."""
    with tempfile.NamedTemporaryFile(mode="w+") as file:
        for _ in jabbar(range(100), file=file):
            pass
Example #18
def test_stderr():
    """Test output to stderr."""
    for _ in jabbar(range(100), file=sys.stderr):
        pass
Example #19
    def sample_until_n_accepted(self,
                                n,
                                simulate_one,
                                t,
                                *,
                                max_eval=np.inf,
                                all_accepted=False,
                                ana_vars=None):
        # get the analysis id
        ana_id = self.analysis_id

        def get_int(var: str):
            """Convenience function to read an int variable."""
            return int(self.redis.get(idfy(var, ana_id, t)).decode())

        if self.generation_t_was_started(t):
            # update the SSA function
            self.redis.set(
                idfy(SSA, ana_id, t),
                cloudpickle.dumps((simulate_one, self.sample_factory)))
            # update the required population size
            self.redis.set(idfy(N_REQ, ana_id, t), n)
            # let the workers know they should update their ssa
            self.redis.set(idfy(IS_LOOK_AHEAD, ana_id, t), int(False))
            # it can happen that the population size increased, but the workers
            #  believe they are done already
            if get_int(N_WORKER) == 0 and get_int(N_ACC) < get_int(N_REQ):
                # send the start signal again
                self.redis.publish(MSG, START)
        else:
            # set up all variables for the new generation
            self.start_generation_t(n=n,
                                    t=t,
                                    simulate_one=simulate_one,
                                    all_accepted=all_accepted,
                                    is_look_ahead=False)

        # for the results
        id_results = []
        # reset logging counters
        self.logger.reset_counters()

        # wait until n acceptances
        with jabbar(total=n, enable=self.show_progress, keep=False) as bar:
            while len(id_results) < n:
                # pop result from queue, block until one is available
                dump = self.redis.blpop(idfy(QUEUE, ana_id, t))[1]
                # extract pickled object
                sample_with_id = pickle.loads(dump)

                # check whether the sample is really acceptable
                sample_with_id, any_particle_accepted = \
                    post_check_acceptance(
                        sample_with_id, ana_id=ana_id, t=t, redis=self.redis,
                        ana_vars=ana_vars, logger=self.logger)

                if any_particle_accepted:
                    # append to collected results
                    id_results.append(sample_with_id)
                    bar.update(len(id_results))

        # maybe head-start the next generation already
        self.maybe_start_next_generation(t=t,
                                         n=n,
                                         id_results=id_results,
                                         all_accepted=all_accepted,
                                         ana_vars=ana_vars)

        # wait until all workers done
        while int(self.redis.get(idfy(N_WORKER, ana_id, t)).decode()) > 0:
            sleep(SLEEP_TIME)

        # make sure all results are collected
        while self.redis.llen(idfy(QUEUE, ana_id, t)) > 0:
            # pop result from queue, block until one is available
            dump = self.redis.blpop(idfy(QUEUE, ana_id, t))[1]
            # extract pickled object
            sample_with_id = pickle.loads(dump)

            # check whether the sample is really acceptable
            sample_with_id, any_particle_accepted = \
                post_check_acceptance(
                    sample_with_id, ana_id=ana_id, t=t, redis=self.redis,
                    ana_vars=ana_vars, logger=self.logger)

            if any_particle_accepted:
                # append to collected results
                id_results.append(sample_with_id)

        # set total number of evaluations
        self.nr_evaluations_ = int(
            self.redis.get(idfy(N_EVAL, ana_id, t)).decode())

        # remove all time-specific variables
        self.clear_generation_t(t)

        # create a single sample result, with start time correction
        sample = self.create_sample(id_results, n)

        # logging
        self.logger.add_row(t=t,
                            n_evaluated=self.nr_evaluations_,
                            n_accepted=sample.n_accepted)
        self.logger.write()

        return sample
Example #20
def test_iter():
    """Test whether iter mode works."""
    for _ in jabbar(range(100)):
        pass
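Taken together, the tests above exercise the three jabbar usage modes; side by side:

from jabbar import jabbar

# iterable mode (this example): wraps any sized iterable
for _ in jabbar(range(100)):
    pass

# context mode (Example #1): finished automatically on exit
with jabbar(total=100) as bar:
    for _ in range(100):
        bar.inc()

# manual mode (Example #3): finish() must be called explicitly
bar = jabbar(total=100)
for _ in range(100):
    bar.inc()
bar.finish()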