Example #1
        def work(self):
            bandit = self.bandit
            random_algo = Random(bandit)
            # build an experiment of 10 trials
            trials = Trials()
            exp = Experiment(trials, random_algo)
            #print random_algo.s_specs_idxs_vals
            exp.run(10)
            ids = trials.tids
            assert len(ids) == 10
            tpe_algo = TreeParzenEstimator(bandit)
            #print pyll.as_apply(tpe_algo.post_idxs)
            #print pyll.as_apply(tpe_algo.post_vals)
            argmemo = {}

            print trials.miscs
            idxs, vals = miscs_to_idxs_vals(trials.miscs)
            argmemo[tpe_algo.observed['idxs']] = idxs
            argmemo[tpe_algo.observed['vals']] = vals
            argmemo[tpe_algo.observed_loss['idxs']] = trials.tids
            argmemo[tpe_algo.observed_loss['vals']] = trials.losses()
            stuff = pyll.rec_eval(
                [tpe_algo.post_below['idxs'], tpe_algo.post_below['vals']],
                memo=argmemo)
            print stuff
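
Examples #1 and #2 (which differ only in line wrapping) lean on pyll's memo mechanism: rec_eval substitutes concrete values for the symbolic placeholder nodes listed in memo before evaluating the requested outputs. A minimal sketch of that mechanism on a toy expression, unrelated to TPE, might look like this:

from hyperopt import pyll

# Toy illustration of the memo substitution used above (not TPE code).
a = pyll.Literal()                         # symbolic placeholder, like tpe_algo.observed['idxs']
expr = a + 1                               # builds an Apply node; nothing is computed yet
print(pyll.rec_eval(expr, memo={a: 41}))   # substitutes 41 for `a` and prints 42
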
Example #2
        def work(self):
            bandit = self.bandit
            random_algo = Random(bandit)
            # build an experiment of 10 trials
            trials = Trials()
            exp = Experiment(trials, random_algo)
            #print random_algo.s_specs_idxs_vals
            exp.run(10)
            ids = trials.tids
            assert len(ids) == 10
            tpe_algo = TreeParzenEstimator(bandit)
            #print pyll.as_apply(tpe_algo.post_idxs)
            #print pyll.as_apply(tpe_algo.post_vals)
            argmemo = {}

            print trials.miscs
            idxs, vals = miscs_to_idxs_vals(trials.miscs)
            argmemo[tpe_algo.observed['idxs']] = idxs
            argmemo[tpe_algo.observed['vals']] = vals
            argmemo[tpe_algo.observed_loss['idxs']] = trials.tids
            argmemo[tpe_algo.observed_loss['vals']] = trials.losses()
            stuff = pyll.rec_eval([tpe_algo.post_below['idxs'],
                        tpe_algo.post_below['vals']],
                        memo=argmemo)
            print stuff
Example #3
def test_distributions():
    # test that the distributions come out right

    # XXX: test more distributions
    bandit = base.Bandit({
        'loss':
        hp_loguniform('lu', -2, 2) +
        hp_qloguniform('qlu', np.log(1 + 0.01), np.log(20), 2) +
        hp_quniform('qu', -4.999, 5, 1) + hp_uniform('u', 0, 10)
    })
    algo = base.Random(bandit)
    trials = base.Trials()
    exp = base.Experiment(trials, algo)
    exp.catch_bandit_exceptions = False
    N = 1000
    exp.run(N)
    assert len(trials) == N
    idxs, vals = base.miscs_to_idxs_vals(trials.miscs)
    print idxs.keys()

    COUNTMAX = 130
    COUNTMIN = 70

    # -- loguniform
    log_lu = np.log(vals['lu'])
    assert len(log_lu) == N
    assert -2 < np.min(log_lu)
    assert np.max(log_lu) < 2
    h = np.histogram(log_lu)[0]
    print h
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)

    # -- quantized log uniform
    qlu = vals['qlu']
    assert np.all(np.fmod(qlu, 2) == 0)
    assert np.min(qlu) == 2
    assert np.max(qlu) == 20
    bc_qlu = np.bincount(qlu)
    assert bc_qlu[2] > bc_qlu[4] > bc_qlu[6] > bc_qlu[8]

    # -- quantized uniform
    qu = vals['qu']
    assert np.min(qu) == -5
    assert np.max(qu) == 5
    assert np.all(np.fmod(qu, 1) == 0)
    bc_qu = np.bincount(np.asarray(qu).astype('int') + 5)
    assert np.all(40 < bc_qu), bc_qu  # XXX: how to get the distribution flat
    # with new rounding rule?
    assert np.all(bc_qu < 125), bc_qu
    assert np.all(bc_qu < COUNTMAX)

    # -- uniform
    u = vals['u']
    assert np.min(u) > 0
    assert np.max(u) < 10
    h = np.histogram(u)[0]
    print h
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)
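
The quantization assertions above (np.fmod(qlu, 2) == 0 and friends) follow from how hyperopt defines its quantized distributions: a qloguniform draw is round(exp(uniform(low, high)) / q) * q, so every sample is a multiple of q. A standalone numpy sketch of that rounding, independent of hyperopt:

import numpy as np

# Plain-numpy sketch of the quantization checked in the test above.
rng = np.random.default_rng(0)
q = 2
draws = np.exp(rng.uniform(np.log(1 + 0.01), np.log(20), size=1000))
qlu = np.round(draws / q) * q
assert np.all(np.fmod(qlu, q) == 0)        # every value lands on a multiple of q
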
Example #4
def notest_opt_qn_normal(f=hp_normal):
    bandit = Bandit(
        {'loss': scope.sum([f('v%i' % ii, 0, 1) for ii in range(25)])**2},
        loss_target=0)
    algo = TreeParzenEstimator(bandit,
                               prior_weight=.5,
                               n_startup_jobs=0,
                               n_EI_candidates=1,
                               gamma=0.15)
    trials = Trials()
    experiment = Experiment(trials, algo, async=False)
    experiment.max_queue_len = 1
    experiment.run(40)
    print 'sorted losses:', list(sorted(trials.losses()))

    idxs, vals = miscs_to_idxs_vals(trials.miscs)

    if 1:
        import hyperopt.plotting
        hyperopt.plotting.main_plot_vars(trials, bandit, do_show=1)
    else:
        import matplotlib.pyplot as plt
        begin = [v[:10] for k, v in vals.items()]
        end = [v[-10:] for k, v in vals.items()]
        plt.subplot(2, 1, 1)
        plt.title('before')
        plt.hist(np.asarray(begin).flatten())
        plt.subplot(2, 1, 2)
        plt.title('after')
        plt.hist(np.asarray(end).flatten())
        plt.show()
Example #5
def notest_opt_qn_normal(f=hp_normal):
    bandit = Bandit(
            {'loss': scope.sum([f('v%i' % ii, 0, 1)
                for ii in range(25)]) ** 2},
            loss_target=0)
    algo = TreeParzenEstimator(bandit,
            prior_weight=.5,
            n_startup_jobs=0,
            n_EI_candidates=1,
            gamma=0.15)
    trials = Trials()
    experiment = Experiment(trials, algo, async=False)
    experiment.max_queue_len = 1
    experiment.run(40)
    print 'sorted losses:', list(sorted(trials.losses()))

    idxs, vals = miscs_to_idxs_vals(trials.miscs)

    if 1:
        import hyperopt.plotting
        hyperopt.plotting.main_plot_vars(trials, bandit, do_show=1)
    else:
        import matplotlib.pyplot as plt
        begin = [v[:10] for k, v in vals.items()]
        end = [v[-10:] for k, v in vals.items()]
        plt.subplot(2, 1, 1)
        plt.title('before')
        plt.hist(np.asarray(begin).flatten())
        plt.subplot(2, 1, 2)
        plt.title('after')
        plt.hist(np.asarray(end).flatten())
        plt.show()
Example #6
def test_distributions():
    # test that the distributions come out right

    # XXX: test more distributions
    bandit = base.Bandit({
                'loss': hp_loguniform('lu', -2, 2) +
                    hp_qloguniform('qlu', np.log(1 + 0.01), np.log(20), 2) +
                    hp_quniform('qu', -4.999, 5, 1) +
                    hp_uniform('u', 0, 10)})
    algo = base.Random(bandit)
    trials = base.Trials()
    exp = base.Experiment(trials, algo)
    exp.catch_bandit_exceptions = False
    N = 1000
    exp.run(N)
    assert len(trials) == N
    idxs, vals = base.miscs_to_idxs_vals(trials.miscs)
    print idxs.keys()

    COUNTMAX = 130
    COUNTMIN = 70

    # -- loguniform
    log_lu = np.log(vals['lu'])
    assert len(log_lu) == N
    assert -2 < np.min(log_lu)
    assert np.max(log_lu) < 2
    h = np.histogram(log_lu)[0]
    print h
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)

    # -- quantized log uniform
    qlu = vals['qlu']
    assert np.all(np.fmod(qlu, 2) == 0)
    assert np.min(qlu) == 2
    assert np.max(qlu) == 20
    bc_qlu = np.bincount(qlu)
    assert bc_qlu[2] > bc_qlu[4] > bc_qlu[6] > bc_qlu[8]

    # -- quantized uniform
    qu = vals['qu']
    assert np.min(qu) == -5
    assert np.max(qu) == 5
    assert np.all(np.fmod(qu, 1) == 0)
    bc_qu = np.bincount(np.asarray(qu).astype('int') + 5)
    assert np.all(40 < bc_qu), bc_qu  # XXX: how to get the distribution flat
    # with new rounding rule?
    assert np.all(bc_qu < 125), bc_qu
    assert np.all(bc_qu < COUNTMAX)

    # -- uniform
    u = vals['u']
    assert np.min(u) > 0
    assert np.max(u) < 10
    h = np.histogram(u)[0]
    print h
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)
Example #7
 def test_suggest_1(self):
     docs = self.algo.suggest([0], Trials())
     assert len(docs) == 1
     # -- assert validity of docs
     trials = trials_from_docs(docs)
     assert docs[0]['misc']['idxs']['node_4'] == [0]
     idxs, vals = miscs_to_idxs_vals(trials.miscs)
     assert idxs['node_4'] == [0]
Example #8
    def test_seeding(self):
        # -- assert that the seeding works a particular way

        domain = coin_flip()
        docs = rand.suggest(range(10), domain, Trials(), seed=123)
        trials = trials_from_docs(docs)
        idxs, vals = miscs_to_idxs_vals(trials.miscs)

        # Passes Nov 8 / 2013 
        self.assertEqual(list(idxs['flip']), range(10))
        self.assertEqual(list(vals['flip']), [0, 1, 0, 0, 0, 0, 0, 1, 1, 0])
Example #9
 def test_suggest_1(self):
     print 'EXPR', self.bandit.expr
     docs = self.algo.suggest([0], Trials())
     assert len(docs) == 1
     print 'DOCS', docs
     # -- assert validity of docs
     trials = trials_from_docs(docs)
     print 'TRIALS', trials
     assert docs[0]['misc']['idxs']['flip'] == [0]
     idxs, vals = miscs_to_idxs_vals(trials.miscs)
     assert idxs['flip'] == [0]
Example #10
    def test_seeding(self):
        # -- assert that the seeding works a particular way

        domain = coin_flip()
        docs = rand.suggest(list(range(10)), domain, Trials(), seed=123)
        trials = trials_from_docs(docs)
        idxs, vals = miscs_to_idxs_vals(trials.miscs)

        # Passes Nov 8 / 2013
        self.assertEqual(list(idxs["flip"]), list(range(10)))
        self.assertEqual(list(vals["flip"]), [0, 1, 0, 0, 0, 0, 0, 1, 1, 0])
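
The seeding tests above rely on a coin_flip() fixture defined elsewhere in hyperopt's test suite. As a rough, hypothetical stand-in (the constructor arguments and the fixture body are assumptions, and the expected value sequence in the test is tied to the real fixture plus the seed), a one-variable domain could be built like this:

from hyperopt import hp
from hyperopt.base import Domain, STATUS_OK

def coin_flip():
    # Hypothetical stand-in for the coin_flip() fixture used above:
    # a single binary hyperparameter labelled 'flip'; the loss is its value.
    return Domain(lambda cfg: {'loss': cfg['flip'], 'status': STATUS_OK},
                  {'flip': hp.randint('flip', 2)})
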
Example #11
 def test_arbitrary_range(self):
     new_ids = [-2, 0, 7, 'a', '007']
     docs = self.algo.suggest(new_ids, Trials())
     # -- assert validity of docs
     trials = trials_from_docs(docs)
     idxs, vals = miscs_to_idxs_vals(trials.miscs)
     assert len(docs) == 5
     assert len(idxs) == 1
     assert len(vals) == 1
     print vals
     assert idxs['flip'] == new_ids
     assert np.all(vals['flip'] == [0, 1, 0, 1, 1])
Example #12
 def test_arbitrary_range(self):
     new_ids = [-2, 0, 7, 'a', '007']
     docs = self.algo.suggest(new_ids, Trials())
     # -- assert validity of docs
     trials = trials_from_docs(docs)
     idxs, vals = miscs_to_idxs_vals(trials.miscs)
     assert len(docs) == 5
     assert len(idxs) == 1
     assert len(vals) == 1
     print vals
     assert idxs['node_4'] == new_ids
     assert np.all(vals['node_4'] == [0, 1, 0, 1, 1])
Example #13
 def test_suggest_5(self):
     docs = self.algo.suggest(range(5), Trials())
     print docs
     assert len(docs) == 5
     # -- assert validity of docs
     trials = trials_from_docs(docs)
     idxs, vals = miscs_to_idxs_vals(trials.miscs)
     print idxs
     print vals
     assert len(idxs) == 1
     assert len(vals) == 1
     assert idxs['node_4'] == range(5)
     assert np.all(vals['node_4'] == [1, 1, 0, 1, 0])
Example #14
 def test_suggest_5(self):
     docs = self.algo.suggest(range(5), Trials())
     print docs
     assert len(docs) == 5
     # -- assert validity of docs
     trials = trials_from_docs(docs)
     idxs, vals = miscs_to_idxs_vals(trials.miscs)
     print idxs
     print vals
     assert len(idxs) == 1
     assert len(vals) == 1
     assert idxs['flip'] == range(5)
     assert np.all(vals['flip'] == [1, 1, 0, 1, 0])
Example #15
    def test_arbitrary_range(self, N=10):
        assert N <= 10
        new_ids = [-2, 0, 7, 'a', '007', 66, 'a3', '899', 23, 2333][:N]
        docs = self.algo.suggest(new_ids, Trials())
        # -- assert validity of docs
        trials = trials_from_docs(docs)
        idxs, vals = miscs_to_idxs_vals(trials.miscs)
        assert len(docs) == N
        assert len(idxs) == 1
        assert len(vals) == 1
        print vals
        assert idxs['flip'] == new_ids

        # -- assert that the random seed matches that of Jan 8/2013
        assert np.all(vals['flip'] == [0, 1, 0, 0, 0, 0, 0, 1, 1, 0][:N])
Example #16
 def test_suggest_N(self, N=10):
     assert N <= 10
     docs = self.algo.suggest(range(N), Trials())
     print 'docs', docs
     assert len(docs) == N
     # -- assert validity of docs
     trials = trials_from_docs(docs)
     idxs, vals = miscs_to_idxs_vals(trials.miscs)
     print 'idxs', idxs
     print 'vals', vals
     assert len(idxs) == 1
     assert len(vals) == 1
     assert idxs['flip'] == range(N)
     # -- only works when N == 5
     assert np.all(vals['flip'] == [0, 1, 0, 0, 0, 0,  0, 1, 1, 0][:N])
Example #17
    def work(self, **kwargs):
        self.__dict__.update(kwargs)
        bandit = opt_q_uniform(self.target)
        prior_weight = 2.5
        gamma = 0.20
        algo = partial(tpe.suggest,
                prior_weight=prior_weight,
                n_startup_jobs=2,
                n_EI_candidates=128,
                gamma=gamma)
        #print algo.opt_idxs['x']
        #print algo.opt_vals['x']

        trials = Trials()
        fmin(passthrough,
            space=bandit.expr,
            algo=algo,
            trials=trials,
            max_evals=self.LEN)
        if self.show_vars:
            import hyperopt.plotting
            hyperopt.plotting.main_plot_vars(trials, bandit, do_show=1)

        idxs, vals = miscs_to_idxs_vals(trials.miscs)
        idxs = idxs['x']
        vals = vals['x']

        losses = trials.losses()

        from hyperopt.tpe import ap_filter_trials
        from hyperopt.tpe import adaptive_parzen_samplers

        qu = scope.quniform(1.01, 10, 1)
        fn = adaptive_parzen_samplers['quniform']
        fn_kwargs = dict(size=(4,), rng=np.random)
        s_below = pyll.Literal()
        s_above = pyll.Literal()
        b_args = [s_below, prior_weight] + qu.pos_args
        b_post = fn(*b_args, **fn_kwargs)
        a_args = [s_above, prior_weight] + qu.pos_args
        a_post = fn(*a_args, **fn_kwargs)

        #print b_post
        #print a_post
        fn_lpdf = getattr(scope, a_post.name + '_lpdf')
        print fn_lpdf
        # calculate the llik of b_post under both distributions
        a_kwargs = dict([(n, a) for n, a in a_post.named_args
                    if n not in ('rng', 'size')])
        b_kwargs = dict([(n, a) for n, a in b_post.named_args
                    if n not in ('rng', 'size')])
        below_llik = fn_lpdf(*([b_post] + b_post.pos_args), **b_kwargs)
        above_llik = fn_lpdf(*([b_post] + a_post.pos_args), **a_kwargs)
        new_node = scope.broadcast_best(b_post, below_llik, above_llik)

        print '=' * 80

        do_show = self.show_steps

        for ii in range(2, 9):
            if ii > len(idxs):
                break
            print '-' * 80
            print 'ROUND', ii
            print '-' * 80
            all_vals = [2, 3, 4, 5, 6, 7, 8, 9, 10]
            below, above = ap_filter_trials(idxs[:ii],
                    vals[:ii], idxs[:ii], losses[:ii], gamma)
            below = below.astype('int')
            above = above.astype('int')
            print 'BB0', below
            print 'BB1', above
            #print 'BELOW',  zip(range(100), np.bincount(below, minlength=11))
            #print 'ABOVE',  zip(range(100), np.bincount(above, minlength=11))
            memo = {b_post: all_vals, s_below: below, s_above: above}
            bl, al, nv = pyll.rec_eval([below_llik, above_llik, new_node],
                    memo=memo)
            #print bl - al
            print 'BB2', dict(zip(all_vals, bl - al))
            print 'BB3', dict(zip(all_vals, bl))
            print 'BB4', dict(zip(all_vals, al))
            print 'ORIG PICKED', vals[ii]
            print 'PROPER OPT PICKS:', nv

            #assert np.allclose(below, [3, 3, 9])
            #assert len(below) + len(above) == len(vals)

            if do_show:
                plt.subplot(8, 1, ii)
                #plt.scatter(all_vals,
                #    np.bincount(below, minlength=11)[2:], c='b')
                #plt.scatter(all_vals,
                #    np.bincount(above, minlength=11)[2:], c='c')
                plt.scatter(all_vals, bl, c='g')
                plt.scatter(all_vals, al, c='r')
        if do_show:
            plt.show()
Example #18
def plot_hyperparameters(trials,
                         save=True,
                         fontsize=10,
                         colorize_best=None,
                         columns=5,
                         arrange_by_loss=False,
                         fname='parameters_selection_plot.png'):
    """Copying from hyperopt because original hyperopt does not allow saving the plot."""

    idxs, vals = miscs_to_idxs_vals(trials.miscs)
    losses = trials.losses()
    finite_losses = [y for y in losses if y not in (None, float("inf"))]
    asrt = np.argsort(finite_losses)
    if colorize_best is not None:
        colorize_thresh = finite_losses[asrt[colorize_best + 1]]
    else:
        # -- set to lower than best (disabled)
        colorize_thresh = finite_losses[asrt[0]] - 1

    loss_min = min(finite_losses)
    loss_max = max(finite_losses)
    print("finite loss range", loss_min, loss_max, colorize_thresh)

    loss_by_tid = dict(zip(trials.tids, losses))

    def color_fn_bw(lossval):
        if lossval in (None, float("inf")):
            return 1, 1, 1
        else:
            t = (lossval - loss_min) / (loss_max - loss_min + 0.0001)
            if lossval < colorize_thresh:
                return 0.0, 1.0 - t, 0.0  # -- bright green = best, black = worst
            else:
                return t, t, t  # -- white=worst, black=best

    all_labels = list(idxs.keys())
    titles = all_labels
    order = np.argsort(titles)

    C = min(columns, len(all_labels))
    R = int(np.ceil(len(all_labels) / float(C)))

    for plotnum, varnum in enumerate(order):
        label = all_labels[varnum]
        plt.subplot(R, C, plotnum + 1)

        # hide x ticks
        ticks_num, ticks_txt = plt.xticks()
        plt.xticks(ticks_num, [""] * len(ticks_num))

        dist_name = label

        if arrange_by_loss:
            x = [loss_by_tid[ii] for ii in idxs[label]]
        else:
            x = idxs[label]
        if "log" in dist_name:
            y = np.log(vals[label])
        else:
            y = vals[label]
        plt.title(titles[varnum], fontsize=fontsize)
        c = list(map(color_fn_bw, [loss_by_tid[ii] for ii in idxs[label]]))
        if len(y):
            plt.scatter(x, y, c=c)
        if "log" in dist_name:
            nums, texts = plt.yticks()
            plt.yticks(nums, ["%.2e" % np.exp(t) for t in nums])

    if save:
        plt.savefig(fname, dpi=300, bbox_inches='tight')
    else:
        plt.show()
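
Example #18 defines a plotting helper but never calls it. A hedged usage sketch, assuming plot_hyperparameters is defined in the same script alongside module-level numpy/matplotlib imports (the search space and objective below are placeholders, not part of the original):

import numpy as np
import matplotlib.pyplot as plt
from hyperopt import fmin, tpe, hp, Trials
from hyperopt.base import miscs_to_idxs_vals

trials = Trials()
fmin(lambda p: (p['x'] - 3.0) ** 2,              # toy objective
     space={'x': hp.uniform('x', -10, 10)},
     algo=tpe.suggest,
     max_evals=50,
     trials=trials)
plot_hyperparameters(trials, save=False, colorize_best=5)
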
Example #19
def test_distributions():
    # test that the distributions come out right

    # XXX: test more distributions
    space = {
        'loss': (
            hp_loguniform('lu', -2, 2) +
            hp_qloguniform('qlu', np.log(1 + 0.01), np.log(20), 2) +
            hp_quniform('qu', -4.999, 5, 1) +
            hp_uniform('u', 0, 10)),
        'status': 'ok'}
    trials = base.Trials()
    N = 1000
    fmin(lambda x: x,
        space=space,
        algo=rand.suggest,
        trials=trials,
        max_evals=N,
        rstate=np.random.RandomState(124),
        catch_eval_exceptions=False)
    assert len(trials) == N
    idxs, vals = base.miscs_to_idxs_vals(trials.miscs)
    print(list(idxs.keys()))

    COUNTMAX = 130
    COUNTMIN = 70

    # -- loguniform
    log_lu = np.log(vals['lu'])
    assert len(log_lu) == N
    assert -2 < np.min(log_lu)
    assert np.max(log_lu) < 2
    h = np.histogram(log_lu)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)

    # -- quantized log uniform
    qlu = vals['qlu']
    assert np.all(np.fmod(qlu, 2) == 0)
    assert np.min(qlu) == 2
    assert np.max(qlu) == 20
    bc_qlu = np.bincount(qlu)
    assert bc_qlu[2] > bc_qlu[4] > bc_qlu[6] > bc_qlu[8]

    # -- quantized uniform
    qu = vals['qu']
    assert np.min(qu) == -5
    assert np.max(qu) == 5
    assert np.all(np.fmod(qu, 1) == 0)
    bc_qu = np.bincount(np.asarray(qu).astype('int') + 5)
    assert np.all(40 < bc_qu), bc_qu  # XXX: how to get the distribution flat
    # with new rounding rule?
    assert np.all(bc_qu < 125), bc_qu
    assert np.all(bc_qu < COUNTMAX)

    # -- uniform
    u = vals['u']
    assert np.min(u) > 0
    assert np.max(u) < 10
    h = np.histogram(u)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)
Example #20
def test_distributions():
    # test that the distributions come out right

    # XXX: test more distributions
    class Bandit(base.Bandit):
        def __init__(self):
            d = dict(
                # -- alphabetical order
                lu = scope.loguniform(-2, 2),
                qlu = scope.qloguniform(np.log(0.01), np.log(20), 2),
                qu = scope.quniform(-4.999, 5, 1),
                u = scope.uniform(0, 10),
                )
            base.Bandit.__init__(self, as_apply(d))

        def evaluate(self, *args):
            return dict(status='ok', loss=0)
    algo = base.Random(Bandit())
    trials = base.Trials()
    exp = base.Experiment(trials, algo)
    N = 1000
    exp.run(N)
    assert len(trials) == N
    idxs, vals = base.miscs_to_idxs_vals(trials.miscs)
    print idxs.keys()
    # XXX how to tie these names to dict entries...

    # -- loguniform
    log_lu = np.log(vals['node_2'])
    assert len(log_lu) == N
    assert -2 < np.min(log_lu)
    assert np.max(log_lu) < 2
    h = np.histogram(log_lu)[0]
    print h
    assert np.all(75 < h)
    assert np.all(h < 125)

    # -- quantized log uniform
    qlu = vals['node_6']
    assert np.all(np.fmod(qlu, 2) == 0)
    assert np.min(qlu) == 2
    assert np.max(qlu) == 20
    bc_qlu = np.bincount(qlu)
    assert bc_qlu[2] > bc_qlu[4] > bc_qlu[6] > bc_qlu[8]

    # -- quantized uniform
    qu = vals['node_10']
    assert np.min(qu) == -4
    assert np.max(qu) == 5
    assert np.all(np.fmod(qu, 1) == 0)
    bc_qu = np.bincount(np.asarray(qu).astype('int') + 4)
    assert np.all(75 < bc_qu)
    assert np.all(bc_qu < 125)

    # -- uniform
    u = vals['node_13']
    assert np.min(u) > 0
    assert np.max(u) < 10
    h = np.histogram(u)[0]
    print h
    assert np.all(75 < h)
    assert np.all(h < 125)
Example #21
 def idxs_vals_from_ids(self, ids, seed):
     docs = self.suggest(ids, self.domain, Trials(), seed)
     trials = trials_from_docs(docs)
     idxs, vals = miscs_to_idxs_vals(trials.miscs)
     return idxs, vals
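
Example #21 condenses the pattern shared by most of the snippets above: generate trial documents, wrap them in a Trials object, then flatten trials.miscs into per-variable index/value lists. For reference, a toy sketch of what miscs_to_idxs_vals returns when fed hand-built misc documents (the literal dicts below are hypothetical and only illustrate the shape of the output):

from hyperopt.base import miscs_to_idxs_vals

miscs = [
    {'tid': 0, 'idxs': {'x': [0]}, 'vals': {'x': [0.5]}},
    {'tid': 1, 'idxs': {'x': []},  'vals': {'x': []}},    # 'x' not sampled in trial 1
    {'tid': 2, 'idxs': {'x': [2]}, 'vals': {'x': [0.9]}},
]
idxs, vals = miscs_to_idxs_vals(miscs)
print(idxs)   # {'x': [0, 2]}      -- trial ids where 'x' was sampled
print(vals)   # {'x': [0.5, 0.9]}  -- the corresponding sampled values
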
Example #22
def test_distributions():
    # test that the distributions come out right

    # XXX: test more distributions
    space = {
        'loss': (hp_loguniform('lu', -2, 2) +
                 hp_qloguniform('qlu', np.log(1 + 0.01), np.log(20), 2) +
                 hp_quniform('qu', -4.999, 5, 1) + hp_uniform('u', 0, 10)),
        'status':
        'ok'
    }
    trials = base.Trials()
    N = 1000
    fmin(lambda x: x,
         space=space,
         algo=rand.suggest,
         trials=trials,
         max_evals=N,
         rstate=np.random.RandomState(124),
         catch_eval_exceptions=False)
    assert len(trials) == N
    idxs, vals = base.miscs_to_idxs_vals(trials.miscs)
    print(list(idxs.keys()))

    COUNTMAX = 130
    COUNTMIN = 70

    # -- loguniform
    log_lu = np.log(vals['lu'])
    assert len(log_lu) == N
    assert -2 < np.min(log_lu)
    assert np.max(log_lu) < 2
    h = np.histogram(log_lu)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)

    # -- quantized log uniform
    qlu = vals['qlu']
    assert np.all(np.fmod(qlu, 2) == 0)
    assert np.min(qlu) == 2
    assert np.max(qlu) == 20
    bc_qlu = np.bincount(qlu)
    assert bc_qlu[2] > bc_qlu[4] > bc_qlu[6] > bc_qlu[8]

    # -- quantized uniform
    qu = vals['qu']
    assert np.min(qu) == -5
    assert np.max(qu) == 5
    assert np.all(np.fmod(qu, 1) == 0)
    bc_qu = np.bincount(np.asarray(qu).astype('int') + 5)
    assert np.all(40 < bc_qu), bc_qu  # XXX: how to get the distribution flat
    # with new rounding rule?
    assert np.all(bc_qu < 125), bc_qu
    assert np.all(bc_qu < COUNTMAX)

    # -- uniform
    u = vals['u']
    assert np.min(u) > 0
    assert np.max(u) < 10
    h = np.histogram(u)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)
Example #23
    def work(self, **kwargs):
        self.__dict__.update(kwargs)
        bandit = opt_q_uniform(self.target)
        prior_weight = 2.5
        gamma = 0.20
        algo = partial(tpe.suggest,
                       prior_weight=prior_weight,
                       n_startup_jobs=2,
                       n_EI_candidates=128,
                       gamma=gamma)
        #print algo.opt_idxs['x']
        #print algo.opt_vals['x']

        trials = Trials()
        fmin(passthrough,
             space=bandit.expr,
             algo=algo,
             trials=trials,
             max_evals=self.LEN)
        if self.show_vars:
            import hyperopt.plotting
            hyperopt.plotting.main_plot_vars(trials, bandit, do_show=1)

        idxs, vals = miscs_to_idxs_vals(trials.miscs)
        idxs = idxs['x']
        vals = vals['x']

        losses = trials.losses()

        from hyperopt.tpe import ap_filter_trials
        from hyperopt.tpe import adaptive_parzen_samplers

        qu = scope.quniform(1.01, 10, 1)
        fn = adaptive_parzen_samplers['quniform']
        fn_kwargs = dict(size=(4, ), rng=np.random)
        s_below = pyll.Literal()
        s_above = pyll.Literal()
        b_args = [s_below, prior_weight] + qu.pos_args
        b_post = fn(*b_args, **fn_kwargs)
        a_args = [s_above, prior_weight] + qu.pos_args
        a_post = fn(*a_args, **fn_kwargs)

        #print b_post
        #print a_post
        fn_lpdf = getattr(scope, a_post.name + '_lpdf')
        print fn_lpdf
        # calculate the llik of b_post under both distributions
        a_kwargs = dict([(n, a) for n, a in a_post.named_args
                         if n not in ('rng', 'size')])
        b_kwargs = dict([(n, a) for n, a in b_post.named_args
                         if n not in ('rng', 'size')])
        below_llik = fn_lpdf(*([b_post] + b_post.pos_args), **b_kwargs)
        above_llik = fn_lpdf(*([b_post] + a_post.pos_args), **a_kwargs)
        new_node = scope.broadcast_best(b_post, below_llik, above_llik)

        print '=' * 80

        do_show = self.show_steps

        for ii in range(2, 9):
            if ii > len(idxs):
                break
            print '-' * 80
            print 'ROUND', ii
            print '-' * 80
            all_vals = [2, 3, 4, 5, 6, 7, 8, 9, 10]
            below, above = ap_filter_trials(idxs[:ii], vals[:ii], idxs[:ii],
                                            losses[:ii], gamma)
            below = below.astype('int')
            above = above.astype('int')
            print 'BB0', below
            print 'BB1', above
            #print 'BELOW',  zip(range(100), np.bincount(below, minlength=11))
            #print 'ABOVE',  zip(range(100), np.bincount(above, minlength=11))
            memo = {b_post: all_vals, s_below: below, s_above: above}
            bl, al, nv = pyll.rec_eval([below_llik, above_llik, new_node],
                                       memo=memo)
            #print bl - al
            print 'BB2', dict(zip(all_vals, bl - al))
            print 'BB3', dict(zip(all_vals, bl))
            print 'BB4', dict(zip(all_vals, al))
            print 'ORIG PICKED', vals[ii]
            print 'PROPER OPT PICKS:', nv

            #assert np.allclose(below, [3, 3, 9])
            #assert len(below) + len(above) == len(vals)

            if do_show:
                plt.subplot(8, 1, ii)
                #plt.scatter(all_vals,
                #    np.bincount(below, minlength=11)[2:], c='b')
                #plt.scatter(all_vals,
                #    np.bincount(above, minlength=11)[2:], c='c')
                plt.scatter(all_vals, bl, c='g')
                plt.scatter(all_vals, al, c='r')
        if do_show:
            plt.show()