Example no. 1
    def test7(self):
        p0 = hp_uniform('p0', 0, 1)
        p1 = hp_normal('p1', 0, 1)
        p2 = hp_choice('p2', [1, p0])
        p3 = hp_choice('p3', [2, p1, p2, hp_uniform('a0', 2, 3)])
        self.expr = {'loss': p0 + p1 + p2 + p3}
        self.n_randints = 2
        self.wanted = [[('p0', [0], [0.71295532052322719]),
                        ('p1', [0], [0.28297849805199204]),
                        ('p2.randint', [0], [0]),
                        ('p3.arg:2', [0], [2.719468969785563]),
                        ('p3.randint', [0], [2])],
                       [('p0', [1], [0.78002776191207912]),
                        ('p1', [1], [-1.506294713918092]),
                        ('p2.randint', [1], [1]), ('p3.arg:2', [], []),
                        ('p3.randint', [1], [1])],
                       [('p0', [2], [0.57969429702261011]),
                        ('p1', [2], [1.6796003743035337]),
                        ('p2.randint', [2], [0]), ('p3.arg:2', [], []),
                        ('p3.randint', [2], [1])],
                       [('p0', [3], [0.43857224467962441]),
                        ('p1', [3], [-1.3058031267484451]),
                        ('p2.randint', [3], [1]), ('p3.arg:2', [], []),
                        ('p3.randint', [3], [1])],
                       [('p0', [4], [0.39804425533043142]),
                        ('p1', [4], [-0.91948540682140967]),
                        ('p2.randint', [4], [0]), ('p3.arg:2', [], []),
                        ('p3.randint', [4], [0])]]
        self.foo()
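The snippets on this page are lifted from hyperopt's test suite and related projects, so they omit their imports. A plausible common header, assuming the module layout of the older hyperopt versions these tests come from (`hyperopt.pyll_utils` for the `hp_*` constructors, `hyperopt.vectorize` for `VectorizeHelper`); treat the exact paths as an assumption:

import numpy as np

from hyperopt import base, rand
from hyperopt.fmin import fmin
from hyperopt.pyll import as_apply, rec_eval, scope
from hyperopt.pyll.stochastic import recursive_set_rng_kwarg
from hyperopt.pyll_utils import (hp_choice, hp_randint, hp_uniform,
                                 hp_normal, hp_lognormal, hp_loguniform,
                                 hp_quniform, hp_qloguniform, hp_qnormal,
                                 hp_qlognormal)
from hyperopt.vectorize import VectorizeHelper, replace_repeat_stochastic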
Example no. 2
def test_vectorize_multipath():
    N = as_apply(15)

    p0 = hp_uniform("p0", 0, 1)
    loss = hp_choice("p1", [1, p0, -p0])**2
    expr_idxs = scope.range(N)
    vh = VectorizeHelper(loss, expr_idxs, build=True)
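    # VectorizeHelper rewrites the single-trial search-space graph so that
    # all N trials are sampled in one evaluation; idxs_by_label() and
    # vals_by_label() then report, for each hyperparameter label, which
    # trial indices actually drew a value and what those values were
    # (values for a conditional branch appear only where it was taken).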

    vloss = vh.v_expr
    print(vloss)

    full_output = as_apply([vloss, vh.idxs_by_label(), vh.vals_by_label()])

    new_vc = recursive_set_rng_kwarg(full_output,
                                     as_apply(np.random.RandomState(1)))

    losses, idxs, vals = rec_eval(new_vc)
    print("losses", losses)
    print("idxs p0", idxs["p0"])
    print("vals p0", vals["p0"])
    print("idxs p1", idxs["p1"])
    print("vals p1", vals["p1"])
    p0dct = dict(zip(idxs["p0"], vals["p0"]))
    p1dct = dict(zip(idxs["p1"], vals["p1"]))
    for ii, li in enumerate(losses):
        print(ii, li)
        if p1dct[ii] != 0:
            assert li == p0dct[ii]**2
        else:
            assert li == 1
Example no. 3
def test_vectorize_multipath():
    N = as_apply(15)

    p0 = hp_uniform('p0', 0, 1)
    loss = hp_choice('p1', [1, p0, -p0])**2
    expr_idxs = scope.range(N)
    vh = VectorizeHelper(loss, expr_idxs, build=True)

    vloss = vh.v_expr
    print(vloss)

    full_output = as_apply([vloss, vh.idxs_by_label(), vh.vals_by_label()])

    new_vc = recursive_set_rng_kwarg(
        full_output,
        as_apply(np.random.RandomState(1)),
    )

    losses, idxs, vals = rec_eval(new_vc)
    print('losses', losses)
    print('idxs p0', idxs['p0'])
    print('vals p0', vals['p0'])
    print('idxs p1', idxs['p1'])
    print('vals p1', vals['p1'])
    p0dct = dict(zip(idxs['p0'], vals['p0']))
    p1dct = dict(zip(idxs['p1'], vals['p1']))
    for ii, li in enumerate(losses):
        print(ii, li)
        if p1dct[ii] != 0:
            assert li == p0dct[ii]**2
        else:
            assert li == 1
Example no. 4
def test_distributions():
    # test that the distributions come out right

    # XXX: test more distributions
    bandit = base.Bandit({
        'loss':
        hp_loguniform('lu', -2, 2) +
        hp_qloguniform('qlu', np.log(1 + 0.01), np.log(20), 2) +
        hp_quniform('qu', -4.999, 5, 1) + hp_uniform('u', 0, 10)
    })
    algo = base.Random(bandit)
    trials = base.Trials()
    exp = base.Experiment(trials, algo)
    exp.catch_bandit_exceptions = False
    N = 1000
    exp.run(N)
    assert len(trials) == N
    idxs, vals = base.miscs_to_idxs_vals(trials.miscs)
    print(list(idxs.keys()))

    COUNTMAX = 130
    COUNTMIN = 70

    # -- loguniform
    log_lu = np.log(vals['lu'])
    assert len(log_lu) == N
    assert -2 < np.min(log_lu)
    assert np.max(log_lu) < 2
    h = np.histogram(log_lu)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)

    # -- quantized log uniform
    qlu = vals['qlu']
    assert np.all(np.fmod(qlu, 2) == 0)
    assert np.min(qlu) == 2
    assert np.max(qlu) == 20
    bc_qlu = np.bincount(qlu)
    assert bc_qlu[2] > bc_qlu[4] > bc_qlu[6] > bc_qlu[8]

    # -- quantized uniform
    qu = vals['qu']
    assert np.min(qu) == -5
    assert np.max(qu) == 5
    assert np.all(np.fmod(qu, 1) == 0)
    bc_qu = np.bincount(np.asarray(qu).astype('int') + 5)
    assert np.all(40 < bc_qu), bc_qu  # XXX: how to get the distribution flat
    # with new rounding rule?
    assert np.all(bc_qu < 125), bc_qu
    assert np.all(bc_qu < COUNTMAX)

    # -- uniform
    u = vals['u']
    assert np.min(u) > 0
    assert np.max(u) < 10
    h = np.histogram(u)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)
Example no. 5
def test_vectorize_simple():
    N = as_apply(15)

    p0 = hp_uniform('p0', 0, 1)
    loss = p0**2
    print(loss)
    expr_idxs = scope.range(N)
    vh = VectorizeHelper(loss, expr_idxs, build=True)
    vloss = vh.v_expr

    full_output = as_apply([vloss, vh.idxs_by_label(), vh.vals_by_label()])
    fo2 = replace_repeat_stochastic(full_output)

    new_vc = recursive_set_rng_kwarg(
        fo2,
        as_apply(np.random.RandomState(1)),
    )

    # print(new_vc)
    losses, idxs, vals = rec_eval(new_vc)
    print('losses', losses)
    print('idxs p0', idxs['p0'])
    print('vals p0', vals['p0'])
    p0dct = dict(zip(idxs['p0'], vals['p0']))
    for ii, li in enumerate(losses):
        assert p0dct[ii]**2 == li
Example no. 6
def test_vectorize_multipath():
    N = as_apply(15)

    p0 = hp_uniform('p0', 0, 1)
    loss = hp_choice('p1', [1, p0, -p0]) ** 2
    expr_idxs = scope.range(N)
    vh = VectorizeHelper(loss, expr_idxs, build=True)

    vloss = vh.v_expr
    print(vloss)

    full_output = as_apply([vloss,
        vh.idxs_by_label(),
        vh.vals_by_label()])

    new_vc = recursive_set_rng_kwarg(
            full_output,
            as_apply(np.random.RandomState(1)),
            )

    losses, idxs, vals = rec_eval(new_vc)
    print('losses', losses)
    print('idxs p0', idxs['p0'])
    print('vals p0', vals['p0'])
    print('idxs p1', idxs['p1'])
    print('vals p1', vals['p1'])
    p0dct = dict(zip(idxs['p0'], vals['p0']))
    p1dct = dict(zip(idxs['p1'], vals['p1']))
    for ii, li in enumerate(losses):
        print(ii, li)
        if p1dct[ii] != 0:
            assert li == p0dct[ii] ** 2
        else:
            assert li == 1
Example no. 7
def test_vectorize_simple():
    N = as_apply(15)

    p0 = hp_uniform('p0', 0, 1)
    loss = p0 ** 2
    print(loss)
    expr_idxs = scope.range(N)
    vh = VectorizeHelper(loss, expr_idxs, build=True)
    vloss = vh.v_expr

    full_output = as_apply([vloss,
        vh.idxs_by_label(),
        vh.vals_by_label()])
    fo2 = replace_repeat_stochastic(full_output)

    new_vc = recursive_set_rng_kwarg(
            fo2,
            as_apply(np.random.RandomState(1)),
            )

    # print(new_vc)
    losses, idxs, vals = rec_eval(new_vc)
    print('losses', losses)
    print('idxs p0', idxs['p0'])
    print('vals p0', vals['p0'])
    p0dct = dict(zip(idxs['p0'], vals['p0']))
    for ii, li in enumerate(losses):
        assert p0dct[ii] ** 2 == li
Example no. 8
def test_distributions():
    # test that the distributions come out right

    # XXX: test more distributions
    bandit = base.Bandit({
                'loss': hp_loguniform('lu', -2, 2) +
                    hp_qloguniform('qlu', np.log(1 + 0.01), np.log(20), 2) +
                    hp_quniform('qu', -4.999, 5, 1) +
                    hp_uniform('u', 0, 10)})
    algo = base.Random(bandit)
    trials = base.Trials()
    exp = base.Experiment(trials, algo)
    exp.catch_bandit_exceptions = False
    N = 1000
    exp.run(N)
    assert len(trials) == N
    idxs, vals = base.miscs_to_idxs_vals(trials.miscs)
    print(list(idxs.keys()))

    COUNTMAX = 130
    COUNTMIN = 70

    # -- loguniform
    log_lu = np.log(vals['lu'])
    assert len(log_lu) == N
    assert -2 < np.min(log_lu)
    assert np.max(log_lu) < 2
    h = np.histogram(log_lu)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)

    # -- quantized log uniform
    qlu = vals['qlu']
    assert np.all(np.fmod(qlu, 2) == 0)
    assert np.min(qlu) == 2
    assert np.max(qlu) == 20
    bc_qlu = np.bincount(qlu)
    assert bc_qlu[2] > bc_qlu[4] > bc_qlu[6] > bc_qlu[8]

    # -- quantized uniform
    qu = vals['qu']
    assert np.min(qu) == -5
    assert np.max(qu) == 5
    assert np.all(np.fmod(qu, 1) == 0)
    bc_qu = np.bincount(np.asarray(qu).astype('int') + 5)
    assert np.all(40 < bc_qu), bc_qu  # XXX: how to get the distribution flat
    # with new rounding rule?
    assert np.all(bc_qu < 125), bc_qu
    assert np.all(bc_qu < COUNTMAX)

    # -- uniform
    u = vals['u']
    assert np.min(u) > 0
    assert np.max(u) < 10
    h = np.histogram(u)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)
Example no. 9
    def test0(self):
        self.expr = {'loss': hp_uniform('p0', 0, 1)}
        self.wanted = [[('p0', [0], [0.69646918559786164])],
                       [('p0', [1], [0.28613933495037946])],
                       [('p0', [2], [0.22685145356420311])],
                       [('p0', [3], [0.55131476908289123])],
                       [('p0', [4], [0.71946896978556307])]]
        self.foo()
Example no. 10
    def test0(self):
        self.expr = {'loss': hp_uniform('p0', 0, 1)}
        self.wanted = [
            [('p0', [0], [0.69646918559786164])],
            [('p0', [1], [0.28613933495037946])],
            [('p0', [2], [0.22685145356420311])],
            [('p0', [3], [0.55131476908289123])],
            [('p0', [4], [0.71946896978556307])]]
        self.foo()
Example no. 11
def test_quadratic1_rand():

    report = fmin(
            fn=lambda x: (x - 3) ** 2,
            space=hp_uniform('x', -5, 5),
            algo=rand.suggest,
            max_evals=500)

    assert len(report.trials) == 500
    assert abs(report.trials.argmin['x'] - 3.0) < .25
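A seeded variant can make this search reproducible; a minimal sketch in the style of Example no. 17's `fmin` call (the `rstate` keyword and `Trials.argmin` are assumed to be available in the hyperopt version these examples target):

def test_quadratic1_rand_seeded():
    # Same quadratic objective, but with an explicit Trials object and a
    # fixed RNG so repeated runs visit the same 500 points.
    trials = base.Trials()
    fmin(fn=lambda x: (x - 3) ** 2,
         space=hp_uniform('x', -5, 5),
         algo=rand.suggest,
         trials=trials,
         max_evals=500,
         rstate=np.random.RandomState(123))
    assert len(trials) == 500
    assert abs(trials.argmin['x'] - 3.0) < .25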
Example no. 12
def many_dists():
    a = hp_choice('a', [0, 1, 2])
    b = hp_randint('b', 10)
    c = hp_uniform('c', 4, 7)
    d = hp_loguniform('d', -2, 0)
    e = hp_quniform('e', 0, 10, 3)
    f = hp_qloguniform('f', 0, 3, 2)
    g = hp_normal('g', 4, 7)
    h = hp_lognormal('h', -2, 2)
    i = hp_qnormal('i', 0, 10, 2)
    j = hp_qlognormal('j', 0, 2, 1)
    z = a + b + c + d + e + f + g + h + i + j
    return {'loss': scope.float(scope.log(1e-12 + z ** 2))}
Example no. 13
def many_dists():
    a = hp_choice('a', [0, 1, 2])
    b = hp_randint('b', 10)
    c = hp_uniform('c', 4, 7)
    d = hp_loguniform('d', -2, 0)
    e = hp_quniform('e', 0, 10, 3)
    f = hp_qloguniform('f', 0, 3, 2)
    g = hp_normal('g', 4, 7)
    h = hp_lognormal('h', -2, 2)
    i = hp_qnormal('i', 0, 10, 2)
    j = hp_qlognormal('j', 0, 2, 1)
    z = a + b + c + d + e + f + g + h + i + j
    return {'loss': scope.float(scope.log(1e-12 + z**2))}
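To eyeball what `many_dists` actually produces, one can draw a few configurations directly; a sketch assuming `hyperopt.pyll.stochastic.sample`, which evaluates a pyll expression with fresh random choices:

from hyperopt.pyll.stochastic import sample

rng = np.random.RandomState(0)
for _ in range(3):
    # Each call samples every hyperparameter once and evaluates the
    # resulting {'loss': ...} dictionary.
    print(sample(many_dists(), rng=rng))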
Example no. 14
    def test7(self):
        p0 = hp_uniform('p0', 0, 1)
        p1 = hp_normal('p1', 0, 1)
        p2 = hp_choice('p2', [1, p0])
        p3 = hp_choice('p3', [2, p1, p2, hp_uniform('a0', 2, 3)])
        self.expr = {'loss': p0 + p1 + p2 + p3}
        self.n_randints = 2
        self.wanted = [
            [
                ('p0', [0], [0.71295532052322719]),
                ('p1', [0], [0.28297849805199204]),
                ('p2.randint', [0], [0]),
                ('p3.arg:2', [0], [2.719468969785563]),
                ('p3.randint', [0], [2])],
            [
                ('p0', [1], [0.78002776191207912]),
                ('p1', [1], [-1.506294713918092]),
                ('p2.randint', [1], [1]),
                ('p3.arg:2', [], []),
                ('p3.randint', [1], [1])],
            [
                ('p0', [2], [0.57969429702261011]),
                ('p1', [2], [1.6796003743035337]),
                ('p2.randint', [2], [0]),
                ('p3.arg:2', [], []),
                ('p3.randint', [2], [1])],
            [
                ('p0', [3], [0.43857224467962441]),
                ('p1', [3], [-1.3058031267484451]),
                ('p2.randint', [3], [1]),
                ('p3.arg:2', [], []),
                ('p3.randint', [3], [1])],
            [
                ('p0', [4], [0.39804425533043142]),
                ('p1', [4], [-0.91948540682140967]),
                ('p2.randint', [4], [0]),
                ('p3.arg:2', [], []),
                ('p3.randint', [4], [0])]]
        self.foo()
Example no. 15
def test_vectorize_trivial():
    N = as_apply(15)

    p0 = hp_uniform("p0", 0, 1)
    loss = p0
    print(loss)
    expr_idxs = scope.range(N)
    vh = VectorizeHelper(loss, expr_idxs, build=True)
    vloss = vh.v_expr

    full_output = as_apply([vloss, vh.idxs_by_label(), vh.vals_by_label()])
    fo2 = replace_repeat_stochastic(full_output)

    new_vc = recursive_set_rng_kwarg(fo2, as_apply(np.random.RandomState(1)))

    # print new_vc
    losses, idxs, vals = rec_eval(new_vc)
    print("losses", losses)
    print("idxs p0", idxs["p0"])
    print("vals p0", vals["p0"])
    p0dct = dict(zip(idxs["p0"], vals["p0"]))
    for ii, li in enumerate(losses):
        assert p0dct[ii] == li

Example no. 16
num_filters1 = scope.int(hp_qloguniform('num_filters1', np.log(16), np.log(96), q=16))
filter1_size = scope.int(hp_quniform('filter1_shape', 2, 12, 1))

num_filters2 = scope.int(hp_qloguniform('num_filters2', np.log(16), np.log(96), q=16))
filter2_size = scope.int(hp_quniform('filter2_shape', 2, 12, 1))

num_filters3 = scope.int(hp_qloguniform('num_filters3', np.log(16), np.log(96), q=16))
filter3_size = scope.int(hp_quniform('filter3_shape', 2, 9, 1))

num_filters4 = scope.int(hp_qloguniform('num_filters4', np.log(16), np.log(64), q=16))
filter4_size = scope.int(hp_quniform('filter4_shape', 2, 9, 1))

pool1_sizex = scope.int(hp_quniform('pool1_sizex', 2, 5, 1))
pool1_type = hp_choice('pool1_type', ['max', 'avg', hp_uniform('pool_order_1', 1, 12)])

pool2_sizex = scope.int(hp_quniform('pool2_sizex', 2, 5, 1))
pool2_type = hp_choice('pool2_type', ['max', 'avg', hp_uniform('pool_order_2', 1, 4)])

rnorm1_size = scope.int(hp_quniform('rnorm1_size', 5, 12, 1))
rnorm2_size = scope.int(hp_quniform('rnorm2_size', 5, 12, 1))


layer_def_template = OrderedDict([('data', OrderedDict([('type', 'data'),
                                   ('dataidx', 0)])),
             ('labels', OrderedDict([('type', 'data'),
                                     ('dataidx', 1)])),
             ('conv1', OrderedDict([('type', 'conv'),
                                    ('inputs', 'data'),
                                    ('channels', 3),
Example no. 17
def test_distributions():
    # test that the distributions come out right

    # XXX: test more distributions
    space = {
        'loss': (
            hp_loguniform('lu', -2, 2) +
            hp_qloguniform('qlu', np.log(1 + 0.01), np.log(20), 2) +
            hp_quniform('qu', -4.999, 5, 1) +
            hp_uniform('u', 0, 10)),
        'status': 'ok'}
    trials = base.Trials()
    N = 1000
    fmin(lambda x: x,
        space=space,
        algo=rand.suggest,
        trials=trials,
        max_evals=N,
        rstate=np.random.RandomState(124),
        catch_eval_exceptions=False)
    assert len(trials) == N
    idxs, vals = base.miscs_to_idxs_vals(trials.miscs)
    print(list(idxs.keys()))

    COUNTMAX = 130
    COUNTMIN = 70

    # -- loguniform
    log_lu = np.log(vals['lu'])
    assert len(log_lu) == N
    assert -2 < np.min(log_lu)
    assert np.max(log_lu) < 2
    h = np.histogram(log_lu)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)

    # -- quantized log uniform
    qlu = vals['qlu']
    assert np.all(np.fmod(qlu, 2) == 0)
    assert np.min(qlu) == 2
    assert np.max(qlu) == 20
    bc_qlu = np.bincount(qlu)
    assert bc_qlu[2] > bc_qlu[4] > bc_qlu[6] > bc_qlu[8]

    # -- quantized uniform
    qu = vals['qu']
    assert np.min(qu) == -5
    assert np.max(qu) == 5
    assert np.all(np.fmod(qu, 1) == 0)
    bc_qu = np.bincount(np.asarray(qu).astype('int') + 5)
    assert np.all(40 < bc_qu), bc_qu  # XXX: how to get the distribution flat
    # with new rounding rule?
    assert np.all(bc_qu < 125), bc_qu
    assert np.all(bc_qu < COUNTMAX)

    # -- uniform
    u = vals['u']
    assert np.min(u) > 0
    assert np.max(u) < 10
    h = np.histogram(u)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)
Example no. 18
def test_vectorize_config0():
    p0 = hp_uniform('p0', 0, 1)
    p1 = hp_loguniform('p1', 2, 3)
    p2 = hp_choice('p2', [-1, p0])
    p3 = hp_choice('p3', [-2, p1])
    p4 = 1
    p5 = [3, 4, p0]
    p6 = hp_choice('p6', [-3, p1])
    d = locals()
    d['p1'] = None  # -- don't sample p1 all the time, only if p3 says so
    config = as_apply(d)

    N = as_apply('N:TBA')
    expr = config
    expr_idxs = scope.range(N)
    vh = VectorizeHelper(expr, expr_idxs, build=True)
    vconfig = vh.v_expr

    full_output = as_apply([vconfig, vh.idxs_by_label(), vh.vals_by_label()])

    if 1:
        print('=' * 80)
        print('VECTORIZED')
        print(full_output)
        print('\n' * 1)

    fo2 = replace_repeat_stochastic(full_output)
    if 0:
        print('=' * 80)
        print('VECTORIZED STOCHASTIC')
        print(fo2)
        print('\n' * 1)

    new_vc = recursive_set_rng_kwarg(
            fo2,
            as_apply(np.random.RandomState(1))
            )
    if 0:
        print('=' * 80)
        print('VECTORIZED STOCHASTIC WITH RNGS')
        print(new_vc)

    Nval = 10
    foo, idxs, vals = rec_eval(new_vc, memo={N: Nval})

    print('foo[0]', foo[0])
    print('foo[1]', foo[1])
    assert len(foo) == Nval
    if 0:  # XXX refresh these values to lock down sampler
        assert foo[0] == {
            'p0': 0.39676747423066994,
            'p1': None,
            'p2': 0.39676747423066994,
            'p3': 2.1281244479293568,
            'p4': 1,
            'p5': (3, 4, 0.39676747423066994) }
    assert foo[1] != foo[2]

    print(idxs)
    print(vals['p3'])
    print(vals['p6'])
    print(idxs['p1'])
    print(vals['p1'])
    assert len(vals['p3']) == Nval
    assert len(vals['p6']) == Nval
    assert len(idxs['p1']) < Nval
    p1d = dict(zip(idxs['p1'], vals['p1']))
    for ii, (p3v, p6v) in enumerate(zip(vals['p3'], vals['p6'])):
        if p3v == p6v == 0:
            assert ii not in idxs['p1']
        if p3v:
            assert foo[ii]['p3'] == p1d[ii]
        if p6v:
            print('p6', foo[ii]['p6'], p1d[ii])
            assert foo[ii]['p6'] == p1d[ii]
Example no. 19
def test_vectorize_config0():
    p0 = hp_uniform('p0', 0, 1)
    p1 = hp_loguniform('p1', 2, 3)
    p2 = hp_choice('p2', [-1, p0])
    p3 = hp_choice('p3', [-2, p1])
    p4 = 1
    p5 = [3, 4, p0]
    p6 = hp_choice('p6', [-3, p1])
    d = locals()
    d['p1'] = None  # -- don't sample p1 all the time, only if p3 says so
    config = as_apply(d)

    N = as_apply('N:TBA')
    expr = config
    expr_idxs = scope.range(N)
    vh = VectorizeHelper(expr, expr_idxs, build=True)
    vconfig = vh.v_expr

    full_output = as_apply([vconfig, vh.idxs_by_label(), vh.vals_by_label()])

    if 1:
        print('=' * 80)
        print('VECTORIZED')
        print(full_output)
        print('\n' * 1)

    fo2 = replace_repeat_stochastic(full_output)
    if 0:
        print('=' * 80)
        print('VECTORIZED STOCHASTIC')
        print(fo2)
        print('\n' * 1)

    new_vc = recursive_set_rng_kwarg(fo2, as_apply(np.random.RandomState(1)))
    if 0:
        print('=' * 80)
        print('VECTORIZED STOCHASTIC WITH RNGS')
        print(new_vc)

    Nval = 10
    foo, idxs, vals = rec_eval(new_vc, memo={N: Nval})

    print('foo[0]', foo[0])
    print('foo[1]', foo[1])
    assert len(foo) == Nval
    if 0:  # XXX refresh these values to lock down sampler
        assert foo[0] == {
            'p0': 0.39676747423066994,
            'p1': None,
            'p2': 0.39676747423066994,
            'p3': 2.1281244479293568,
            'p4': 1,
            'p5': (3, 4, 0.39676747423066994)
        }
    assert foo[1] != foo[2]

    print(idxs)
    print(vals['p3'])
    print(vals['p6'])
    print(idxs['p1'])
    print(vals['p1'])
    assert len(vals['p3']) == Nval
    assert len(vals['p6']) == Nval
    assert len(idxs['p1']) < Nval
    p1d = dict(zip(idxs['p1'], vals['p1']))
    for ii, (p3v, p6v) in enumerate(zip(vals['p3'], vals['p6'])):
        if p3v == p6v == 0:
            assert ii not in idxs['p1']
        if p3v:
            assert foo[ii]['p3'] == p1d[ii]
        if p6v:
            print('p6', foo[ii]['p6'], p1d[ii])
            assert foo[ii]['p6'] == p1d[ii]
Example no. 20
def test_vectorize_config0():
    p0 = hp_uniform("p0", 0, 1)
    p1 = hp_loguniform("p1", 2, 3)
    p2 = hp_choice("p2", [-1, p0])
    p3 = hp_choice("p3", [-2, p1])
    p4 = 1
    p5 = [3, 4, p0]
    p6 = hp_choice("p6", [-3, p1])
    d = locals()
    d["p1"] = None  # -- don't sample p1 all the time, only if p3 says so
    config = as_apply(d)

    N = as_apply("N:TBA")
    expr = config
    expr_idxs = scope.range(N)
    vh = VectorizeHelper(expr, expr_idxs, build=True)
    vconfig = vh.v_expr

    full_output = as_apply([vconfig, vh.idxs_by_label(), vh.vals_by_label()])

    if 1:
        print("=" * 80)
        print("VECTORIZED")
        print(full_output)
        print("\n" * 1)

    fo2 = replace_repeat_stochastic(full_output)
    if 0:
        print("=" * 80)
        print("VECTORIZED STOCHASTIC")
        print(fo2)
        print("\n" * 1)

    new_vc = recursive_set_rng_kwarg(fo2, as_apply(np.random.RandomState(1)))
    if 0:
        print("=" * 80)
        print("VECTORIZED STOCHASTIC WITH RNGS")
        print(new_vc)

    Nval = 10
    foo, idxs, vals = rec_eval(new_vc, memo={N: Nval})

    print("foo[0]", foo[0])
    print("foo[1]", foo[1])
    assert len(foo) == Nval
    if 0:  # XXX refresh these values to lock down sampler
        assert foo[0] == {
            "p0": 0.39676747423066994,
            "p1": None,
            "p2": 0.39676747423066994,
            "p3": 2.1281244479293568,
            "p4": 1,
            "p5": (3, 4, 0.39676747423066994),
        }
    # Comparing dict_values views checks identity, not content; compare the
    # dicts themselves, as the other variants of this test do.
    assert foo[1] != foo[2]

    print(idxs)
    print(vals["p3"])
    print(vals["p6"])
    print(idxs["p1"])
    print(vals["p1"])
    assert len(vals["p3"]) == Nval
    assert len(vals["p6"]) == Nval
    assert len(idxs["p1"]) < Nval
    p1d = dict(list(zip(idxs["p1"], vals["p1"])))
    for ii, (p3v, p6v) in enumerate(zip(vals["p3"], vals["p6"])):
        if p3v == p6v == 0:
            assert ii not in idxs["p1"]
        if p3v:
            assert foo[ii]["p3"] == p1d[ii]
        if p6v:
            print("p6", foo[ii]["p6"], p1d[ii])
            assert foo[ii]["p6"] == p1d[ii]
Example no. 21
def test_distributions():
    # test that the distributions come out right

    # XXX: test more distributions
    space = {
        'loss': (hp_loguniform('lu', -2, 2) +
                 hp_qloguniform('qlu', np.log(1 + 0.01), np.log(20), 2) +
                 hp_quniform('qu', -4.999, 5, 1) + hp_uniform('u', 0, 10)),
        'status': 'ok',
    }
    trials = base.Trials()
    N = 1000
    fmin(lambda x: x,
         space=space,
         algo=rand.suggest,
         trials=trials,
         max_evals=N,
         rstate=np.random.RandomState(124),
         catch_eval_exceptions=False)
    assert len(trials) == N
    idxs, vals = base.miscs_to_idxs_vals(trials.miscs)
    print(list(idxs.keys()))

    COUNTMAX = 130
    COUNTMIN = 70

    # -- loguniform
    log_lu = np.log(vals['lu'])
    assert len(log_lu) == N
    assert -2 < np.min(log_lu)
    assert np.max(log_lu) < 2
    h = np.histogram(log_lu)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)

    # -- quantized log uniform
    qlu = vals['qlu']
    assert np.all(np.fmod(qlu, 2) == 0)
    assert np.min(qlu) == 2
    assert np.max(qlu) == 20
    bc_qlu = np.bincount(qlu)
    assert bc_qlu[2] > bc_qlu[4] > bc_qlu[6] > bc_qlu[8]

    # -- quantized uniform
    qu = vals['qu']
    assert np.min(qu) == -5
    assert np.max(qu) == 5
    assert np.all(np.fmod(qu, 1) == 0)
    bc_qu = np.bincount(np.asarray(qu).astype('int') + 5)
    assert np.all(40 < bc_qu), bc_qu  # XXX: how to get the distribution flat
    # with new rounding rule?
    assert np.all(bc_qu < 125), bc_qu
    assert np.all(bc_qu < COUNTMAX)

    # -- uniform
    u = vals['u']
    assert np.min(u) > 0
    assert np.max(u) < 10
    h = np.histogram(u)[0]
    print(h)
    assert np.all(COUNTMIN < h)
    assert np.all(h < COUNTMAX)
Example no. 22
def uslm_domain(Xcm,
        batchsize,
        chmjr_image_shape,
        output_sizes,
        n_patches=50000,
        max_n_features=16000,
        max_layer_sizes=(64, 128),
        batched_lmap_speed_thresh=None,
        permit_affine_warp=True,
        abort_on_rows_larger_than=None,
        ):
    """
    This function works by creating a linear pipeline, with multiple exit
    points that could be the feature representation for classification.

    The function returns a switch among all of these exit points.
    """
    start_time = time.time()

    XC, XH, XW = chmjr_image_shape
    osize = hp_choice('warp_osize', output_sizes)

    assert XW > 3, chmjr_image_shape  # -- make sure we don't screw up channel-major

    warp_options = [
        # -- option 1: simple resize
        partial(slm_affine_image_warp,
            rot=0,
            shear=0,
            scale=[s_float(osize) / XH, s_float(osize) / XW],
            trans=[0, 0],
            oshape=[osize, osize]),
        ]
    if permit_affine_warp:
        # -- option 2: resize with rotation, shear, translation
        warp_options.append(
            partial(slm_affine_image_warp,
                rot=hp_uniform('warp_rot', low=-0.3, high=0.3),
                shear=hp_uniform('warp_shear', low=-0.3, high=0.3),
                # -- most of the scaling comes via osize
                scale=[
                    hp_uniform('warp_scale_h', low=0.8, high=1.2) * osize / XH,
                    hp_uniform('warp_scale_v', low=0.8, high=1.2) * osize / XW,
                    ],
                trans=[
                    hp_uniform('warp_trans_h', low=-0.2, high=0.2) * osize,
                    hp_uniform('warp_trans_v', low=-0.2, high=0.2) * osize,
                    ],
                oshape=[osize, osize]
                ))
    pipeline = [slm_img_uint8_to_float32,
                hp_choice('warp', warp_options)]
    Xcm = pyll_theano_batched_lmap(
        partial(callpipe1, pipeline),
        Xcm,
        batchsize=batchsize,
        print_progress_every=10,
        speed_thresh=batched_lmap_speed_thresh,
        abort_on_rows_larger_than=abort_on_rows_larger_than,
        x_dtype='uint8',
        )[:]

    exits = pipeline_exits(
                pipeline,
                layer_num=0,
                Xcm=Xcm,
                n_patches=n_patches,
                max_n_features=max_n_features)
    for layer_i, max_layer_size in enumerate(max_layer_sizes):
        extension = pipeline_extension(
                'l%i' % layer_i, Xcm, n_patches, max_layer_size)

        pipeline.extend(extension)
        Xcm = pyll_theano_batched_lmap(
                partial(callpipe1, extension),
                Xcm,  # scope.print_ndarray_summary('Xcm %i' % layer_i, Xcm),
                batchsize=batchsize,
                print_progress_every=10,
                speed_thresh=batched_lmap_speed_thresh,
                abort_on_rows_larger_than=abort_on_rows_larger_than,
                )[:]
        # -- indexing computes all the values (during rec_eval)
        exits.extend(
                pipeline_exits(
                    pipeline=pipeline,
                    layer_num=layer_i + 1,
                    Xcm=Xcm,
                    n_patches=n_patches,
                    max_n_features=max_n_features))

    return hp_choice("exit", exits)
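The "switch among exit points" described in the docstring above is just an `hp_choice` whose options are progressively deeper feature pipelines. A toy standalone sketch of the pattern (all names here are illustrative, not from the library):

def pipeline_exits_sketch():
    # Hypothetical stand-ins for the real exits: each option is a candidate
    # feature representation, and the optimizer picks one via 'exit'.
    exit0 = hp_uniform('exit0_gain', 0, 1)            # e.g. raw pipeline
    exit1 = exit0 * hp_uniform('l1_gain', 0.5, 2.0)   # one extra layer
    exit2 = exit1 * hp_uniform('l2_gain', 0.5, 2.0)   # two extra layers
    return hp_choice('exit', [exit0, exit1, exit2])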
Example no. 23
filter1_size = scope.int(hp_quniform('filter1_shape', 5, 12, 1))

num_filters2 = scope.int(hp_quniform('num_filters2', 64, 400, 16))
filter2_size = scope.int(hp_quniform('filter2_shape', 4, 7, 1))

num_filters3 = scope.int(hp_quniform('num_filters3', 64, 400, 16))
filter3_size = scope.int(hp_quniform('filter3_shape', 3, 5, 1))

num_filters4 = scope.int(hp_quniform('num_filters4', 64, 400, 16))
filter4_size = scope.int(hp_quniform('filter4_shape', 3, 4, 1))

num_filters5 = scope.int(hp_quniform('num_filters5', 64, 400, 16))
filter5_size = scope.int(hp_quniform('filter5_shape', 2, 3, 1))

pool1_sizex = scope.int(hp_quniform('pool1_sizex', 2, 4, 1))
pool1_type = hp_choice('pool1_type', ['max', 'avg', hp_uniform('pool_order_1', 1, 4)])

pool2_sizex = scope.int(hp_quniform('pool2_sizex', 2, 4, 1))
pool2_type = hp_choice('pool2_type', ['max', 'avg', hp_uniform('pool_order_2', 1, 4)])

pool3_sizex = scope.int(hp_quniform('pool3_sizex', 2, 4, 1))
pool3_type = hp_choice('pool3_type', ['max', 'avg', hp_uniform('pool_order_3', 1, 4)])

rnorm1_size = scope.int(hp_quniform('rnorm1_size', 4, 6, 1))
rnorm2_size = scope.int(hp_quniform('rnorm2_size', 4, 6, 1))
rnorm3_size = scope.int(hp_quniform('rnorm3_size', 4, 6, 1))
rnorm4_size = scope.int(hp_quniform('rnorm4_size', 4, 6, 1))
rnorm5_size = scope.int(hp_quniform('rnorm5_size', 4, 6, 1))

layer_def_template = OrderedDict([('data', OrderedDict([('type', 'data'),
                                   ('dataidx', 0)])),