Example #1
0
def test_vectorize_simple():
    """Vectorize loss = p0**2 over 15 trials and check each evaluated loss.

    Fixed: Python-2 print statements converted to print() calls so the
    test is valid Python 3.
    """
    N = as_apply(15)

    p0 = hp_uniform('p0', 0, 1)
    loss = p0 ** 2
    print(loss)
    expr_idxs = scope.range(N)
    vh = VectorizeHelper(loss, expr_idxs, build=True)
    vloss = vh.v_expr

    full_output = as_apply([vloss, vh.idxs_by_label(), vh.vals_by_label()])
    fo2 = replace_repeat_stochastic(full_output)

    # Fixed seed makes the sampled p0 values reproducible.
    new_vc = recursive_set_rng_kwarg(
        fo2,
        as_apply(np.random.RandomState(1)),
    )

    losses, idxs, vals = rec_eval(new_vc)
    print('losses', losses)
    print('idxs p0', idxs['p0'])
    print('vals p0', vals['p0'])
    p0dct = dict(zip(idxs['p0'], vals['p0']))
    for ii, li in enumerate(losses):
        assert p0dct[ii] ** 2 == li
Example #2
0
def test_vectorize_multipath():
    """Vectorized choice loss: each trial's loss is 1 or p0**2 depending
    on which branch the 'p1' choice selected.

    Fixed: Python-2 print statements converted to print() calls.
    """
    N = as_apply(15)

    p0 = hp_uniform('p0', 0, 1)
    loss = hp_choice('p1', [1, p0, -p0]) ** 2
    expr_idxs = scope.range(N)
    vh = VectorizeHelper(loss, expr_idxs, build=True)

    vloss = vh.v_expr
    print(vloss)

    full_output = as_apply([vloss, vh.idxs_by_label(), vh.vals_by_label()])

    new_vc = recursive_set_rng_kwarg(
        full_output,
        as_apply(np.random.RandomState(1)),
    )

    losses, idxs, vals = rec_eval(new_vc)
    print('losses', losses)
    print('idxs p0', idxs['p0'])
    print('vals p0', vals['p0'])
    print('idxs p1', idxs['p1'])
    print('vals p1', vals['p1'])
    p0dct = dict(zip(idxs['p0'], vals['p0']))
    p1dct = dict(zip(idxs['p1'], vals['p1']))
    for ii, li in enumerate(losses):
        print(ii, li)
        # Choice index 0 selects the constant 1; otherwise +/-p0 was squared.
        if p1dct[ii] != 0:
            assert li == p0dct[ii] ** 2
        else:
            assert li == 1
Example #3
0
def test_vectorize_multipath():
    """Vectorize a loss containing a choice node; verify each sampled branch."""
    n_trials = as_apply(15)

    p0 = hp_uniform("p0", 0, 1)
    loss = hp_choice("p1", [1, p0, -p0])**2

    helper = VectorizeHelper(loss, scope.range(n_trials), build=True)
    print(helper.v_expr)

    # Bundle the vectorized loss with its per-label idxs/vals, then seed it.
    seeded = recursive_set_rng_kwarg(
        as_apply([helper.v_expr,
                  helper.idxs_by_label(),
                  helper.vals_by_label()]),
        as_apply(np.random.RandomState(1)))

    losses, idxs, vals = rec_eval(seeded)
    print("losses", losses)
    for label in ("p0", "p1"):
        print("idxs %s" % label, idxs[label])
        print("vals %s" % label, vals[label])
    p0_by_trial = dict(zip(idxs["p0"], vals["p0"]))
    p1_by_trial = dict(zip(idxs["p1"], vals["p1"]))
    for trial, loss_val in enumerate(losses):
        print(trial, loss_val)
        # Choice 0 is the constant 1; choices 1/2 are +/-p0 (then squared).
        if p1_by_trial[trial] != 0:
            assert loss_val == p0_by_trial[trial]**2
        else:
            assert loss_val == 1
Example #4
0
def test_vectorize_multipath():
    """Each trial's loss must equal 1 or p0**2, depending on the p1 choice."""
    num = as_apply(15)

    p0 = hp_uniform('p0', 0, 1)
    loss = hp_choice('p1', [1, p0, -p0]) ** 2
    vh = VectorizeHelper(loss, scope.range(num), build=True)

    print(vh.v_expr)

    wired = recursive_set_rng_kwarg(
        as_apply([vh.v_expr, vh.idxs_by_label(), vh.vals_by_label()]),
        as_apply(np.random.RandomState(1)),
    )

    losses, idxs, vals = rec_eval(wired)
    print('losses', losses)
    print('idxs p0', idxs['p0'])
    print('vals p0', vals['p0'])
    print('idxs p1', idxs['p1'])
    print('vals p1', vals['p1'])
    p0_of = dict(zip(idxs['p0'], vals['p0']))
    p1_of = dict(zip(idxs['p1'], vals['p1']))
    for trial, loss_val in enumerate(losses):
        print(trial, loss_val)
        # Choice index 0 selects the constant 1; otherwise +/-p0 was squared.
        expected = 1 if p1_of[trial] == 0 else p0_of[trial] ** 2
        assert loss_val == expected
Example #5
0
def test_vectorize_simple():
    """Vectorized p0**2 losses must equal the square of each sampled p0."""
    n_trials = as_apply(15)

    p0 = hp_uniform('p0', 0, 1)
    loss = p0 ** 2
    print(loss)

    helper = VectorizeHelper(loss, scope.range(n_trials), build=True)
    bundle = as_apply([helper.v_expr,
                       helper.idxs_by_label(),
                       helper.vals_by_label()])
    bundle = replace_repeat_stochastic(bundle)

    # Seed the graph so the draws are reproducible.
    seeded = recursive_set_rng_kwarg(
        bundle,
        as_apply(np.random.RandomState(1)))

    losses, idxs, vals = rec_eval(seeded)
    print('losses', losses)
    print('idxs p0', idxs['p0'])
    print('vals p0', vals['p0'])
    sampled = dict(zip(idxs['p0'], vals['p0']))
    for trial, loss_val in enumerate(losses):
        assert sampled[trial] ** 2 == loss_val
Example #6
0
def n_arms(N=2):
    """
    Each arm yields a reward from a different Gaussian.

    The correct arm is arm 0.

    """
    rng = np.random.RandomState(123)
    arm = hp.choice('x', [0, 1])
    # Arm 0 has mean -1; all others have mean 0. Unit variance everywhere.
    means = as_apply([-1] + [0] * (N - 1))
    sigmas = as_apply([1] * N)
    reward = scope.normal(means[arm], sigmas[arm], rng=rng)
    return {'loss': reward,
            'loss_variance': 1.0,
            'status': base.STATUS_OK}
Example #7
0
def n_arms(N=2):
    """
    Each arm yields a reward from a different Gaussian.

    The correct arm is arm 0.

    """
    rng = np.random.default_rng(123)
    arm = hp.choice("x", [0, 1])
    # Arm 0 has mean -1; all others mean 0. Unit variance for every arm.
    means = as_apply([-1] + [0] * (N - 1))
    sigmas = as_apply([1] * N)
    reward = scope.normal(means[arm], sigmas[arm], rng=rng)
    return {
        "loss": reward,
        "loss_variance": 1.0,
        "status": base.STATUS_OK,
    }
Example #8
0
 def test2(self):
     """Sample p0 = one_of(0, 1) five times and compare against recorded
     draws (presumably self.foo() samples self.expr and compares with
     self.wanted — confirm in the base class)."""
     self.expr = as_apply(dict(p0=one_of(0, 1)))
     # Expected (label, idxs, vals) records for 5 seeded trials; one_of
     # is compiled to a 'p0.randint' choice node.
     self.wanted = [
             [('p0.randint', [0], [0])], [('p0.randint', [1], [1])],
             [('p0.randint', [2], [0])], [('p0.randint', [3], [0])],
             [('p0.randint', [4], [0])]]
     self.foo()
Example #9
0
def test_recursive_set_rng_kwarg():
    """recursive_set_rng_kwarg threads an RNG into the stochastic nodes,
    after which the graph evaluates to in-range samples.

    Fixed: Python-2 print statement converted to a print() call.
    """
    uniform = scope.uniform
    a = as_apply([uniform(0, 1), uniform(2, 3)])
    rng = np.random.RandomState(234)
    recursive_set_rng_kwarg(a, rng)
    print(a)
    val_a = rec_eval(a)
    assert 0 < val_a[0] < 1
    assert 2 < val_a[1] < 3
def test_repeatable():
    """Equal seeds produce identical samples; a different seed differs."""
    u = scope.uniform(0, 1)
    space = as_apply(
        dict(u=u, n=scope.normal(5, 0.1), l=[0, 1, scope.one_of(2, 3), u]))
    first = sample(space, np.random.RandomState(3))
    second = sample(space, np.random.RandomState(3))
    third = sample(space, np.random.RandomState(4))
    assert first == second
    assert first != third
def test_recursive_set_rng_kwarg():
    """recursive_set_rng_kwarg threads an RNG into the stochastic nodes,
    after which the graph evaluates to in-range samples.

    Fixed: Python-2 print statement converted to a print() call.
    """
    uniform = scope.uniform
    a = as_apply([uniform(0, 1), uniform(2, 3)])
    rng = np.random.RandomState(234)
    recursive_set_rng_kwarg(a, rng)
    print(a)
    val_a = rec_eval(a)
    assert 0 < val_a[0] < 1
    assert 2 < val_a[1] < 3
Example #12
0
 def test5(self):
     """p0 and p1 alias the same uniform node, so only 'p0' records appear
     (presumably self.foo() samples and compares against self.wanted —
     confirm in the base class)."""
     p0 = uniform(0, 1)
     self.expr = as_apply(dict(p0=p0, p1=p0))
     # Five seeded draws of p0; p1 reuses the node, so no 'p1' rows.
     self.wanted = [[('p0', [0], [0.69646918559786164])], [('p0', [1],
                 [0.28613933495037946])], [('p0', [2],
                     [0.22685145356420311])], [('p0', [3],
                         [0.55131476908289123])], [('p0', [4],
                             [0.71946896978556307])]]
     self.foo()
Example #13
0
 def test1(self):
     """Single normal(0, 1) hyperparameter: five seeded draws must match
     the recorded values (presumably checked by self.foo() — confirm in
     the base class)."""
     self.expr = as_apply(dict(p0=normal(0, 1)))
     # Expected (label, idxs, vals) records for trials 0..4.
     self.wanted = [
             [('p0', [0], [-1.0856306033005612])],
             [('p0', [1], [0.99734544658358582])],
             [('p0', [2], [0.28297849805199204])],
             [('p0', [3], [-1.506294713918092])],
             [('p0', [4], [-0.57860025196853637])]]
     self.foo()
Example #14
0
 def test6(self):
     """p1 = normal(p0, 1) depends on p0, so both labels get a record per
     trial (presumably compared by self.foo() — confirm in the base
     class)."""
     p0 = uniform(0, 1)
     self.expr = as_apply(dict(p0=p0, p1=normal(p0, 1)))
     # Expected (label, idxs, vals) pairs for trials 0..4.
     self.wanted = [
         [('p0', [0], [0.69646918559786164]), ('p1', [0], [-0.25562802126346051])],
         [('p0', [1], [0.55131476908289123]), ('p1', [1], [-0.19412629039976703])],
         [('p0', [2], [0.71946896978556307]), ('p1', [2], [1.0415750381251847])],
         [('p0', [3], [0.68482973858486329]), ('p1', [3], [0.63331201764547818])],
         [('p0', [4], [0.48093190148436094]), ('p1', [4], [-1.1383681635523848])]]
     self.foo()
Example #15
0
def generic_space(name='space'):
    """Search space: PCA preprocessing (p=.8) or min-max scaling (p=.2),
    each paired with a generic classifier choice."""
    pca_branch = {'preprocessing': [pca(name + '.pca')],
                  'classifier': any_classifier(name + '.pca_clsf')}
    scaler_branch = {'preprocessing': [min_max_scaler(name + '.min_max_scaler')],
                     'classifier': any_classifier(name + '.min_max_clsf')}
    model = hp.pchoice('%s' % name, [(.8, pca_branch), (.2, scaler_branch)])
    return as_apply({'model': model})
def generic_space(name='space'):
    """Search space: with p=.8 use PCA preprocessing, with p=.2 min-max
    scaling; each branch pairs its preprocessing with a classifier choice.
    Sub-space labels are derived from `name`."""
    model = hp.pchoice('%s' % name, [
        (.8, {'preprocessing': [pca(name + '.pca')],
              'classifier': any_classifier(name + '.pca_clsf')
              }),
        (.2, {'preprocessing': [min_max_scaler(name + '.min_max_scaler')],
              'classifier': any_classifier(name + '.min_max_clsf'),
              }),
    ])
    return as_apply({'model': model})
Example #17
0
def test_repeatable():
    """Identical seeds give identical samples; a different seed differs."""
    shared = scope.uniform(0, 1)
    space = as_apply(dict(
        u=shared,
        n=scope.normal(5, 0.1),
        l=[0, 1, scope.one_of(2, 3), shared]))
    run_a = sample(space, np.random.RandomState(3))
    run_b = sample(space, np.random.RandomState(3))
    run_c = sample(space, np.random.RandomState(4))
    assert run_a == run_b
    assert run_a != run_c
Example #18
0
def test_sample():
    """Sample the mixed space once and sanity-check every component."""
    u = scope.uniform(0, 1)
    space = as_apply(
        dict(u=u, n=scope.normal(5, 0.1), l=[0, 1, scope.one_of(2, 3), u]))
    print(space)
    drawn = sample(space, np.random.default_rng(3))
    assert 0 < drawn["u"] < 1
    assert 4 < drawn["n"] < 6
    # The shared node u must yield the same value in both places.
    assert drawn["u"] == drawn["l"][3]
    assert drawn["l"][:2] == (0, 1)
    assert drawn["l"][2] in (2, 3)
Example #19
0
def _convert_partialplus(node, bindings):
    """
    Convert a `PartialPlus` node into an Apply node.

    Parameters
    ----------
    node : PartialPlus
        A `PartialPlus` object to be converted into an `Apply`.
    bindings : dict
        A dictionary mapping `PartialPlus`/`Literal` objects to Apply
        nodes already converted, for converting the elements/values
        in `node.args` and `node.keywords`.

    Returns
    -------
    apply_seq : Apply
        The equivalent `Apply` node representation.

    Notes
    -----
    Special-cases the `PartialPlus` "pos-args" node used for constructing
    dictionaries and dictionary subclasses. For these, creates an `Apply`
    with `node.args[0]` as the function and `node.args[1]` as the
    positionals.
    """
    args = node.args
    kwargs = node.keywords
    # Convert substitutable variable nodes.
    if is_variable_node(node):
        # TODO: currently variables can't have hyper(hyper)parameters
        # that are partialpluses. Fix this.
        return _convert_variable(node, bindings)
    elif is_sequence_node(node):
        return _convert_sequence(node, bindings)
    # Convert the pos_args node for, e.g. dictionaries.
    elif is_pos_args_node(node):
        assert isinstance(node.args[0], Literal)
        assert callable(node.args[0].value)
        assert len(kwargs) == 0
        f = args[0].value
        args = [pyll.as_apply([bindings[p] for p in args[1:]])]
    elif is_choice_node(node):
        return _convert_choice(node, bindings)
    else:
        f = node.func
        args = [bindings[p] for p in args]
        # BUG FIX: dict.iteritems() is Python 2 only; use items().
        kwargs = {k: bindings[v] for k, v in kwargs.items()}
    # In any case, add the function to the scope object if need be and create
    # an equivalent Apply node. define_params tells us what setup we need to
    # do if and when this node is deserialized.
    f = pyll.scope.define_if_new(f)
    apply_node = getattr(pyll.scope, f.__name__)(*args, **kwargs)
    apply_node.define_params = {'f': f}
    return apply_node
Example #20
0
def _convert_partialplus(node, bindings):
    """
    Convert a `PartialPlus` node into an Apply node.

    Parameters
    ----------
    node : PartialPlus
        A `PartialPlus` object to be converted into an `Apply`.
    bindings : dict
        A dictionary mapping `PartialPlus`/`Literal` objects to Apply
        nodes already converted, for converting the elements/values
        in `node.args` and `node.keywords`.

    Returns
    -------
    apply_seq : Apply
        The equivalent `Apply` node representation.

    Notes
    -----
    Special-cases the `PartialPlus` "pos-args" node used for constructing
    dictionaries and dictionary subclasses. For these, creates an `Apply`
    with `node.args[0]` as the function and `node.args[1]` as the
    positionals.
    """
    args = node.args
    kwargs = node.keywords
    # Convert substitutable variable nodes.
    if is_variable_node(node):
        # TODO: currently variables can't have hyper(hyper)parameters
        # that are partialpluses. Fix this.
        return _convert_variable(node, bindings)
    elif is_sequence_node(node):
        return _convert_sequence(node, bindings)
    # Convert the pos_args node for, e.g. dictionaries.
    elif is_pos_args_node(node):
        assert isinstance(node.args[0], Literal)
        assert callable(node.args[0].value)
        assert len(kwargs) == 0
        f = args[0].value
        args = [pyll.as_apply([bindings[p] for p in args[1:]])]
    elif is_choice_node(node):
        return _convert_choice(node, bindings)
    else:
        f = node.func
        args = [bindings[p] for p in args]
        # BUG FIX: dict.iteritems() is Python 2 only; use items().
        kwargs = {k: bindings[v] for k, v in kwargs.items()}
    # In any case, add the function to the scope object if need be and create
    # an equivalent Apply node. define_params tells us what setup we need to
    # do if and when this node is deserialized.
    f = pyll.scope.define_if_new(f)
    apply_node = getattr(pyll.scope, f.__name__)(*args, **kwargs)
    apply_node.define_params = {'f': f}
    return apply_node
def test_sample():
    """Sample the mixed space once and sanity-check each entry.

    Fixed: Python-2 print statement converted to a print() call.
    """
    u = scope.uniform(0, 1)
    aa = as_apply(
        dict(u=u, n=scope.normal(5, 0.1), l=[0, 1, scope.one_of(2, 3), u]))
    print(aa)
    dd = sample(aa, np.random.RandomState(3))
    assert 0 < dd['u'] < 1
    assert 4 < dd['n'] < 6
    # The shared node u must yield the same value in both places.
    assert dd['u'] == dd['l'][3]
    assert dd['l'][:2] == (0, 1)
    assert dd['l'][2] in (2, 3)
Example #22
0
def config0():
    """Build a demo config space and return it wrapped by as_apply."""
    p0 = scope.uniform(0, 1)
    p1 = scope.uniform(2, 3)
    p2 = scope.one_of(-1, p0)
    p3 = scope.one_of(-2, p1)
    p4 = 1
    p5 = [3, 4, p0]
    p6 = scope.one_of(-3, p1)
    # locals() snapshots p0..p6 as the keys of the config dict
    # (d itself is not yet bound when the RHS is evaluated).
    d = locals()
    d['p1'] = None # -- don't sample p1 all the time, only if p3 says so
    s = as_apply(d)
    return s
Example #23
0
def config0():
    """Build a demo config space and return it wrapped by as_apply."""
    p0 = scope.uniform(0, 1)
    p1 = scope.uniform(2, 3)
    p2 = scope.one_of(-1, p0)
    p3 = scope.one_of(-2, p1)
    p4 = 1
    p5 = [3, 4, p0]
    p6 = scope.one_of(-3, p1)
    # locals() snapshots p0..p6 as the keys of the config dict
    # (d itself is not yet bound when the RHS is evaluated).
    d = locals()
    d['p1'] = None  # -- don't sample p1 all the time, only if p3 says so
    s = as_apply(d)
    return s
Example #24
0
def test_expr_to_config():
    """expr_to_config maps every hyperparameter label to its distribution
    node and the conditions under which it gets sampled."""
    z = hp.randint("z", 10)
    a = hp.choice(
        "a",
        [
            hp.uniform("b", -1, 1) + z,
            {
                "c": 1,
                "d": hp.choice(
                    "d",
                    [3 + hp.loguniform("c", 0, 1),
                     1 + hp.loguniform("e", 0, 1)]),
            },
        ],
    )

    expr = as_apply((a, z))

    hps = {}
    expr_to_config(expr, (True, ), hps)

    for label, info in hps.items():
        print(label)
        node = info["node"]
        arg_strs = [str(inp.eval()) for inp in node.inputs()]
        print("  dist: %s(%s)" % (node.name, ", ".join(arg_strs)))
        conditions = info["conditions"]
        if len(conditions) > 1:
            print("  conditions (OR):")
            for condseq in conditions:
                print("    ", " AND ".join(map(str, condseq)))
        elif conditions:
            for condseq in conditions:
                print("  conditions :", " AND ".join(map(str, condseq)))

    # Every label maps to the expected underlying distribution node.
    expected_dists = {
        "a": "randint",
        "b": "uniform",
        "c": "loguniform",
        "d": "randint",
        "e": "loguniform",
        "z": "randint",
    }
    for label, dist_name in expected_dists.items():
        assert hps[label]["node"].name == dist_name

    # EQ instances must compare by value (sanity check), then the
    # condition sets themselves are verified.
    assert {(True, EQ("a", 0))} == {(True, EQ("a", 0))}
    assert hps["a"]["conditions"] == {(True, )}
    assert hps["b"]["conditions"] == {(True, EQ("a", 0))}, hps["b"]["conditions"]
    assert hps["c"]["conditions"] == {(True, EQ("a", 1), EQ("d", 0))}
    assert hps["d"]["conditions"] == {(True, EQ("a", 1))}
    assert hps["e"]["conditions"] == {(True, EQ("a", 1), EQ("d", 1))}
    assert hps["z"]["conditions"] == {(True, ), (True, EQ("a", 0))}
Example #25
0
def test_sample():
    """Sample the mixed space once and sanity-check each entry.

    Fixed: Python-2 print statement converted to a print() call.
    """
    u = scope.uniform(0, 1)
    aa = as_apply(dict(
        u=u,
        n=scope.normal(5, 0.1),
        l=[0, 1, scope.one_of(2, 3), u]))
    print(aa)
    dd = sample(aa, np.random.RandomState(3))
    assert 0 < dd['u'] < 1
    assert 4 < dd['n'] < 6
    # The shared node u must yield the same value in both places.
    assert dd['u'] == dd['l'][3]
    assert dd['l'][:2] == (0, 1)
    assert dd['l'][2] in (2, 3)
Example #26
0
def test_vectorize_trivial():
    """With loss == p0 itself, each vectorized loss equals the sampled p0."""
    n_trials = as_apply(15)

    p0 = hp_uniform("p0", 0, 1)
    loss = p0
    print(loss)

    helper = VectorizeHelper(loss, scope.range(n_trials), build=True)
    bundle = as_apply([helper.v_expr,
                       helper.idxs_by_label(),
                       helper.vals_by_label()])
    bundle = replace_repeat_stochastic(bundle)
    # Seed the graph so the draws are reproducible.
    seeded = recursive_set_rng_kwarg(bundle, as_apply(np.random.RandomState(1)))

    losses, idxs, vals = rec_eval(seeded)
    print("losses", losses)
    print("idxs p0", idxs["p0"])
    print("vals p0", vals["p0"])
    sampled = dict(zip(idxs["p0"], vals["p0"]))
    for trial, loss_val in enumerate(losses):
        assert sampled[trial] == loss_val
Example #27
0
def test_expr_to_config():
    """expr_to_config maps each hyperparameter label to its distribution
    node and sampling conditions.

    Fixed: Python-2 print statements converted to print() calls.
    """
    z = hp.randint('z', 10)
    a = hp.choice('a',
                  [
                      hp.uniform('b', -1, 1) + z,
                      {'c': 1, 'd': hp.choice('d',
                                              [3 + hp.loguniform('c', 0, 1),
                                               1 + hp.loguniform('e', 0, 1)])
                      }])

    expr = as_apply((a, z))

    hps = {}
    expr_to_config(expr, (True,), hps)

    for label, dct in hps.items():
        print(label)
        print('  dist: %s(%s)' % (
            dct['node'].name,
            ', '.join(map(str, [ii.eval() for ii in dct['node'].inputs()]))))
        if len(dct['conditions']) > 1:
            print('  conditions (OR):')
            for condseq in dct['conditions']:
                print('    ', ' AND '.join(map(str, condseq)))
        elif dct['conditions']:
            for condseq in dct['conditions']:
                print('  conditions :', ' AND '.join(map(str, condseq)))

    assert hps['a']['node'].name == 'randint'
    assert hps['b']['node'].name == 'uniform'
    assert hps['c']['node'].name == 'loguniform'
    assert hps['d']['node'].name == 'randint'
    assert hps['e']['node'].name == 'loguniform'
    assert hps['z']['node'].name == 'randint'

    assert set([(True, EQ('a', 0))]) == set([(True, EQ('a', 0))])
    assert hps['a']['conditions'] == set([(True,)])
    assert hps['b']['conditions'] == set([
        (True, EQ('a', 0))]), hps['b']['conditions']
    assert hps['c']['conditions'] == set([
        (True, EQ('a', 1), EQ('d', 0))])
    assert hps['d']['conditions'] == set([
        (True, EQ('a', 1))])
    assert hps['e']['conditions'] == set([
        (True, EQ('a', 1), EQ('d', 1))])
    assert hps['z']['conditions'] == set([
        (True,),
        (True, EQ('a', 0))])
Example #28
0
 def test4(self):
     """p0 = uniform + normal: the two stochastic arguments are recorded
     separately as 'p0.arg:0' and 'p0.arg:1' (presumably self.foo()
     samples and compares against self.wanted — confirm in the base
     class)."""
     self.expr = as_apply(dict(p0=uniform(0, 1) + normal(0, 1)))
     # Expected (label, idxs, vals) pairs for trials 0..4.
     self.wanted = [
             [('p0.arg:0', [0], [0.69646918559786164]),
                 ('p0.arg:1', [0], [-0.95209720686132215])],
             [('p0.arg:0', [1], [0.55131476908289123]),
                 ('p0.arg:1', [1], [-0.74544105948265826])],
             [('p0.arg:0', [2], [0.71946896978556307]),
                 ('p0.arg:1', [2], [0.32210606833962163])],
             [('p0.arg:0', [3], [0.68482973858486329]),
                 ('p0.arg:1', [3], [-0.0515177209393851])],
             [('p0.arg:0', [4], [0.48093190148436094]),
                 ('p0.arg:1', [4], [-1.6193000650367457])]]
     self.foo()
def test_expr_to_config():
    """expr_to_config maps each hyperparameter label to its distribution
    node and sampling conditions.

    Fixed: Python-2 print statements converted to print() calls.
    """
    z = hp.randint('z', 10)
    a = hp.choice('a', [
        hp.uniform('b', -1, 1) + z, {
            'c':
            1,
            'd':
            hp.choice(
                'd',
                [3 + hp.loguniform('c', 0, 1), 1 + hp.loguniform('e', 0, 1)])
        }
    ])

    expr = as_apply((a, z))

    hps = {}
    expr_to_config(expr, (True, ), hps)

    for label, dct in hps.items():
        print(label)
        print('  dist: %s(%s)' % (dct['node'].name, ', '.join(
            map(str, [ii.eval() for ii in dct['node'].inputs()]))))
        if len(dct['conditions']) > 1:
            print('  conditions (OR):')
            for condseq in dct['conditions']:
                print('    ', ' AND '.join(map(str, condseq)))
        elif dct['conditions']:
            for condseq in dct['conditions']:
                print('  conditions :', ' AND '.join(map(str, condseq)))

    assert hps['a']['node'].name == 'randint'
    assert hps['b']['node'].name == 'uniform'
    assert hps['c']['node'].name == 'loguniform'
    assert hps['d']['node'].name == 'randint'
    assert hps['e']['node'].name == 'loguniform'
    assert hps['z']['node'].name == 'randint'

    assert set([(True, EQ('a', 0))]) == set([(True, EQ('a', 0))])
    assert hps['a']['conditions'] == set([(True, )])
    assert hps['b']['conditions'] == set([(True, EQ('a', 0))
                                          ]), hps['b']['conditions']
    assert hps['c']['conditions'] == set([(True, EQ('a', 1), EQ('d', 0))])
    assert hps['d']['conditions'] == set([(True, EQ('a', 1))])
    assert hps['e']['conditions'] == set([(True, EQ('a', 1), EQ('d', 1))])
    assert hps['z']['conditions'] == set([(True, ), (True, EQ('a', 0))])
Example #30
0
def test_lnorm():
    """Build an lnorm kwargs search space and check it can be stringified.

    Fixed: Python-2 print statements converted to print() calls.
    """
    G = scope
    choice = G.choice
    uniform = G.uniform
    quantized_uniform = G.quniform

    inker_size = quantized_uniform(low=0, high=7.99, q=2) + 3
    # -- test that it runs
    lnorm = as_apply({'kwargs': {'inker_shape': (inker_size, inker_size),
                                 'outker_shape': (inker_size, inker_size),
                                 'remove_mean': choice([0, 1]),
                                 'stretch': uniform(low=0, high=10),
                                 'threshold': uniform(
                                     low=.1 / np.sqrt(10.),
                                     high=10 * np.sqrt(10))
                                 }})
    print(lnorm)
    print('len', len(str(lnorm)))
Example #31
0
 def work(self):
     """Test that all prior samplers are gone"""
     tpe_algo = TreeParzenEstimator(self.bandit)
     # Posterior idxs/vals graph built by the TPE algorithm.
     foo = pyll.as_apply(
         [tpe_algo.post_below['idxs'], tpe_algo.post_below['vals']])
     # pyll node names of the *prior* distributions; after the TPE
     # rewrite, none of them may remain in the graph.
     prior_names = [
         'uniform',
         'quniform',
         'loguniform',
         'qloguniform',
         'normal',
         'qnormal',
         'lognormal',
         'qlognormal',
         'randint',
     ]
     # Walk the entire graph: no prior sampler may survive.
     for node in pyll.dfs(foo):
         assert node.name not in prior_names
Example #32
0
def test_lnorm():
    """Build an lnorm kwargs search space and check it can be stringified.

    Fixed two futurize conversion artifacts: `old_div(a, b)` replaced by
    plain true division (identical for these float operands), and the
    final print no longer wraps its arguments in an extra tuple.
    """
    G = scope
    choice = G.choice
    uniform = G.uniform
    quantized_uniform = G.quniform

    inker_size = quantized_uniform(low=0, high=7.99, q=2) + 3
    # -- test that it runs
    lnorm = as_apply({'kwargs': {'inker_shape': (inker_size, inker_size),
                                 'outker_shape': (inker_size, inker_size),
                                 'remove_mean': choice([0, 1]),
                                 'stretch': uniform(low=0, high=10),
                                 'threshold': uniform(
                                     low=.1 / np.sqrt(10.),
                                     high=10 * np.sqrt(10))
                                 }})
    print(lnorm)
    print('len', len(str(lnorm)))
Example #33
0
def _convert_sequence(pp_seq, bindings):
    """
    Turn a tuple/list `PartialPlus` node into the equivalent Apply node.

    Parameters
    ----------
    pp_seq : PartialPlus
        Must be a tuple node or a list node.
    bindings : dict
        Maps `PartialPlus`/`Literal` objects to their already-converted
        `Apply` nodes; used to look up each element of the sequence.

    Returns
    -------
    apply_seq : Apply
        The `Apply` node wrapping the converted elements, in order.
    """
    converted = [bindings[element] for element in pp_seq.args]
    return pyll.as_apply(converted)
Example #34
0
def _convert_sequence(pp_seq, bindings):
    """
    Convert a tuple or list node into the equivalent Apply node.

    Parameters
    ----------
    pp_seq : PartialPlus
        Must be a tuple node or a list node.
    bindings : dict
        A dictionary mapping `PartialPlus`/`Literal` objects to Apply
        nodes already converted, for converting the elements of the sequence.

    Returns
    -------
    apply_seq : Apply
        The equivalent `Apply` node representation.
    """
    # Look up the already-converted Apply node of each element, in order.
    return pyll.as_apply([bindings[p] for p in pp_seq.args])
Example #35
0
 def work(self):
     """Test that all prior samplers are gone"""
     tpe_algo = TreeParzenEstimator(self.bandit)
     # Posterior idxs/vals graph built by the TPE algorithm.
     foo = pyll.as_apply([
         tpe_algo.post_below['idxs'],
         tpe_algo.post_below['vals']])
     # pyll node names of the *prior* distributions; after the TPE
     # rewrite, none of them may remain in the graph.
     prior_names = [
             'uniform',
             'quniform',
             'loguniform',
             'qloguniform',
             'normal',
             'qnormal',
             'lognormal',
             'qlognormal',
             'randint',
             ]
     # Walk the entire graph: no prior sampler may survive.
     for node in pyll.dfs(foo):
         assert node.name not in prior_names
Example #36
0
def test_lnorm():
    """Build an lnorm kwargs search space and check it can be stringified.

    Fixed two futurize conversion artifacts: `old_div(a, b)` replaced by
    plain true division (identical for these float operands), and the
    final print no longer wraps its arguments in an extra tuple.
    """
    G = scope
    choice = G.choice
    uniform = G.uniform
    quantized_uniform = G.quniform

    inker_size = quantized_uniform(low=0, high=7.99, q=2) + 3
    # -- test that it runs
    lnorm = as_apply({
        "kwargs": {
            "inker_shape": (inker_size, inker_size),
            "outker_shape": (inker_size, inker_size),
            "remove_mean": choice([0, 1]),
            "stretch": uniform(low=0, high=10),
            "threshold": uniform(low=0.1 / np.sqrt(10.0),
                                 high=10 * np.sqrt(10)),
        }
    })
    print(lnorm)
    print("len", len(str(lnorm)))
Example #37
0
def test_vectorize_config0():
    """Vectorize a mixed config space and verify conditional sampling:
    p1 should only be sampled for trials whose p3/p6 choices select it."""
    p0 = hp_uniform('p0', 0, 1)
    p1 = hp_loguniform('p1', 2, 3)
    p2 = hp_choice('p2', [-1, p0])
    p3 = hp_choice('p3', [-2, p1])
    p4 = 1
    p5 = [3, 4, p0]
    p6 = hp_choice('p6', [-3, p1])
    # locals() snapshots p0..p6 as the keys of the config dict.
    d = locals()
    d['p1'] = None # -- don't sample p1 all the time, only if p3 says so
    config = as_apply(d)

    # N is a symbolic placeholder; a concrete value is supplied via `memo`
    # at rec_eval time below.
    N = as_apply('N:TBA')
    expr = config
    expr_idxs = scope.range(N)
    vh = VectorizeHelper(expr, expr_idxs, build=True)
    vconfig = vh.v_expr

    full_output = as_apply([vconfig, vh.idxs_by_label(), vh.vals_by_label()])

    # `if 1:` / `if 0:` are deliberate debug-dump toggles.
    if 1:
        print('=' * 80)
        print('VECTORIZED')
        print(full_output)
        print('\n' * 1)

    fo2 = replace_repeat_stochastic(full_output)
    if 0:
        print('=' * 80)
        print('VECTORIZED STOCHASTIC')
        print(fo2)
        print('\n' * 1)

    # Fixed seed for reproducible draws.
    new_vc = recursive_set_rng_kwarg(
            fo2,
            as_apply(np.random.RandomState(1))
            )
    if 0:
        print('=' * 80)
        print('VECTORIZED STOCHASTIC WITH RNGS')
        print(new_vc)

    Nval = 10
    foo, idxs, vals = rec_eval(new_vc, memo={N: Nval})

    print('foo[0]', foo[0])
    print('foo[1]', foo[1])
    assert len(foo) == Nval
    if 0:  # XXX refresh these values to lock down sampler
        assert foo[0] == {
            'p0': 0.39676747423066994,
            'p1': None,
            'p2': 0.39676747423066994,
            'p3': 2.1281244479293568,
            'p4': 1,
            'p5': (3, 4, 0.39676747423066994) }
    assert foo[1] != foo[2]

    print(idxs)
    print(vals['p3'])
    print(vals['p6'])
    print(idxs['p1'])
    print(vals['p1'])
    assert len(vals['p3']) == Nval
    assert len(vals['p6']) == Nval
    # p1 is conditional, so it must be sampled on fewer than Nval trials.
    assert len(idxs['p1']) < Nval
    p1d = dict(list(zip(idxs['p1'], vals['p1'])))
    for ii, (p3v, p6v) in enumerate(zip(vals['p3'], vals['p6'])):
        if p3v == p6v == 0:
            assert ii not in idxs['p1']
        if p3v:
            assert foo[ii]['p3'] == p1d[ii]
        if p6v:
            print('p6', foo[ii]['p6'], p1d[ii])
            assert foo[ii]['p6'] == p1d[ii]
Example #38
0
def test_sample_deterministic():
    """A space with no stochastic nodes always samples to the same tuple."""
    space = as_apply([0, 1])
    print(space)
    drawn = sample(space, np.random.default_rng(3))
    assert drawn == (0, 1)
Example #39
0
def test_vectorize_config0():
    """Vectorize a mixed config space and verify conditional sampling:
    p1 should only be sampled for trials whose p3/p6 choices select it.

    Fixed: Python-2 print statements converted to print() calls; no other
    statement was touched, so the RNG draw order is unchanged.
    """
    p0 = hp_uniform('p0', 0, 1)
    p1 = hp_loguniform('p1', 2, 3)
    p2 = hp_choice('p2', [-1, p0])
    p3 = hp_choice('p3', [-2, p1])
    p4 = 1
    p5 = [3, 4, p0]
    p6 = hp_choice('p6', [-3, p1])
    # locals() snapshots p0..p6 as the keys of the config dict.
    d = locals()
    d['p1'] = None  # -- don't sample p1 all the time, only if p3 says so
    config = as_apply(d)

    N = as_apply('N:TBA')
    expr = config
    expr_idxs = scope.range(N)
    vh = VectorizeHelper(expr, expr_idxs, build=True)
    vconfig = vh.v_expr

    full_output = as_apply([vconfig, vh.idxs_by_label(), vh.vals_by_label()])

    if 1:
        print('=' * 80)
        print('VECTORIZED')
        print(full_output)
        print('\n' * 1)

    fo2 = replace_repeat_stochastic(full_output)
    if 0:
        print('=' * 80)
        print('VECTORIZED STOCHASTIC')
        print(fo2)
        print('\n' * 1)

    new_vc = recursive_set_rng_kwarg(fo2, as_apply(np.random.RandomState(1)))
    if 0:
        print('=' * 80)
        print('VECTORIZED STOCHASTIC WITH RNGS')
        print(new_vc)

    Nval = 10
    foo, idxs, vals = rec_eval(new_vc, memo={N: Nval})

    print('foo[0]', foo[0])
    print('foo[1]', foo[1])
    assert len(foo) == Nval
    if 0:  # XXX refresh these values to lock down sampler
        assert foo[0] == {
            'p0': 0.39676747423066994,
            'p1': None,
            'p2': 0.39676747423066994,
            'p3': 2.1281244479293568,
            'p4': 1,
            'p5': (3, 4, 0.39676747423066994)
        }
    assert foo[1] != foo[2]

    print(idxs)
    print(vals['p3'])
    print(vals['p6'])
    print(idxs['p1'])
    print(vals['p1'])
    assert len(vals['p3']) == Nval
    assert len(vals['p6']) == Nval
    assert len(idxs['p1']) < Nval
    p1d = dict(zip(idxs['p1'], vals['p1']))
    for ii, (p3v, p6v) in enumerate(zip(vals['p3'], vals['p6'])):
        if p3v == p6v == 0:
            assert ii not in idxs['p1']
        if p3v:
            assert foo[ii]['p3'] == p1d[ii]
        if p6v:
            print('p6', foo[ii]['p6'], p1d[ii])
            assert foo[ii]['p6'] == p1d[ii]
Example #40
0
from hyperopt import pyll

import pyll_slm  # adds the symbols to pyll.scope

from .slm_visitor_primal import uslm_eval_helper
from .slm import uslm_domain


dumps = functools.partial(cPickle.dumps, protocol=-1)
logger = logging.getLogger(__name__)


# Sentinel class: stands in for the real data view inside pyll graphs;
# presumably substituted with an actual data view at evaluation time —
# TODO confirm against callers.
class DataView(object):
     pass

# Literal pyll node wrapping the placeholder class itself.
pyll_data_view = pyll.as_apply(DataView)


@pyll.scope.define
def cifar10_unsup_images(data_view, N):
    """Extract the first N fit-set images and return them channel-major."""
    selected = data_view.dataset._pixels[data_view.fit_idxs[:N]]
    pixels = np.asarray(selected)
    assert str(pixels.dtype) == 'uint8'
    # NHWC -> NCHW; copy() makes the transposed result contiguous.
    channel_major = pixels.transpose(0, 3, 1, 2).copy()
    assert channel_major.shape[1] in (1, 3)  # -- channels
    return channel_major

Example #41
0
def _convert_literal(pp_literal):
    """Wrap the value held by a searchspaces Literal in a hyperopt Apply."""
    literal_value = pp_literal.value
    return pyll.as_apply(literal_value)
Example #42
0
import slm_visitor_esvc

import foobar

warn = logging.getLogger(__name__).warn
info = logging.getLogger(__name__).info

# XXX: this is related to a hack for cacheing features to disk
#      see e.g. shovel/lfw.py, slm_visitor_esvc.py
dbname = 'lfw_db'


# Sentinel class: stands in for the real data view inside pyll graphs;
# presumably substituted with an actual data view at evaluation time —
# TODO confirm against callers.
class DataViewPlaceHolder(object):
     pass

# Literal pyll node wrapping the placeholder class itself.
pyll_data_view = pyll.as_apply(DataViewPlaceHolder)


@pyll.scope.define
def unsup_images(data_view, trn, N):
    """
    Return a block of unsupervised-training images.

    NOTE(review): this definition appears truncated in this view — only
    the 'DevTrain' branch is visible and no value is returned here.
    Confirm against the full source before relying on this docstring.
    """
    if trn == 'DevTrain':
        # -- extract training images, and put them into channel-major format
        imgs = larray.reindex(data_view.image_pixels,
                data_view.dev_train['lpathidx'][0, :N])[:]
        imgs = np.asarray(imgs)
        assert 'int' in str(imgs.dtype)
        # Record a signature/trace of the extracted block (debug bookkeeping).
        foobar.append_ndarray_signature(imgs, 'unsup_images')
        foobar.append_trace('unsup_images N', N)
Example #43
0
def test_sample_deterministic():
    """Sampling an expression with no stochastic nodes returns its value.

    Fix: the original used the Python 2 `print aa` statement, which is a
    SyntaxError under Python 3 and inconsistent with the print() calls used
    elsewhere in this file.
    """
    aa = as_apply([0, 1])
    print(aa)
    dd = sample(aa, np.random.RandomState(3))
    assert dd == (0, 1)
Example #44
0
    def __init__(self, fn, expr,
                 workdir=None,
                 pass_expr_memo_ctrl=None,
                 name=None,
                 loss_target=None,
                 ):
        """
        Parameters
        ----------

        fn : callable
            This stores the `fn` argument to `fmin`. (See `hyperopt.fmin.fmin`)

        expr : hyperopt.pyll.Apply
            This is the `space` argument to `fmin`. (See `hyperopt.fmin.fmin`)

        workdir : string (or None)
            If non-None, the current working directory will be `workdir` while
            `expr` and `fn` are evaluated. (XXX Currently only respected by
            jobs run via MongoWorker)

        pass_expr_memo_ctrl : bool
            If True, `fn` will be called like this:
            `fn(self.expr, memo, ctrl)`,
            where `memo` is a dictionary mapping `Apply` nodes to their
            computed values, and `ctrl` is a `Ctrl` instance for communicating
            with a Trials database.  This lower-level calling convention is
            useful if you want to call e.g. `hyperopt.pyll.rec_eval` yourself
            in some customized way.

        name : string (or None)
            Label, used for pretty-printing.

        loss_target : float (or None)
            The actual or estimated minimum of `fn`.
            Some optimization algorithms may behave differently if their first
            objective is to find an input that achieves a certain value,
            rather than the more open-ended objective of pure minimization.
            XXX: Move this from Domain to be an fmin arg.

        """
        self.fn = fn
        # -- unless the caller decided explicitly, let `fn` itself opt in to
        #    the low-level (expr, memo, ctrl) calling convention via an
        #    `fmin_pass_expr_memo_ctrl` attribute set on the callable.
        if pass_expr_memo_ctrl is None:
            self.pass_expr_memo_ctrl = getattr(fn,
                                               'fmin_pass_expr_memo_ctrl',
                                               False)
        else:
            self.pass_expr_memo_ctrl = pass_expr_memo_ctrl

        self.expr = pyll.as_apply(expr)

        # -- collect the labeled hyperparameters of the search space: every
        #    `hyperopt_param` node contributes one entry; a label appearing
        #    twice is an error.
        self.params = {}
        for node in pyll.dfs(self.expr):
            if node.name == 'hyperopt_param':
                label = node.arg['label'].obj
                if label in self.params:
                    raise DuplicateLabel(label)
                self.params[label] = node.arg['obj']

        self.loss_target = loss_target
        self.name = name

        self.workdir = workdir
        self.s_new_ids = pyll.Literal('new_ids')  # -- list at eval-time
        # -- snapshot the graph so we can verify below that vectorization
        #    did not mutate it
        before = pyll.dfs(self.expr)
        # -- raises exception if expr contains cycles
        pyll.toposort(self.expr)
        vh = self.vh = VectorizeHelper(self.expr, self.s_new_ids)
        # -- raises exception if v_expr contains cycles
        pyll.toposort(vh.v_expr)

        idxs_by_label = vh.idxs_by_label()
        vals_by_label = vh.vals_by_label()
        after = pyll.dfs(self.expr)
        # -- try to detect if VectorizeHelper screwed up anything inplace
        assert before == after
        assert set(idxs_by_label.keys()) == set(vals_by_label.keys())
        assert set(idxs_by_label.keys()) == set(self.params.keys())

        self.s_rng = pyll.Literal('rng-placeholder')
        # -- N.B. operates inplace:
        self.s_idxs_vals = recursive_set_rng_kwarg(
            pyll.scope.pos_args(idxs_by_label, vals_by_label),
            self.s_rng)

        # -- raises an exception if no topological ordering exists
        pyll.toposort(self.s_idxs_vals)

        # -- Protocol for serialization.
        #    self.cmd indicates to e.g. MongoWorker how this domain
        #    should be [un]serialized.
        #    XXX This mechanism deserves review as support for ipython
        #        workers improves.
        self.cmd = ('domain_attachment', 'FMinIter_Domain')
def test_sample_deterministic():
    """Sampling an expression with no stochastic nodes returns its value.

    Fix: the original used the Python 2 `print aa` statement, which is a
    SyntaxError under Python 3 and inconsistent with the print() calls used
    elsewhere in this file.
    """
    aa = as_apply([0, 1])
    print(aa)
    dd = sample(aa, np.random.RandomState(3))
    assert dd == (0, 1)
Example #46
0
def test_vectorize_config0():
    """Vectorize a config with conditional hyperparameters and sample it.

    `p1` is only drawn when one of the choice nodes (`p3`, `p6`) selects it,
    so after sampling, `idxs["p1"]` must be a strict subset of range(N).

    Fix: the old check
    `(foo[1].keys() != foo[2].keys()) or (foo[1].values() != foo[2].values())`
    was vacuous under Python 3 -- `dict_values` views define no `__eq__`, so
    `!=` falls back to object identity and the second clause is always True.
    Comparing the dicts directly expresses the intent: two independent
    samples should differ.
    """
    p0 = hp_uniform("p0", 0, 1)
    p1 = hp_loguniform("p1", 2, 3)
    p2 = hp_choice("p2", [-1, p0])
    p3 = hp_choice("p3", [-2, p1])
    p4 = 1
    p5 = [3, 4, p0]
    p6 = hp_choice("p6", [-3, p1])
    d = locals()
    d["p1"] = None  # -- don't sample p1 all the time, only if p3 says so
    config = as_apply(d)

    N = as_apply("N:TBA")
    expr = config
    expr_idxs = scope.range(N)
    vh = VectorizeHelper(expr, expr_idxs, build=True)
    vconfig = vh.v_expr

    full_output = as_apply([vconfig, vh.idxs_by_label(), vh.vals_by_label()])

    if 1:
        print("=" * 80)
        print("VECTORIZED")
        print(full_output)
        print("\n" * 1)

    fo2 = replace_repeat_stochastic(full_output)
    if 0:
        print("=" * 80)
        print("VECTORIZED STOCHASTIC")
        print(fo2)
        print("\n" * 1)

    new_vc = recursive_set_rng_kwarg(fo2, as_apply(np.random.RandomState(1)))
    if 0:
        print("=" * 80)
        print("VECTORIZED STOCHASTIC WITH RNGS")
        print(new_vc)

    Nval = 10
    foo, idxs, vals = rec_eval(new_vc, memo={N: Nval})

    print("foo[0]", foo[0])
    print("foo[1]", foo[1])
    assert len(foo) == Nval
    if 0:  # XXX refresh these values to lock down sampler
        assert foo[0] == {
            "p0": 0.39676747423066994,
            "p1": None,
            "p2": 0.39676747423066994,
            "p3": 2.1281244479293568,
            "p4": 1,
            "p5": (3, 4, 0.39676747423066994),
        }
    # -- two independent samples should not be identical
    assert foo[1] != foo[2]

    print(idxs)
    print(vals["p3"])
    print(vals["p6"])
    print(idxs["p1"])
    print(vals["p1"])
    assert len(vals["p3"]) == Nval
    assert len(vals["p6"]) == Nval
    assert len(idxs["p1"]) < Nval
    p1d = dict(list(zip(idxs["p1"], vals["p1"])))
    for ii, (p3v, p6v) in enumerate(zip(vals["p3"], vals["p6"])):
        if p3v == p6v == 0:
            assert ii not in idxs["p1"]
        if p3v:
            assert foo[ii]["p3"] == p1d[ii]
        if p6v:
            print("p6", foo[ii]["p6"], p1d[ii])
            assert foo[ii]["p6"] == p1d[ii]
Example #47
0
#     'data_weight': hp.lognormal('dw', -2 * r, 1 * r),
#     'data_max': hp.lognormal('dm', 2 * r, 1 * r),
#     'disc_max': hp.lognormal('cm', 1 * r, 1 * r),
# }
# -- search space: each entry is a named hyperopt stochastic expression.
#    NOTE(review): `r` scales the lognormal spreads and is defined earlier
#    in this file -- confirm its value when tuning.
space = collections.OrderedDict([
    ('laplacian_ksize', pyll.scope.int(1 + hp.quniform('laplacian_ksize', 0, 20, 2))),
    ('laplacian_scale', hp.lognormal('laplacian_scale', 0, r)),
    ('data_weight', hp.lognormal('data_weight', -2*r, r)),
    ('data_max', hp.lognormal('data_max', 2*r, r)),
    ('data_exp', hp.lognormal('data_exp', 0, r)),
    ('disc_max', hp.lognormal('disc_max', r, r)),
    ('smooth', hp.lognormal('smooth', 0, r)),
    # ('post_smooth', hp.lognormal('post_smooth', 0, r)),
])

expr = pyll.as_apply(space)


def arg_key(args):
    """Return a hashable key for `args`: its values in `space`'s fixed order."""
    # -- was `arg_key = lambda args: ...`; PEP 8 (E731) prefers `def` for
    #    named callables (better tracebacks and introspection)
    return tuple(args[key] for key in space)

def get_args(params):
    """Evaluate `expr` with every hyperopt_param node bound from `params`."""
    memo = {}
    for node in pyll.dfs(expr):
        if node.name == 'hyperopt_param':
            memo[node] = params[node.arg['label'].obj]
    return pyll.rec_eval(expr, memo=memo)

# best = hyperopt.fmin(objective, space, algo=hyperopt.tpe.suggest, max_evals=1)
# -- run the TPE search; `best` maps hyperparameter labels to winning values
best = hyperopt.fmin(objective, space, algo=hyperopt.tpe.suggest, max_evals=500)
# print("Best: %s" % best)

# -- re-evaluate the space at `best` and look up its recorded error.
#    NOTE(review): assumes `errors` (defined elsewhere in this file) was
#    populated for every sampled arg_key during the search -- confirm.
args = get_args(best)
error = errors[arg_key(args)]
assert set(args) == set(space)