Code Example #1
File: slm.py  Project: xiaoxiao19/hyperopt-convnet
def exit_grid(pipeline, layer_num, Xcm, n_patches, max_n_features):
    def lab(msg):
        return 'l%ieg_%s' % (layer_num, msg)

    fsize = rfilter_size(lab('fsize'), 3, 8)

    grid_res = hp_choice(lab('res'), [2, 3])
    grid_features_per_filter = 2 * (grid_res ** 2)
    grid_nfilters = max_n_features // grid_features_per_filter

    grid_filtering = new_fbncc_layer(
            prefix='l%ieg' % layer_num,
            Xcm=Xcm,
            n_patches=n_patches,
            n_filters=grid_nfilters,
            size=fsize,
            )

    grid_pooling = partial(slm_quantize_gridpool,
            alpha=hp_normal(lab('alpha'), 0.0, 1.0),
            use_mid=False,
            grid_res=grid_res,
            order=hp_choice(lab('order'), [
                1.0, 2.0, logu_range(lab('order_real'), .1, 10.)]))

    return new_exit(pipeline + [grid_filtering, grid_pooling], lab('%s'))
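As a quick check of the arithmetic above: with grid_res = 2, each filter contributes 2 * (2 ** 2) = 8 features, so a budget of max_n_features = 16000 (the default used by uslm_domain in Code Example #18) gives grid_nfilters = 16000 // 8 = 2000; with grid_res = 3 it gives 16000 // 18 = 888.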
Code Example #2
File: test_base.py  Project: wqren/hyperopt
def test7(self):
    p0 = hp_uniform('p0', 0, 1)
    p1 = hp_normal('p1', 0, 1)
    p2 = hp_choice('p2', [1, p0])
    p3 = hp_choice('p3', [2, p1, p2, hp_uniform('a0', 2, 3)])
    self.expr = {'loss': p0 + p1 + p2 + p3}
    self.n_randints = 2
    self.wanted = [[('p0', [0], [0.71295532052322719]),
                    ('p1', [0], [0.28297849805199204]),
                    ('p2.randint', [0], [0]),
                    ('p3.arg:2', [0], [2.719468969785563]),
                    ('p3.randint', [0], [2])],
                   [('p0', [1], [0.78002776191207912]),
                    ('p1', [1], [-1.506294713918092]),
                    ('p2.randint', [1], [1]), ('p3.arg:2', [], []),
                    ('p3.randint', [1], [1])],
                   [('p0', [2], [0.57969429702261011]),
                    ('p1', [2], [1.6796003743035337]),
                    ('p2.randint', [2], [0]), ('p3.arg:2', [], []),
                    ('p3.randint', [2], [1])],
                   [('p0', [3], [0.43857224467962441]),
                    ('p1', [3], [-1.3058031267484451]),
                    ('p2.randint', [3], [1]), ('p3.arg:2', [], []),
                    ('p3.randint', [3], [1])],
                   [('p0', [4], [0.39804425533043142]),
                    ('p1', [4], [-0.91948540682140967]),
                    ('p2.randint', [4], [0]), ('p3.arg:2', [], []),
                    ('p3.randint', [4], [0])]]
    self.foo()
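The structure of wanted above shows how hyperopt labels conditional parameters: each hp_choice gets a hidden <label>.randint entry recording which branch was drawn, and branch-specific values appear under <label>.arg:<n>; entries like ('p3.arg:2', [], []) are empty on trials where that branch was not taken. A minimal sketch of drawing from such a conditional space with hyperopt's public API (the hp_* names in these tests correspond to hyperopt.hp.*; the space below is illustrative, not the test's):

import numpy as np
from hyperopt import hp
from hyperopt.pyll.stochastic import sample

# One hp.choice whose second branch holds a conditional hp.uniform;
# the uniform is only drawn on trials where branch 1 is selected.
space = hp.choice('p2', [1, hp.uniform('p0', 0, 1)])
rng = np.random.RandomState(1)
for _ in range(5):
    print(sample(space, rng=rng))  # either the constant 1 or a float in [0, 1)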
Code Example #3
File: test_vectorize.py  Project: goller/hyperopt
def test_vectorize_multipath():
    N = as_apply(15)

    p0 = hp_uniform('p0', 0, 1)
    loss = hp_choice('p1', [1, p0, -p0])**2
    expr_idxs = scope.range(N)
    vh = VectorizeHelper(loss, expr_idxs, build=True)

    vloss = vh.v_expr
    print(vloss)

    full_output = as_apply([vloss, vh.idxs_by_label(), vh.vals_by_label()])

    new_vc = recursive_set_rng_kwarg(
        full_output,
        as_apply(np.random.RandomState(1)),
    )

    losses, idxs, vals = rec_eval(new_vc)
    print('losses', losses)
    print('idxs p0', idxs['p0'])
    print('vals p0', vals['p0'])
    print('idxs p1', idxs['p1'])
    print('vals p1', vals['p1'])
    p0dct = dict(zip(idxs['p0'], vals['p0']))
    p1dct = dict(zip(idxs['p1'], vals['p1']))
    for ii, li in enumerate(losses):
        print(ii, li)
        if p1dct[ii] != 0:
            assert li == p0dct[ii]**2
        else:
            assert li == 1
Code Example #4
def test_vectorize_multipath():
    N = as_apply(15)

    p0 = hp_uniform('p0', 0, 1)
    loss = hp_choice('p1', [1, p0, -p0]) ** 2
    expr_idxs = scope.range(N)
    vh = VectorizeHelper(loss, expr_idxs, build=True)

    vloss = vh.v_expr
    print(vloss)

    full_output = as_apply([vloss,
        vh.idxs_by_label(),
        vh.vals_by_label()])

    new_vc = recursive_set_rng_kwarg(
            full_output,
            as_apply(np.random.RandomState(1)),
            )

    losses, idxs, vals = rec_eval(new_vc)
    print('losses', losses)
    print('idxs p0', idxs['p0'])
    print('vals p0', vals['p0'])
    print('idxs p1', idxs['p1'])
    print('vals p1', vals['p1'])
    p0dct = dict(list(zip(idxs['p0'], vals['p0'])))
    p1dct = dict(list(zip(idxs['p1'], vals['p1'])))
    for ii, li in enumerate(losses):
        print(ii, li)
        if p1dct[ii] != 0:
            assert li == p0dct[ii] ** 2
        else:
            assert li == 1
Code Example #5
File: test_vectorize.py  Project: LvdKnaap/BinPacking
def test_vectorize_multipath():
    N = as_apply(15)

    p0 = hp_uniform("p0", 0, 1)
    loss = hp_choice("p1", [1, p0, -p0])**2
    expr_idxs = scope.range(N)
    vh = VectorizeHelper(loss, expr_idxs, build=True)

    vloss = vh.v_expr
    print(vloss)

    full_output = as_apply([vloss, vh.idxs_by_label(), vh.vals_by_label()])

    new_vc = recursive_set_rng_kwarg(full_output,
                                     as_apply(np.random.RandomState(1)))

    losses, idxs, vals = rec_eval(new_vc)
    print("losses", losses)
    print("idxs p0", idxs["p0"])
    print("vals p0", vals["p0"])
    print("idxs p1", idxs["p1"])
    print("vals p1", vals["p1"])
    p0dct = dict(list(zip(idxs["p0"], vals["p0"])))
    p1dct = dict(list(zip(idxs["p1"], vals["p1"])))
    for ii, li in enumerate(losses):
        print(ii, li)
        if p1dct[ii] != 0:
            assert li == p0dct[ii]**2
        else:
            assert li == 1
Code Example #6
File: slm.py  Project: xiaoxiao19/hyperopt-convnet
def pipeline_extension(prefix, X, n_patches, max_filters):
    assert max_filters > 16
    f_layer = new_fbncc_layer(prefix, X, n_patches,
            n_filters=s_int(
                hp_qloguniform('%sfb_nfilters' % prefix,
                    np.log(8.01), np.log(max_filters), q=16)),
            size=rfilter_size('%sfb_size' % prefix, 3, 8),
            )

    p_layer = partial(slm_lpool,
            stride=hp_choice('%sp_stride' % prefix, [1, 2]),
            order=hp_choice('%sp_order' % prefix,
                [1, 2, hp_lognormal('%sp_order_real' % prefix,
                    mu=np.log(1), sigma=np.log(3))]),
            ker_size=rfilter_size('%sp_size' % prefix, 2, 8))

    return [f_layer, p_layer]
Code Example #7
File: slm.py  Project: xiaoxiao19/hyperopt-convnet
def exit_lpool_alpha(pipeline, layer_num, Xcm, n_patches, max_n_features):
    def lab(msg):
        return 'l%ielpa_%s' % (layer_num, msg)

    fsize = rfilter_size(lab('fsize'), 3, 8)
    filtering_res = pyll_getattr(Xcm, 'shape')[2] - fsize + 1
    # -- N.B. Xrows depends on other params, so we can't use it to set the
    #         upper bound on lpsize. We can only sample independently, and
    #         then fail below with non-positive number of features.
    size = rfilter_size(lab('lpsize'), 1, 5)
    stride = hp_choice(lab('stride'), [1, 2, 3])
    res = scope.ceildiv(scope.max(filtering_res - size + 1, 0), stride)
    if 0:
        # XXX: This is a smarter way to pick the n_filters, but it triggers
        # a bug in hyperopt.vectorize_helper.  The build_idxs_vals function
        # there needs to be smarter -- to recognize when wanted_idxs is a
        # necessarily subset of the all_idxs, and then not to append
        # wanted_idxs to the union defining all_idxs... because that creates a
        # cycle.  The trouble is specifically that lpool_res is used in the
        # switch statement below both in the condition and the response.
        nfilters = switch(res > 0,
            max_n_features // (2 * (res ** 2)),
            scope.Raise(ValueError, 'Non-positive number of features'))
    else:
        # this is less good because it risks dividing by zero,
        # and forces the bandit to catch weirder errors from new_fbncc_layer
        # caused by negative nfilters
        nfilters = max_n_features // (2 * (res ** 2))

    filtering = new_fbncc_layer(
            prefix='l%iel' % layer_num,
            Xcm=Xcm,
            n_patches=n_patches,
            n_filters=nfilters,
            size=fsize,
            )

    pooling = partial(slm_lpool_alpha,
            ker_size=size,
            stride=stride,
            alpha=hp_normal(lab('alpha'), 0.0, 1.0),
            order=hp_choice(lab('order_choice'), [
                1.0, 2.0, logu_range(lab('order_real'), .1, 10.)]))

    return new_exit(pipeline + [filtering, pooling], lab('%s'))
Code Example #8
File: slm.py  Project: xiaoxiao19/hyperopt-convnet
def exit_lpool(pipeline, layer_num, Xcm, n_patches, max_n_features):
    def lab(msg):
        return 'l%i_out_lp_%s' % (layer_num, msg)

    fsize = rfilter_size(lab('fsize'), 3, 8)
    filtering_res = pyll_getattr(Xcm, 'shape')[2] - fsize + 1
    # -- N.B. Xrows depends on other params, so we can't use it to set the
    #         upper bound on lpsize. We can only sample independently, and
    #         then fail below with non-positive number of features.
    psize = rfilter_size(lab('psize'), 1, 5)
    stride = hp_choice(lab('stride'), [1, 2, 3])
    pooling_res = scope.ceildiv(filtering_res - psize + 1, stride)
    nsize = rfilter_size(lab('nsize'), 1, 5)
    norm_res = pooling_res - nsize + 1

    # -- raises exception at rec_eval if norm_res is 0
    nfilters = max_n_features // (scope.max(norm_res, 0) ** 2)

    filtering = new_fbncc_layer(
            prefix='l%ielp' % layer_num,
            Xcm=Xcm,
            n_patches=n_patches,
            n_filters=nfilters,
            size=fsize,
            )

    pooling = partial(slm_lpool,
            ker_size=psize,
            stride=stride,
            order=hp_choice(lab('order_choice'), [
                1.0, 2.0, logu_range(lab('order_real'), .1, 10.)]))

    normalization = partial(slm_lnorm,
            ker_size=nsize,
            remove_mean=hp_TF(lab('norm_rmean')),
            threshold=hp_lognormal(lab('norm_thresh'),
                np.log(1.0), np.log(3)),
            )

    seq = hp_choice(lab('use_norm'), [
            [filtering, pooling],
            [filtering, pooling, normalization]])

    return new_exit(pipeline + seq, lab('%s'))
Code Example #9
File: test_base.py  Project: CnrLwlss/hyperopt
def test7(self):
    p0 = hp_uniform('p0', 0, 1)
    p1 = hp_normal('p1', 0, 1)
    p2 = hp_choice('p2', [1, p0])
    p3 = hp_choice('p3', [2, p1, p2, hp_uniform('a0', 2, 3)])
    self.expr = {'loss': p0 + p1 + p2 + p3}
    self.n_randints = 2
    self.wanted = [
            [
                ('p0', [0], [0.71295532052322719]),
                ('p1', [0], [0.28297849805199204]),
                ('p2.randint', [0], [0]),
                ('p3.arg:2', [0], [2.719468969785563]),
                ('p3.randint', [0], [2])],
            [
                ('p0', [1], [0.78002776191207912]),
                ('p1', [1], [-1.506294713918092]),
                ('p2.randint', [1], [1]),
                ('p3.arg:2', [], []),
                ('p3.randint', [1], [1])],
            [
                ('p0', [2], [0.57969429702261011]),
                ('p1', [2], [1.6796003743035337]),
                ('p2.randint', [2], [0]),
                ('p3.arg:2', [], []),
                ('p3.randint', [2], [1])],
            [
                ('p0', [3], [0.43857224467962441]),
                ('p1', [3], [-1.3058031267484451]),
                ('p2.randint', [3], [1]),
                ('p3.arg:2', [], []),
                ('p3.randint', [3], [1])],
            [
                ('p0', [4], [0.39804425533043142]),
                ('p1', [4], [-0.91948540682140967]),
                ('p2.randint', [4], [0]),
                ('p3.arg:2', [], []),
                ('p3.randint', [4], [0])]]
    self.foo()
Code Example #10
File: test_tpe.py  Project: ardila/hyperopt
def many_dists():
    a = hp_choice('a', [0, 1, 2])
    b = hp_randint('b', 10)
    c = hp_uniform('c', 4, 7)
    d = hp_loguniform('d', -2, 0)
    e = hp_quniform('e', 0, 10, 3)
    f = hp_qloguniform('f', 0, 3, 2)
    g = hp_normal('g', 4, 7)
    h = hp_lognormal('h', -2, 2)
    i = hp_qnormal('i', 0, 10, 2)
    j = hp_qlognormal('j', 0, 2, 1)
    z = a + b + c + d + e + f + g + h + i + j
    return {'loss': scope.float(scope.log(1e-12 + z ** 2))}
Code Example #11
File: test_tpe.py  Project: wqren/hyperopt
def many_dists():
    a = hp_choice('a', [0, 1, 2])
    b = hp_randint('b', 10)
    c = hp_uniform('c', 4, 7)
    d = hp_loguniform('d', -2, 0)
    e = hp_quniform('e', 0, 10, 3)
    f = hp_qloguniform('f', 0, 3, 2)
    g = hp_normal('g', 4, 7)
    h = hp_lognormal('h', -2, 2)
    i = hp_qnormal('i', 0, 10, 2)
    j = hp_qlognormal('j', 0, 2, 1)
    z = a + b + c + d + e + f + g + h + i + j
    return {'loss': scope.float(scope.log(1e-12 + z**2))}
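many_dists builds a search space that exercises every hp_* distribution at once. A minimal sketch of actually optimizing a space like this with hyperopt's public API (the hp_* aliases in these tests map to hyperopt.hp.*; the reduced space and objective below are illustrative):

import numpy as np
from hyperopt import fmin, hp, tpe

space = {
    'a': hp.choice('a', [0, 1, 2]),
    'c': hp.uniform('c', 4, 7),
    'd': hp.loguniform('d', -2, 0),
    'g': hp.normal('g', 4, 7),
}

def objective(params):
    # Same style of loss as many_dists: log of a shifted squared sum.
    z = params['a'] + params['c'] + params['d'] + params['g']
    return float(np.log(1e-12 + z ** 2))

best = fmin(objective, space, algo=tpe.suggest, max_evals=50)
print(best)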
Code Example #12
File: test_vectorize.py  Project: goller/hyperopt
def test_vectorize_config0():
    p0 = hp_uniform('p0', 0, 1)
    p1 = hp_loguniform('p1', 2, 3)
    p2 = hp_choice('p2', [-1, p0])
    p3 = hp_choice('p3', [-2, p1])
    p4 = 1
    p5 = [3, 4, p0]
    p6 = hp_choice('p6', [-3, p1])
    d = locals()
    d['p1'] = None  # -- don't sample p1 all the time, only if p3 says so
    config = as_apply(d)

    N = as_apply('N:TBA')
    expr = config
    expr_idxs = scope.range(N)
    vh = VectorizeHelper(expr, expr_idxs, build=True)
    vconfig = vh.v_expr

    full_output = as_apply([vconfig, vh.idxs_by_label(), vh.vals_by_label()])

    if 1:
        print('=' * 80)
        print('VECTORIZED')
        print(full_output)
        print('\n' * 1)

    fo2 = replace_repeat_stochastic(full_output)
    if 0:
        print('=' * 80)
        print('VECTORIZED STOCHASTIC')
        print(fo2)
        print('\n' * 1)

    new_vc = recursive_set_rng_kwarg(fo2, as_apply(np.random.RandomState(1)))
    if 0:
        print('=' * 80)
        print('VECTORIZED STOCHASTIC WITH RNGS')
        print(new_vc)

    Nval = 10
    foo, idxs, vals = rec_eval(new_vc, memo={N: Nval})

    print('foo[0]', foo[0])
    print('foo[1]', foo[1])
    assert len(foo) == Nval
    if 0:  # XXX refresh these values to lock down sampler
        assert foo[0] == {
            'p0': 0.39676747423066994,
            'p1': None,
            'p2': 0.39676747423066994,
            'p3': 2.1281244479293568,
            'p4': 1,
            'p5': (3, 4, 0.39676747423066994)
        }
    assert foo[1] != foo[2]

    print(idxs)
    print(vals['p3'])
    print(vals['p6'])
    print(idxs['p1'])
    print(vals['p1'])
    assert len(vals['p3']) == Nval
    assert len(vals['p6']) == Nval
    assert len(idxs['p1']) < Nval
    p1d = dict(zip(idxs['p1'], vals['p1']))
    for ii, (p3v, p6v) in enumerate(zip(vals['p3'], vals['p6'])):
        if p3v == p6v == 0:
            assert ii not in idxs['p1']
        if p3v:
            assert foo[ii]['p3'] == p1d[ii]
        if p6v:
            print('p6', foo[ii]['p6'], p1d[ii])
            assert foo[ii]['p6'] == p1d[ii]
Code Example #13

num_filters1 = scope.int(hp_qloguniform('num_filters1', np.log(16), np.log(96), q=16))
filter1_size = scope.int(hp_quniform('filter1_shape', 2, 12, 1))

num_filters2 = scope.int(hp_qloguniform('num_filters2', np.log(16), np.log(96), q=16))
filter2_size = scope.int(hp_quniform('filter2_shape', 2, 12, 1))

num_filters3 = scope.int(hp_qloguniform('num_filters3', np.log(16), np.log(96), q=16))
filter3_size = scope.int(hp_quniform('filter3_shape', 2, 7, 1))

num_filters4 = scope.int(hp_qloguniform('num_filters4', np.log(16), np.log(64), q=16))
filter4_size = scope.int(hp_quniform('filter4_shape', 2, 7, 1))

pool1_sizex = scope.int(hp_quniform('pool1_sizex', 2, 5, 1))
pool1_type = hp_choice('pool1_type', ['max', 'avg', hp_uniform('pool_order_1', 0.4, 12)])

pool2_sizex = scope.int(hp_quniform('pool2_sizex', 2, 5, 1))
pool2_type = hp_choice('pool2_type', ['max', 'avg', hp_uniform('pool_order_2', 0.4, 12)])

rnorm1_size = scope.int(hp_quniform('rnorm1_size', 5, 12, 1))
rnorm2_size = scope.int(hp_quniform('rnorm2_size', 5, 12, 1))


layer_def_template = OrderedDict([('data', OrderedDict([('type', 'data'),
                                   ('dataidx', '0')])),
             ('labels', OrderedDict([('type', 'data'),
                                     ('dataidx', '1')])),
             ('conv1', OrderedDict([('type', 'conv'),
                                    ('inputs', 'data'),
                                    ('channels', '3'),
Code Example #14

num_filters1 = scope.int(hp_qloguniform('num_filters1', np.log(16), np.log(96), q=16))
filter1_size = scope.int(hp_quniform('filter1_shape', 2, 12, 1))

num_filters2 = scope.int(hp_qloguniform('num_filters2', np.log(16), np.log(96), q=16))
filter2_size = scope.int(hp_quniform('filter2_shape', 2, 12, 1))

num_filters3 = scope.int(hp_qloguniform('num_filters3', np.log(16), np.log(96), q=16))
filter3_size = scope.int(hp_quniform('filter3_shape', 2, 9, 1))

num_filters4 = scope.int(hp_qloguniform('num_filters4', np.log(16), np.log(64), q=16))
filter4_size = scope.int(hp_quniform('filter4_shape', 2, 9, 1))

pool1_sizex = scope.int(hp_quniform('pool1_sizex', 2, 5, 1))
pool1_type = hp_choice('pool1_type', ['max', 'avg', hp_uniform('pool_order_1', 1, 12)])

pool2_sizex = scope.int(hp_quniform('pool2_sizex', 2, 5, 1))
pool2_type = hp_choice('pool2_type', ['max', 'avg', hp_uniform('pool_order_2', 1, 4)])

rnorm1_size = scope.int(hp_quniform('rnorm1_size', 5, 12, 1))
rnorm2_size = scope.int(hp_quniform('rnorm2_size', 5, 12, 1))


layer_def_template = OrderedDict([('data', OrderedDict([('type', 'data'),
                                   ('dataidx', 0)])),
             ('labels', OrderedDict([('type', 'data'),
                                     ('dataidx', 1)])),
             ('conv1', OrderedDict([('type', 'conv'),
                                    ('inputs', 'data'),
                                    ('channels', 3),
Code Example #15
File: slm.py  Project: xiaoxiao19/hyperopt-convnet
def get_rseed(name, N):
    fullname = lab(name)
    low = stable_hash(fullname) % (2 ** 31)
    rval = hp_choice(fullname, range(low, low + N))
    return rval
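The helper above derives a deterministic window of seeds from the parameter label, so re-running the search re-creates the same candidate seeds while hyperopt still gets N distinct options to choose from. A sketch of the same idea using zlib.crc32 in place of the project's stable_hash helper (the substitution is illustrative):

import zlib
from hyperopt import hp

def stable_seed_choice(fullname, N):
    # Hash the label to a deterministic base seed, then let the
    # search pick one of N nearby seeds.
    low = zlib.crc32(fullname.encode('utf8')) % (2 ** 31)
    return hp.choice(fullname, list(range(low, low + N)))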
Code Example #16
File: slm.py  Project: xiaoxiao19/hyperopt-convnet
def new_fbncc_layer(prefix, Xcm, n_patches, n_filters, size,
                   memlimit=5e8, # -- limit patches array to 500MB
                   ):
    def lab(msg):
        return '%s_fbncc_%s' % (prefix, msg)

    def get_rseed(name, N):
        fullname = lab(name)
        low = stable_hash(fullname) % (2 ** 31)
        rval = hp_choice(fullname, range(low, low + N))
        return rval

    patches = random_patches(
        Xcm, n_patches, size, size,
        rng=np_RandomState(get_rseed('patch_rseed', 10)),
        channel_major=True,
        memlimit=memlimit)

    remove_mean = hp_TF(lab('remove_mean'))
    beta = hp_lognormal(lab('beta'), np.log(100), np.log(100))
    hard_beta = hp_TF(lab('hard'))

    # TODO: use different nfilters, beta etc. for each algo

    # -- random projections filterbank allocation
    random_projections = partial(slm_fbncc_chmaj,
        m_fb=slm_uniform_M_FB(
            nfilters=n_filters,
            size=size,
            channels=pyll_getattr(Xcm, 'shape')[1],
            rseed=get_rseed('r_rseed', 10),
            normalize=hp_TF(lab('r_normalize')),
            dtype='float32',
            ret_cmajor=True,
            ),
        remove_mean=remove_mean,
        beta=beta,
        hard_beta=hard_beta)

    # -- random whitened projections filterbank allocation
    random_whitened_projections = partial(slm_fbncc_chmaj,
            m_fb=fb_whitened_projections(patches,
                patch_whitening_filterbank_X(patches,
                    gamma=hp_lognormal(lab('wr_gamma'),
                                       np.log(1e-2), np.log(100)),
                    o_ndim=2,
                    remove_mean=remove_mean,
                    beta=beta,
                    hard_beta=hard_beta,
                    ),
                n_filters=n_filters,
                rseed=get_rseed('wr_rseed', 10),
                dtype='float32',
                ),
            remove_mean=remove_mean,
            beta=beta,
            hard_beta=hard_beta)

    # -- whitened patches filterbank allocation
    whitened_patches = partial(slm_fbncc_chmaj,
            m_fb=fb_whitened_patches(patches,
                patch_whitening_filterbank_X(patches,
                    gamma=hp_lognormal(lab('wp_gamma'),
                                       np.log(1e-2), np.log(100)),
                    o_ndim=2,
                    remove_mean=remove_mean,
                    beta=beta,
                    hard_beta=hard_beta,
                    ),
                n_filters=n_filters,
                rseed=get_rseed('wp_rseed', 10),
                dtype='float32',
                ),
            remove_mean=remove_mean,
            beta=beta,
            hard_beta=hard_beta)

    # --> MORE FB LEARNING ALGOS HERE <--
    # TODO: V1-like filterbank (incl. with whitening matrix)
    # TODO: sparse coding
    # TODO: OMP from Coates 2011
    # TODO: K-means
    # TODO: RBM
    # TODO: DAA
    # TODO: ssRBM
    rchoice = hp_choice(lab('algo'), [
        random_projections,
        random_whitened_projections,
        whitened_patches,
        ])
    return rchoice
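The closing hp_choice over partially-applied constructors is the key pattern here: the search selects a filterbank algorithm while shared hyperparameters (remove_mean, beta, hard_beta) appear in every branch. In the source, partial is presumably pyll's symbolic partial, so branch arguments stay inside the search graph; a minimal sketch of the choose-an-algorithm pattern using plain functools.partial with constant arguments (toy functions, for illustration only):

from functools import partial

import numpy as np
from hyperopt import hp
from hyperopt.pyll.stochastic import sample

def scale(x, gain):
    return gain * x

def shift(x, offset):
    return x + offset

# The search space ranges over candidate callables.
algo = hp.choice('algo', [partial(scale, gain=2.0),
                          partial(shift, offset=1.0)])
f = sample(algo, rng=np.random.RandomState(0))  # draws one of the two partials
print(f(10))  # 20 or 11, depending on the draw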
Code Example #17

num_filters1 = scope.int(hp_qloguniform('num_filters1', np.log(16), np.log(96), q=16))
filter1_size = scope.int(hp_quniform('filter1_shape', 2, 12, 1))

num_filters2 = scope.int(hp_qloguniform('num_filters2', np.log(16), np.log(96), q=16))
filter2_size = scope.int(hp_quniform('filter2_shape', 2, 12, 1))

num_filters3 = scope.int(hp_qloguniform('num_filters3', np.log(16), np.log(96), q=16))
filter3_size = scope.int(hp_quniform('filter3_shape', 2, 9, 1))

num_filters4 = scope.int(hp_qloguniform('num_filters4', np.log(16), np.log(64), q=16))
filter4_size = scope.int(hp_quniform('filter4_shape', 2, 9, 1))

pool1_sizex = scope.int(hp_quniform('pool1_sizex', 2, 5, 1))
pool1_type = hp_choice('pool1_type', ['max', 'avg', hp_uniform('pool_order_1', 1, 12)])

pool2_sizex = scope.int(hp_quniform('pool2_sizex', 2, 5, 1))
pool2_type = hp_choice('pool2_type', ['max', 'avg', hp_uniform('pool_order_2', 1, 4)])

pool3_sizex = scope.int(hp_quniform('pool3_sizex', 2, 5, 1))
pool3_type = hp_choice('pool3_type', ['max', 'avg', hp_uniform('pool_order_3', 1, 4)])

rnorm1_size = scope.int(hp_quniform('rnorm1_size', 5, 12, 1))
rnorm2_size = scope.int(hp_quniform('rnorm2_size', 5, 12, 1))
rnorm3_size = scope.int(hp_quniform('rnorm3_size', 5, 12, 1))


layer_def_template = OrderedDict([('data', OrderedDict([('type', 'data'),
                                   ('dataidx', 0)])),
             ('labels', OrderedDict([('type', 'data'),
Code Example #18
File: slm.py  Project: xiaoxiao19/hyperopt-convnet
def uslm_domain(Xcm,
        batchsize,
        chmjr_image_shape,
        output_sizes,
        n_patches=50000,
        max_n_features=16000,
        max_layer_sizes=(64, 128),
        batched_lmap_speed_thresh=None,
        permit_affine_warp=True,
        abort_on_rows_larger_than=None,
        ):
    """
    This function works by creating a linear pipeline, with multiple exit
    points that could be the feature representation for classification.

    The function returns a switch among all of these exit points.
    """
    start_time = time.time()

    XC, XH, XW = chmjr_image_shape
    osize = hp_choice('warp_osize', output_sizes)

    assert XW > 3, chmjr_image_shape  # -- make sure we don't screw up channel-major

    warp_options = [
        # -- option 1: simple resize
        partial(slm_affine_image_warp,
            rot=0,
            shear=0,
            scale=[s_float(osize) / XH, s_float(osize) / XW],
            trans=[0, 0],
            oshape=[osize, osize]),
        ]
    if permit_affine_warp:
        # -- option 2: resize with rotation, shear, translation
        warp_options.append(
            partial(slm_affine_image_warp,
                rot=hp_uniform('warp_rot', low=-0.3, high=0.3),
                shear=hp_uniform('warp_shear', low=-0.3, high=0.3),
                # -- most of the scaling comes via osize
                scale=[
                    hp_uniform('warp_scale_h', low=0.8, high=1.2) * osize / XH,
                    hp_uniform('warp_scale_v', low=0.8, high=1.2) * osize / XW,
                    ],
                trans=[
                    hp_uniform('warp_trans_h', low=-0.2, high=0.2) * osize,
                    hp_uniform('warp_trans_v', low=-0.2, high=0.2) * osize,
                    ],
                oshape=[osize, osize]
                ))
    pipeline = [slm_img_uint8_to_float32,
                hp_choice('warp', warp_options)]
    Xcm = pyll_theano_batched_lmap(
        partial(callpipe1, pipeline),
        Xcm,
        batchsize=batchsize,
        print_progress_every=10,
        speed_thresh=batched_lmap_speed_thresh,
        abort_on_rows_larger_than=abort_on_rows_larger_than,
        x_dtype='uint8',
        )[:]

    exits = pipeline_exits(
                pipeline,
                layer_num=0,
                Xcm=Xcm,
                n_patches=n_patches,
                max_n_features=max_n_features)
    for layer_i, max_layer_size in enumerate(max_layer_sizes):
        extension = pipeline_extension(
                'l%i' % layer_i, Xcm, n_patches, max_layer_size)

        pipeline.extend(extension)
        Xcm = pyll_theano_batched_lmap(
                partial(callpipe1, extension),
                Xcm,  # scope.print_ndarray_summary('Xcm %i' % layer_i, Xcm),
                batchsize=batchsize,
                print_progress_every=10,
                speed_thresh=batched_lmap_speed_thresh,
                abort_on_rows_larger_than=abort_on_rows_larger_than,
                )[:]
        # -- indexing computes all the values (during rec_eval)
        exits.extend(
                pipeline_exits(
                    pipeline=pipeline,
                    layer_num=layer_i + 1,
                    Xcm=Xcm,
                    n_patches=n_patches,
                    max_n_features=max_n_features))

    return hp_choice("exit", exits)
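uslm_domain follows its docstring literally: it extends one linear pipeline layer by layer, collects a candidate exit (feature representation) after each stage, and returns a single hp_choice over all exits. A minimal sketch of that multi-exit pattern with placeholder layers (hypothetical, for illustration):

from hyperopt import hp

def multi_exit_space(layers):
    pipeline, exits = [], []
    for layer in layers:
        pipeline.append(layer)
        exits.append(list(pipeline))  # snapshot: exit after this layer
    return hp.choice('exit', exits)

space = multi_exit_space(['layer0', 'layer1', 'layer2'])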
Code Example #19
filter3_size = scope.int(hp_quniform('filter3_shape', 3, 6, 1))

num_filters4 = scope.int(hp_quniform('num_filters4', 64, 400, 16))
filter4_size = scope.int(hp_quniform('filter4_shape', 3, 5, 1))

num_filters5 = scope.int(hp_quniform('num_filters5', 64, 400, 16))
filter5_size = scope.int(hp_quniform('filter5_shape', 2, 4, 1))

num_filters6 = scope.int(hp_quniform('num_filters6', 64, 400, 16))
filter6_size = scope.int(hp_quniform('filter6_shape', 2, 4, 1))

num_filters7 = scope.int(hp_quniform('num_filters7', 64, 400, 16))
filter7_size = scope.int(hp_quniform('filter7_shape', 2, 3, 1))

pool1_sizex = scope.int(hp_quniform('pool1_sizex', 2, 4, 1))
pool1_type = hp_choice('pool1_type', ['max', 'avg', hp_uniform('pool_order_1', 1, 4)])

pool2_sizex = scope.int(hp_quniform('pool2_sizex', 2, 4, 1))
pool2_type = hp_choice('pool2_type', ['max', 'avg', hp_uniform('pool_order_2', 1, 4)])

pool3_sizex = scope.int(hp_quniform('pool3_sizex', 2, 4, 1))
pool3_type = hp_choice('pool3_type', ['max', 'avg', hp_uniform('pool_order_3', 1, 4)])

pool4_sizex = scope.int(hp_quniform('pool4_sizex', 2, 4, 1))
pool4_type = hp_choice('pool4_type', ['max', 'avg', hp_uniform('pool_order_4', 1, 4)])

rnorm1_size = scope.int(hp_quniform('rnorm1_size', 3, 6, 1))
rnorm2_size = scope.int(hp_quniform('rnorm2_size', 3, 6, 1))
rnorm3_size = scope.int(hp_quniform('rnorm3_size', 3, 6, 1))
rnorm4_size = scope.int(hp_quniform('rnorm4_size', 3, 6, 1))
rnorm5_size = scope.int(hp_quniform('rnorm5_size', 3, 6, 1))
Code Example #20
filter1_size = scope.int(hp_quniform('filter1_shape', 5, 12, 1))

num_filters2 = scope.int(hp_quniform('num_filters2', 64, 400, 16))
filter2_size = scope.int(hp_quniform('filter2_shape', 4, 7, 1))

num_filters3 = scope.int(hp_quniform('num_filters3', 64, 400, 16))
filter3_size = scope.int(hp_quniform('filter3_shape', 3, 5, 1))

num_filters4 = scope.int(hp_quniform('num_filters4', 64, 400, 16))
filter4_size = scope.int(hp_quniform('filter4_shape', 3, 4, 1))

num_filters5 = scope.int(hp_quniform('num_filters5', 64, 400, 16))
filter5_size = scope.int(hp_quniform('filter5_shape', 2, 3, 1))

pool1_sizex = scope.int(hp_quniform('pool1_sizex', 2, 4, 1))
pool1_type = hp_choice('pool1_type', ['max', 'avg', hp_uniform('pool_order_1', 1, 4)])

pool2_sizex = scope.int(hp_quniform('pool2_sizex', 2, 4, 1))
pool2_type = hp_choice('pool2_type', ['max', 'avg', hp_uniform('pool_order_2', 1, 4)])

pool3_sizex = scope.int(hp_quniform('pool3_sizex', 2, 4, 1))
pool3_type = hp_choice('pool3_type', ['max', 'avg', hp_uniform('pool_order_3', 1, 4)])

rnorm1_size = scope.int(hp_quniform('rnorm1_size', 4, 6, 1))
rnorm2_size = scope.int(hp_quniform('rnorm2_size', 4, 6, 1))
rnorm3_size = scope.int(hp_quniform('rnorm3_size', 4, 6, 1))
rnorm4_size = scope.int(hp_quniform('rnorm4_size', 4, 6, 1))
rnorm5_size = scope.int(hp_quniform('rnorm5_size', 4, 6, 1))

layer_def_template = OrderedDict([('data', OrderedDict([('type', 'data'),
                                   ('dataidx', 0)])),
Code Example #21

num_filters1 = scope.int(hp_qloguniform('num_filters1', np.log(16), np.log(96), q=16))
filter1_size = scope.int(hp_quniform('filter1_shape', 2, 12, 1))

num_filters2 = scope.int(hp_qloguniform('num_filters2', np.log(16), np.log(96), q=16))
filter2_size = scope.int(hp_quniform('filter2_shape', 2, 12, 1))

num_filters3 = scope.int(hp_qloguniform('num_filters3', np.log(16), np.log(96), q=16))
filter3_size = scope.int(hp_quniform('filter3_shape', 2, 7, 1))

num_filters4 = scope.int(hp_qloguniform('num_filters4', np.log(16), np.log(64), q=16))
filter4_size = scope.int(hp_quniform('filter4_shape', 2, 7, 1))

pool1_sizex = scope.int(hp_quniform('pool1_sizex', 2, 5, 1))
pool1_type = hp_choice('pool1_type', ['max', 'avg'])

pool2_sizex = scope.int(hp_quniform('pool2_sizex', 2, 5, 1))
pool2_type = hp_choice('pool2_type', ['max', 'avg'])

rnorm1_size = scope.int(hp_quniform('rnorm1_size', 5, 12, 1))
rnorm2_size = scope.int(hp_quniform('rnorm2_size', 5, 12, 1))


layer_def_template = OrderedDict([('data', OrderedDict([('type', 'data'),
                                   ('dataidx', '0')])),
             ('labels', OrderedDict([('type', 'data'),
                                     ('dataidx', '1')])),
             ('conv1', OrderedDict([('type', 'conv'),
                                    ('inputs', 'data'),
                                    ('channels', '3'),
Code Example #22
File: slm.py  Project: xiaoxiao19/hyperopt-convnet
def hp_TF(label):
    return hp_choice(label, [0, 1])
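hp_TF encodes a boolean hyperparameter as a choice between 0 and 1. The equivalent public-API form, with a quick illustrative draw:

import numpy as np
from hyperopt import hp
from hyperopt.pyll.stochastic import sample

use_norm = hp.choice('use_norm', [0, 1])  # same space as hp_TF('use_norm')
print(sample(use_norm, rng=np.random.RandomState(0)))  # prints 0 or 1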
Code Example #23
def test_vectorize_config0():
    p0 = hp_uniform('p0', 0, 1)
    p1 = hp_loguniform('p1', 2, 3)
    p2 = hp_choice('p2', [-1, p0])
    p3 = hp_choice('p3', [-2, p1])
    p4 = 1
    p5 = [3, 4, p0]
    p6 = hp_choice('p6', [-3, p1])
    d = locals()
    d['p1'] = None # -- don't sample p1 all the time, only if p3 says so
    config = as_apply(d)

    N = as_apply('N:TBA')
    expr = config
    expr_idxs = scope.range(N)
    vh = VectorizeHelper(expr, expr_idxs, build=True)
    vconfig = vh.v_expr

    full_output = as_apply([vconfig, vh.idxs_by_label(), vh.vals_by_label()])

    if 1:
        print('=' * 80)
        print('VECTORIZED')
        print(full_output)
        print('\n' * 1)

    fo2 = replace_repeat_stochastic(full_output)
    if 0:
        print('=' * 80)
        print('VECTORIZED STOCHASTIC')
        print(fo2)
        print('\n' * 1)

    new_vc = recursive_set_rng_kwarg(
            fo2,
            as_apply(np.random.RandomState(1))
            )
    if 0:
        print('=' * 80)
        print('VECTORIZED STOCHASTIC WITH RNGS')
        print(new_vc)

    Nval = 10
    foo, idxs, vals = rec_eval(new_vc, memo={N: Nval})

    print('foo[0]', foo[0])
    print('foo[1]', foo[1])
    assert len(foo) == Nval
    if 0:  # XXX refresh these values to lock down sampler
        assert foo[0] == {
            'p0': 0.39676747423066994,
            'p1': None,
            'p2': 0.39676747423066994,
            'p3': 2.1281244479293568,
            'p4': 1,
            'p5': (3, 4, 0.39676747423066994) }
    assert foo[1] != foo[2]

    print(idxs)
    print(vals['p3'])
    print(vals['p6'])
    print(idxs['p1'])
    print(vals['p1'])
    assert len(vals['p3']) == Nval
    assert len(vals['p6']) == Nval
    assert len(idxs['p1']) < Nval
    p1d = dict(list(zip(idxs['p1'], vals['p1'])))
    for ii, (p3v, p6v) in enumerate(zip(vals['p3'], vals['p6'])):
        if p3v == p6v == 0:
            assert ii not in idxs['p1']
        if p3v:
            assert foo[ii]['p3'] == p1d[ii]
        if p6v:
            print('p6', foo[ii]['p6'], p1d[ii])
            assert foo[ii]['p6'] == p1d[ii]
Code Example #24
File: test_vectorize.py  Project: LvdKnaap/BinPacking
def test_vectorize_config0():
    p0 = hp_uniform("p0", 0, 1)
    p1 = hp_loguniform("p1", 2, 3)
    p2 = hp_choice("p2", [-1, p0])
    p3 = hp_choice("p3", [-2, p1])
    p4 = 1
    p5 = [3, 4, p0]
    p6 = hp_choice("p6", [-3, p1])
    d = locals()
    d["p1"] = None  # -- don't sample p1 all the time, only if p3 says so
    config = as_apply(d)

    N = as_apply("N:TBA")
    expr = config
    expr_idxs = scope.range(N)
    vh = VectorizeHelper(expr, expr_idxs, build=True)
    vconfig = vh.v_expr

    full_output = as_apply([vconfig, vh.idxs_by_label(), vh.vals_by_label()])

    if 1:
        print("=" * 80)
        print("VECTORIZED")
        print(full_output)
        print("\n" * 1)

    fo2 = replace_repeat_stochastic(full_output)
    if 0:
        print("=" * 80)
        print("VECTORIZED STOCHASTIC")
        print(fo2)
        print("\n" * 1)

    new_vc = recursive_set_rng_kwarg(fo2, as_apply(np.random.RandomState(1)))
    if 0:
        print("=" * 80)
        print("VECTORIZED STOCHASTIC WITH RNGS")
        print(new_vc)

    Nval = 10
    foo, idxs, vals = rec_eval(new_vc, memo={N: Nval})

    print("foo[0]", foo[0])
    print("foo[1]", foo[1])
    assert len(foo) == Nval
    if 0:  # XXX refresh these values to lock down sampler
        assert foo[0] == {
            "p0": 0.39676747423066994,
            "p1": None,
            "p2": 0.39676747423066994,
            "p3": 2.1281244479293568,
            "p4": 1,
            "p5": (3, 4, 0.39676747423066994),
        }
    # -- dict_values objects never compare equal across distinct dicts, so
    #    the original values() comparison was vacuously true; compare directly
    assert foo[1] != foo[2]

    print(idxs)
    print(vals["p3"])
    print(vals["p6"])
    print(idxs["p1"])
    print(vals["p1"])
    assert len(vals["p3"]) == Nval
    assert len(vals["p6"]) == Nval
    assert len(idxs["p1"]) < Nval
    p1d = dict(list(zip(idxs["p1"], vals["p1"])))
    for ii, (p3v, p6v) in enumerate(zip(vals["p3"], vals["p6"])):
        if p3v == p6v == 0:
            assert ii not in idxs["p1"]
        if p3v:
            assert foo[ii]["p3"] == p1d[ii]
        if p6v:
            print("p6", foo[ii]["p6"], p1d[ii])
            assert foo[ii]["p6"] == p1d[ii]
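The core behavior this test locks down is conditional sampling: p1 lives inside branches of p3 and p6, so it is drawn only on trials where one of those choices selects it, which is why len(idxs['p1']) < Nval. An illustrative stand-alone check of the same behavior (assuming hyperopt's public API):

import numpy as np
from hyperopt import hp
from hyperopt.pyll.stochastic import sample

# p1 sits inside one branch of an hp.choice, so it is only sampled
# on draws where that branch is taken.
space = hp.choice('p3', [-2, hp.loguniform('p1', 2, 3)])
rng = np.random.RandomState(1)
draws = [sample(space, rng=rng) for _ in range(10)]
print(sum(d != -2 for d in draws), 'of', len(draws), 'draws sampled p1')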