def new_exit(pipeline, prefix):
    """Attach exit-stage hyperparameters to *pipeline*.

    Parameters
    ----------
    pipeline : pyll expression for the feature-extraction pipeline.
    prefix : a %-format template (e.g. ``'l3_out_lp_%s'``) used to
        namespace the hyperparameter labels created here.

    Returns a dict with the pipeline plus four exit hyperparameters:
    a remove-std0 flag, a variance threshold, an l2 regularization
    strength, and a divide-by-row-l2 flag.
    """
    return {
        'pipe': pipeline,
        'remove_std0': hp_TF(prefix % 'remove_std0'),
        'varthresh': hp_lognormal(prefix % 'varthresh',
                                  np.log(1e-4), np.log(1000)),
        'l2_reg': hp_lognormal(prefix % 'l2_reg',
                               np.log(1e-5), np.log(1e3)),
        'divrowl2': hp_TF(prefix % 'divrowl2'),
    }
def many_dists():
    """Build a search space that exercises every hyperopt distribution.

    One node of each ``hp_*`` family is created, all are summed, and the
    'loss' is a smooth positive function of that sum.
    """
    terms = [
        hp_choice('a', [0, 1, 2]),
        hp_randint('b', 10),
        hp_uniform('c', 4, 7),
        hp_loguniform('d', -2, 0),
        hp_quniform('e', 0, 10, 3),
        hp_qloguniform('f', 0, 3, 2),
        hp_normal('g', 4, 7),
        hp_lognormal('h', -2, 2),
        hp_qnormal('i', 0, 10, 2),
        hp_qlognormal('j', 0, 2, 1),
    ]
    # left-associative fold, identical to a + b + ... + j
    z = terms[0]
    for term in terms[1:]:
        z = z + term
    return {'loss': scope.float(scope.log(1e-12 + z ** 2))}
def many_dists():
    # NOTE(review): this is a byte-for-byte duplicate of the `many_dists`
    # defined above; at import time this later definition shadows the
    # earlier one. Consider deleting one of the two.
    """Build a search space with one node of each hyperopt distribution.

    The 'loss' is log(1e-12 + z**2) of the sum of all ten draws, so it is
    finite and defined for any sampled configuration.
    """
    a=hp_choice('a', [0, 1, 2])
    b=hp_randint('b', 10)
    c=hp_uniform('c', 4, 7)
    d=hp_loguniform('d', -2, 0)
    e=hp_quniform('e', 0, 10, 3)
    f=hp_qloguniform('f', 0, 3, 2)
    g=hp_normal('g', 4, 7)
    h=hp_lognormal('h', -2, 2)
    i=hp_qnormal('i', 0, 10, 2)
    j=hp_qlognormal('j', 0, 2, 1)
    z = a + b + c + d + e + f + g + h + i + j
    return {'loss': scope.float(scope.log(1e-12 + z ** 2))}
def pipeline_extension(prefix, X, n_patches, max_filters):
    """Return a two-layer pipeline extension: [fbncc filtering, lpool].

    Parameters
    ----------
    prefix : string used as a %-prefix for all hyperparameter labels.
    X : pyll expression for the input images/features.
    n_patches : number of patches available for filterbank learning.
    max_filters : upper bound on the filter count (must exceed 16,
        since filter counts are quantized in steps of 16).
    """
    assert max_filters > 16

    # -- number of filters: log-uniform over [8.01, max_filters], q=16
    n_filters = s_int(
        hp_qloguniform('%sfb_nfilters' % prefix,
                       np.log(8.01), np.log(max_filters), q=16))
    filter_layer = new_fbncc_layer(
        prefix, X, n_patches,
        n_filters=n_filters,
        size=rfilter_size('%sfb_size' % prefix, 3, 8),
    )

    # -- pooling order: 1, 2, or a continuous lognormal draw
    pool_order = hp_choice(
        '%sp_order' % prefix,
        [1, 2, hp_lognormal('%sp_order_real' % prefix,
                            mu=np.log(1), sigma=np.log(3))])
    pool_layer = partial(
        slm_lpool,
        stride=hp_choice('%sp_stride' % prefix, [1, 2]),
        order=pool_order,
        ker_size=rfilter_size('%sp_size' % prefix, 2, 8))

    return [filter_layer, pool_layer]
def exit_lpool(pipeline, layer_num, Xcm, n_patches, max_n_features):
    """Extend *pipeline* with a final filter/pool(/norm) stage and exit.

    Builds a pyll graph: fbncc filtering, lpool pooling, and an optional
    lnorm normalization (chosen by the 'use_norm' hyperparameter), then
    wraps the result with `new_exit`. The filter count is sized so the
    output feature count stays within `max_n_features`.

    Parameters
    ----------
    pipeline : list (pyll) of layers built so far.
    layer_num : int used to namespace hyperparameter labels.
    Xcm : pyll expression for channel-major input; its shape[2] is read
        to compute the filtering resolution.
    n_patches : number of patches for filterbank learning.
    max_n_features : budget on output features; divided by the squared
        output resolution to bound the filter count.
    """
    def lab(msg):
        # namespace every hyperparameter label under this exit layer
        return 'l%i_out_lp_%s' % (layer_num, msg)
    fsize = rfilter_size(lab('fsize'), 3, 8)
    # -- valid-mode filtering output resolution
    filtering_res = pyll_getattr(Xcm, 'shape')[2] - fsize + 1
    # -- N.B. Xrows depends on other params, so we can't use it to set the
    #    upper bound on lpsize. We can only sample independently, and
    #    then fail below with non-positive number of features.
    psize = rfilter_size(lab('psize'), 1, 5)
    stride = hp_choice(lab('stride'), [1, 2, 3])
    pooling_res = scope.ceildiv(filtering_res - psize + 1, stride)
    nsize = rfilter_size(lab('nsize'), 1, 5)
    norm_res = pooling_res - nsize + 1
    # -- raises exception at rec_eval if norm_res is 0
    #    (scope.max clamps negatives to 0, making the division fail)
    nfilters = max_n_features // (scope.max(norm_res, 0) ** 2)
    filtering = new_fbncc_layer(
        prefix='l%ielp' % layer_num,
        Xcm=Xcm,
        n_patches=n_patches,
        n_filters=nfilters,
        size=fsize,
    )
    pooling = partial(slm_lpool,
                      ker_size=psize,
                      stride=stride,
                      # -- pooling order: 1, 2, or a log-uniform real draw
                      order=hp_choice(lab('order_choice'), [
                          1.0, 2.0, logu_range(lab('order_real'), .1, 10.)]))
    normalization = partial(slm_lnorm,
                            ker_size=nsize,
                            remove_mean=hp_TF(lab('norm_rmean')),
                            threshold=hp_lognormal(lab('norm_thresh'),
                                                   np.log(1.0), np.log(3)),
                            )
    # -- normalization stage is itself a hyperparameter choice
    seq = hp_choice(lab('use_norm'), [
        [filtering, pooling],
        [filtering, pooling, normalization]])
    # -- lab('%s') yields the label template 'l<N>_out_lp_%s' for new_exit
    return new_exit(pipeline + seq, lab('%s'))
def new_fbncc_layer(prefix, Xcm, n_patches, n_filters, size,
                    memlimit=5e8,  # -- limit patches array to 500MB
                    ):
    """Build a pyll node for a filterbank-NCC layer, choosing the
    filterbank-learning algorithm as a hyperparameter.

    Three allocation strategies are constructed (random projections,
    random whitened projections, whitened patches) sharing the same
    remove_mean / beta / hard_beta hyperparameters, and an `hp_choice`
    selects among them.

    Parameters
    ----------
    prefix : string used to namespace hyperparameter labels.
    Xcm : pyll expression for channel-major input; shape[1] is read as
        the channel count.
    n_patches : number of training patches to extract.
    n_filters : number of filters in the bank (pyll expression or int).
    size : filter side length.
    memlimit : cap (bytes) on the extracted patches array.
    """
    def lab(msg):
        # namespace hyperparameter labels under this layer's prefix
        return '%s_fbncc_%s' % (prefix, msg)

    def get_rseed(name, N):
        # derive a deterministic, label-specific window of N candidate
        # seeds so different labels never share seed choices
        fullname = lab(name)
        low = stable_hash(fullname) % (2 ** 31)
        rval = hp_choice(fullname, range(low, low + N))
        return rval

    patches = random_patches(
        Xcm, n_patches, size, size,
        rng=np_RandomState(get_rseed('patch_rseed', 10)),
        channel_major=True,
        memlimit=memlimit)

    # -- hyperparameters shared by all three filterbank algorithms
    remove_mean = hp_TF(lab('remove_mean'))
    beta = hp_lognormal(lab('beta'), np.log(100), np.log(100))
    hard_beta = hp_TF(lab('hard'))

    # TODO: use different nfilters, beta etc. for each algo

    # -- random projections filterbank allocation
    random_projections = partial(slm_fbncc_chmaj,
            m_fb=slm_uniform_M_FB(
                nfilters=n_filters,
                size=size,
                channels=pyll_getattr(Xcm, 'shape')[1],
                rseed=get_rseed('r_rseed', 10),
                normalize=hp_TF(lab('r_normalize')),
                dtype='float32',
                ret_cmajor=True,
                ),
            remove_mean=remove_mean,
            beta=beta,
            hard_beta=hard_beta)

    # -- random whitened projections filterbank allocation
    random_whitened_projections = partial(slm_fbncc_chmaj,
            m_fb=fb_whitened_projections(patches,
                patch_whitening_filterbank_X(patches,
                    gamma=hp_lognormal(lab('wr_gamma'),
                                       np.log(1e-2), np.log(100)),
                    o_ndim=2,
                    remove_mean=remove_mean,
                    beta=beta,
                    hard_beta=hard_beta,
                    ),
                n_filters=n_filters,
                rseed=get_rseed('wr_rseed', 10),
                dtype='float32',
                ),
            remove_mean=remove_mean,
            beta=beta,
            hard_beta=hard_beta)

    # -- whitened patches filterbank allocation
    whitened_patches = partial(slm_fbncc_chmaj,
            m_fb=fb_whitened_patches(patches,
                patch_whitening_filterbank_X(patches,
                    gamma=hp_lognormal(lab('wp_gamma'),
                                       np.log(1e-2), np.log(100)),
                    o_ndim=2,
                    remove_mean=remove_mean,
                    beta=beta,
                    hard_beta=hard_beta,
                    ),
                n_filters=n_filters,
                rseed=get_rseed('wp_rseed', 10),
                dtype='float32',
                ),
            remove_mean=remove_mean,
            beta=beta,
            hard_beta=hard_beta)

    # --> MORE FB LEARNING ALGOS HERE <--
    # TODO: V1-like filterbank (incl. with whitening matrix)
    # TODO: sparse coding
    # TODO: OMP from Coates 2011
    # TODO: K-means
    # TODO: RBM
    # TODO: DAA
    # TODO: ssRBM

    rchoice = hp_choice(lab('algo'), [
        random_projections,
        random_whitened_projections,
        whitened_patches,
        ])
    return rchoice