def exit_grid(pipeline, layer_num, Xcm, n_patches, max_n_features):
    """Extend *pipeline* with a filterbank layer followed by quantized
    grid pooling, and register the result as a candidate exit point.

    The number of filters is chosen so that the pooled output stays
    within ``max_n_features`` (each filter yields ``2 * grid_res ** 2``
    features).  All hyperparameter labels are namespaced per layer via
    ``lab``.
    """
    def lab(msg):
        # e.g. layer 2 -> 'l2eg_fsize'
        return 'l%ieg_%s' % (layer_num, msg)

    filt_size = rfilter_size(lab('fsize'), 3, 8)
    resolution = hp_choice(lab('res'), [2, 3])
    feats_per_filter = 2 * (resolution ** 2)
    n_filters = max_n_features // feats_per_filter

    filtering = new_fbncc_layer(
        prefix='l%ieg' % layer_num,
        Xcm=Xcm,
        n_patches=n_patches,
        n_filters=n_filters,
        size=filt_size,
    )

    pooling = partial(
        slm_quantize_gridpool,
        alpha=hp_normal(lab('alpha'), 0.0, 1.0),
        use_mid=False,
        grid_res=resolution,
        order=hp_choice(lab('order'), [
            1.0, 2.0, logu_range(lab('order_real'), .1, 10.)]),
    )

    return new_exit(pipeline + [filtering, pooling], lab('%s'))
def test7(self):
    """Nested choices: p2 reuses p0, p3 reuses p1 and p2 plus a fresh
    uniform.  Checks the vectorized idxs/vals produced over 5 trials
    against a hard-coded expected table.
    """
    p0 = hp_uniform('p0', 0, 1)
    p1 = hp_normal('p1', 0, 1)
    p2 = hp_choice('p2', [1, p0])
    p3 = hp_choice('p3', [2, p1, p2, hp_uniform('a0', 2, 3)])
    self.expr = {'loss': p0 + p1 + p2 + p3}
    self.n_randints = 2
    # One inner list per trial; each entry is (label, idxs, vals).
    self.wanted = [
        [('p0', [0], [0.71295532052322719]),
         ('p1', [0], [0.28297849805199204]),
         ('p2.randint', [0], [0]),
         ('p3.arg:2', [0], [2.719468969785563]),
         ('p3.randint', [0], [2])],
        [('p0', [1], [0.78002776191207912]),
         ('p1', [1], [-1.506294713918092]),
         ('p2.randint', [1], [1]),
         ('p3.arg:2', [], []),
         ('p3.randint', [1], [1])],
        [('p0', [2], [0.57969429702261011]),
         ('p1', [2], [1.6796003743035337]),
         ('p2.randint', [2], [0]),
         ('p3.arg:2', [], []),
         ('p3.randint', [2], [1])],
        [('p0', [3], [0.43857224467962441]),
         ('p1', [3], [-1.3058031267484451]),
         ('p2.randint', [3], [1]),
         ('p3.arg:2', [], []),
         ('p3.randint', [3], [1])],
        [('p0', [4], [0.39804425533043142]),
         ('p1', [4], [-0.91948540682140967]),
         ('p2.randint', [4], [0]),
         ('p3.arg:2', [], []),
         ('p3.randint', [4], [0])],
    ]
    self.foo()
def many_dists():
    """Search space that exercises every hp_* distribution family.

    Returns a dict with a single scalar 'loss' node built from the sum
    of all sampled values, squashed through log(1e-12 + z**2).
    """
    total = (
        hp_choice('a', [0, 1, 2])
        + hp_randint('b', 10)
        + hp_uniform('c', 4, 7)
        + hp_loguniform('d', -2, 0)
        + hp_quniform('e', 0, 10, 3)
        + hp_qloguniform('f', 0, 3, 2)
        + hp_normal('g', 4, 7)
        + hp_lognormal('h', -2, 2)
        + hp_qnormal('i', 0, 10, 2)
        + hp_qlognormal('j', 0, 2, 1)
    )
    return {'loss': scope.float(scope.log(1e-12 + total ** 2))}
def many_dists():
    """Build a pyll expression covering each hyperopt distribution.

    The individual samples are accumulated left-associatively into one
    sum, then wrapped as ``{'loss': float(log(1e-12 + z**2))}``.
    """
    samples = [
        hp_choice('a', [0, 1, 2]),
        hp_randint('b', 10),
        hp_uniform('c', 4, 7),
        hp_loguniform('d', -2, 0),
        hp_quniform('e', 0, 10, 3),
        hp_qloguniform('f', 0, 3, 2),
        hp_normal('g', 4, 7),
        hp_lognormal('h', -2, 2),
        hp_qnormal('i', 0, 10, 2),
        hp_qlognormal('j', 0, 2, 1),
    ]
    # Left-associative accumulation, same tree shape as a + b + ... + j.
    z = samples[0]
    for term in samples[1:]:
        z = z + term
    return {'loss': scope.float(scope.log(1e-12 + z ** 2))}
def exit_lpool_alpha(pipeline, layer_num, Xcm, n_patches, max_n_features):
    """Extend *pipeline* with a filterbank layer plus alpha-lpool
    pooling, registered as a candidate exit point.

    The filter count is derived from the (symbolic) pooled resolution
    so the output stays within ``max_n_features``.
    """
    def lab(msg):
        # e.g. layer 2 -> 'l2elpa_fsize'
        return 'l%ielpa_%s' % (layer_num, msg)

    filt_size = rfilter_size(lab('fsize'), 3, 8)
    filtering_res = pyll_getattr(Xcm, 'shape')[2] - filt_size + 1
    # -- N.B. Xrows depends on other params, so we can't use it to set the
    #    upper bound on lpsize. We can only sample independently, and
    #    then fail below with non-positive number of features.
    pool_size = rfilter_size(lab('lpsize'), 1, 5)
    pool_stride = hp_choice(lab('stride'), [1, 2, 3])
    res = scope.ceildiv(
        scope.max(filtering_res - pool_size + 1, 0),
        pool_stride)

    if 0:
        # XXX: This is a smarter way to pick the n_filters, but it triggers
        # a bug in hyperopt.vectorize_helper. The build_idxs_vals function
        # there needs to be smarter -- to recognize when wanted_idxs is a
        # necessarily subset of the all_idxs, and then not to append
        # wanted_idxs to the union defining all_idxs... because that creates a
        # cycle. The trouble is specifically that lpool_res is used in the
        # switch statement below both in the condition and the response.
        nfilters = switch(
            res > 0,
            max_n_features // (2 * (res ** 2)),
            scope.Raise(ValueError, 'Non-positive number of features'))
    else:
        # this is less good because it risks dividing by zero,
        # and forces the bandit to catch weirder errors from new_fbncc_layer
        # caused by negative nfilters
        nfilters = max_n_features // (2 * (res ** 2))

    filtering = new_fbncc_layer(
        prefix='l%iel' % layer_num,
        Xcm=Xcm,
        n_patches=n_patches,
        n_filters=nfilters,
        size=filt_size,
    )

    pooling = partial(
        slm_lpool_alpha,
        ker_size=pool_size,
        stride=pool_stride,
        alpha=hp_normal(lab('alpha'), 0.0, 1.0),
        order=hp_choice(lab('order_choice'), [
            1.0, 2.0, logu_range(lab('order_real'), .1, 10.)]),
    )

    return new_exit(pipeline + [filtering, pooling], lab('%s'))
def test7(self):
    """Vectorization test with shared sub-expressions: p0 feeds p2,
    while p3 draws from p1, p2 and a fresh uniform 'a0'.  Five trials
    of sampled (label, idxs, vals) triples are compared to a
    hard-coded expected table.
    """
    p0 = hp_uniform('p0', 0, 1)
    p1 = hp_normal('p1', 0, 1)
    p2 = hp_choice('p2', [1, p0])
    p3 = hp_choice('p3', [2, p1, p2, hp_uniform('a0', 2, 3)])

    self.expr = {'loss': p0 + p1 + p2 + p3}
    self.n_randints = 2

    trial0 = [('p0', [0], [0.71295532052322719]),
              ('p1', [0], [0.28297849805199204]),
              ('p2.randint', [0], [0]),
              ('p3.arg:2', [0], [2.719468969785563]),
              ('p3.randint', [0], [2])]
    trial1 = [('p0', [1], [0.78002776191207912]),
              ('p1', [1], [-1.506294713918092]),
              ('p2.randint', [1], [1]),
              ('p3.arg:2', [], []),
              ('p3.randint', [1], [1])]
    trial2 = [('p0', [2], [0.57969429702261011]),
              ('p1', [2], [1.6796003743035337]),
              ('p2.randint', [2], [0]),
              ('p3.arg:2', [], []),
              ('p3.randint', [2], [1])]
    trial3 = [('p0', [3], [0.43857224467962441]),
              ('p1', [3], [-1.3058031267484451]),
              ('p2.randint', [3], [1]),
              ('p3.arg:2', [], []),
              ('p3.randint', [3], [1])]
    trial4 = [('p0', [4], [0.39804425533043142]),
              ('p1', [4], [-0.91948540682140967]),
              ('p2.randint', [4], [0]),
              ('p3.arg:2', [], []),
              ('p3.randint', [4], [0])]
    self.wanted = [trial0, trial1, trial2, trial3, trial4]

    self.foo()