Example #1
0
def q1_lognormal():
    """
    About the simplest problem you could ask for:
    optimize a one-variable quadratic function.
    """
    x = hp.lognormal('x', 0, 2)
    loss = scope.max(-(x - 3) ** 2, -100)
    return {'loss': loss, 'status': base.STATUS_OK}
Example #2
0
def q1_lognormal():
    """
    About the simplest problem you could ask for:
    optimize a one-variable quadratic function.
    """
    # Quadratic in the sampled variable, clipped below at -100.
    quadratic = -(hp.lognormal('x', 0, 2) - 3) ** 2
    result = {'loss': scope.max(quadratic, -100)}
    result['status'] = base.STATUS_OK
    return result
Example #3
0
def exit_lpool_alpha(pipeline, layer_num, Xcm, n_patches, max_n_features):
    """Build an exit stage: filterbank layer + alpha-lpool pooling.

    Extends *pipeline* with a ``new_fbncc_layer`` filtering step followed
    by an ``slm_lpool_alpha`` pooling step, with the filter size, pool
    size, stride, alpha, and pooling order all drawn as hyperparameters.
    Returns the value of ``new_exit`` on the extended pipeline.

    NOTE(review): parameter semantics (e.g. that ``Xcm`` is an image-like
    array whose ``shape[2]`` is a spatial extent) are inferred from usage
    here — confirm against the callers.
    """
    def lab(msg):
        # Make hyperparameter labels unique per layer ('l<N>elpa_<msg>').
        return 'l%ielpa_%s' % (layer_num, msg)

    fsize = rfilter_size(lab('fsize'), 3, 8)
    # Spatial resolution remaining after 'valid' filtering with fsize.
    filtering_res = pyll_getattr(Xcm, 'shape')[2] - fsize + 1
    # -- N.B. Xrows depends on other params, so we can't use it to set the
    #         upper bound on lpsize. We can only sample independently, and
    #         then fail below with non-positive number of features.
    size = rfilter_size(lab('lpsize'), 1, 5)
    stride = hp_choice(lab('stride'), [1, 2, 3])
    # Resolution after pooling; clamped at 0 so res is never negative.
    res = scope.ceildiv(scope.max(filtering_res - size + 1, 0), stride)
    if 0:
        # XXX: This is a smarter way to pick the n_filters, but it triggers
        # a bug in hyperopt.vectorize_helper.  The build_idxs_vals function
        # there needs to be smarter -- to recognize when wanted_idxs is a
        # necessarily subset of the all_idxs, and then not to append
        # wanted_idxs to the union defining all_idxs... because that creates a
        # cycle.  The trouble is specifically that lpool_res is used in the
        # switch statement below both in the condition and the response.
        nfilters = switch(res > 0,
            max_n_features // (2 * (res ** 2)),
            scope.Raise(ValueError, 'Non-positive number of features'))
    else:
        # this is less good because it risks dividing by zero,
        # and forces the bandit to catch weirder errors from new_fbncc_layer
        # caused by negative nfilters
        nfilters = max_n_features // (2 * (res ** 2))

    filtering = new_fbncc_layer(
            prefix='l%iel' % layer_num,
            Xcm=Xcm,
            n_patches=n_patches,
            n_filters=nfilters,
            size=fsize,
            )

    # Pooling step is a partial so the pipeline can apply it later; the
    # pooling order is either a fixed 1.0/2.0 or sampled log-uniformly.
    pooling = partial(slm_lpool_alpha,
            ker_size=size,
            stride=stride,
            alpha=hp_normal(lab('alpha'), 0.0, 1.0),
            order=hp_choice(lab('order_choice'), [
                1.0, 2.0, logu_range(lab('order_real'), .1, 10.)]))

    # lab('%s') yields a label template ('l<N>elpa_%s') for new_exit to
    # fill in -- presumably used for its own hyperparameter names.
    return new_exit(pipeline + [filtering, pooling], lab('%s'))
Example #4
0
def exit_lpool(pipeline, layer_num, Xcm, n_patches, max_n_features):
    """Build an exit stage: filterbank layer + lpool, optionally + lnorm.

    Extends *pipeline* with a ``new_fbncc_layer`` filtering step, an
    ``slm_lpool`` pooling step, and (chosen by the 'use_norm'
    hyperparameter) an ``slm_lnorm`` normalization step.  Filter size,
    pool size, stride, norm size, and pooling order are all sampled as
    hyperparameters.  Returns the value of ``new_exit`` on the extended
    pipeline.

    NOTE(review): parameter semantics (e.g. that ``Xcm.shape[2]`` is a
    spatial extent) are inferred from usage here — confirm with callers.
    """
    def lab(msg):
        # Make hyperparameter labels unique per layer ('l<N>_out_lp_<msg>').
        return 'l%i_out_lp_%s' % (layer_num, msg)

    fsize = rfilter_size(lab('fsize'), 3, 8)
    # Spatial resolution remaining after 'valid' filtering with fsize.
    filtering_res = pyll_getattr(Xcm, 'shape')[2] - fsize + 1
    # -- N.B. Xrows depends on other params, so we can't use it to set the
    #         upper bound on lpsize. We can only sample independently, and
    #         then fail below with non-positive number of features.
    psize = rfilter_size(lab('psize'), 1, 5)
    stride = hp_choice(lab('stride'), [1, 2, 3])
    pooling_res = scope.ceildiv(filtering_res - psize + 1, stride)
    nsize = rfilter_size(lab('nsize'), 1, 5)
    # Resolution after normalization; may come out non-positive.
    norm_res = pooling_res - nsize + 1

    # -- raises exception at rec_eval if norm_res is 0
    nfilters = max_n_features // (scope.max(norm_res, 0) ** 2)

    filtering = new_fbncc_layer(
            prefix='l%ielp' % layer_num,
            Xcm=Xcm,
            n_patches=n_patches,
            n_filters=nfilters,
            size=fsize,
            )

    # Pooling order is either a fixed 1.0/2.0 or sampled log-uniformly.
    pooling = partial(slm_lpool,
            ker_size=psize,
            stride=stride,
            order=hp_choice(lab('order_choice'), [
                1.0, 2.0, logu_range(lab('order_real'), .1, 10.)]))

    normalization = partial(slm_lnorm,
            ker_size=nsize,
            remove_mean=hp_TF(lab('norm_rmean')),
            threshold=hp_lognormal(lab('norm_thresh'),
                np.log(1.0), np.log(3)),
            )

    # Hyperparameter choice: include the normalization step or not.
    seq = hp_choice(lab('use_norm'), [
            [filtering, pooling],
            [filtering, pooling, normalization]])

    # lab('%s') yields a label template ('l<N>_out_lp_%s') for new_exit.
    return new_exit(pipeline + seq, lab('%s'))