Example #1
def dnn_net_simple(num_classes):

    # defining the hyperparameters
    h_num_hidden = D([64, 128, 256, 512, 1024])  # number of hidden units for the affine (dense) module
    h_nonlin_name = D(['relu', 'tanh', 'elu'])  # nonlinearity function names to choose from
    h_opt_drop = D([0, 1])  # optional dropout hyperparameter; 0 means exclude, 1 means include
    h_drop_keep_prob = D([0.25, 0.5, 0.75])  # dropout keep probabilities to choose from
    h_opt_bn = D([0, 1])  # optional batch normalization hyperparameter
    h_swap = D([0, 1])  # order of the two optional modules in the permutation
    h_num_repeats = D([1, 2])  # number of times the cell appears: once or twice

    # defining the search space topology
    model = mo.siso_sequential([
        flatten(),
        mo.siso_repeat(lambda: mo.siso_sequential([
            dense(h_num_hidden),
            nonlinearity(h_nonlin_name),
            mo.siso_permutation([
                lambda: mo.siso_optional(lambda: dropout(h_drop_keep_prob), h_opt_drop),
                lambda: mo.siso_optional(batch_normalization, h_opt_bn),
            ], h_swap)
        ]), h_num_repeats),
        dense(D([num_classes]))
    ])

    return model
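
A search space function like dnn_net_simple returns the module graph with all hyperparameters still unassigned. Below is a minimal sketch, assuming deep_architect's random_specify helper from deep_architect.searchers.common, of drawing one concrete architecture; treat the helper and its exact signature as an assumption, not part of the original example:

# sketch: randomly assign every unset hyperparameter in the search space
from deep_architect.searchers.common import random_specify

inputs, outputs = dnn_net_simple(num_classes=10)
hyperp_value_lst = random_specify(outputs)  # one chosen value per hyperparameter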
Example #2
def dnn_cell(h_num_hidden, h_nonlin_name, h_swap, h_opt_drop, h_opt_bn, h_drop_keep_prob):
    return mo.siso_sequential([
        affine_simplified(h_num_hidden),
        nonlinearity(h_nonlin_name),
        mo.siso_permutation([
            lambda: mo.siso_optional(lambda: dropout(h_drop_keep_prob), h_opt_drop),
            lambda: mo.siso_optional(batch_normalization, h_opt_bn),
        ], h_swap)
    ])
Example #3
def dnn_cell(h_num_hidden, h_nonlin_name, h_swap, h_opt_drop, h_opt_bn,
             h_drop_rate):
    return mo.siso_sequential([
        dense(h_num_hidden),
        nonlinearity(h_nonlin_name),
        mo.siso_permutation([
            lambda: mo.siso_optional(lambda: dropout(h_drop_rate), h_opt_drop),
            lambda: mo.siso_optional(batch_normalization, h_opt_bn),
        ], h_swap)
    ])
Example #4
def module(h_num_filters, h_kernel_size, h_swap, h_opt_drop, h_drop_rate,
           h_num_repeats):

    return mo.siso_repeat(
        lambda: mo.siso_sequential([
            conv2d(h_num_filters, h_kernel_size, D([1])),
            mo.siso_permutation([relu, batch_normalization], h_swap),
            mo.siso_optional(lambda: dropout(h_drop_rate), h_opt_drop)
        ]), h_num_repeats)
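
For illustration, the hyperparameter arguments to module can be created with D value lists in the same style as Example #1. The value lists below are assumptions for the sketch, not part of the original example:

# hypothetical value lists; the keyword names mirror the function's parameters
inputs, outputs = module(
    h_num_filters=D([32, 64, 128]),
    h_kernel_size=D([3, 5]),
    h_swap=D([0, 1]),
    h_opt_drop=D([0, 1]),
    h_drop_rate=D([0.25, 0.5]),
    h_num_repeats=D([1, 2, 4]))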
Example #5
def dnn_cell(h_num_hidden, h_nonlin_name, h_opt_drop, h_drop_keep_prob):
    return mo.siso_sequential([
        dense(h_num_hidden),
        nonlinearity(h_nonlin_name),
        mo.siso_optional(lambda: dropout(h_drop_keep_prob), h_opt_drop)
    ])
Example #6
def batch_normalization():
    def compile_fn(di, dh):
        # boolean placeholder that switches the layer between train and eval mode
        p_var = tf.placeholder(tf.bool)
        def fn(di):
            return {'out': tf.layers.batch_normalization(di['in'], training=p_var)}
        # forward function plus the placeholder feeds used at train and eval time
        return fn, {p_var: 1}, {p_var: 0}
    return siso_tfm('BatchNormalization', compile_fn, {})
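
### ${MARKDOWN}

# A dropout module can be written with the same siso_tfm pattern. The sketch
# below is an assumption modeled on the batch_normalization module above, not
# code from the original example: the keep probability is fed through a
# placeholder, set to the chosen hyperparameter value during training and to
# 1.0 during evaluation.

### ${CODE}

def dropout(h_keep_prob):
    def compile_fn(di, dh):
        # keep probability fed at run time so train and eval can differ
        p_var = tf.placeholder(tf.float32)
        def fn(di):
            return {'out': tf.nn.dropout(di['in'], p_var)}
        return fn, {p_var: dh['keep_prob']}, {p_var: 1.0}
    return siso_tfm('Dropout', compile_fn, {'keep_prob': h_keep_prob})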

### ${MARKDOWN}

# * Optional dropout/batchnorm: this pre-defined module determines whether dropout is included in a particular architecture or not.
# * h_drop_keep_prob: hyperparameter listing the dropout keep probabilities to choose from.
# * h_opt_drop: optional dropout hyperparameter; if 0 is selected, dropout is excluded; if 1, it is included.

### ${CODE}

mo.siso_optional(lambda: dropout(h_drop_keep_prob), h_opt_drop)

### ${MARKDOWN}

# * Permutation dropout/batchnorm: a pre-defined module that determines the ordering of its sub-modules; in this case, whether batchnorm comes before dropout or vice versa.
# * h_swap: D([0, 1]); if 0 is selected, the sub-modules keep the listed order (dropout, then batchnorm), and if 1, the order is swapped.

### ${CODE}

mo.siso_permutation([
    lambda: mo.siso_optional(lambda: dropout(h_drop_keep_prob), h_opt_drop),
    lambda: mo.siso_optional(batch_normalization, h_opt_bn),
], h_swap)

### ${MARKDOWN}