Example #1
def cifar10_shufflenet(x, n_groups=2, n_filters=200, ratio=1.0, seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    conv = conv2d_bn_relu(x,
                          size=3,
                          n_filters=24,
                          is_training=training,
                          kernel_init=He_normal(seed=seed + 1),
                          name="initial_conv")
    layers.append(("initial_conv", conv))

    slayer1 = shufflenet.shufflenet_layer(conv,
                                          n_filters=n_filters,
                                          n_repeat=3,
                                          n_groups=n_groups,
                                          reduction_ratio=ratio,
                                          is_training=training,
                                          kernel_init=He_normal(seed=seed + 2),
                                          name="shufflenet_layer_1")
    layers.append(("shufflenet_layer_1", slayer1))

    slayer2 = shufflenet.shufflenet_layer(slayer1,
                                          n_filters=n_filters * 2,
                                          n_repeat=7,
                                          n_groups=n_groups,
                                          reduction_ratio=ratio,
                                          is_training=training,
                                          kernel_init=He_normal(seed=seed + 3),
                                          name="shufflenet_layer_2")
    layers.append(("shufflenet_layer_2", slayer2))

    slayer3 = shufflenet.shufflenet_layer(slayer2,
                                          n_filters=n_filters * 4,
                                          n_repeat=3,
                                          n_groups=n_groups,
                                          reduction_ratio=ratio,
                                          is_training=training,
                                          kernel_init=He_normal(seed=seed + 4),
                                          name="shufflenet_layer_3")
    layers.append(("shufflenet_layer_3", slayer3))

    pool = global_avg_pool2d(slayer3)
    layers.append(("pool", pool))

    dense1 = dense(pool,
                   n_units=10,
                   kernel_init=Kumar_normal(activation=None,
                                            mode="FAN_IN",
                                            seed=seed + 5),
                   name="dense_1")
    layers.append(("logit", dense1))

    prob = tf.nn.softmax(dense1, name="prob")
    layers.append(("prob", prob))

    return layers, variables
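
# Usage sketch (an illustration, not part of the original source): every
# builder in these examples returns (layers, variables), where `layers` is a
# list of (name, tensor) pairs and `variables` holds extra placeholders such
# as the "training" flag. Assuming TF 1.x and CIFAR-10 inputs:
def run_shufflenet_inference(images):
    x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name="x")
    layers, variables = cifar10_shufflenet(x)
    prob = dict(layers)["prob"]              # softmax output tensor
    training = dict(variables)["training"]   # switches batch-norm mode
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # training=False: batch norm uses its moving statistics.
        return sess.run(prob, feed_dict={x: images, training: False})
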
Example #2
def cifar10_resnext(x, n_blocks, cardinality = 8, group_width = 16, seed = 42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    conv = conv2d_bn_act(
            x, size = 3, n_filters = 32,
            activation = tf.nn.relu,
            is_training = training,
            kernel_init = He_normal(seed = seed+1),
            name = "initial_conv")
    layers.append(("initial_conv", conv))
            
    res1 = resnext.residual_layer(
            conv, n_blocks = n_blocks, stride = 1,
            cardinality = cardinality,
            group_width = group_width,
            block_function = resnext.bottleneck_block,
            is_training = training,
            kernel_init = He_normal(seed = seed+2),
            name = "residual_1")
    layers.append(("residual_1", res1))

    res2 = resnext.residual_layer(
            res1, n_blocks = n_blocks, stride = 2,
            cardinality = cardinality,
            group_width = group_width*2,
            block_function = resnext.bottleneck_block,
            is_training = training,
            kernel_init = He_normal(seed = seed+3),            
            name="residual_2")
    layers.append(("residual_2", res2))

    res3 = resnext.residual_layer(
            res2, n_blocks = n_blocks, stride = 2,
            cardinality = cardinality,
            group_width = group_width*4,
            block_function = resnext.bottleneck_block,
            is_training = training,
            kernel_init = He_normal(seed = seed+4),
            name = "residual_3")
    layers.append(("residual_3", res3))

    pool = global_avg_pool2d(res3)
    layers.append(("pool", pool))
    
    dense1 = dense(
            pool, n_units = 10,
            kernel_init = Kumar_normal(activation = None, mode = "FAN_IN", seed = seed+5),
            name = "dense_1")
    layers.append(("logit", dense1))
    
    prob = tf.nn.softmax(dense1, name = "prob")
    layers.append(("prob", prob))
    
    return layers, variables
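
# Training sketch (an illustration, not from the original source): these
# builders create batch-norm layers driven by the `training` placeholder, so
# the moving-average updates registered in tf.GraphKeys.UPDATE_OPS must be
# run together with each optimization step. `onehot_labels` is an assumed
# placeholder of shape [None, 10].
def make_train_op(logits, onehot_labels, learning_rate=0.1):
    loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels,
                                           logits=logits)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):  # couple BN updates to the step
        train_op = tf.train.MomentumOptimizer(learning_rate,
                                              0.9).minimize(loss)
    return train_op, loss
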
Example #3
def cifar10_sequential_c3d_selu(x, seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    # conv5x5 selu + pool
    conv1 = conv2d_selu(x,
                        size=5,
                        n_filters=32,
                        kernel_init=conv_selu_safe_initializer(x,
                                                               5,
                                                               seed=seed + 1),
                        name="conv_1")
    layers.append(("conv_1", conv1))
    pool1 = max_pool2d(conv1, size=3, stride=2, name="pool_1")
    layers.append(("pool_1", pool1))

    # conv5x5 selu + pool
    conv2 = conv2d_selu(pool1,
                        size=5,
                        n_filters=64,
                        kernel_init=conv_selu_safe_initializer(pool1,
                                                               5,
                                                               seed=seed + 2),
                        name="conv_2")
    layers.append(("conv_2", conv2))
    pool2 = max_pool2d(conv2, size=3, stride=2, name="pool_2")
    layers.append(("pool_2", pool2))

    # conv3x3 selu + pool
    conv3 = conv2d_selu(pool2,
                        size=3,
                        n_filters=128,
                        kernel_init=conv_selu_safe_initializer(pool2,
                                                               3,
                                                               seed=seed + 3),
                        name="conv_3")
    layers.append(("conv_3", conv3))
    pool3 = max_pool2d(conv3, size=2, stride=2, name="pool_3")
    layers.append(("pool_3", pool3))

    flat = flatten(pool3, name="flatten")
    layers.append(("flatten", flat))

    # dense softmax
    dense1 = dense(flat,
                   n_units=10,
                   kernel_init=dense_selu_safe_initializer(flat,
                                                           seed=seed + 4),
                   name="dense_1")
    layers.append(("logit", dense1))
    prob = tf.nn.softmax(dense1, name="prob")
    layers.append(("prob", prob))

    return layers, variables
Example #4
def auxiliary_classifier(inputs,
                         classes,
                         is_training=False,
                         regularizer=None,
                         activation=tf.nn.relu,
                         conv_kernel_init=He_normal(seed=42),
                         conv_bias_init=tf.zeros_initializer(),
                         dense_kernel_init=Kumar_normal(activation=None,
                                                        mode="FAN_IN",
                                                        seed=42),
                         name="nasnet_auxiliary_classifier"):

    with tf.variable_scope(name):
        x = inputs
        if activation is not None:
            x = activation(x, name="activation_1")
        x = avg_pool2d(x, size=5, stride=3, padding="VALID", name="avg_pool")

        x = conv2d_bn(x,
                      n_filters=128,
                      size=1,
                      is_training=is_training,
                      regularizer=regularizer,
                      kernel_init=conv_kernel_init,
                      bias_init=conv_bias_init,
                      name="conv_projection")

        if activation is not None:
            x = activation(x, name="activation_2")

        x = conv2d_bn(x,
                      n_filters=768,
                      size=[x.get_shape()[1].value,
                            x.get_shape()[2].value],
                      padding="VALID",
                      is_training=is_training,
                      regularizer=regularizer,
                      kernel_init=conv_kernel_init,
                      bias_init=conv_bias_init,
                      name="conv_reduction")

        if activation is not None:
            x = activation(x, name="activation_3")

        x = global_avg_pool2d(x, name="global_avg_pool")

        x = dense(x,
                  n_units=classes,
                  regularizer=regularizer,
                  kernel_init=dense_kernel_init,
                  name="dense")

    return x
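
# Sketch of combining the auxiliary head with the main loss (the 0.4 weight
# is a common choice from the Inception/NASNet literature and an assumption
# here, not something this module prescribes): the auxiliary logits act as a
# regularizer and must not dominate the objective.
def combined_loss(main_logits, aux_logits, onehot_labels, aux_weight=0.4):
    main_loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels,
                                                logits=main_logits)
    aux_loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels,
                                               logits=aux_logits)
    return main_loss + aux_weight * aux_loss
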
Example #5
def mnist_sequential_dbn2d1(x, drop_rate=0.5):
    """Creates sequential neural network for MNIST. The network contains 2 
        batch-normalized fully-connected dense layers. Dropout layers are used 
        for regularization. The output  probabilities are generated by one 
        dense layer followed by a softmax function.
    Args:
        x: A tensor representing the input.
    Returns:
        A tuple containing the layers of the network graph and additional
        placeholders if any. Layers are represented as list of named tuples.
    """
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    flat = flatten(x, name="flatten")
    layers.append(("flatten", flat))

    fc1 = dense_bn(flat, n_units=256, is_training=training, name="fc1")
    layers.append(("fc1", fc1))

    dropout1 = tf.layers.dropout(fc1,
                                 rate=drop_rate,
                                 training=training,
                                 seed=42,
                                 name="dropout1")
    layers.append(("dropout1", dropout1))

    fc2 = dense_bn(dropout1, n_units=64, is_training=training, name="fc2")
    layers.append(("fc2", fc2))

    dropout2 = tf.layers.dropout(fc2,
                                 rate=drop_rate,
                                 training=training,
                                 seed=42,
                                 name="dropout2")
    layers.append(("dropout2", dropout2))

    fc3 = dense(dropout2,
                n_units=10,
                activation=None,
                kernel_init=Kumar_initializer(activation=None),
                name="fc3")
    layers.append(("fc3", fc3))

    prob = tf.nn.softmax(fc3, name="prob")
    layers.append(("prob", prob))

    return layers, variables
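
# Evaluation sketch (an illustration, not from the original source): the
# "training" placeholder drives tf.layers.dropout, which perturbs activations
# only when training=True and is the identity otherwise, so evaluation must
# feed training=False. `test_labels` is an assumed vector of int class ids.
import numpy as np

def evaluate_accuracy(sess, prob, x, training, test_images, test_labels):
    p = sess.run(prob, feed_dict={x: test_images, training: False})
    return float(np.mean(np.argmax(p, axis=1) == test_labels))
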
Example #6
def cifar10_sequential_clrn3d(x,
                              drop_rate_1=0.2,
                              drop_rate_2=0.3,
                              drop_rate_3=0.4,
                              seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    # conv5x5 relu + lrn + pool
    conv1 = conv2d_relu(x,
                        size=5,
                        n_filters=32,
                        kernel_init=tf.truncated_normal_initializer(
                            stddev=5e-2, seed=seed + 1),
                        name="conv_1")
    layers.append(("conv_1", conv1))
    norm1 = tf.nn.lrn(conv1,
                      depth_radius=4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name="norm_1")
    layers.append(("norm_1", norm1))
    pool1 = max_pool2d(norm1, size=3, stride=2, name="pool_1")
    layers.append(("pool_1", pool1))
    dropout1 = tf.layers.dropout(pool1,
                                 rate=drop_rate_1,
                                 training=training,
                                 seed=seed + 1,
                                 name="dropout_1")
    layers.append(("dropout_1", dropout1))

    # conv5x5 relu + lrn + pool
    conv2 = conv2d_relu(dropout1,
                        size=5,
                        n_filters=64,
                        kernel_init=tf.truncated_normal_initializer(
                            stddev=5e-2, seed=seed + 2),
                        name="conv_2")
    layers.append(("conv_2", conv2))
    norm2 = tf.nn.lrn(conv2,
                      depth_radius=4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name="norm_2")
    layers.append(("norm_2", norm2))
    pool2 = max_pool2d(norm2, size=3, stride=2, name="pool_2")
    layers.append(("pool_2", pool2))
    dropout2 = tf.layers.dropout(pool2,
                                 rate=drop_rate_2,
                                 training=training,
                                 seed=seed + 2,
                                 name="dropout_2")
    layers.append(("dropout_2", dropout2))

    # conv3x3 relu + lrn + pool
    conv3 = conv2d_relu(dropout2,
                        size=3,
                        n_filters=128,
                        kernel_init=tf.truncated_normal_initializer(
                            stddev=5e-2, seed=seed + 3),
                        name="conv_3")
    layers.append(("conv_3", conv3))
    norm3 = tf.nn.lrn(conv3,
                      depth_radius=4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name="norm_3")
    layers.append(("norm_3", norm3))
    pool3 = max_pool2d(norm3, size=2, stride=2, name="pool_3")
    layers.append(("pool_3", pool3))
    dropout3 = tf.layers.dropout(pool3,
                                 rate=drop_rate_3,
                                 training=training,
                                 seed=seed + 3,
                                 name="dropout_3")
    layers.append(("dropout_3", dropout3))

    flat = flatten(dropout3, name="flatten")
    layers.append(("flatten", flat))

    # dense softmax
    dense1 = dense(flat,
                   n_units=10,
                   kernel_init=tf.truncated_normal_initializer(stddev=1 /
                                                               192.0,
                                                               seed=seed + 4),
                   name="dense_1")
    layers.append(("logit", dense1))
    prob = tf.nn.softmax(dense1, name="prob")
    layers.append(("prob", prob))

    return layers, variables
Example #7
def cifar10_densenet_wd(x,
                        n_repeat,
                        drop_rate=0.2,
                        weight_decay=0.0001,
                        seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    conv = conv2d(x,
                  size=3,
                  n_filters=16,
                  regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                  kernel_init=He_normal(seed=seed + 1),
                  name="initial_conv")
    layers.append(("initial_conv", conv))

    dblock1 = densenet.dense_block(
        conv,
        n_repeat=n_repeat,
        n_filters=12,
        drop_rate=drop_rate,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 2),
        seed=seed + 2,
        name="dense_block_1")
    layers.append(("dense_block_1", dblock1))

    tlayer1 = densenet.transition_layer(
        dblock1,
        pool_size=2,
        pool_stride=2,
        drop_rate=drop_rate,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 3),
        seed=seed + 3,
        name="transition_layer_1")
    layers.append(("transition_layer_1", tlayer1))

    dblock2 = densenet.dense_block(
        tlayer1,
        n_repeat=n_repeat,
        n_filters=12,
        drop_rate=drop_rate,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 4),
        seed=seed + 4,
        name="dense_block_2")
    layers.append(("dense_block_2", dblock2))

    tlayer2 = densenet.transition_layer(
        dblock2,
        pool_size=2,
        pool_stride=2,
        drop_rate=drop_rate,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 5),
        seed=seed + 5,
        name="transition_layer_2")
    layers.append(("transition_layer_2", tlayer2))

    dblock3 = densenet.dense_block(
        tlayer2,
        n_repeat=n_repeat,
        n_filters=12,
        drop_rate=drop_rate,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 6),
        seed=seed + 6,
        name="dense_block_3")
    layers.append(("dense_block_3", dblock3))

    final = densenet.final_layer(dblock3, is_training=training, name="final")
    layers.append(("final", final))

    dense1 = dense(final,
                   n_units=10,
                   kernel_init=Kumar_normal(activation=None,
                                            mode="FAN_IN",
                                            seed=seed + 7),
                   name="dense_1")
    layers.append(("logit", dense1))

    prob = tf.nn.softmax(dense1, name="prob")
    layers.append(("prob", prob))

    return layers, variables
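
# Weight-decay sketch (assumed usage, not from the original source): the
# l2_regularizer objects passed above only register penalties in
# tf.GraphKeys.REGULARIZATION_LOSSES; they take effect only if added to the
# optimized objective explicitly.
def loss_with_weight_decay(logits, onehot_labels):
    data_loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels,
                                                logits=logits)
    reg_loss = tf.losses.get_regularization_loss()  # sum of the l2 penalties
    return data_loss + reg_loss
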
Example #8
def cifar10_bottleneck_densenet(x, n_repeat, drop_rate=0.25, seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    conv = conv2d(x,
                  size=3,
                  n_filters=16,
                  kernel_init=He_normal(seed=seed + 1),
                  name="initial_conv")
    layers.append(("initial_conv", conv))

    dblock1 = densenet.bottleneck_block(conv,
                                        n_repeat=n_repeat,
                                        n_filters=12,
                                        reduction_ratio=4,
                                        drop_rate=drop_rate,
                                        is_training=training,
                                        kernel_init=He_normal(seed=seed + 2),
                                        seed=seed + 2,
                                        name="dense_bootleneck_block_1")
    layers.append(("dense_bootleneck_block_1", dblock1))

    tlayer1 = densenet.transition_layer(dblock1,
                                        pool_size=2,
                                        pool_stride=2,
                                        drop_rate=drop_rate,
                                        is_training=training,
                                        kernel_init=He_normal(seed=seed + 3),
                                        seed=seed + 3,
                                        name="transition_layer_1")
    layers.append(("transition_layer_1", tlayer1))

    dblock2 = densenet.bottleneck_block(tlayer1,
                                        n_repeat=n_repeat,
                                        n_filters=12,
                                        reduction_ratio=4,
                                        drop_rate=drop_rate,
                                        is_training=training,
                                        kernel_init=He_normal(seed=seed + 4),
                                        seed=seed + 4,
                                        name="dense_bootleneck_block_2")
    layers.append(("dense_bootleneck_block_2", dblock2))

    tlayer2 = densenet.transition_layer(dblock2,
                                        pool_size=2,
                                        pool_stride=2,
                                        drop_rate=drop_rate,
                                        is_training=training,
                                        kernel_init=He_normal(seed=seed + 5),
                                        seed=seed + 5,
                                        name="transition_layer_2")
    layers.append(("transition_layer_2", tlayer2))

    dblock3 = densenet.bottleneck_block(tlayer2,
                                        n_repeat=n_repeat,
                                        n_filters=12,
                                        reduction_ratio=4,
                                        drop_rate=drop_rate,
                                        is_training=training,
                                        kernel_init=He_normal(seed=seed + 6),
                                        seed=seed + 6,
                                        name="dense_bootleneck_block_3")
    layers.append(("dense_bootleneck_block_3", dblock3))

    pool = global_avg_pool2d(dblock3)
    layers.append(("pool", pool))

    dense1 = dense(pool,
                   n_units=10,
                   kernel_init=Kumar_normal(activation=None,
                                            mode="FAN_IN",
                                            seed=seed + 7),
                   name="dense_1")
    layers.append(("logit", dense1))

    prob = tf.nn.softmax(dense1, name="prob")
    layers.append(("prob", prob))

    return layers, variables
Example #9
def cifar10_sequential_c5d3_selu_drop_wd(x,
                                         drop_rate=0.05,
                                         weight_decay=0.001,
                                         seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    # conv5x5 selu + pool
    conv1 = conv2d_selu(
        x,
        size=5,
        n_filters=64,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=conv_selu_safe_initializer(x, 5, seed=seed + 1),
        name="conv_1")
    layers.append(("conv_1", conv1))
    pool1 = max_pool2d(conv1, size=3, stride=2, name="pool_1")
    layers.append(("pool_1", pool1))

    # conv5x5 selu + pool
    conv2 = conv2d_selu(
        pool1,
        size=5,
        n_filters=64,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=conv_selu_safe_initializer(pool1, 5, seed=seed + 2),
        name="conv_2")
    layers.append(("conv_2", conv2))
    pool2 = max_pool2d(conv2, size=3, stride=2, name="pool_2")
    layers.append(("pool_2", pool2))

    # 3x conv3x3 selu + pool
    conv3 = conv2d_selu(
        pool2,
        size=3,
        n_filters=128,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=conv_selu_safe_initializer(pool2, 3, seed=seed + 3),
        name="conv_3")
    layers.append(("conv_3", conv3))
    conv4 = conv2d_selu(
        conv3,
        size=3,
        n_filters=128,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=conv_selu_safe_initializer(conv3, 3, seed=seed + 4),
        name="conv_4")
    layers.append(("conv_4", conv4))
    conv5 = conv2d_selu(
        conv4,
        size=3,
        n_filters=128,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=conv_selu_safe_initializer(conv4, 3, seed=seed + 5),
        name="conv_5")
    layers.append(("conv_5", conv5))
    pool3 = max_pool2d(conv5, size=2, stride=2, name="pool_3")
    layers.append(("pool_3", pool3))

    flat = flatten(pool3, name="flatten")
    layers.append(("flatten", flat))

    dense1 = dense_selu(
        flat,
        n_units=384,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=dense_selu_safe_initializer(flat, seed=seed + 6),
        name="dense_1")
    layers.append(("dense_1", dense1))

    if drop_rate > 0.0:
        dense1 = dropout_selu(dense1,
                              rate=drop_rate,
                              training=training,
                              seed=seed + 7)

    dense2 = dense_selu(
        dense1,
        n_units=192,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=dense_selu_safe_initializer(dense1, seed=seed + 8),
        name="dense_2")
    layers.append(("dense_2", dense2))

    if drop_rate > 0.0:
        dense2 = dropout_selu(dense2,
                              rate=drop_rate,
                              training=training,
                              seed=seed + 9)

    # dense softmax
    dense3 = dense(dense2,
                   n_units=10,
                   regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                   kernel_init=dense_selu_safe_initializer(dense2,
                                                           seed=seed + 10),
                   name="dense_3")
    layers.append(("logit", dense3))
    prob = tf.nn.softmax(dense3, name="prob")
    layers.append(("prob", prob))

    return layers, variables
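
# Numeric sanity check (an illustration, not from the original source): SELU
# approximately preserves zero mean and unit variance across layers, which is
# why these builders pair it with "selu safe" initializers and dropout_selu
# rather than plain dropout. Constants follow Klambauer et al. (2017).
import numpy as np

def selu_moments(n=1000000, seed=0):
    scale, alpha = 1.0507009873554805, 1.6732632423543772
    z = np.random.RandomState(seed).randn(n)
    a = scale * np.where(z > 0.0, z, alpha * (np.exp(z) - 1.0))
    return a.mean(), a.std()  # both stay close to 0 and 1
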
Example #10
def cifar10_sequential_clrn5d3_wd(x,
                                  drop_rate=0.5,
                                  weight_decay=0.001,
                                  seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    # conv5x5 relu + lrn + pool
    conv1 = conv2d_relu(
        x,
        size=5,
        n_filters=64,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 1),
        name="conv_1")
    layers.append(("conv_1", conv1))
    norm1 = tf.nn.lrn(conv1,
                      depth_radius=4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name="norm_1")
    layers.append(("norm_1", norm1))
    pool1 = max_pool2d(norm1, size=3, stride=2, name="pool_1")
    layers.append(("pool_1", pool1))

    # conv5x5 relu + lrn + pool
    conv2 = conv2d_relu(
        pool1,
        size=5,
        n_filters=64,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 2),
        name="conv_2")
    layers.append(("conv_2", conv2))
    norm2 = tf.nn.lrn(conv2,
                      depth_radius=4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name="norm_2")
    layers.append(("norm_2", norm2))
    pool2 = max_pool2d(norm2, size=3, stride=2, name="pool_2")
    layers.append(("pool_2", pool2))

    # 3x (conv3x3 relu + lrn) + pool
    conv3 = conv2d_relu(
        pool2,
        size=3,
        n_filters=128,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 3),
        name="conv_3")
    layers.append(("conv_3", conv3))
    norm3 = tf.nn.lrn(conv3,
                      depth_radius=4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name="norm_3")
    layers.append(("norm_3", norm3))
    conv4 = conv2d_relu(
        norm3,
        size=3,
        n_filters=128,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 4),
        name="conv_4")
    layers.append(("conv_4", conv4))
    norm4 = tf.nn.lrn(conv4,
                      depth_radius=4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name="norm_1")
    layers.append(("norm_4", norm4))
    conv5 = conv2d_relu(
        norm4,
        size=3,
        n_filters=128,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 5),
        name="conv_5")
    layers.append(("conv_5", conv5))
    norm5 = tf.nn.lrn(conv5,
                      depth_radius=4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name="norm_5")
    layers.append(("norm_5", norm5))

    pool3 = max_pool2d(norm5, size=2, stride=2, name="pool_3")
    layers.append(("pool_3", pool3))

    flat = flatten(pool3, name="flatten")
    layers.append(("flatten", flat))

    dense1 = dense_relu(
        flat,
        n_units=384,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 6),
        name="dense_1")
    layers.append(("dense_1", dense1))

    if drop_rate > 0.0:
        dense1 = tf.layers.dropout(dense1,
                                   rate=drop_rate,
                                   training=training,
                                   seed=seed + 6,
                                   name="dropout_1")

    dense2 = dense_relu(
        dense1,
        n_units=192,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 7),
        name="dense_2")
    layers.append(("dense_2", dense2))

    if drop_rate > 0.0:
        dense2 = tf.layers.dropout(dense2,
                                   rate=drop_rate,
                                   training=training,
                                   seed=seed + 7,
                                   name="dropout_2")

    # dense softmax
    dense3 = dense(dense2,
                   n_units=10,
                   regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                   kernel_init=Kumar_normal(activation=None,
                                            mode="FAN_IN",
                                            seed=seed + 8),
                   name="dense_3")
    layers.append(("logit", dense3))
    prob = tf.nn.softmax(dense3, name="prob")
    layers.append(("prob", prob))

    return layers, variables
Example #11
def cifar10_sequential_cbn6d_wd(x,
                                drop_rate_1=0.2,
                                drop_rate_2=0.3,
                                drop_rate_3=0.4,
                                weight_decay=0.0001,
                                seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    # 2x conv3x3 relu batch-norm + pool
    conv1 = conv2d_relu_bn(
        x,
        size=3,
        n_filters=32,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 1),
        name="conv_1")
    layers.append(("conv_1", conv1))
    conv2 = conv2d_relu_bn(
        conv1,
        size=3,
        n_filters=32,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 2),
        name="conv_2")
    layers.append(("conv_2", conv2))
    pool1 = max_pool2d(conv2, size=2, stride=2, name="pool_1")
    layers.append(("pool_1", pool1))
    dropout1 = tf.layers.dropout(pool1,
                                 rate=drop_rate_1,
                                 training=training,
                                 seed=seed + 2,
                                 name="dropout_1")
    layers.append(("dropout_1", dropout1))

    # 2x conv3x3 relu batch-norm + pool
    conv3 = conv2d_relu_bn(
        dropout1,
        size=3,
        n_filters=64,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 3),
        name="conv_3")
    layers.append(("conv_3", conv3))
    conv4 = conv2d_relu_bn(
        conv3,
        size=3,
        n_filters=64,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 4),
        name="conv_4")
    layers.append(("conv_4", conv4))
    pool2 = max_pool2d(conv4, size=2, stride=2, name="pool_2")
    layers.append(("pool_2", pool2))
    dropout2 = tf.layers.dropout(pool2,
                                 rate=drop_rate_2,
                                 training=training,
                                 seed=seed + 4,
                                 name="dropout_2")
    layers.append(("dropout_2", dropout2))

    # 2x conv3x3 relu batch-norm + pool
    conv5 = conv2d_relu_bn(
        dropout2,
        size=3,
        n_filters=128,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 5),
        name="conv_5")
    layers.append(("conv_5", conv5))
    conv6 = conv2d_relu_bn(
        conv5,
        size=3,
        n_filters=128,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 6),
        name="conv_6")
    layers.append(("conv_6", conv6))
    pool3 = max_pool2d(conv6, size=2, stride=2, name="pool_3")
    layers.append(("pool_3", pool3))
    dropout3 = tf.layers.dropout(pool3,
                                 rate=drop_rate_3,
                                 training=training,
                                 seed=seed + 6,
                                 name="dropout_3")
    layers.append(("dropout_3", dropout3))

    flat = flatten(dropout3, name="flatten")
    layers.append(("flatten", flat))

    # dense softmax
    dense1 = dense(flat,
                   n_units=10,
                   regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                   kernel_init=Kumar_normal(activation=None,
                                            mode="FAN_IN",
                                            seed=seed + 7),
                   name="dense_1")
    layers.append(("logit", dense1))
    prob = tf.nn.softmax(dense1, name="prob")
    layers.append(("prob", prob))

    return layers, variables
Example #12
def cifar10_mobilenet_v2(x, expand_ratio=6, seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    conv1 = conv2d_bn_relu(x,
                           size=3,
                           n_filters=32,
                           kernel_init=He_normal(seed=seed + 1),
                           is_training=training,
                           name="initial_conv")
    layers.append(("initial_conv", conv1))

    # 32x32x32 -> 32x32x16
    invres1 = mobilenet_v2.inverted_residual_block(
        conv1,
        n_repeat=1,
        n_filters=16,
        stride=1,
        expand_ratio=1,
        kernel_init=He_normal(seed=seed + 2),
        is_training=training,
        name="inverted_residual_block_1")
    layers.append(("inverted_residual_block_1", invres1))

    # 32x32x16 -> 32x32x24
    invres2 = mobilenet_v2.inverted_residual_block(
        invres1,
        n_repeat=2,
        n_filters=24,
        stride=1,
        expand_ratio=expand_ratio,
        kernel_init=He_normal(seed=seed + 3),
        is_training=training,
        name="inverted_residual_block_2")
    layers.append(("inverted_residual_block_2", invres2))

    #32x32x24 -> 16x16x32
    invres3 = mobilenet_v2.inverted_residual_block(
        invres2,
        n_repeat=3,
        n_filters=32,
        stride=2,
        expand_ratio=expand_ratio,
        kernel_init=He_normal(seed=seed + 4),
        is_training=training,
        name="inverted_residual_block_3")
    layers.append(("inverted_residual_block_3", invres3))

    #16x16x32 -> 8x8x64
    invres4 = mobilenet_v2.inverted_residual_block(
        invres3,
        n_repeat=4,
        n_filters=64,
        stride=2,
        expand_ratio=expand_ratio,
        kernel_init=He_normal(seed=seed + 5),
        is_training=training,
        name="inverted_residual_block_4")
    layers.append(("inverted_residual_block_4", invres4))

    #8x8x64 -> 8x8x96
    invres5 = mobilenet_v2.inverted_residual_block(
        invres4,
        n_repeat=3,
        n_filters=96,
        stride=1,
        expand_ratio=expand_ratio,
        kernel_init=He_normal(seed=seed + 6),
        is_training=training,
        name="inverted_residual_block_5")
    layers.append(("inverted_residual_block_5", invres5))

    #8x8x96 -> 4x4x160
    invres6 = mobilenet_v2.inverted_residual_block(
        invres5,
        n_repeat=3,
        n_filters=160,
        stride=2,
        expand_ratio=expand_ratio,
        kernel_init=He_normal(seed=seed + 7),
        is_training=training,
        name="inverted_residual_block_6")
    layers.append(("inverted_residual_block_6", invres6))

    #4x4x160 -> 4x4x320
    invres7 = mobilenet_v2.inverted_residual_block(
        invres6,
        n_repeat=1,
        n_filters=320,
        stride=1,
        expand_ratio=expand_ratio,
        kernel_init=He_normal(seed=seed + 8),
        is_training=training,
        name="inverted_residual_block_7")
    layers.append(("inverted_residual_block_7", invres7))

    conv2 = conv2d_bn_relu(invres7,
                           size=1,
                           n_filters=1280,
                           kernel_init=He_normal(seed=seed + 9),
                           is_training=training,
                           name="final_conv")
    layers.append(("final_conv", conv2))

    pool = global_avg_pool2d(conv2)
    layers.append(("pool", pool))

    dense1 = dense(pool,
                   n_units=10,
                   kernel_init=Kumar_normal(activation=None,
                                            mode="FAN_IN",
                                            seed=seed + 10),
                   name="dense_1")
    layers.append(("logit", dense1))

    prob = tf.nn.softmax(dense1, name="prob")
    layers.append(("prob", prob))

    return layers, variables
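
# Hypothetical sketch of what an inverted residual block computes; the
# mobilenet_v2 helper's internals are not shown in these examples, so the
# details below follow the MobileNetV2 paper and are assumptions (batch norm
# omitted for brevity): a 1x1 expansion by expand_ratio, a 3x3 depthwise
# conv, a linear 1x1 projection, and a skip connection when shapes match.
def inverted_residual_sketch(x, n_filters, stride, expand_ratio):
    in_ch = x.get_shape()[-1].value
    h = tf.layers.conv2d(x, in_ch * expand_ratio, 1, activation=tf.nn.relu6)
    w = tf.get_variable("depthwise_kernel",
                        shape=[3, 3, in_ch * expand_ratio, 1])
    h = tf.nn.relu6(tf.nn.depthwise_conv2d(h, w, [1, stride, stride, 1],
                                           padding="SAME"))
    h = tf.layers.conv2d(h, n_filters, 1, activation=None)  # linear bottleneck
    if stride == 1 and in_ch == n_filters:
        h = x + h  # residual connection on matching shapes
    return h
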
Example #13
def cifar10_se_resnet(x, n_blocks, ratio = 8, seed = 42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    conv = conv2d_bn_act(
            x, size = 3, n_filters = 16,
            activation = tf.nn.relu,
            is_training = training,
            kernel_init = He_normal(seed = seed+1),
            name = "initial_conv")
    layers.append(("initial_conv", conv))
            
    res1 = resnet.residual_layer(
            conv, n_filters = 16, n_blocks = n_blocks, stride = 1,
            block_function = partial(
                    senet.se_resnet_residual_block,
                    ratio = ratio,
                    se_kernel_init_1 = He_normal(seed = seed+2),
                    se_kernel_init_2 = Kumar_normal(activation = "sigmoid", mode = "FAN_AVG", seed = seed+2),
                    ), 
            is_training = training,
            kernel_init = He_normal(seed = seed+2),
            name = "se_residual_1")
    layers.append(("se_residual_1", res1))

    res2 = resnet.residual_layer(
            res1, n_filters = 32, n_blocks = n_blocks, stride = 2,
            block_function = partial(
                    senet.se_resnet_residual_block,
                    ratio = ratio,
                    se_kernel_init_1 = He_normal(seed = seed+3),
                    se_kernel_init_2 = Kumar_normal(activation = "sigmoid", mode = "FAN_AVG", seed = seed+3),
                    ), 
            is_training = training,
            kernel_init = He_normal(seed = seed+3),
            name = "se_residual_2")
    layers.append(("se_residual_2", res2))

    res3 = resnet.residual_layer(
            res2, n_filters = 64, n_blocks = n_blocks, stride = 2,
            block_function = partial(
                    senet.se_resnet_residual_block,
                    ratio = ratio,
                    se_kernel_init_1 = He_normal(seed = seed+4),
                    se_kernel_init_2 = Kumar_normal(activation = "sigmoid", mode = "FAN_AVG", seed = seed+4),
                    ),
            is_training = training,
            kernel_init = He_normal(seed = seed+4),
            name = "se_residual_3")
    layers.append(("se_residual_3", res3))

    pool = global_avg_pool2d(res3)
    layers.append(("pool", pool))
    
    dense1 = dense(
            pool, n_units = 10,
            kernel_init = Kumar_normal(activation = None, mode = "FAN_IN", seed = seed+5),
            name = "dense_1")
    layers.append(("logit", dense1))
    
    prob = tf.nn.softmax(dense1, name = "prob")
    layers.append(("prob", prob))
    
    return layers, variables
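
# Hypothetical sketch of the squeeze-and-excitation gate inside
# senet.se_resnet_residual_block; the helper's internals are not shown in
# these examples, so this follows Hu et al.'s SENet paper and the details
# are assumptions: global-pool the channels, squeeze by `ratio`, and rescale.
def se_gate_sketch(x, ratio):
    n_ch = x.get_shape()[-1].value
    s = tf.reduce_mean(x, [1, 2])                   # squeeze: global pooling
    s = tf.layers.dense(s, n_ch // ratio, activation=tf.nn.relu)
    s = tf.layers.dense(s, n_ch, activation=tf.nn.sigmoid)  # channel weights
    return x * tf.reshape(s, [-1, 1, 1, n_ch])      # excite: rescale channels
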
Example #14
def mnist_resnet_cbn1r3d1(x):
    """Creates residual neural network for MNIST. The network
        uses 1 batch normalized convolutional and 3 residual layers to create 
        the representation part of the network. The output probabilities are 
        generated by one dense layer followed by a softmax function.
    Args:
        x: A tensor representing the input.
    Returns:
        A tuple containing the layers of the network graph and additional
        placeholders if any. Layers are represented as list of named tuples.
    """

    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    conv1 = conv2d_bn(x,
                      size=3,
                      n_filters=16,
                      kernel_init=Kumar_initializer(mode="FAN_AVG"),
                      name="initial_conv")
    layers.append(("initial_conv", conv1))

    res1 = resnet.residual_layer(conv1,
                                 n_filters=16,
                                 n_blocks=2,
                                 stride=1,
                                 is_training=training,
                                 name="residual1")
    layers.append(("residual1", res1))

    res2 = resnet.residual_layer(res1,
                                 n_filters=32,
                                 n_blocks=2,
                                 stride=2,
                                 is_training=training,
                                 name="residual2")
    layers.append(("residual2", res2))

    res3 = resnet.residual_layer(res2,
                                 n_filters=64,
                                 n_blocks=2,
                                 stride=2,
                                 is_training=training,
                                 name="residual3")
    layers.append(("residual3", res3))

    pool1 = tf.reduce_mean(res3, [1, 2])  # global average pooling
    layers.append(("pool", pool1))

    fc2 = dense(pool1,
                n_units=10,
                activation=None,
                kernel_init=Kumar_initializer(mode="FAN_IN"),
                name="fc2")
    layers.append(("fc2", fc2))

    prob = tf.nn.softmax(fc2, name="prob")
    layers.append(("prob", prob))

    return layers, variables
Example #15
def cifar10_xception_wd(x, drop_rate=0.5, weight_decay=0.0001, seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    # 32x32
    conv = conv2d_bn_relu(
        x,
        size=3,
        n_filters=64,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 1),
        name="initial_conv")
    layers.append(("initial_conv", conv))

    entry = xception.entry_module(
        conv,
        n_filters=[128, 256, 728],
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 2),
        name="entry")
    layers.append(("entry", entry))

    mid = xception.middle_module(
        entry,
        n_filters=728,
        n_repeat=3,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 3),
        name="middle")
    layers.append(("middle", mid))

    exits = xception.exit_module(
        mid,
        n_filters_1=[728, 1024],
        n_filters_2=[1536, 2048],
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 4),
        name="exit")
    layers.append(("exit", exits))

    pool1 = global_avg_pool2d(exits, name="pool1")
    layers.append(("pool1", pool1))

    dropout1 = tf.layers.dropout(pool1,
                                 rate=drop_rate,
                                 training=training,
                                 seed=seed + 5,
                                 name="dropout")
    layers.append(("dropout1", dropout1))

    dense1 = dense(dropout1,
                   n_units=10,
                   regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                   kernel_init=Kumar_normal(activation=None,
                                            mode="FAN_IN",
                                            seed=seed + 6),
                   name="dense1")
    layers.append(("logit", dense1))

    prob = tf.nn.softmax(dense1, name="prob")
    layers.append(("prob", prob))

    return layers, variables
Example #16
def cifar10_resnet_identity(x, n_blocks=3, seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    conv = conv2d_bn_act(x,
                         size=3,
                         n_filters=16,
                         activation=tf.nn.relu,
                         is_training=training,
                         kernel_init=He_normal(seed=seed + 1),
                         name="initial_conv")
    layers.append(("initial_conv", conv))

    res1 = resnet.residual_layer(conv,
                                 n_filters=16,
                                 n_blocks=n_blocks,
                                 stride=1,
                                 block_function=partial(
                                     resnet_identity.identity_mapping_block,
                                     skip_first_bn_act=True),
                                 is_training=training,
                                 kernel_init=He_normal(seed=seed + 2),
                                 name="residual_1")
    layers.append(("residual_1", res1))

    res2 = resnet.residual_layer(
        res1,
        n_filters=32,
        n_blocks=n_blocks,
        stride=2,
        block_function=resnet_identity.identity_mapping_block,
        is_training=training,
        kernel_init=He_normal(seed=seed + 3),
        name="residual_2")
    layers.append(("residual_2", res2))

    res3 = resnet.residual_layer(
        res2,
        n_filters=64,
        n_blocks=n_blocks,
        stride=2,
        block_function=resnet_identity.identity_mapping_block,
        is_training=training,
        kernel_init=He_normal(seed=seed + 4),
        name="residual_3")
    layers.append(("residual_3", res3))

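    # Identity-mapping (pre-activation) blocks end without a BN+ReLU pair, so
    # a final normalization and activation is applied before pooling.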
    bn = tf.layers.batch_normalization(res3,
                                       training=training,
                                       name="batch_norm")
    layers.append(("batch_norm", bn))
    bn_relu = tf.nn.relu(bn, name="relu")
    layers.append(("batch_norm_relu", bn_relu))

    pool = global_avg_pool2d(bn_relu)
    layers.append(("pool", pool))

    dense1 = dense(pool,
                   n_units=10,
                   kernel_init=Kumar_normal(activation=None,
                                            mode="FAN_IN",
                                            seed=seed + 5),
                   name="dense_1")
    layers.append(("logit", dense1))

    prob = tf.nn.softmax(dense1, name="prob")
    layers.append(("prob", prob))

    return layers, variables
Example #17
def mnist_sequential_c2d2(x, drop_rate=0.5):
    """Creates sequential convolutional neural network for MNIST. The network
        uses 2 convolutional+pooling layers to create the representation part
        of the network, and 1 fully-connected dense layer to create the
        classifier part. Dropout layer is used for regularization. The output 
        probabilities are generated by one dense layer followed by a softmax 
        function.
    Args:
        x: A tensor representing the input.
    Returns:
        A tuple containing the layers of the network graph and additional
        placeholders if any. Layers are represented as list of named tuples.
    """

    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    conv1 = conv2d(x,
                   size=5,
                   n_filters=32,
                   kernel_init=Kumar_initializer(mode="FAN_AVG"),
                   name="conv1")
    layers.append(("conv1", conv1))

    pool1 = max_pool(conv1, name="pool1")
    layers.append(("pool1", pool1))

    conv2 = conv2d(pool1,
                   size=5,
                   n_filters=64,
                   kernel_init=Kumar_initializer(mode="FAN_IN"),
                   name="conv2")
    layers.append(("conv2", conv2))

    pool2 = max_pool(conv2, name="pool2")
    layers.append(("pool2", pool2))

    flat = flatten(pool2, name="flatten")
    layers.append(("flatten", flat))

    fc1 = dense(flat,
                n_units=1024,
                kernel_init=Kumar_initializer(mode="FAN_IN"),
                name="fc1")
    layers.append(("fc1", fc1))

    dropout1 = tf.layers.dropout(fc1,
                                 rate=drop_rate,
                                 training=training,
                                 seed=42,
                                 name="dropout")
    layers.append(("dropout1", dropout1))

    fc2 = dense(dropout1,
                n_units=10,
                activation=None,
                kernel_init=Kumar_initializer(activation=None, mode="FAN_IN"),
                name="fc2")
    layers.append(("fc2", fc2))

    prob = tf.nn.softmax(fc2, name="prob")
    layers.append(("prob", prob))

    return layers, variables
Example #18
def cifar10_nasnet(x, drop_rate = 0.0, seed = 42):
    penultimate_filters = 768
    nb_blocks = 6
    stem_filters = 32
    filters_multiplier = 2

    filters = penultimate_filters // 24 # 24 = 2*2*6: filters double at each of the two reductions, and each cell concatenates 6 branches
    
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))
    x = conv2d_bn(
        x, n_filters = stem_filters, size = 3, stride = 1,
        is_training = training,
        kernel_init = He_normal(seed = seed+1),
        name = "initial_conv")
    layers.append(("initial_conv", x))
    
    p = None
        
    for i in range(nb_blocks):
        x, p = nasnet.Normal_A(
            x, p,
            n_filters = filters,
            is_training = training,
            kernel_init = He_normal(seed = seed+2+i),
            name = "nasnet_normal_"+str(i)
            )
        layers.append(("nasnet_normal_"+str(i), x))

    x, _ = nasnet.Reduction_A(
        x, p,
        n_filters = filters * filters_multiplier,
        is_training = training,
        kernel_init = He_normal(seed = seed+3+nb_blocks),
        name = "nasnet_reduction_0"
        )
    layers.append(("nasnet_reduction_0", x))

    for i in range(nb_blocks):
        x, p = nasnet.Normal_A(
            x, p,
            n_filters = filters * filters_multiplier,
            is_training = training,
            kernel_init = He_normal(seed = seed+4+nb_blocks+i),
            name = "nasnet_normal_"+str(nb_blocks+i)
            )
        layers.append(("nasnet_normal_"+str(nb_blocks+i), x))

    x, _ = nasnet.Reduction_A(
        x, p,
        n_filters = filters * filters_multiplier ** 2,
        is_training = training,
        kernel_init = He_normal(seed = seed+5+2*nb_blocks),
        name = "nasnet_reduction_1"
        )
    layers.append(("nasnet_reduction_1", x))

    aux = nasnet.auxiliary_classifier(
        x, classes = 10,
        is_training = training,
        conv_kernel_init = He_normal(seed = seed),
        dense_kernel_init = Kumar_normal(activation = None, mode = "FAN_IN", seed = seed),
        name = "nasnet_aux_classifier"
        )
    layers.append(("aux_logit", aux))
    aux_prob = tf.nn.softmax(aux, name = "aux_prob")
    layers.append(("aux_prob", aux_prob)) 

    for i in range(nb_blocks):
        x, p = nasnet.Normal_A(
            x, p,
            n_filters = filters * filters_multiplier ** 2,
            is_training = training,
            kernel_init = He_normal(seed = seed+6+2*nb_blocks+i),
            name = "nasnet_normal_"+str(2*nb_blocks+i)
            )
        layers.append(("nasnet_normal_"+str(2*nb_blocks+i), x))
        
    x = tf.nn.relu(x, name = "relu")    
    layers.append(("relu", x))
    
    x = global_avg_pool2d(x, name = "pool")
    layers.append(("pool", x))
    if drop_rate > 0.0:
        x = tf.layers.dropout(
                x, rate = drop_rate, training = training,
                seed = seed+7+3*nb_blocks, name = "dropout")
        layers.append(("dropout", x))
    x = dense(
            x, n_units = 10,
            kernel_init = Kumar_normal(activation = None, mode = "FAN_IN", seed = seed+8+3*nb_blocks),
            name = "dense")
    layers.append(("logit", x))
    
    prob = tf.nn.softmax(x, name = "prob")
    layers.append(("prob", prob))
    
    return layers, variables
Example #19
def cifar10_sequential_cbn3d(x,
                             drop_rate_1=0.2,
                             drop_rate_2=0.3,
                             drop_rate_3=0.4,
                             seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    # conv5x5 batch-norm relu + pool
    conv1 = conv2d_bn_relu(x,
                           size=5,
                           n_filters=32,
                           is_training=training,
                           kernel_init=He_normal(seed=seed + 1),
                           name="conv_1")
    layers.append(("conv_1", conv1))
    pool1 = max_pool2d(conv1, size=3, stride=2, name="pool_1")
    layers.append(("pool_1", pool1))
    dropout1 = tf.layers.dropout(pool1,
                                 rate=drop_rate_1,
                                 training=training,
                                 seed=seed + 1,
                                 name="dropout_1")
    layers.append(("dropout_1", dropout1))

    # conv5x5 batch-norm relu + pool
    conv2 = conv2d_bn_relu(dropout1,
                           size=5,
                           n_filters=64,
                           is_training=training,
                           kernel_init=He_normal(seed=seed + 2),
                           name="conv_2")
    layers.append(("conv_2", conv2))
    pool2 = max_pool2d(conv2, size=3, stride=2, name="pool_2")
    layers.append(("pool_2", pool2))
    dropout2 = tf.layers.dropout(pool2,
                                 rate=drop_rate_2,
                                 training=training,
                                 seed=seed + 2,
                                 name="dropout_2")
    layers.append(("dropout_2", dropout2))

    # conv3x3 batch-norm relu + pool
    conv3 = conv2d_bn_relu(dropout2,
                           size=3,
                           n_filters=128,
                           is_training=training,
                           kernel_init=He_normal(seed=seed + 3),
                           name="conv_3")
    layers.append(("conv_3", conv3))
    pool3 = max_pool2d(conv3, size=2, stride=2, name="pool_3")
    layers.append(("pool_3", pool3))
    dropout3 = tf.layers.dropout(pool3,
                                 rate=drop_rate_3,
                                 training=training,
                                 seed=seed + 3,
                                 name="dropout_3")
    layers.append(("dropout_3", dropout3))

    flat = flatten(dropout3, name="flatten")
    layers.append(("flatten", flat))

    # dense softmax
    dense1 = dense(flat,
                   n_units=10,
                   kernel_init=Kumar_normal(activation=None,
                                            mode="FAN_IN",
                                            seed=seed + 4),
                   name="dense_1")
    layers.append(("logit", dense1))
    prob = tf.nn.softmax(dense1, name="prob")
    layers.append(("prob", prob))

    return layers, variables
Example #20
def cifar10_resnet_bottleneck_wd(x, n_blocks=3, weight_decay=0.0001, seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    conv = conv2d_bn_act(
        x,
        size=3,
        n_filters=16,
        activation=tf.nn.relu,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 1),
        name="initial_conv")
    layers.append(("initial_conv", conv))

    res1 = resnet.residual_layer(
        conv,
        n_filters=16,
        n_blocks=n_blocks,
        stride=1,
        block_function=partial(
            resnet.residual_block,
            regularizer=tf.contrib.layers.l2_regularizer(weight_decay)),
        is_training=training,
        kernel_init=He_normal(seed=seed + 2),
        name="residual_1")
    layers.append(("residual_1", res1))

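    # 16x16 after the stride-2 downsampling below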
    res2 = resnet.residual_layer(
        res1,
        n_filters=32,
        n_blocks=n_blocks,
        stride=2,
        block_function=partial(
            resnet.residual_block,
            regularizer=tf.contrib.layers.l2_regularizer(weight_decay)),
        is_training=training,
        kernel_init=He_normal(seed=seed + 3),
        name="residual_2")
    layers.append(("residual_2", res2))

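    # 8x8; only this final stage uses the bottleneck block (n_filters_reduce=16)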
    res3 = resnet.residual_layer(
        res2,
        n_filters=64,
        n_blocks=n_blocks,
        stride=2,
        block_function=partial(
            resnet.bottleneck_block,
            n_filters_reduce=16,
            regularizer=tf.contrib.layers.l2_regularizer(weight_decay)),
        is_training=training,
        kernel_init=He_normal(seed=seed + 4),
        name="residual_3")
    layers.append(("residual_3", res3))

    pool = global_avg_pool2d(res3)
    layers.append(("pool", pool))

    dense1 = dense(pool,
                   n_units=10,
                   regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                   kernel_init=Kumar_normal(activation=None,
                                            mode="FAN_IN",
                                            seed=seed + 5),
                   name="dense_1")
    layers.append(("logit", dense1))

    prob = tf.nn.softmax(dense1, name="prob")
    layers.append(("prob", prob))

    return layers, variables
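The only difference from the unregularized variants is the regularizer argument threaded through each layer, and the decay only takes effect if the penalties are summed into the loss. Assuming the helpers route the regularizer into tf.GraphKeys.REGULARIZATION_LOSSES in the usual way (an assumption; the helpers are not shown), the total loss would be assembled like this, reusing the placeholders from the sketch above:

layers, variables = cifar10_resnet_bottleneck_wd(images, n_blocks=3)
logits = dict(layers)["logit"]

data_loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=logits))
reg_loss = tf.losses.get_regularization_loss()  # sums REGULARIZATION_LOSSES
total_loss = data_loss + reg_loss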
def cifar10_mobilenet(x, seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    conv = conv2d_bn_relu(x,
                          size=3,
                          n_filters=32,
                          kernel_init=He_normal(seed=seed + 1),
                          is_training=training,
                          name="initial_conv")
    layers.append(("initial_conv", conv))

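    # 32x32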
    mblock1 = mobilenet.mobilenet_block(conv,
                                        n_filters=64,
                                        stride=1,
                                        kernel_init=He_normal(seed=seed + 2),
                                        is_training=training,
                                        name="mobilenet_block_1")
    layers.append(("mobilenet_block_1", mblock1))

    # 16x16
    mblock2 = mobilenet.mobilenet_block(mblock1,
                                        n_filters=128,
                                        stride=2,
                                        kernel_init=He_normal(seed=seed + 3),
                                        is_training=training,
                                        name="mobilenet_block_2")
    layers.append(("mobilenet_block_2", mblock2))

    mblock3 = mobilenet.mobilenet_block(mblock2,
                                        n_filters=128,
                                        stride=1,
                                        kernel_init=He_normal(seed=seed + 4),
                                        is_training=training,
                                        name="mobilenet_block_3")
    layers.append(("mobilenet_block_3", mblock3))

    # 8x8
    mblock4 = mobilenet.mobilenet_block(mblock3,
                                        n_filters=256,
                                        stride=2,
                                        kernel_init=He_normal(seed=seed + 5),
                                        is_training=training,
                                        name="mobilenet_block_4")
    layers.append(("mobilenet_block_4", mblock4))

    mblock5 = mobilenet.mobilenet_block(mblock4,
                                        n_filters=256,
                                        stride=1,
                                        kernel_init=He_normal(seed=seed + 6),
                                        is_training=training,
                                        name="mobilenet_block_5")
    layers.append(("mobilenet_block_5", mblock5))

    # 4x4
    mblock6 = mobilenet.mobilenet_block(mblock5,
                                        n_filters=512,
                                        stride=2,
                                        kernel_init=He_normal(seed=seed + 7),
                                        is_training=training,
                                        name="mobilenet_block_6")
    layers.append(("mobilenet_block_6", mblock6))

    mblock7 = mobilenet.mobilenet_block(mblock6,
                                        n_filters=512,
                                        stride=1,
                                        kernel_init=He_normal(seed=seed + 8),
                                        is_training=training,
                                        name="mobilenet_block_7")
    layers.append(("mobilenet_block_7", mblock7))

    mblock8 = mobilenet.mobilenet_block(mblock7,
                                        n_filters=512,
                                        stride=1,
                                        kernel_init=He_normal(seed=seed + 9),
                                        is_training=training,
                                        name="mobilenet_block_8")
    layers.append(("mobilenet_block_8", mblock8))

    mblock9 = mobilenet.mobilenet_block(mblock8,
                                        n_filters=512,
                                        stride=1,
                                        kernel_init=He_normal(seed=seed + 10),
                                        is_training=training,
                                        name="mobilenet_block_9")
    layers.append(("mobilenet_block_9", mblock9))

    mblock10 = mobilenet.mobilenet_block(mblock9,
                                         n_filters=512,
                                         stride=1,
                                         kernel_init=He_normal(seed=seed + 11),
                                         is_training=training,
                                         name="mobilenet_block_10")
    layers.append(("mobilenet_block_10", mblock10))

    mblock11 = mobilenet.mobilenet_block(mblock10,
                                         n_filters=512,
                                         stride=1,
                                         kernel_init=He_normal(seed=seed + 12),
                                         is_training=training,
                                         name="mobilenet_block_11")
    layers.append(("mobilenet_block_11", mblock11))

    # 2x2
    mblock12 = mobilenet.mobilenet_block(mblock11,
                                         n_filters=1024,
                                         stride=2,
                                         kernel_init=He_normal(seed=seed + 13),
                                         is_training=training,
                                         name="mobilenet_block_12")
    layers.append(("mobilenet_block_12", mblock12))

    mblock13 = mobilenet.mobilenet_block(mblock12,
                                         n_filters=1024,
                                         stride=1,
                                         kernel_init=He_normal(seed=seed + 14),
                                         is_training=training,
                                         name="mobilenet_block_13")
    layers.append(("mobilenet_block_13", mblock13))

    pool = global_avg_pool2d(mblock13)
    layers.append(("pool", pool))

    dense1 = dense(pool,
                   n_units=10,
                   kernel_init=Kumar_normal(activation=None,
                                            mode="FAN_IN",
                                            seed=seed + 15),
                   name="dense1")
    layers.append(("logit", dense1))

    prob = tf.nn.softmax(dense1, name="prob")
    layers.append(("prob", prob))

    return layers, variables
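mobilenet.mobilenet_block is not shown on this page. In the MobileNet architecture such a block is a depthwise-separable unit: a 3x3 depthwise convolution followed by a 1x1 pointwise convolution, each with batch norm and ReLU. The sketch below is a plausible reconstruction under that assumption, mirroring the call signature used above; it is not the actual module:

# Assumed structure: depthwise 3x3 -> BN -> ReLU, pointwise 1x1 -> BN -> ReLU.
def mobilenet_block_sketch(x, n_filters, stride, kernel_init, is_training, name):
    with tf.variable_scope(name):
        in_channels = x.shape[-1].value
        # Depthwise 3x3: one filter per input channel; stride controls downsampling.
        dw_kernel = tf.get_variable(
            "depthwise_kernel", [3, 3, in_channels, 1], initializer=kernel_init)
        x = tf.nn.depthwise_conv2d(
            x, dw_kernel, strides=[1, stride, stride, 1], padding="SAME")
        x = tf.layers.batch_normalization(x, training=is_training)
        x = tf.nn.relu(x)
        # Pointwise 1x1 convolution mixes channels up to n_filters.
        x = tf.layers.conv2d(x, n_filters, 1, padding="same",
                             use_bias=False, kernel_initializer=kernel_init)
        x = tf.layers.batch_normalization(x, training=is_training)
        return tf.nn.relu(x)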