Example #1
def cifar10_sequential_c3d_selu(x, seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    # conv5x5 selu + pool
    conv1 = conv2d_selu(x,
                        size=5,
                        n_filters=32,
                        kernel_init=conv_selu_safe_initializer(x,
                                                               5,
                                                               seed=seed + 1),
                        name="conv_1")
    layers.append(("conv_1", conv1))
    pool1 = max_pool2d(conv1, size=3, stride=2, name="pool_1")
    layers.append(("pool_1", pool1))

    # conv5x5 selu + pool
    conv2 = conv2d_selu(pool1,
                        size=5,
                        n_filters=64,
                        kernel_init=conv_selu_safe_initializer(pool1,
                                                               5,
                                                               seed=seed + 2),
                        name="conv_2")
    layers.append(("conv_2", conv2))
    pool2 = max_pool2d(conv2, size=3, stride=2, name="pool_2")
    layers.append(("pool_2", pool2))

    # conv3x3 selu + pool
    conv3 = conv2d_selu(pool2,
                        size=3,
                        n_filters=128,
                        kernel_init=conv_selu_safe_initializer(pool2,
                                                               3,
                                                               seed=seed + 3),
                        name="conv_3")
    layers.append(("conv_3", conv3))
    pool3 = max_pool2d(conv3, size=2, stride=2, name="pool_3")
    layers.append(("pool_3", pool3))

    flat = flatten(pool3, name="flatten")
    layers.append(("flatten", flat))

    # dense softmax
    dense1 = dense(flat,
                   n_units=10,
                   kernel_init=dense_selu_safe_initializer(flat,
                                                           seed=seed + 4),
                   name="dense_1")
    layers.append(("logit", dense1))
    prob = tf.nn.softmax(dense1, name="prob")
    layers.append(("prob", prob))

    return layers, variables
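
A minimal usage sketch (an assumption, not part of the original: TF 1.x graph mode, with the helper layers such as conv2d_selu, max_pool2d, flatten and dense importable from this repo):

import tensorflow as tf

# Hypothetical CIFAR-10 input batch, NHWC layout.
x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name="x")
layers, variables = cifar10_sequential_c3d_selu(x, seed=42)
logits = dict(layers)["logit"]            # pre-softmax scores
prob = dict(layers)["prob"]               # class probabilities
training = dict(variables)["training"]    # bool placeholder created by the builder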
Example #2
def exit_module(inputs,
                size=3,
                n_filters_1=[728, 1024],
                n_filters_2=[1536, 2048],
                pool_size=3,
                is_training=False,
                regularizer=None,
                kernel_init=He_normal(seed=42),
                bias_init=tf.zeros_initializer(),
                name="xception_exit_module"):
    with tf.variable_scope(name):
        shortcut = conv2d_bn(inputs,
                             size=1,
                             n_filters=n_filters_1[-1],
                             stride=2,
                             is_training=is_training,
                             regularizer=regularizer,
                             kernel_init=kernel_init,
                             bias_init=bias_init,
                             name="shortcut")

        x = inputs
        for r in range(len(n_filters_1)):
            x = tf.nn.relu(x, name="relu_1_" + str(r))
            x = separable_conv2d(x,
                                 size=size,
                                 n_filters=n_filters_1[r],
                                 stride=1,
                                 depth_multiplier=1,
                                 regularizer=regularizer,
                                 depth_init=kernel_init,
                                 pointwise_init=kernel_init,
                                 bias_init=bias_init,
                                 name="separable_conv_1_" + str(r))
            x = tf.layers.batch_normalization(x,
                                              training=is_training,
                                              name="bn_1_" + str(r))
        x = max_pool2d(x, size=pool_size, stride=2, name="max_pool")
        x = tf.add(x, shortcut, name="add_1")

        for r in range(len(n_filters_2)):
            x = separable_conv2d(x,
                                 size=size,
                                 n_filters=n_filters_2[r],
                                 stride=1,
                                 depth_multiplier=1,
                                 regularizer=regularizer,
                                 depth_init=kernel_init,
                                 pointwise_init=kernel_init,
                                 bias_init=bias_init,
                                 name="separable_conv_2_" + str(r))
            x = tf.layers.batch_normalization(x,
                                              training=is_training,
                                              name="bn_2_" + str(r))
            x = tf.nn.relu(x, name="relu_2_" + str(r))
    return x
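
A hedged call sketch; the 10x10x728 feature-map shape is an assumption (the Xception middle-flow output for 299x299 inputs), as is SAME padding in max_pool2d:

# Hypothetical feature map from the middle flow.
feats = tf.placeholder(tf.float32, shape=[None, 10, 10, 728])
out = exit_module(feats, is_training=True)
# The stride-2 shortcut and stride-2 max pool halve the spatial dims; the second
# separable stack keeps them, ending at n_filters_2[-1] = 2048 channels.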
Example #3
def entry_block(inputs,
                n_filters,
                n_repeat=2,
                conv_size=3,
                pool_size=3,
                init_activation=tf.nn.relu,
                regularizer=None,
                kernel_init=He_normal(seed=42),
                bias_init=tf.zeros_initializer(),
                is_training=False,
                name="xception_entry_block"):

    with tf.variable_scope(name):
        shortcut = conv2d_bn(inputs,
                             size=1,
                             n_filters=n_filters,
                             stride=2,
                             is_training=is_training,
                             regularizer=regularizer,
                             kernel_init=kernel_init,
                             bias_init=bias_init,
                             name="shortcut")
        x = inputs
        for r in range(n_repeat):
            if r == 0:
                activation = init_activation
            else:
                activation = tf.nn.relu
            if activation is not None:
                x = activation(x)
            x = separable_conv2d(x,
                                 size=conv_size,
                                 n_filters=n_filters,
                                 stride=1,
                                 depth_multiplier=1,
                                 regularizer=regularizer,
                                 depth_init=kernel_init,
                                 pointwise_init=kernel_init,
                                 bias_init=bias_init,
                                 name="separable_conv_" + str(r))
            x = tf.layers.batch_normalization(x,
                                              training=is_training,
                                              name="bn_" + str(r))
        x = max_pool2d(x, size=pool_size, stride=2, name="max_pool")
        outputs = tf.add(x, shortcut, name="add")
    return outputs
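
A sketch of how this block is typically chained in Xception's entry flow (the `stem` tensor and the filter counts are assumptions, not from the original). The first block passes init_activation=None because the stem already ends in an activation:

x = entry_block(stem, n_filters=128, init_activation=None, name="entry_1")
x = entry_block(x, n_filters=256, name="entry_2")
x = entry_block(x, n_filters=728, name="entry_3")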
Example #4
def mnist_siamese_base(x, training=False, weight_decay=0.0001, seed=42):
    layers = []
    variables = []

    conv1 = conv2d_relu(
        x,
        size=7,
        n_filters=32,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 1),
        name="conv1")
    layers.append(("conv1", conv1))

    pool1 = max_pool2d(conv1, size=2, stride=2, name="pool1")
    layers.append(("pool1", pool1))  # 14x14

    conv2 = conv2d_relu(
        pool1,
        size=5,
        n_filters=64,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 2),
        name="conv2")
    layers.append(("conv2", conv2))

    pool2 = max_pool2d(conv2, size=2, stride=2, name="pool2")
    layers.append(("pool2", pool2))  # 7x7

    conv3 = conv2d_relu(
        pool2,
        size=3,
        n_filters=128,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 3),
        name="conv3")
    layers.append(("conv3", conv3))

    pool3 = max_pool2d(conv3, size=2, stride=2, name="pool3")
    layers.append(("pool3", pool3))  # 4x4

    conv4 = conv2d_relu(
        pool3,
        size=1,
        n_filters=256,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 4),
        name="conv4")
    layers.append(("conv4", conv4))

    pool4 = max_pool2d(conv4, size=2, stride=2, name="pool4")
    layers.append(("pool4", pool4))  # 2x2

    conv5 = conv2d(pool4,
                   size=1,
                   n_filters=2,
                   kernel_init=He_normal(seed=seed + 5),
                   name="conv5")
    layers.append(("conv5", conv5))

    pool5 = max_pool2d(conv5, size=2, stride=2, name="pool5")
    layers.append(("pool5", pool5))  # 1x1

    flat1 = flatten(pool5, name="flatten1")
    layers.append(("output", flat1))

    return layers, variables
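
One way to share weights between the two siamese towers, assuming the helper layers create their variables via tf.get_variable (a sketch with hypothetical placeholders x_left and x_right, not the original training code):

with tf.variable_scope("siamese") as scope:
    left_layers, _ = mnist_siamese_base(x_left)
    scope.reuse_variables()                    # second tower reuses the same kernels
    right_layers, _ = mnist_siamese_base(x_right)
emb_left = dict(left_layers)["output"]         # 2-D embedding per image
emb_right = dict(right_layers)["output"]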
Example #5
def cifar10_sequential_c5d3_selu_drop_wd(x,
                                         drop_rate=0.05,
                                         weight_decay=0.001,
                                         seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    # conv5x5 selu + pool
    conv1 = conv2d_selu(
        x,
        size=5,
        n_filters=64,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=conv_selu_safe_initializer(x, 5, seed=seed + 1),
        name="conv_1")
    layers.append(("conv_1", conv1))
    pool1 = max_pool2d(conv1, size=3, stride=2, name="pool_1")
    layers.append(("pool_1", pool1))

    # conv5x5 selu + pool
    conv2 = conv2d_selu(
        pool1,
        size=5,
        n_filters=64,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=conv_selu_safe_initializer(pool1, 5, seed=seed + 2),
        name="conv_2")
    layers.append(("conv_2", conv2))
    pool2 = max_pool2d(conv2, size=3, stride=2, name="pool_2")
    layers.append(("pool_2", pool2))

    # 3x conv3x3 selu + pool
    conv3 = conv2d_selu(
        pool2,
        size=3,
        n_filters=128,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=conv_selu_safe_initializer(pool2, 3, seed=seed + 3),
        name="conv_3")
    layers.append(("conv_3", conv3))
    conv4 = conv2d_selu(
        conv3,
        size=3,
        n_filters=128,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=conv_selu_safe_initializer(conv3, 3, seed=seed + 4),
        name="conv_4")
    layers.append(("conv_4", conv4))
    conv5 = conv2d_selu(
        conv4,
        size=3,
        n_filters=128,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=conv_selu_safe_initializer(conv4, 3, seed=seed + 5),
        name="conv_5")
    layers.append(("conv_5", conv5))
    pool3 = max_pool2d(conv5, size=2, stride=2, name="pool_3")
    layers.append(("pool_3", pool3))

    flat = flatten(pool3, name="flatten")
    layers.append(("flatten", flat))

    dense1 = dense_selu(
        flat,
        n_units=384,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=dense_selu_safe_initializer(flat, seed=seed + 6),
        name="dense_1")
    layers.append(("dense_1", dense1))

    if drop_rate > 0.0:
        dense1 = dropout_selu(dense1,
                              rate=drop_rate,
                              training=training,
                              seed=seed + 7)

    dense2 = dense_selu(
        dense1,
        n_units=192,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=dense_selu_safe_initializer(dense1, seed=seed + 8),
        name="dense_2")
    layers.append(("dense_2", dense2))

    if drop_rate > 0.0:
        dense2 = dropout_selu(dense2,
                              rate=drop_rate,
                              training=training,
                              seed=seed + 9)

    # dense softmax
    dense3 = dense(dense2,
                   n_units=10,
                   regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                   kernel_init=dense_selu_safe_initializer(dense2,
                                                           seed=seed + 10),
                   name="dense_3")
    layers.append(("logit", dense3))
    prob = tf.nn.softmax(dense3, name="prob")
    layers.append(("prob", prob))

    return layers, variables
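
Since the builder creates its own `training` placeholder, inference has to feed it explicitly; a sketch (the `x` placeholder passed to the builder and the `batch` array are assumptions):

training = dict(variables)["training"]
prob_op = dict(layers)["prob"]
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    p = sess.run(prob_op, feed_dict={x: batch, training: False})  # dropout disabled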
Example #6
def cifar10_sequential_clrn5d3_wd(x,
                                  drop_rate=0.5,
                                  weight_decay=0.001,
                                  seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    # conv5x5 relu + lrn + pool
    conv1 = conv2d_relu(
        x,
        size=5,
        n_filters=64,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 1),
        name="conv_1")
    layers.append(("conv_1", conv1))
    norm1 = tf.nn.lrn(conv1,
                      depth_radius=4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name="norm_1")
    layers.append(("norm_1", norm1))
    pool1 = max_pool2d(norm1, size=3, stride=2, name="pool_1")
    layers.append(("pool_1", pool1))

    # conv5x5 relu + lrn + pool
    conv2 = conv2d_relu(
        pool1,
        size=5,
        n_filters=64,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 2),
        name="conv_2")
    layers.append(("conv_2", conv2))
    norm2 = tf.nn.lrn(conv2,
                      depth_radius=4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name="norm_2")
    layers.append(("norm_2", norm2))
    pool2 = max_pool2d(norm2, size=3, stride=2, name="pool_2")
    layers.append(("pool_2", pool2))

    # 3x conv3x3 relu + lrn + pool
    conv3 = conv2d_relu(
        pool2,
        size=3,
        n_filters=128,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 3),
        name="conv_3")
    layers.append(("conv_3", conv3))
    norm3 = tf.nn.lrn(conv3,
                      depth_radius=4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name="norm_3")
    layers.append(("norm_3", norm3))
    conv4 = conv2d_relu(
        norm3,
        size=3,
        n_filters=128,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 4),
        name="conv_4")
    layers.append(("conv_4", conv4))
    norm4 = tf.nn.lrn(conv4,
                      depth_radius=4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name="norm_1")
    layers.append(("norm_4", norm4))
    conv5 = conv2d_relu(
        norm4,
        size=3,
        n_filters=128,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 5),
        name="conv_5")
    layers.append(("conv_5", conv5))
    norm5 = tf.nn.lrn(conv5,
                      depth_radius=4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name="norm_5")
    layers.append(("norm_5", norm5))

    pool3 = max_pool2d(norm5, size=2, stride=2, name="pool_3")
    layers.append(("pool_3", pool3))

    flat = flatten(pool3, name="flatten")
    layers.append(("flatten", flat))

    dense1 = dense_relu(
        flat,
        n_units=384,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 6),
        name="dense_1")
    layers.append(("dense_1", dense1))

    if drop_rate > 0.0:
        dense1 = tf.layers.dropout(dense1,
                                   rate=drop_rate,
                                   training=training,
                                   seed=seed + 6,
                                   name="dropout_1")

    dense2 = dense_relu(
        dense1,
        n_units=192,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 7),
        name="dense_2")
    layers.append(("dense_2", dense2))

    if drop_rate > 0.0:
        dense2 = tf.layers.dropout(dense2,
                                   rate=drop_rate,
                                   training=training,
                                   seed=seed + 7,
                                   name="dropout_2")

    # dense softmax
    dense3 = dense(dense2,
                   n_units=10,
                   regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                   kernel_init=Kumar_normal(activation=None,
                                            mode="FAN_IN",
                                            seed=seed + 8),
                   name="dense_3")
    layers.append(("logit", dense3))
    prob = tf.nn.softmax(dense3, name="prob")
    layers.append(("prob", prob))

    return layers, variables
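
The l2_regularizer terms only take effect if they are added to the objective; in TF 1.x they accumulate in tf.GraphKeys.REGULARIZATION_LOSSES. A sketch, assuming an integer label placeholder `y`:

data_loss = tf.losses.sparse_softmax_cross_entropy(labels=y,
                                                   logits=dict(layers)["logit"])
reg_loss = tf.losses.get_regularization_loss()   # sum of the weight-decay terms
total_loss = data_loss + reg_loss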
Example #7
def cifar10_sequential_cbn6d_wd(x,
                                drop_rate_1=0.2,
                                drop_rate_2=0.3,
                                drop_rate_3=0.4,
                                weight_decay=0.0001,
                                seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    # 2x conv3x3 relu batch-norm + pool
    conv1 = conv2d_relu_bn(
        x,
        size=3,
        n_filters=32,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 1),
        name="conv_1")
    layers.append(("conv_1", conv1))
    conv2 = conv2d_relu_bn(
        conv1,
        size=3,
        n_filters=32,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 2),
        name="conv_2")
    layers.append(("conv_2", conv2))
    pool1 = max_pool2d(conv2, size=2, stride=2, name="pool_1")
    layers.append(("pool_1", pool1))
    dropout1 = tf.layers.dropout(pool1,
                                 rate=drop_rate_1,
                                 training=training,
                                 seed=seed + 2,
                                 name="dropout_1")
    layers.append(("dropout_1", dropout1))

    # 2x conv3x3 relu batch-norm + pool
    conv3 = conv2d_relu_bn(
        dropout1,
        size=3,
        n_filters=64,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 3),
        name="conv_3")
    layers.append(("conv_3", conv3))
    conv4 = conv2d_relu_bn(
        conv3,
        size=3,
        n_filters=64,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 4),
        name="conv_4")
    layers.append(("conv_4", conv4))
    pool2 = max_pool2d(conv4, size=2, stride=2, name="pool_2")
    layers.append(("pool_2", pool2))
    dropout2 = tf.layers.dropout(pool2,
                                 rate=drop_rate_2,
                                 training=training,
                                 seed=seed + 4,
                                 name="dropout_2")
    layers.append(("dropout_2", dropout2))

    # 2x conv3x3 relu batch-norm + pool
    conv5 = conv2d_relu_bn(
        dropout2,
        size=3,
        n_filters=128,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 5),
        name="conv_5")
    layers.append(("conv_5", conv5))
    conv6 = conv2d_relu_bn(
        conv5,
        size=3,
        n_filters=128,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 6),
        name="conv_6")
    layers.append(("conv_6", conv6))
    pool3 = max_pool2d(conv6, size=2, stride=2, name="pool_3")
    layers.append(("pool_3", pool3))
    dropout3 = tf.layers.dropout(pool3,
                                 rate=drop_rate_3,
                                 training=training,
                                 seed=seed + 6,
                                 name="dropout_3")
    layers.append(("dropout_3", dropout3))

    flat = flatten(dropout3, name="flatten")
    layers.append(("flatten", flat))

    # dense softmax
    dense1 = dense(flat,
                   n_units=10,
                   regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                   kernel_init=Kumar_normal(activation=None,
                                            mode="FAN_IN",
                                            seed=seed + 7),
                   name="dense_1")
    layers.append(("logit", dense1))
    prob = tf.nn.softmax(dense1, name="prob")
    layers.append(("prob", prob))

    return layers, variables
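
Because conv2d_relu_bn presumably wraps tf.layers.batch_normalization, the moving-average updates land in tf.GraphKeys.UPDATE_OPS and must run alongside the train step; a standard TF 1.x sketch (`total_loss` is assumed):

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(total_loss)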
Example #8
def cifar10_sequential_cbn3d(x,
                             drop_rate_1=0.2,
                             drop_rate_2=0.3,
                             drop_rate_3=0.4,
                             seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    # conv5x5 batch-norm relu + pool
    conv1 = conv2d_bn_relu(x,
                           size=5,
                           n_filters=32,
                           is_training=training,
                           kernel_init=He_normal(seed=seed + 1),
                           name="conv_1")
    layers.append(("conv_1", conv1))
    pool1 = max_pool2d(conv1, size=3, stride=2, name="pool_1")
    layers.append(("pool_1", pool1))
    dropout1 = tf.layers.dropout(pool1,
                                 rate=drop_rate_1,
                                 training=training,
                                 seed=seed + 1,
                                 name="dropout_1")
    layers.append(("dropout_1", dropout1))

    # conv5x5 batch-norm relu + pool
    conv2 = conv2d_bn_relu(dropout1,
                           size=5,
                           n_filters=64,
                           is_training=training,
                           kernel_init=He_normal(seed=seed + 2),
                           name="conv_2")
    layers.append(("conv_2", conv2))
    pool2 = max_pool2d(conv2, size=3, stride=2, name="pool_2")
    layers.append(("pool_2", pool2))
    dropout2 = tf.layers.dropout(pool2,
                                 rate=drop_rate_2,
                                 training=training,
                                 seed=seed + 2,
                                 name="dropout_2")
    layers.append(("dropout_2", dropout2))

    # conv3x3 batch-norm relu + pool
    conv3 = conv2d_bn_relu(dropout2,
                           size=3,
                           n_filters=128,
                           is_training=training,
                           kernel_init=He_normal(seed=seed + 3),
                           name="conv_3")
    layers.append(("conv_3", conv3))
    pool3 = max_pool2d(conv3, size=2, stride=2, name="pool_3")
    layers.append(("pool_3", pool3))
    dropout3 = tf.layers.dropout(pool3,
                                 rate=drop_rate_3,
                                 training=training,
                                 seed=seed + 3,
                                 name="dropout_3")
    layers.append(("dropout_3", dropout3))

    flat = flatten(dropout3, name="flatten")
    layers.append(("flatten", flat))

    # dense softmax
    dense1 = dense(flat,
                   n_units=10,
                   kernel_init=Kumar_normal(activation=None,
                                            mode="FAN_IN",
                                            seed=seed + 4),
                   name="dense_1")
    layers.append(("logit", dense1))
    prob = tf.nn.softmax(dense1, name="prob")
    layers.append(("prob", prob))

    return layers, variables
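
A rough shape trace for a 32x32x3 CIFAR-10 input, assuming SAME padding in the conv and pool helpers:

# conv_1 -> 32x32x32,  pool_1 (3, stride 2) -> 16x16x32
# conv_2 -> 16x16x64,  pool_2 (3, stride 2) -> 8x8x64
# conv_3 -> 8x8x128,   pool_3 (2, stride 2) -> 4x4x128
# flatten -> 2048, dense_1 -> 10 logits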
Example #9
def cifar10_sequential_clrn3d(x,
                              drop_rate_1=0.2,
                              drop_rate_2=0.3,
                              drop_rate_3=0.4,
                              seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    # conv5x5 relu + lrn + pool
    conv1 = conv2d_relu(x,
                        size=5,
                        n_filters=32,
                        kernel_init=tf.truncated_normal_initializer(
                            stddev=5e-2, seed=seed + 1),
                        name="conv_1")
    layers.append(("conv_1", conv1))
    norm1 = tf.nn.lrn(conv1,
                      depth_radius=4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name="norm_1")
    layers.append(("norm_1", norm1))
    pool1 = max_pool2d(norm1, size=3, stride=2, name="pool_1")
    layers.append(("pool_1", pool1))
    dropout1 = tf.layers.dropout(pool1,
                                 rate=drop_rate_1,
                                 training=training,
                                 seed=seed + 1,
                                 name="dropout_1")
    layers.append(("dropout_1", dropout1))

    # conv5x5 relu + lrn + pool
    conv2 = conv2d_relu(dropout1,
                        size=5,
                        n_filters=64,
                        kernel_init=tf.truncated_normal_initializer(
                            stddev=5e-2, seed=seed + 2),
                        name="conv_2")
    layers.append(("conv_2", conv2))
    norm2 = tf.nn.lrn(conv2,
                      depth_radius=4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name="norm_2")
    layers.append(("norm_2", norm2))
    pool2 = max_pool2d(norm2, size=3, stride=2, name="pool_2")
    layers.append(("pool_2", pool2))
    dropout2 = tf.layers.dropout(pool2,
                                 rate=drop_rate_2,
                                 training=training,
                                 seed=seed + 2,
                                 name="dropout_2")
    layers.append(("dropout_2", dropout2))

    # conv3x3 relu + lrn + pool
    conv3 = conv2d_relu(dropout2,
                        size=3,
                        n_filters=128,
                        kernel_init=tf.truncated_normal_initializer(
                            stddev=5e-2, seed=seed + 3),
                        name="conv_3")
    layers.append(("conv_3", conv3))
    norm3 = tf.nn.lrn(conv3,
                      depth_radius=4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name="norm_3")
    layers.append(("norm_3", norm3))
    pool3 = max_pool2d(norm3, size=2, stride=2, name="pool_3")
    layers.append(("pool_3", pool3))
    dropout3 = tf.layers.dropout(pool3,
                                 rate=drop_rate_3,
                                 training=training,
                                 seed=seed + 3,
                                 name="dropout_3")
    layers.append(("dropout_3", dropout3))

    flat = flatten(dropout3, name="flatten")
    layers.append(("flatten", flat))

    # dense softmax
    dense1 = dense(flat,
                   n_units=10,
                   kernel_init=tf.truncated_normal_initializer(
                       stddev=1 / 192.0, seed=seed + 4),
                   name="dense_1")
    layers.append(("logit", dense1))
    prob = tf.nn.softmax(dense1, name="prob")
    layers.append(("prob", prob))

    return layers, variables
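
For reference, tf.nn.lrn with these arguments applies the AlexNet-style normalization across a window of 2 * depth_radius + 1 channels:

# out[b,h,w,c] = in[b,h,w,c] /
#     (bias + alpha * sum(in[b,h,w,c-4:c+5] ** 2)) ** beta
# with bias=1.0, alpha=0.001/9.0, beta=0.75 as above.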
Example #10
def Reduction_A(inputs,
                p,
                n_filters,
                is_training=False,
                regularizer=None,
                activation=tf.nn.relu,
                kernel_init=He_normal(seed=42),
                bias_init=tf.zeros_initializer(),
                name="nasnet_normal_a"):

    with tf.variable_scope(name):
        # adjust the previous-cell tensor p to the reduced shape
        p = adjust(p, ref=inputs, n_filters=n_filters, name="adjust")

        # squeeze inputs to match dimensions
        h = squeeze(inputs,
                    n_filters=n_filters,
                    is_training=is_training,
                    regularizer=regularizer,
                    kernel_init=kernel_init,
                    bias_init=bias_init,
                    name="squeeze")

        with tf.variable_scope("block_1"):
            x1_1 = separable_conv2d(h,
                                    size=5,
                                    n_filters=n_filters,
                                    stride=2,
                                    regularizer=regularizer,
                                    depth_init=kernel_init,
                                    pointwise_init=kernel_init,
                                    bias_init=bias_init,
                                    name="separable_conv_1_5x5")
            x1_2 = separable_conv2d(p,
                                    size=7,
                                    n_filters=n_filters,
                                    stride=2,
                                    regularizer=regularizer,
                                    depth_init=kernel_init,
                                    pointwise_init=kernel_init,
                                    bias_init=bias_init,
                                    name="separable_conv_1_7x7")
            x1 = tf.add(x1_1, x1_2, name="add_1")

        with tf.variable_scope("block_2"):
            x2_1 = max_pool2d(h, size=3, stride=2, name="max_pool_2")
            x2_2 = separable_conv2d(p,
                                    size=7,
                                    n_filters=n_filters,
                                    stride=2,
                                    regularizer=regularizer,
                                    depth_init=kernel_init,
                                    pointwise_init=kernel_init,
                                    bias_init=bias_init,
                                    name="separable_conv_2")
            x2 = tf.add(x2_1, x2_2, name="add_2")

        with tf.variable_scope("block_3"):
            x3_1 = avg_pool2d(h, size=3, stride=2, name="avg_pool_3")
            x3_2 = separable_conv2d(p,
                                    size=5,
                                    n_filters=n_filters,
                                    stride=2,
                                    regularizer=regularizer,
                                    depth_init=kernel_init,
                                    pointwise_init=kernel_init,
                                    bias_init=bias_init,
                                    name="separable_conv_3")
            x3 = tf.add(x3_1, x3_2, name="add_3")

        with tf.variable_scope("block_4"):
            x4_1 = max_pool2d(h, size=3, stride=2, name="max_pool_4")
            x4_2 = separable_conv2d(x1,
                                    size=3,
                                    n_filters=n_filters,
                                    stride=1,
                                    regularizer=regularizer,
                                    depth_init=kernel_init,
                                    pointwise_init=kernel_init,
                                    bias_init=bias_init,
                                    name="separable_conv_4")
            x4 = tf.add(x4_1, x4_2, name="add_4")

        with tf.variable_scope("block_5"):
            x5 = avg_pool2d(x1, size=3, stride=1, name="avg_pool_5")
            x5 = tf.add(x2, x5, name="add_5")

        outputs = tf.concat([x2, x3, x4, x5], axis=3, name="concat")
    return outputs, inputs
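
The (outputs, inputs) return value matches the NASNet convention where each cell consumes the two previous cell outputs; a chaining sketch (`x` and `p` are assumed tensors from earlier cells):

x, p = Reduction_A(x, p, n_filters=128, is_training=True, name="reduction_1")
# x: the reduced feature map; p: this cell's (pre-reduction) input,
# passed along as the skip input of the next cell.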