Code example #1
def cifar10_sequential_c3d_selu(x, seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    # conv5x5 selu + pool
    conv1 = conv2d_selu(x,
                        size=5,
                        n_filters=32,
                        kernel_init=conv_selu_safe_initializer(x,
                                                               5,
                                                               seed=seed + 1),
                        name="conv_1")
    layers.append(("conv_1", conv1))
    pool1 = max_pool2d(conv1, size=3, stride=2, name="pool_1")
    layers.append(("pool_1", pool1))

    # conv5x5 selu + pool
    conv2 = conv2d_selu(pool1,
                        size=5,
                        n_filters=64,
                        kernel_init=conv_selu_safe_initializer(pool1,
                                                               5,
                                                               seed=seed + 2),
                        name="conv_2")
    layers.append(("conv_2", conv2))
    pool2 = max_pool2d(conv2, size=3, stride=2, name="pool_2")
    layers.append(("pool_2", pool2))

    # conv3x3 selu + pool
    conv3 = conv2d_selu(pool2,
                        size=3,
                        n_filters=128,
                        kernel_init=conv_selu_safe_initializer(pool2,
                                                               3,
                                                               seed=seed + 3),
                        name="conv_3")
    layers.append(("conv_3", conv3))
    pool3 = max_pool2d(conv3, size=2, stride=2, name="pool_3")
    layers.append(("pool_3", pool3))

    flat = flatten(pool3, name="flatten")
    layers.append(("flatten", flat))

    # dense softmax
    dense1 = dense(flat,
                   n_units=10,
                   kernel_init=dense_selu_safe_initializer(flat,
                                                           seed=seed + 4),
                   name="dense_1")
    layers.append(("logit", dense1))
    prob = tf.nn.softmax(dense1, name="prob")
    layers.append(("prob", prob))

    return layers, variables
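
These builders return the graph as a pair of lists of (name, tensor) tuples, so callers look tensors up by name. A minimal usage sketch, assuming TensorFlow 1.x and a CIFAR-10-shaped input; the images batch is a hypothetical NumPy array:

import tensorflow as tf  # TF 1.x

x = tf.placeholder(tf.float32, [None, 32, 32, 3], name="x")
layers, variables = cifar10_sequential_c3d_selu(x)
prob = dict(layers)["prob"]             # softmax output tensor
training = dict(variables)["training"]  # tf.bool placeholder created by the builder

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    preds = sess.run(prob, feed_dict={x: images, training: False})
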
Code example #2
File: graph.py Project: autasi/tensorflow_examples
def mnist_sequential_dbn2d1(x, drop_rate=0.5):
    """Creates sequential neural network for MNIST. The network contains 2 
        batch-normalized fully-connected dense layers. Dropout layers are used 
        for regularization. The output  probabilities are generated by one 
        dense layer followed by a softmax function.
    Args:
        x: A tensor representing the input.
    Returns:
        A tuple containing the layers of the network graph and additional
        placeholders if any. Layers are represented as list of named tuples.
    """
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    flat = flatten(x, name="flatten")
    layers.append(("flatten", flat))

    fc1 = dense_bn(flat, n_units=256, is_training=training, name="fc1")
    layers.append(("fc1", fc1))

    dropout1 = tf.layers.dropout(fc1,
                                 rate=drop_rate,
                                 training=training,
                                 seed=42,
                                 name="dropout1")
    layers.append(("dropout1", dropout1))

    fc2 = dense_bn(dropout1, n_units=64, is_training=training, name="fc2")
    layers.append(("fc2", fc2))

    dropout2 = tf.layers.dropout(fc2,
                                 rate=drop_rate,
                                 training=training,
                                 seed=42,
                                 name="dropout2")
    layers.append(("dropout2", dropout2))

    fc3 = dense(dropout2,
                n_units=10,
                activation=None,
                kernel_init=Kumar_initializer(activation=None),
                name="fc3")
    layers.append(("fc3", fc3))

    prob = tf.nn.softmax(fc3, name="prob")
    layers.append(("prob", prob))

    return layers, variables
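
dense_bn presumably wraps tf.layers.batch_normalization (an assumption; the helper is defined elsewhere in this project). If so, the moving-average updates of the batch statistics land in the UPDATE_OPS collection and must be attached to the train op, or inference will run with stale statistics:

# Hedged sketch: only needed if dense_bn uses tf.layers.batch_normalization.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)
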
Code example #3
def auxiliary_classifier(inputs,
                         pool=avg_pool2d,
                         pool_size=5,
                         pool_stride=3,
                         n_filters_1x1=128,
                         n_units=1024,
                         drop_rate=0.7,
                         seed=42,
                         is_training=False,
                         regularizer_conv=None,
                         regularizer_dense=None,
                         kernel_init_conv=He_normal(seed=42),
                         kernel_init_dense=He_normal(seed=42),
                         name="inception_auxiliary_classifier"):

    with tf.variable_scope(name):
        # pool
        pooled = pool(inputs,
                      size=pool_size,
                      stride=pool_stride,
                      padding="VALID",
                      name="pool")

        # 1x1
        x_1x1 = conv2d_relu(pooled,
                            size=1,
                            n_filters=n_filters_1x1,
                            stride=1,
                            regularizer=regularizer_conv,
                            kernel_init=kernel_init_conv,
                            name="conv_1x1")

        # dense
        flat = flatten(x_1x1, name="flatten")
        out = dense_relu(flat,
                         n_units=n_units,
                         regularizer=regularizer_dense,
                         kernel_init=kernel_init_dense,
                         name="dense")

        # dropout
        if drop_rate > 0.0:
            out = tf.layers.dropout(out,
                                    rate=drop_rate,
                                    training=is_training,
                                    seed=seed,
                                    name="dropout")

        return out
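
In the GoogLeNet/Inception setup, such an auxiliary head taps an intermediate feature map and its loss is added to the main objective with a small weight (0.3 in the paper). A hypothetical wiring; mid_features, y, n_classes, and main_loss are illustration-only names:

aux = auxiliary_classifier(mid_features, is_training=training)
aux_logits = dense(aux, n_units=n_classes, name="aux_logits")  # dense helper assumed from this project
aux_loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=aux_logits))
total_loss = main_loss + 0.3 * aux_loss  # 0.3 is the weight from the GoogLeNet paper
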
Code example #4
def auxiliary_classifier_bn(inputs,
                            pool=avg_pool2d,
                            pool_size=5,
                            pool_stride=3,
                            n_filters_1x1=128,
                            n_units=1024,
                            is_training=False,
                            regularizer_conv=None,
                            regularizer_dense=None,
                            kernel_init_conv=He_normal(seed=42),
                            kernel_init_dense=He_normal(seed=42),
                            name="inception_auxiliary_classifier_batchnorm"):

    with tf.variable_scope(name):
        # pool
        pooled = pool(inputs,
                      size=pool_size,
                      stride=pool_stride,
                      padding="VALID",
                      name="pool")

        # 1x1
        x_1x1 = conv2d_bn_relu(pooled,
                               size=1,
                               n_filters=n_filters_1x1,
                               stride=1,
                               is_training=is_training,
                               regularizer=regularizer_conv,
                               kernel_init=kernel_init_conv,
                               name="conv_1x1")

        # dense
        flat = flatten(x_1x1, name="flatten")
        out = dense_bn_relu(flat,
                            n_units=n_units,
                            is_training=is_training,
                            regularizer=regularizer_dense,
                            kernel_init=kernel_init_dense,
                            name="dense")

        return out
Code example #5
def mnist_siamese_base(x, training=False, weight_decay=0.0001, seed=42):
    layers = []
    variables = []

    conv1 = conv2d_relu(
        x,
        size=7,
        n_filters=32,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 1),
        name="conv1")
    layers.append(("conv1", conv1))

    pool1 = max_pool2d(conv1, size=2, stride=2, name="pool1")
    layers.append(("pool1", pool1))  # 14x14

    conv2 = conv2d_relu(
        pool1,
        size=5,
        n_filters=64,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 2),
        name="conv2")
    layers.append(("conv2", conv2))

    pool2 = max_pool2d(conv2, size=2, stride=2, name="pool2")
    layers.append(("pool2", pool2))  # 7x7

    conv3 = conv2d_relu(
        pool2,
        size=3,
        n_filters=128,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 3),
        name="conv3")
    layers.append(("conv3", conv3))

    pool3 = max_pool2d(conv3, size=2, stride=2, name="pool3")
    layers.append(("pool3", pool3))  # 4x4

    conv4 = conv2d_relu(
        pool3,
        size=1,
        n_filters=256,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 4),
        name="conv4")
    layers.append(("conv4", conv4))

    pool4 = max_pool2d(conv4, size=2, stride=2, name="pool4")
    layers.append(("pool4", pool4))  # 2x2

    conv5 = conv2d(pool4,
                   size=1,
                   n_filters=2,
                   kernel_init=He_normal(seed=seed + 5),
                   name="conv5")
    layers.append(("conv5", conv5))

    pool5 = max_pool2d(conv5, size=2, stride=2, name="pool5")
    layers.append(("pool5", pool5))  # 1x1

    flat1 = flatten(pool5, name="flatten1")
    layers.append(("output", flat1))

    return layers, variables
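
A siamese base like this is instantiated twice with shared weights. A minimal pairing sketch, assuming the helpers create their variables with tf.get_variable under the enclosing scope (so reuse works); x1, x2, y_pair, and margin are hypothetical, and the loss is the standard contrastive loss (Hadsell et al., 2006):

with tf.variable_scope("siamese") as scope:
    left_layers, _ = mnist_siamese_base(x1)
    scope.reuse_variables()                  # second tower shares all weights
    right_layers, _ = mnist_siamese_base(x2)

emb1 = dict(left_layers)["output"]
emb2 = dict(right_layers)["output"]
dist = tf.sqrt(tf.reduce_sum(tf.square(emb1 - emb2), axis=1) + 1e-12)
loss = tf.reduce_mean(y_pair * tf.square(dist) +
                      (1.0 - y_pair) * tf.square(tf.maximum(margin - dist, 0.0)))
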
Code example #6
def cifar10_sequential_c5d3_selu_drop_wd(x,
                                         drop_rate=0.05,
                                         weight_decay=0.001,
                                         seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    # conv5x5 selu + pool
    conv1 = conv2d_selu(
        x,
        size=5,
        n_filters=64,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=conv_selu_safe_initializer(x, 5, seed=seed + 1),
        name="conv_1")
    layers.append(("conv_1", conv1))
    pool1 = max_pool2d(conv1, size=3, stride=2, name="pool_1")
    layers.append(("pool_1", pool1))

    # conv5x5 selu + pool
    conv2 = conv2d_selu(
        pool1,
        size=5,
        n_filters=64,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=conv_selu_safe_initializer(pool1, 5, seed=seed + 2),
        name="conv_2")
    layers.append(("conv_2", conv2))
    pool2 = max_pool2d(conv2, size=3, stride=2, name="pool_2")
    layers.append(("pool_2", pool2))

    # 3x conv3x3 selu + pool
    conv3 = conv2d_selu(
        pool2,
        size=3,
        n_filters=128,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=conv_selu_safe_initializer(pool2, 3, seed=seed + 3),
        name="conv_3")
    layers.append(("conv_3", conv3))
    conv4 = conv2d_selu(
        conv3,
        size=3,
        n_filters=128,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=conv_selu_safe_initializer(conv3, 3, seed=seed + 4),
        name="conv_4")
    layers.append(("conv_4", conv4))
    conv5 = conv2d_selu(
        conv4,
        size=3,
        n_filters=128,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=conv_selu_safe_initializer(conv4, 3, seed=seed + 5),
        name="conv_5")
    layers.append(("conv_5", conv5))
    pool3 = max_pool2d(conv5, size=2, stride=2, name="pool_3")
    layers.append(("pool_3", pool3))

    flat = flatten(pool3, name="flatten")
    layers.append(("flatten", flat))

    dense1 = dense_selu(
        flat,
        n_units=384,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=dense_selu_safe_initializer(flat, seed=seed + 6),
        name="dense_1")
    layers.append(("dense_1", dense1))

    if drop_rate > 0.0:
        dense1 = dropout_selu(dense1,
                              rate=drop_rate,
                              training=training,
                              seed=seed + 7)

    dense2 = dense_selu(
        dense1,
        n_units=192,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=dense_selu_safe_initializer(dense1, seed=seed + 8),
        name="dense_2")
    layers.append(("dense_2", dense2))

    if drop_rate > 0.0:
        dense2 = dropout_selu(dense2,
                              rate=drop_rate,
                              training=training,
                              seed=seed + 9)

    # dense softmax
    dense3 = dense(dense2,
                   n_units=10,
                   regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                   kernel_init=dense_selu_safe_initializer(dense2,
                                                           seed=seed + 10),
                   name="dense_3")
    layers.append(("logit", dense3))
    prob = tf.nn.softmax(dense3, name="prob")
    layers.append(("prob", prob))

    return layers, variables
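
dropout_selu is presumably alpha dropout (Klambauer et al., 2017), the variant that preserves the self-normalizing mean and variance that standard dropout would destroy. A sketch of an equivalent helper in stock TF 1.x, not this project's actual implementation:

def dropout_selu_sketch(x, rate, training, seed=None):
    # tf.contrib.nn.alpha_dropout takes a keep probability, not a drop rate
    return tf.cond(training,
                   lambda: tf.contrib.nn.alpha_dropout(x, 1.0 - rate, seed=seed),
                   lambda: x)
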
Code example #7
def cifar10_sequential_clrn5d3_wd(x,
                                  drop_rate=0.5,
                                  weight_decay=0.001,
                                  seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    # conv5x5 relu + lrn + pool
    conv1 = conv2d_relu(
        x,
        size=5,
        n_filters=64,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 1),
        name="conv_1")
    layers.append(("conv_1", conv1))
    norm1 = tf.nn.lrn(conv1,
                      depth_radius=4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name="norm_1")
    layers.append(("norm_1", norm1))
    pool1 = max_pool2d(norm1, size=3, stride=2, name="pool_1")
    layers.append(("pool_1", pool1))

    # conv5x5 relu + lrn + pool
    conv2 = conv2d_relu(
        pool1,
        size=5,
        n_filters=64,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 2),
        name="conv_2")
    layers.append(("conv_2", conv2))
    norm2 = tf.nn.lrn(conv2,
                      depth_radius=4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name="norm_2")
    layers.append(("norm_2", norm2))
    pool2 = max_pool2d(norm2, size=3, stride=2, name="pool_2")
    layers.append(("pool_2", pool2))

    # 3x conv3x3 relu + lrn + pool
    conv3 = conv2d_relu(
        pool2,
        size=3,
        n_filters=128,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 3),
        name="conv_3")
    layers.append(("conv_3", conv3))
    norm3 = tf.nn.lrn(conv3,
                      depth_radius=4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name="norm_3")
    layers.append(("norm_3", norm3))
    conv4 = conv2d_relu(
        norm3,
        size=3,
        n_filters=128,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 4),
        name="conv_4")
    layers.append(("conv_4", conv4))
    norm4 = tf.nn.lrn(conv4,
                      depth_radius=4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name="norm_1")
    layers.append(("norm_4", norm4))
    conv5 = conv2d_relu(
        norm4,
        size=3,
        n_filters=128,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 5),
        name="conv_5")
    layers.append(("conv_5", conv5))
    norm5 = tf.nn.lrn(conv5,
                      depth_radius=4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name="norm_5")
    layers.append(("norm_5", norm5))

    pool3 = max_pool2d(norm5, size=2, stride=2, name="pool_3")
    layers.append(("pool_3", pool3))

    flat = flatten(pool3, name="flatten")
    layers.append(("flatten", flat))

    dense1 = dense_relu(
        flat,
        n_units=384,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 6),
        name="dense_1")
    layers.append(("dense_1", dense1))

    if drop_rate > 0.0:
        dense1 = tf.layers.dropout(dense1,
                                   rate=drop_rate,
                                   training=training,
                                   seed=seed + 6,
                                   name="dropout_1")

    dense2 = dense_relu(
        dense1,
        n_units=192,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 7),
        name="dense_2")
    layers.append(("dense_2", dense2))

    if drop_rate > 0.0:
        dense2 = tf.layers.dropout(dense2,
                                   rate=drop_rate,
                                   training=training,
                                   seed=seed + 7,
                                   name="dropout_2")

    # dense softmax
    dense3 = dense(dense2,
                   n_units=10,
                   regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                   kernel_init=Kumar_normal(activation=None,
                                            mode="FAN_IN",
                                            seed=seed + 8),
                   name="dense_3")
    layers.append(("logit", dense3))
    prob = tf.nn.softmax(dense3, name="prob")
    layers.append(("prob", prob))

    return layers, variables
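
The l2_regularizer handed to each layer only records a penalty term (assuming the helpers forward it to tf.get_variable or tf.layers, which collect it under tf.GraphKeys.REGULARIZATION_LOSSES); it does nothing unless added to the objective. Training should also consume the "logit" entry rather than "prob", letting the fused cross-entropy op apply the softmax. A hedged sketch, with y a hypothetical label placeholder:

logits = dict(layers)["logit"]
data_loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits))
total_loss = data_loss + tf.losses.get_regularization_loss()
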
Code example #8
def cifar10_sequential_cbn6d_wd(x,
                                drop_rate_1=0.2,
                                drop_rate_2=0.3,
                                drop_rate_3=0.4,
                                weight_decay=0.0001,
                                seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    # 2x conv3x3 relu batch-norm + pool
    conv1 = conv2d_relu_bn(
        x,
        size=3,
        n_filters=32,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 1),
        name="conv_1")
    layers.append(("conv_1", conv1))
    conv2 = conv2d_relu_bn(
        conv1,
        size=3,
        n_filters=32,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 2),
        name="conv_2")
    layers.append(("conv_2", conv2))
    pool1 = max_pool2d(conv2, size=2, stride=2, name="pool_1")
    layers.append(("pool_1", pool1))
    dropout1 = tf.layers.dropout(pool1,
                                 rate=drop_rate_1,
                                 training=training,
                                 seed=seed + 2,
                                 name="dropout_1")
    layers.append(("dropout_1", dropout1))

    # 2x conv3x3 relu batch-norm + pool
    conv3 = conv2d_relu_bn(
        dropout1,
        size=3,
        n_filters=64,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 3),
        name="conv_3")
    layers.append(("conv_3", conv3))
    conv4 = conv2d_relu_bn(
        conv3,
        size=3,
        n_filters=64,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 4),
        name="conv_4")
    layers.append(("conv_4", conv4))
    pool2 = max_pool2d(conv4, size=2, stride=2, name="pool_2")
    layers.append(("pool_2", pool2))
    dropout2 = tf.layers.dropout(pool2,
                                 rate=drop_rate_2,
                                 training=training,
                                 seed=seed + 4,
                                 name="dropout_2")
    layers.append(("dropout_2", dropout2))

    # 2x conv3x3 relu batch-norm + pool
    conv5 = conv2d_relu_bn(
        dropout2,
        size=3,
        n_filters=128,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 5),
        name="conv_5")
    layers.append(("conv_5", conv5))
    conv6 = conv2d_relu_bn(
        conv5,
        size=3,
        n_filters=128,
        is_training=training,
        regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
        kernel_init=He_normal(seed=seed + 6),
        name="conv_6")
    layers.append(("conv_6", conv6))
    pool3 = max_pool2d(conv6, size=2, stride=2, name="pool_3")
    layers.append(("pool_3", pool3))
    dropout3 = tf.layers.dropout(pool3,
                                 rate=drop_rate_3,
                                 training=training,
                                 seed=seed + 6,
                                 name="dropout_3")
    layers.append(("dropout_3", dropout3))

    flat = flatten(dropout3, name="flatten")
    layers.append(("flatten", flat))

    # dense softmax
    dense1 = dense(flat,
                   n_units=10,
                   regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                   kernel_init=Kumar_normal(activation=None,
                                            mode="FAN_IN",
                                            seed=seed + 7),
                   name="dense_1")
    layers.append(("logit", dense1))
    prob = tf.nn.softmax(dense1, name="prob")
    layers.append(("prob", prob))

    return layers, variables
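
The helper here is named conv2d_relu_bn, i.e. activation before batch norm, whereas examples #4 and #9 use conv2d_bn_relu (the more common conv → BN → ReLU order). A minimal sketch of what the post-activation variant might look like with stock TF 1.x layers; this is an assumption, not the project's actual helper:

def conv2d_relu_bn_sketch(x, size, n_filters, is_training,
                          regularizer=None, kernel_init=None, name="conv"):
    with tf.variable_scope(name):
        h = tf.layers.conv2d(x, filters=n_filters, kernel_size=size,
                             padding="same", activation=tf.nn.relu,
                             kernel_initializer=kernel_init,
                             kernel_regularizer=regularizer)
        return tf.layers.batch_normalization(h, training=is_training)
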
Code example #9
def cifar10_sequential_cbn3d(x,
                             drop_rate_1=0.2,
                             drop_rate_2=0.3,
                             drop_rate_3=0.4,
                             seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    # conv5x5 batch-norm relu + pool
    conv1 = conv2d_bn_relu(x,
                           size=5,
                           n_filters=32,
                           is_training=training,
                           kernel_init=He_normal(seed=seed + 1),
                           name="conv_1")
    layers.append(("conv_1", conv1))
    pool1 = max_pool2d(conv1, size=3, stride=2, name="pool_1")
    layers.append(("pool_1", pool1))
    dropout1 = tf.layers.dropout(pool1,
                                 rate=drop_rate_1,
                                 training=training,
                                 seed=seed + 1,
                                 name="dropout_1")
    layers.append(("dropout_1", dropout1))

    # conv5x5 batch-norm relu + pool
    conv2 = conv2d_bn_relu(dropout1,
                           size=5,
                           n_filters=64,
                           is_training=training,
                           kernel_init=He_normal(seed=seed + 2),
                           name="conv_2")
    layers.append(("conv_2", conv2))
    pool2 = max_pool2d(conv2, size=3, stride=2, name="pool_2")
    layers.append(("pool_2", pool2))
    dropout2 = tf.layers.dropout(pool2,
                                 rate=drop_rate_2,
                                 training=training,
                                 seed=seed + 2,
                                 name="dropout_2")
    layers.append(("dropout_2", dropout2))

    # conv3x3 batch-norm relu + pool
    conv3 = conv2d_bn_relu(dropout2,
                           size=3,
                           n_filters=128,
                           is_training=training,
                           kernel_init=He_normal(seed=seed + 3),
                           name="conv_3")
    layers.append(("conv_3", conv3))
    pool3 = max_pool2d(conv3, size=2, stride=2, name="pool_3")
    layers.append(("pool_3", pool3))
    dropout3 = tf.layers.dropout(pool3,
                                 rate=drop_rate_3,
                                 training=training,
                                 seed=seed + 3,
                                 name="dropout_3")
    layers.append(("dropout_3", dropout3))

    flat = flatten(dropout3, name="flatten")
    layers.append(("flatten", flat))

    # dense softmax
    dense1 = dense(flat,
                   n_units=10,
                   kernel_init=Kumar_normal(activation=None,
                                            mode="FAN_IN",
                                            seed=seed + 4),
                   name="dense_1")
    layers.append(("logit", dense1))
    prob = tf.nn.softmax(dense1, name="prob")
    layers.append(("prob", prob))

    return layers, variables
Code example #10
def cifar10_sequential_clrn3d(x,
                              drop_rate_1=0.2,
                              drop_rate_2=0.3,
                              drop_rate_3=0.4,
                              seed=42):
    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    # conv5x5 relu + lrn + pool
    conv1 = conv2d_relu(x,
                        size=5,
                        n_filters=32,
                        kernel_init=tf.truncated_normal_initializer(
                            stddev=5e-2, seed=seed + 1),
                        name="conv_1")
    layers.append(("conv_1", conv1))
    norm1 = tf.nn.lrn(conv1,
                      depth_radius=4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name="norm_1")
    layers.append(("norm_1", norm1))
    pool1 = max_pool2d(norm1, size=3, stride=2, name="pool_1")
    layers.append(("pool_1", pool1))
    dropout1 = tf.layers.dropout(pool1,
                                 rate=drop_rate_1,
                                 training=training,
                                 seed=seed + 1,
                                 name="dropout_1")
    layers.append(("dropout_1", dropout1))

    # conv5x5 relu + lrn + pool
    conv2 = conv2d_relu(dropout1,
                        size=5,
                        n_filters=64,
                        kernel_init=tf.truncated_normal_initializer(
                            stddev=5e-2, seed=seed + 2),
                        name="conv_2")
    layers.append(("conv_2", conv2))
    norm2 = tf.nn.lrn(conv2,
                      depth_radius=4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name="norm_2")
    layers.append(("norm_2", norm2))
    pool2 = max_pool2d(norm2, size=3, stride=2, name="pool_2")
    layers.append(("pool_2", pool2))
    dropout2 = tf.layers.dropout(pool2,
                                 rate=drop_rate_2,
                                 training=training,
                                 seed=seed + 2,
                                 name="dropout_2")
    layers.append(("dropout_2", dropout2))

    # conv3x3 relu + lrn + pool
    conv3 = conv2d_relu(dropout2,
                        size=3,
                        n_filters=128,
                        kernel_init=tf.truncated_normal_initializer(
                            stddev=5e-2, seed=seed + 3),
                        name="conv_3")
    layers.append(("conv_3", conv3))
    norm3 = tf.nn.lrn(conv3,
                      depth_radius=4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name="norm_3")
    layers.append(("norm_3", norm3))
    pool3 = max_pool2d(norm3, size=2, stride=2, name="pool_3")
    layers.append(("pool_3", pool3))
    dropout3 = tf.layers.dropout(pool3,
                                 rate=drop_rate_3,
                                 training=training,
                                 seed=seed + 3,
                                 name="dropout_3")
    layers.append(("dropout_3", dropout3))

    flat = flatten(dropout3, name="flatten")
    layers.append(("flatten", flat))

    # dense softmax
    dense1 = dense(flat,
                   n_units=10,
                   kernel_init=tf.truncated_normal_initializer(
                       stddev=1 / 192.0, seed=seed + 4),
                   name="dense_1")
    layers.append(("logit", dense1))
    prob = tf.nn.softmax(dense1, name="prob")
    layers.append(("prob", prob))

    return layers, variables
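
For reference, tf.nn.lrn implements AlexNet-style local response normalization across the channel axis:

    output[i] = input[i] / (bias + alpha * sum(input[j]**2 for j in [i-r, i+r])) ** beta

with r = depth_radius. The constants used here (depth_radius=4, bias=1.0, alpha=0.001/9.0, beta=0.75), the stddev=5e-2 truncated-normal initializer, and the 1/192.0 stddev for the final dense layer all match the classic TensorFlow CIFAR-10 tutorial.
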
Code example #11
File: graph.py Project: autasi/tensorflow_examples
def mnist_sequential_c2d2(x, drop_rate=0.5):
    """Creates sequential convolutional neural network for MNIST. The network
        uses 2 convolutional+pooling layers to create the representation part
        of the network, and 1 fully-connected dense layer to create the
        classifier part. Dropout layer is used for regularization. The output 
        probabilities are generated by one dense layer followed by a softmax 
        function.
    Args:
        x: A tensor representing the input.
    Returns:
        A tuple containing the layers of the network graph and additional
        placeholders if any. Layers are represented as list of named tuples.
    """

    layers = []
    variables = []

    training = tf.placeholder(tf.bool, name="training")
    variables.append(("training", training))

    conv1 = conv2d(x,
                   size=5,
                   n_filters=32,
                   kernel_init=Kumar_initializer(mode="FAN_AVG"),
                   name="conv1")
    layers.append(("conv1", conv1))

    pool1 = max_pool(conv1, name="pool1")
    layers.append(("pool1", pool1))

    conv2 = conv2d(pool1,
                   size=5,
                   n_filters=64,
                   kernel_init=Kumar_initializer(mode="FAN_IN"),
                   name="conv2")
    layers.append(("conv2", conv2))

    pool2 = max_pool(conv2, name="pool2")
    layers.append(("pool2", pool2))

    flat = flatten(pool2, name="flatten")
    layers.append(("flatten", flat))

    fc1 = dense(flat,
                n_units=1024,
                kernel_init=Kumar_initializer(mode="FAN_IN"),
                name="fc1")
    layers.append(("fc1", fc1))

    dropout1 = tf.layers.dropout(fc1,
                                 rate=drop_rate,
                                 training=training,
                                 seed=42,
                                 name="dropout")
    layers.append(("dropout1", dropout1))

    fc2 = dense(dropout1,
                n_units=10,
                activation=None,
                kernel_init=Kumar_initializer(activation=None, mode="FAN_IN"),
                name="fc2")
    layers.append(("fc2", fc2))

    prob = tf.nn.softmax(fc2, name="prob")
    layers.append(("prob", prob))

    return layers, variables