Code example #1
File: mnist_cnn_bn.py  Project: poppyred/mnist_CNN
def inference(x, y_, keep_prob, phase_train):
    with tf.variable_scope('conv_1'):
        conv1 = Convolution2D(x, (28, 28), 1, 32, (5, 5), activation='none')
        conv1_bn = batch_norm(conv1.output(), 32, phase_train)
        conv1_out = tf.nn.relu(conv1_bn)

        pool1 = MaxPooling2D(conv1_out)
        pool1_out = pool1.output()

    with tf.variable_scope('conv_2'):
        conv2 = Convolution2D(pool1_out, (14, 14),
                              32,
                              64, (5, 5),
                              activation='none')
        conv2_bn = batch_norm(conv2.output(), 64, phase_train)
        conv2_out = tf.nn.relu(conv2_bn)

        pool2 = MaxPooling2D(conv2_out)
        pool2_out = pool2.output()

        pool2_flat = tf.reshape(pool2_out, [-1, 7 * 7 * 64])

    with tf.variable_scope('fc1'):
        fc1 = FullConnected(pool2_flat, 7 * 7 * 64, 1024)
        fc1_out = fc1.output()
        fc1_dropped = tf.nn.dropout(fc1_out, keep_prob)

    y_pred = ReadOutLayer(fc1_dropped, 1024, 10).output()

    loss = tf.reduce_mean(
        -tf.reduce_sum(y_ * tf.log(y_pred), reduction_indices=[1]))
    accuracy = evaluation(y_pred, y_)

    return loss, accuracy, y_pred
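
All of the examples on this page rely on small wrapper classes (Convolution2D, MaxPooling2D, FullConnected, ReadOutLayer, Conv2Dtranspose) and helper functions (batch_norm, training, evaluation) defined elsewhere in each project and not reproduced here. As a reading aid only, here is a minimal sketch of what Convolution2D and MaxPooling2D might look like in TensorFlow 1.x, assuming the call pattern Convolution2D(input, (rows, cols), in_ch, out_ch, (kh, kw), activation=...); the real classes may differ (some later examples also pass a stride S and a 'leaky_relu' activation, and the original wrapper may reshape a flat input internally).

import tensorflow as tf

class Convolution2D(object):
    # Minimal sketch (assumption), matching the call pattern used on this page.
    def __init__(self, input, input_siz, in_ch, out_ch, patch_siz, activation='relu'):
        self.input = input
        wshape = [patch_siz[0], patch_siz[1], in_ch, out_ch]
        self.w = tf.Variable(tf.truncated_normal(wshape, stddev=0.1), name='w')
        self.b = tf.Variable(tf.constant(0.1, shape=[out_ch]), name='b')
        self.activation = activation

    def output(self):
        linout = tf.nn.conv2d(self.input, self.w,
                              strides=[1, 1, 1, 1], padding='SAME') + self.b
        if self.activation == 'relu':
            return tf.nn.relu(linout)
        if self.activation == 'sigmoid':
            return tf.sigmoid(linout)
        return linout  # 'none': raw pre-activation, as used before batch_norm above


class MaxPooling2D(object):
    # 2x2, stride-2 max pooling, which reproduces the 28 -> 14 -> 7 shapes above.
    def __init__(self, input, ksize=None, name=None):
        self.input = input
        self.ksize = ksize if ksize is not None else [1, 2, 2, 1]
        self.name = name

    def output(self):
        return tf.nn.max_pool(self.input, ksize=self.ksize,
                              strides=[1, 2, 2, 1], padding='SAME',
                              name=self.name)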
Code example #2
File: stack_mnist.py  Project: w-az-w/go_simulator
def res_block(inputs):
    # phase_train is assumed to be defined in the enclosing scope of this excerpt
    conv_res1 = Convolution2D(inputs, (28, 28), 64, 64, (3, 3), activation='none')
    conv_res1_bn = batch_norm(conv_res1.output(), 64, phase_train)
    conv_res1_out = tf.nn.relu(conv_res1_bn)
    conv_res2 = Convolution2D(conv_res1_out, (28, 28), 64, 64, (3, 3), activation='none')
    conv_res2_bn = batch_norm(conv_res2.output(), 64, phase_train)
    conv_res2_out = tf.nn.relu(conv_res2_bn + inputs)  # residual (skip) connection
    return conv_res2_out
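
batch_norm(x, n_out, phase_train) is another project-local helper that is not shown on this page. A common TensorFlow 1.x implementation with that signature, where phase_train is a boolean placeholder selecting batch statistics (training) or moving averages (inference), looks roughly like the sketch below; the actual code in these repositories may differ.

def batch_norm(x, n_out, phase_train):
    # Batch normalization over a [batch, height, width, n_out] tensor (sketch).
    beta = tf.Variable(tf.zeros([n_out]), name='beta')
    gamma = tf.Variable(tf.ones([n_out]), name='gamma')
    batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
    ema = tf.train.ExponentialMovingAverage(decay=0.99)

    def mean_var_with_update():
        # update the moving averages while training
        ema_apply_op = ema.apply([batch_mean, batch_var])
        with tf.control_dependencies([ema_apply_op]):
            return tf.identity(batch_mean), tf.identity(batch_var)

    mean, var = tf.cond(phase_train,
                        mean_var_with_update,
                        lambda: (ema.average(batch_mean), ema.average(batch_var)))
    return tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)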
Code example #3
def mk_nn_model(x, y_):
    # Encoding phase
    # x_image = tf.reshape(x, [-1, 28, 28, 1])
    x_image = x
    conv1 = Convolution2D(x_image, (28, 28), 1, 16, (3, 3), activation='relu')
    conv1_out = conv1.output()

    pool1 = MaxPooling2D(conv1_out)
    pool1_out = pool1.output()

    conv2 = Convolution2D(pool1_out, (14, 14),
                          16,
                          8, (3, 3),
                          activation='relu')
    conv2_out = conv2.output()

    pool2 = MaxPooling2D(conv2_out)
    pool2_out = pool2.output()

    conv3 = Convolution2D(pool2_out, (7, 7), 8, 8, (3, 3), activation='relu')
    conv3_out = conv3.output()

    pool3 = MaxPooling2D(conv3_out)
    pool3_out = pool3.output()
    # at this point the representation is (8, 4, 4) i.e. 128-dimensional
    # Decoding phase
    conv_t1 = Conv2Dtranspose(pool3_out, (7, 7),
                              8,
                              8, (3, 3),
                              activation='relu')
    conv_t1_out = conv_t1.output()

    conv_t2 = Conv2Dtranspose(conv_t1_out, (14, 14),
                              8,
                              8, (3, 3),
                              activation='relu')
    conv_t2_out = conv_t2.output()

    conv_t3 = Conv2Dtranspose(conv_t2_out, (28, 28),
                              8,
                              16, (3, 3),
                              activation='relu')
    conv_t3_out = conv_t3.output()

    conv_last = Convolution2D(conv_t3_out, (28, 28),
                              16,
                              1, (3, 3),
                              activation='sigmoid')
    decoded = conv_last.output()

    decoded = tf.reshape(decoded, [-1, 784])
    cross_entropy = -1. * x * tf.log(decoded) - (1. - x) * tf.log(1. - decoded)
    loss = tf.reduce_mean(cross_entropy)

    return loss, decoded
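
The reconstruction loss in this and the later autoencoder examples (#7, #9, #10) applies tf.log directly to a sigmoid output, which produces NaN as soon as the output saturates at exactly 0 or 1. A small hardening, shown only as a suggestion and not part of the original code, is to clip before taking the log:

def pixelwise_bce(target, decoded, eps=1e-7):
    # numerically safer version of the pixel-wise cross-entropy used above (suggestion)
    decoded = tf.clip_by_value(decoded, eps, 1. - eps)
    cross_entropy = -target * tf.log(decoded) - (1. - target) * tf.log(1. - decoded)
    return tf.reduce_mean(cross_entropy)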
Code example #4
    def inference(self, x, y_):
        x_image = tf.reshape(x, [-1, 28, 28, 5])  # note: the convolution below is fed x, not x_image

        with tf.variable_scope('conv_1'):
            conv1 = Convolution2D(x, (28, 28),
                                  5,
                                  64, (5, 5),
                                  activation='none')
            conv1_bn = self.batch_norm(conv1.output(), 64, self.phase_train)
            conv1_out = tf.nn.relu(conv1_bn)


#        pool1 = MaxPooling2D(conv1_out)
#        pool1_out = pool1.output()

        with tf.variable_scope('res_block'):
            # input
            res_block_out1 = self.res_block(conv1_out)
            res_block_out2 = self.res_block(res_block_out1)
            res_block_out3 = self.res_block(res_block_out2)
            res_block_out4 = self.res_block(res_block_out3)
            res_block_out5 = self.res_block(res_block_out4)
            res_block_out6 = self.res_block(res_block_out5)
            res_block_out7 = self.res_block(res_block_out6)
            res_block_out8 = self.res_block(res_block_out7)
            res_block_out9 = self.res_block(res_block_out8)

        with tf.variable_scope('conv_2'):
            conv2 = Convolution2D(res_block_out9, (28, 28),
                                  64,
                                  2, (1, 1),
                                  activation='none')
            conv2_bn = self.batch_norm(conv2.output(), 2, self.phase_train)
            conv2_out = tf.nn.relu(conv2_bn)

            # pool2 = MaxPooling2D(conv2_out)
            # pool2_out = pool2.output()
            pool2_flat = tf.reshape(conv2_out, [-1, 28 * 28 * 2])

        # with tf.variable_scope('fc1'):
        # fc1 = FullConnected(pool2_flat, 28*28*2, 1024)
        # fc1_out = fc1.output()
        # fc1_dropped = tf.nn.dropout(fc1_out)

        self.y_pred = ReadOutLayer(pool2_flat, 28 * 28 * 2, 10).output()
        cross_entropy = tf.reduce_mean(-tf.reduce_sum(
            y_ * tf.log(self.y_pred + 1e-7), reduction_indices=[1]))
        self.loss = cross_entropy
        self.train_step = self.training(self.loss, 1.e-4)
        self.accuracy = self.evaluation(self.y_pred, y_)
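
FullConnected and ReadOutLayer are also assumed by these examples but not shown here. A plausible minimal version, consistent with the calls FullConnected(input, n_in, n_out[, activation]) and with y_pred being fed straight into tf.log (i.e. the read-out layer emits softmax probabilities), is sketched below; treat it as an assumption about the projects' helpers.

class FullConnected(object):
    def __init__(self, input, n_in, n_out, activation='relu'):
        self.input = input
        self.w = tf.Variable(tf.truncated_normal([n_in, n_out], stddev=0.1), name='w')
        self.b = tf.Variable(tf.constant(0.1, shape=[n_out]), name='b')
        self.activation = activation

    def output(self):
        linout = tf.matmul(self.input, self.w) + self.b
        if self.activation == 'relu':
            return tf.nn.relu(linout)
        if self.activation == 'sigmoid':
            return tf.sigmoid(linout)
        return linout


class ReadOutLayer(object):
    def __init__(self, input, n_in, n_out):
        self.input = input
        self.w = tf.Variable(tf.zeros([n_in, n_out]), name='w')
        self.b = tf.Variable(tf.zeros([n_out]), name='b')

    def output(self):
        # softmax probabilities, so the examples above can take tf.log(y_pred) directly
        return tf.nn.softmax(tf.matmul(self.input, self.w) + self.b)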
Code example #5
def inference(x, y_, keep_prob, phase_train):
    x_image = tf.reshape(x, [-1, 28, 28, 1])  # note: the convolution below is fed x, not x_image

    with tf.variable_scope('conv_1'):
        conv1 = Convolution2D(x, (28, 28), 1, 64, (5, 5), activation='none')
        conv1_bn = batch_norm(conv1.output(), 64, phase_train)
        conv1_out = tf.nn.relu(conv1_bn)
        #        pool1 = MaxPooling2D(conv1_out)
        #        pool1_out = pool1.output()

        # input
        res_block_out1 = res_block(conv1_out)
        res_block_out2 = res_block(res_block_out1)
        res_block_out3 = res_block(res_block_out2)
        res_block_out4 = res_block(res_block_out3)
        res_block_out5 = res_block(res_block_out4)
        res_block_out6 = res_block(res_block_out5)
        res_block_out7 = res_block(res_block_out6)
        res_block_out8 = res_block(res_block_out7)
        res_block_out9 = res_block(res_block_out8)

    with tf.variable_scope('conv_2'):
        conv2 = Convolution2D(res_block_out9, (28, 28),
                              64,
                              2, (1, 1),
                              activation='none')
        conv2_bn = batch_norm(conv2.output(), 2, phase_train)
        conv2_out = tf.nn.relu(conv2_bn)

        # pool2 = MaxPooling2D(conv2_out)
        # pool2_out = pool2.output()
        pool2_flat = tf.reshape(conv2_out, [-1, 28 * 28 * 2])

#   with tf.variable_scope('fc1'):
#       fc1 = FullConnected(pool2_flat, 28*28*2, 1024)
#       fc1_out = fc1.output()
#       fc1_dropped = tf.nn.dropout(fc1_out, keep_prob)

    y_pred = ReadOutLayer(pool2_flat, 28 * 28 * 2, 10).output()
    cross_entropy = tf.reduce_mean(
        -tf.reduce_sum(y_ * tf.log(y_pred + 1e-7), reduction_indices=[1]))
    loss = cross_entropy
    train_step = training(loss, 1.e-4)
    accuracy = evaluation(y_pred, y_)

    return loss, accuracy, y_pred
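
training(loss, learning_rate) and evaluation(y_pred, y_) are likewise project helpers. Minimal stand-ins consistent with how they are used above (an optimizer step and an accuracy tensor), offered only as assumptions:

def training(loss, learning_rate):
    # plausible stand-in: one Adam step minimizing the given loss
    return tf.train.AdamOptimizer(learning_rate).minimize(loss)


def evaluation(y_pred, y_):
    # fraction of examples whose arg-max prediction matches the one-hot label
    correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y_, 1))
    return tf.reduce_mean(tf.cast(correct_prediction, tf.float32))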
Code example #6
    def create_encoder_conv(self, conf):
        for i in range(len(conf)):
            in_shape = self.layers[-1].get_shape().as_list()
            conv = Convolution2D(self.layers[-1],
                                 (in_shape[1], in_shape[2]),
                                 in_shape[3],
                                 conf[i], (2, 6),
                                 activation='leaky_relu')
            self.layers.append(conv.output())
            pool = MaxPooling2D(self.layers[-1])
            self.layers.append(pool.output())
Code example #7
def mk_nn_model(x, y_):
    # Encoding phase
    x_image = tf.reshape(x, [-1, 28, 28, 1])    
    conv1 = Convolution2D(x_image, (28, 28), 1, 8, 
                          (9, 9), activation='sigmoid')
    conv1_out = conv1.output()

#    pool1 = MaxPooling2D(conv1_out)
#    pool1_out = pool1.output()
#    pool1_out = tf.nn.dropout(pool1_out,keep_prob=0.2)

    conv2 = Convolution2D(conv1_out, (28, 28), 8, 4, 
                          (9, 9), activation='sigmoid')
    conv2_out = conv2.output()
    
#    pool2 = MaxPooling2D(conv2_out)
#    pool2_out = pool2.output()
#    pool2_out = tf.nn.dropout(pool2_out,keep_prob=0.2)

    # at this point the representation is (4, 28, 28), i.e. 4*28*28 = 3136-dimensional
    po = tf.reshape(conv2_out, [-1, 4 * 28 * 28])

    fc = FullConnected(po, 4 * 28 * 28, 256, activation='sigmoid')
    fc_out = fc.output()

#    fc2 = FullConnected(fc_out, 256, 2, activation='sigmoid')
#    fc2_out = fc2.output()

    fo = FullConnected(fc_out, 256, 10, activation='sigmoid')
    fo_out = fo.output()

    # Decoding phase
    dfc1 = FullConnected(fo_out, 10, 256, activation='sigmoid')
    dfc1_out = dfc1.output()

    # reshape the 256-dim code to a 16x16x1 map, then bilinearly resize it back to 28x28
    deconvin = tf.reshape(dfc1_out, [-1, 16, 16, 1])
    deconvin = tf.image.resize_images(deconvin, (28, 28),
                                      method=tf.image.ResizeMethod.BILINEAR,
                                      align_corners=False)

    conv_t1 = Conv2Dtranspose(deconvin, (28, 28), 1, 4,
                         (12, 12), activation='sigmoid')
    conv_t1_out = conv_t1.output()

    conv_t2 = Conv2Dtranspose(conv_t1_out, (28, 28), 4, 4,
                         (17, 17), activation='sigmoid')
    conv_t2_out = conv_t2.output()

    conv_t3 = Conv2Dtranspose(conv_t2_out, (28, 28), 4, 1, 
                         (1, 1), activation='sigmoid')
    decoded = conv_t3.output()

    decoded = tf.reshape(decoded, [-1, 784])
    cross_entropy = -1. * x * tf.log(decoded) - (1. - x) * tf.log(1. - decoded)
    loss = tf.reduce_mean(cross_entropy)

    # cross-entropy for the classifier head
    cross_entropy_acc = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=fo_out)
    lossacc = tf.reduce_mean(cross_entropy_acc)

    # accuracy of the trained model, between 0 (worst) and 1 (best)
    correct_prediction = tf.equal(tf.argmax(fo_out, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    return loss, decoded, lossacc, fo_out, accuracy
Code example #8
def mk_nn_model(x, y_):
    # Encoding phase
    x_image = tf.reshape(x, [-1, 227, 227, 3])

    #1st conv.
    conv1 = Convolution2D(x_image, (227, 227),
                          3,
                          96, (7, 7),
                          activation='relu',
                          S=4)
    conv1_out = conv1.output()

    #1st pooling
    pool1 = MaxPooling2D(conv1_out, ksize=[1, 3, 3, 1], S=2)
    pool1_out = pool1.output()

    #dropout?
    #    pool1_out = tf.nn.dropout(pool1_out,keep_prob=0.2)

    #LRN1
    norm1 = tf.nn.local_response_normalization(pool1_out,
                                               depth_radius=5,
                                               alpha=0.0001,
                                               beta=0.75)

    #2nd conv.
    conv2 = Convolution2D(norm1, (29, 29),
                          96,
                          256, (5, 5),
                          activation='relu',
                          S=1)  # pad=2 - how to do it????
    conv2_out = conv2.output()

    #2nd pooling
    pool2 = MaxPooling2D(conv2_out, ksize=(1, 3, 3, 1), S=2)
    pool2_out = pool2.output()
    #    pool2_out = tf.nn.dropout(pool2_out,keep_prob=0.2)
    norm2 = tf.nn.local_response_normalization(pool2_out,
                                               depth_radius=5,
                                               alpha=0.0001,
                                               beta=0.75)

    #3rd conv.
    conv3 = Convolution2D(norm2, (15, 15),
                          256,
                          384, (3, 3),
                          activation='relu',
                          S=1)  # pad=2 - how to do it????
    conv3_out = conv3.output()

    #3rd pooling
    pool3 = MaxPooling2D(conv3_out, ksize=(1, 3, 3, 1), S=2)
    pool3_out = pool3.output()

    # at this point the representation is roughly 8x8 with 384 channels, i.e. 384*8*8-dimensional
    po = tf.reshape(pool3_out, [-1, 384 * 8 * 8])

    fc6 = FullConnected(po, 384 * 8 * 8, 512, activation='relu')
    fc6_out = fc6.output()

    drop6 = tf.nn.dropout(fc6_out, keep_prob=0.5)

    fc7 = FullConnected(drop6, 512, 512, activation='relu')
    fc7_out = fc7.output()

    drop7 = tf.nn.dropout(fc7_out, keep_prob=0.5)

    fc8 = FullConnected(drop7, 512, 8, activation='relu')
    fc8_out = fc8.output()

    # cross-entropy for the classifier
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,
                                                            logits=fc8_out)
    loss = tf.reduce_mean(cross_entropy)

    # accuracy of the trained model, between 0 (worst) and 1 (best)
    correct_prediction = tf.equal(tf.argmax(fc8_out, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    return loss, accuracy, fc8_out
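
The "# pad=2 - how to do it????" comments above ask how to reproduce Caffe-style explicit padding. Assuming the wrapper ultimately calls tf.nn.conv2d, one option (shown only as a sketch; w_conv2 here stands for a hypothetical 5x5x96x256 kernel variable, not a name from the original project) is to pad the input explicitly and then convolve with padding='VALID':

# pad the feature map by 2 pixels on each side of height and width
norm1_padded = tf.pad(norm1, [[0, 0], [2, 2], [2, 2], [0, 0]])
# a VALID convolution on the padded input behaves like Caffe's pad=2
conv2_lin = tf.nn.conv2d(norm1_padded, w_conv2, strides=[1, 1, 1, 1], padding='VALID')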
Code example #9
def mk_nn_model(x_image, y_image, encoded_y_image=None):
    # Encoding phase
    conv1 = Convolution2D(x_image, (28, 28), 1, 16, (3, 3), activation='relu')
    conv1_out = conv1.output()

    pool1 = MaxPooling2D(conv1_out)
    pool1_out = pool1.output()

    conv2 = Convolution2D(pool1_out, (14, 14),
                          16,
                          8, (3, 3),
                          activation='relu')
    conv2_out = conv2.output()

    pool2 = MaxPooling2D(conv2_out)
    pool2_out = pool2.output()

    conv3 = Convolution2D(pool2_out, (7, 7), 8, 8, (3, 3), activation='relu')
    conv3_out = conv3.output()

    pool3 = MaxPooling2D(conv3_out, name='encoded')
    pool3_out = pool3.output()

    # latent_num is a module-level constant defined elsewhere in the project
    encode = Convolution2D(pool3_out, (4, 4),
                           8,
                           latent_num, (2, 2),
                           activation='relu')
    encode_out = encode.output()
    encoded = encode_out
    # print(encoded.shape)
    # at this point the representation is (latent_num, 4, 4), i.e. latent_num*4*4-dimensional
    # Decoding phase
    conv_t1 = Conv2Dtranspose(encode_out, (7, 7),
                              latent_num,
                              8, (3, 3),
                              activation='relu')
    conv_t1_out = conv_t1.output()

    conv_t2 = Conv2Dtranspose(conv_t1_out, (14, 14),
                              8,
                              8, (3, 3),
                              activation='relu')
    conv_t2_out = conv_t2.output()

    conv_t3 = Conv2Dtranspose(conv_t2_out, (28, 28),
                              8,
                              16, (3, 3),
                              activation='relu')
    conv_t3_out = conv_t3.output()

    conv_last = Convolution2D(conv_t3_out, (28, 28),
                              16,
                              1, (3, 3),
                              activation='sigmoid')
    decoded = conv_last.output()
    print(decoded.shape)

    # decoded = tf.reshape(decoded, [-1, 784])
    cross_entropy = -1. * y_image * tf.log(decoded) - (
        1. - y_image) * tf.log(1. - decoded)
    loss = tf.reduce_mean(cross_entropy)
    tf.summary.scalar('loss', loss)

    # encod_loss = 0
    # if encoded_y_image is not None:
    # encoded_cross_entropy = -1. * encoded_y_image * tf.log(encoded) - (1. - encoded_y_image) * tf.log(1. - encoded)
    # encod_loss = tf.reduce_mean(encoded_cross_entropy)
    encod_loss = tf.reduce_sum(tf.square(encoded_y_image - encoded))
    tf.summary.scalar('encoded_loss', encod_loss)
    merged = tf.summary.merge_all()

    return loss, decoded, encoded, encod_loss, merged
Code example #10
File: tf_AE.py  Project: totodd/CNN_adversarial_Noise
def mk_nn_model(x_image, y_image, encoded_y_image=None):
    # Encoding phase
    conv1 = Convolution2D(x_image, (28, 28), 1, 16, (3, 3), activation='relu')
    conv1_out = conv1.output()

    pool1 = MaxPooling2D(conv1_out)
    pool1_out = pool1.output()

    conv2 = Convolution2D(pool1_out, (14, 14),
                          16,
                          8, (3, 3),
                          activation='relu')
    conv2_out = conv2.output()

    pool2 = MaxPooling2D(conv2_out)
    pool2_out = pool2.output()

    conv3 = Convolution2D(pool2_out, (7, 7), 8, 1, (3, 3), activation='relu')
    conv3_out = conv3.output()

    pool3 = MaxPooling2D(conv3_out, name='encoded')
    pool3_out = pool3.output()
    encoded = pool3_out
    # at this point the representation is (1, 4, 4), i.e. 16-dimensional
    # Decoding phase
    conv_t1 = Conv2Dtranspose(pool3_out, (7, 7),
                              1,
                              8, (3, 3),
                              activation='relu')
    conv_t1_out = conv_t1.output()

    conv_t2 = Conv2Dtranspose(conv_t1_out, (14, 14),
                              8,
                              8, (3, 3),
                              activation='relu')
    conv_t2_out = conv_t2.output()

    conv_t3 = Conv2Dtranspose(conv_t2_out, (28, 28),
                              8,
                              16, (3, 3),
                              activation='relu')
    conv_t3_out = conv_t3.output()

    conv_last = Convolution2D(conv_t3_out, (28, 28),
                              16,
                              1, (3, 3),
                              activation='sigmoid')
    decoded = conv_last.output()
    print(decoded.shape)

    # decoded = tf.reshape(decoded, [-1, 784])
    cross_entropy = -1. * y_image * tf.log(decoded) - (
        1. - y_image) * tf.log(1. - decoded)
    loss = tf.reduce_mean(cross_entropy)
    # encod_loss = 0
    # if encoded_y_image is not None:
    encoded_cross_entropy = -1. * encoded_y_image * tf.log(encoded) - (
        1. - encoded_y_image) * tf.log(1. - encoded)
    encod_loss = tf.reduce_mean(encoded_cross_entropy)

    return loss, decoded, encoded, encod_loss
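
Finally, Conv2Dtranspose is the remaining helper assumed throughout the decoder code above. A minimal sketch with the call signature used on this page, Conv2Dtranspose(input, (out_rows, out_cols), in_ch, out_ch, (kh, kw), activation=...), might look like the following; inferring the stride from the requested output size is an assumption, and the real class may handle it differently.

class Conv2Dtranspose(object):
    def __init__(self, input, output_siz, in_ch, out_ch, patch_siz, activation='relu'):
        self.input = input
        self.rows, self.cols = output_siz
        self.out_ch = out_ch
        # conv2d_transpose expects the kernel as [kh, kw, out_channels, in_channels]
        self.w = tf.Variable(tf.truncated_normal(
            [patch_siz[0], patch_siz[1], out_ch, in_ch], stddev=0.1), name='w')
        self.b = tf.Variable(tf.constant(0.1, shape=[out_ch]), name='b')
        self.activation = activation

    def output(self):
        in_rows = self.input.get_shape().as_list()[1]
        stride = (self.rows + in_rows - 1) // in_rows  # e.g. 4 -> 7 gives stride 2, 28 -> 28 gives 1
        batch = tf.shape(self.input)[0]
        out_shape = tf.stack([batch, self.rows, self.cols, self.out_ch])
        linout = tf.nn.conv2d_transpose(self.input, self.w, out_shape,
                                        strides=[1, stride, stride, 1],
                                        padding='SAME') + self.b
        if self.activation == 'relu':
            return tf.nn.relu(linout)
        if self.activation == 'sigmoid':
            return tf.sigmoid(linout)
        return linout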