Code example #1
## RE31
import tensorflow as tf   #TF 1.x graph API (tf.compat.v1.* under TensorFlow 2.x)
#`nl` is the author's helper module (not shown); a possible sketch follows this example

x = tf.placeholder(tf.float32, shape=[None,15,15], name='x')        #input image matrix data to be fed
y = tf.placeholder(tf.float32, shape=[None,2], name='y')            #correct output labels to be fed
keep_prob = tf.placeholder(tf.float32, name='keep_prob')            #keep_prob parameter to be fed

x_image = tf.reshape(x, [-1,15,15,1])

## convolutional layer 1, kernel 4*4, insize 1, outsize 20
W_conv1 = nl.weight_variable([4,4,1,20])
b_conv1 = nl.bias_variable([20])
h_conv1 = nl.conv_layer(x_image, W_conv1) + b_conv1                 #outsize = batch*12*12*20
a_conv1 = tf.nn.tanh(h_conv1)                                       #outsize = batch*12*12*20

## max pooling layer 1
h_pool1 = nl.max_pool_22_layer(a_conv1)                             #outsize = batch*6*6*20
a_pool1 = tf.nn.tanh(h_pool1)                                       #outsize = batch*6*6*20

## convolutional layer 2, kernel 3*3, insize 20, outsize 40
W_conv2 = nl.weight_variable([3,3,20,40])
b_conv2 = nl.bias_variable([40])
h_conv2 = nl.conv_layer(a_pool1, W_conv2) + b_conv2                 #outsize = batch*4*4*40
a_conv2 = tf.nn.tanh(h_conv2)                                       #outsize = batch*4*4*40

## max pooling layer 2
h_pool2 = nl.max_pool_22_layer(a_conv2)                             #outsize = batch*2*2*40
a_pool2 = tf.nn.tanh(h_pool2)                                       #outsize = batch*2*2*40

## flatten layer
x_flat = tf.reshape(a_pool2, [-1,160])                              #outsize = batch*160
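
The `nl` helper module never appears in these snippets, but the outsize comments pin down its behavior: a k*k convolution shrinks each spatial dimension by k-1 (15 -> 12 under the 4*4 kernel), so it must use stride 1 with VALID padding, and each pooling step halves the dimensions, i.e. 2x2 max pooling with stride 2. The inline tf.Variable definitions in the F1 snippet below suggest the initialization. A minimal sketch under those assumptions; the real module may differ:

import tensorflow as tf

def weight_variable(shape):
    #truncated-normal init, matching what the F1 snippet does inline
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias_variable(shape):
    #small constant bias
    return tf.Variable(tf.constant(0.1, shape=shape))

def conv_layer(x, W):
    #stride-1 convolution, VALID padding: out = in - k + 1 per spatial dim
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')

def max_pool_22_layer(x):
    #2x2 max pooling, stride 2: halves each spatial dim
    #(every pooled input here has even dims, so SAME and VALID agree)
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')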
Code example #2
import tensorflow as tf   #TF 1.x graph API

x = tf.placeholder(tf.float32, shape=[None, 39, 39],
                   name='x')  #input image matrix data to be fed
y = tf.placeholder(tf.float32, shape=[None, 10],
                   name='y')  #correct output labels to be fed
keep_prob = tf.placeholder(tf.float32,
                           name='keep_prob')  #keep_prob parameter to be fed

x_image = tf.reshape(x, [-1, 39, 39, 1])

## convolutional layer 1, kernel 4*4, insize 1, outsize 20
W_conv1 = nl.weight_variable([4, 4, 1, 20])
b_conv1 = nl.bias_variable([20])
h_conv1 = nl.conv_layer(x_image, W_conv1) + b_conv1  #outsize = batch*36*36*20
a_conv1 = tf.nn.relu(h_conv1)  #outsize = batch*36*36*20

## max pooling layer 1
h_pool1 = nl.max_pool_22_layer(a_conv1)  #outsize = batch*18*18*20
a_pool1 = tf.nn.relu(h_pool1)  #outsize = batch*18*18*20

## convolutional layer 2, kernel 3*3, insize 20, outsize 40
W_conv2 = nl.weight_variable([3, 3, 20, 40])
b_conv2 = nl.bias_variable([40])
h_conv2 = nl.conv_layer(a_pool1, W_conv2) + b_conv2  #outsize = batch*16*16*40
a_conv2 = tf.nn.relu(h_conv2)  #outsize = batch*16*16*40

## max pooling layer 2
h_pool2 = nl.max_pool_22_layer(a_conv2)  #outsize = batch*8*8*40
a_pool2 = tf.nn.relu(h_pool2)  #outsize = batch*8*8*40

## convolutional layer 3, kernel 3*3, insize 40, outsize 60
W_conv3 = nl.weight_variable([3, 3, 40, 60])
b_conv3 = nl.bias_variable([60])
h_conv3 = nl.conv_layer(a_pool2, W_conv3) + b_conv3  #outsize = batch*6*6*60
a_conv3 = tf.nn.relu(h_conv3)  #outsize = batch*6*6*60
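
Every outsize comment follows from two rules: a VALID k*k convolution gives out = in - k + 1, and a 2x2 stride-2 pool gives out = in // 2. A quick sanity check of the 39x39 network above (illustration only, not part of the original code):

size = 39
for kernel, pooled in ((4, True), (3, True), (3, False)):
    size = size - kernel + 1       #VALID conv: out = in - k + 1
    print('conv out:', size)       #prints 36, 16, 6
    if pooled:
        size //= 2                 #2x2 max pool, stride 2
        print('pool out:', size)   #prints 18, 8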
Code example #3
import tensorflow as tf   #TF 1.x graph API
#snippet begins mid-network: layer 1 (and EN1_a_pool1) comes from omitted code;
#weight_variable / bias_variable / conv2d / max_pool_22 are tutorial-style
#helpers like the `nl` sketch after code example #1

## convolutional layer 2, kernel 3*3, insize 20, outsize 40
EN1_W_conv2 = weight_variable([3,3,20,40])
EN1_b_conv2 = bias_variable([40])
EN1_h_conv2 = conv2d(EN1_a_pool1, EN1_W_conv2) + EN1_b_conv2        #outsize = batch*12*16*40
EN1_a_conv2 = tf.nn.tanh(EN1_h_conv2)                               #outsize = batch*12*16*40

## max pooling layer 2
EN1_h_pool2 = max_pool_22(EN1_a_conv2)                              #outsize = batch*6*8*40
EN1_a_pool2 = tf.nn.tanh(EN1_h_pool2)                               #outsize = batch*6*8*40

## convolutional layer 3, kernel 3*3, insize 40, outsize 60
EN1_W_conv3 = weight_variable([3,3,40,60])
EN1_b_conv3 = bias_variable([60])
EN1_h_conv3 = conv2d(EN1_a_pool2, EN1_W_conv3) + EN1_b_conv3        #outsize = batch*4*6*60
EN1_a_conv3 = tf.nn.tanh(EN1_h_conv3)                               #outsize = batch*4*6*60

## max pooling layer 3
EN1_h_pool3 = max_pool_22(EN1_a_conv3)                              #outsize = batch*2*3*60
EN1_a_pool3 = tf.nn.tanh(EN1_h_pool3)                               #outsize = batch*2*3*60

## convolutional layer 4, kernel 2*2, insize 60, outsize 80
EN1_W_conv4 = weight_variable([2,2,60,80])
EN1_b_conv4 = bias_variable([80])
EN1_h_conv4 = conv2d(EN1_a_pool3, EN1_W_conv4) + EN1_b_conv4        #outsize = batch*1*2*80
EN1_a_conv4 = tf.nn.tanh(EN1_h_conv4)                               #outsize = batch*1*2*80

## flatten layer
EN1_x_flat = tf.reshape(EN1_a_conv4, [-1,160])                      #outsize = batch*160

## fully connected layer 1
EN1_W_fc1 = weight_variable([160,100])
EN1_b_fc1 = bias_variable([100])
EN1_h_fc1 = tf.matmul(EN1_x_flat, EN1_W_fc1) + EN1_b_fc1            #outsize = batch*100
## F1
x = tf.placeholder(tf.float32, shape=[None,39,39], name='x')        #input image matrix data to be fed
y = tf.placeholder(tf.float32, shape=[None,10], name='y')           #correct output labels to be fed
keep_prob = tf.placeholder(tf.float32, name='keep_prob')            #keep_prob parameter to be fed

x_image = tf.reshape(x, [-1,39,39,1])

## convolutional layer 1, kernel 4*4, insize 1, outsize 20
W_conv1 = tf.Variable(tf.truncated_normal(shape=[4,4,1,20], stddev=0.1), name='W_conv1')
b_conv1 = tf.Variable(tf.constant(0.1, shape=[20]), name='b_conv1')
h_conv1 = nl.conv_layer(x_image, W_conv1) + b_conv1                 #outsize = batch*36*36*20
a_conv1 = tf.nn.relu(h_conv1)                                       #outsize = batch*36*36*20

## max pooling layer 1
h_pool1 = nl.max_pool_22_layer(a_conv1)                             #outsize = batch*18*18*20
a_pool1 = tf.nn.relu(h_pool1)                                       #outsize = batch*18*18*20

## flatten layer
x_flat = tf.reshape(a_pool1, [-1,6480])                              #outsize = batch*6480

## fully connected layer 1
W_fc1 = tf.Variable(tf.truncated_normal(shape=[6480,120], stddev=0.1), name='W_fc1')
b_fc1 = tf.Variable(tf.constant(0.1, shape=[120]), name='b_fc1')
h_fc1 = tf.matmul(x_flat, W_fc1) + b_fc1                            #outsize = batch*120
a_fc1 = tf.nn.relu(h_fc1)                                           #outsize = batch*120
a_fc1_dropout = tf.nn.dropout(a_fc1, keep_prob)                     #dropout layer 1

## fully connected layer 2
W_fc2 = tf.Variable(tf.truncated_normal(shape=[120,10], stddev=0.1), name='W_fc2')
b_fc2 = tf.Variable(tf.constant(0.1, shape=[10]), name='b_fc2')
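
The F1 snippet stops at the fc2 parameters. A hedged sketch of how such a TF 1.x graph is usually finished; none of this appears in the original, and `batch_x`, `batch_y`, `test_x`, `test_y` are hypothetical data arrays:

## logits, loss and training (sketch, not from the original)
logits = tf.matmul(a_fc1_dropout, W_fc2) + b_fc2                    #outsize = batch*10
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
#if the 10 outputs are regressed coordinates rather than one-hot classes,
#an l2 loss such as tf.reduce_mean(tf.square(logits - y)) would replace this
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

correct = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    #dropout active while training, disabled (keep_prob=1.0) for evaluation
    sess.run(train_step, feed_dict={x: batch_x, y: batch_y, keep_prob: 0.5})
    print(sess.run(accuracy, feed_dict={x: test_x, y: test_y, keep_prob: 1.0}))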