Example #1
EN1_h_conv4 = conv2d(EN1_a_pool3, EN1_W_conv4) + EN1_b_conv4        #outsize = batch*1*2*80
EN1_a_conv4 = tf.nn.tanh(EN1_h_conv4)                               #outsize = batch*1*2*80

## flatten layer
EN1_x_flat = tf.reshape(EN1_a_conv4, [-1,160])                      #outsize = batch*160

## fully connected layer 1
EN1_W_fc1 = weight_variable([160,100])
EN1_b_fc1 = bias_variable([100])
EN1_h_fc1 = tf.matmul(EN1_x_flat, EN1_W_fc1) + EN1_b_fc1            #outsize = batch*100
EN1_a_fc1 = tf.nn.relu(EN1_h_fc1)                                   #outsize = batch*100
EN1_a_fc1_dropout = tf.nn.dropout(EN1_a_fc1, keep_prob)             #dropout layer 1

## fully connected layer 2
EN1_W_fc2 = weight_variable([100,6])
EN1_b_fc2 = bias_variable([6])
EN1_h_fc2 = tf.matmul(EN1_a_fc1_dropout, EN1_W_fc2) + EN1_b_fc2     #outsize = batch*6
EN1_a_fc2 = tf.nn.relu(EN1_h_fc2)                                   #outsize = batch*6

#regularization and loss function
original_cost = tf.reduce_mean(tf.pow(y - EN1_a_fc2, 2))            #mean squared error over the batch
tv = tf.trainable_variables()                                       #collect every trainable variable for L2 regularization
regularization_cost = 2 * tf.reduce_sum([ tf.nn.l2_loss(v) for v in tv ])   #2 is the regularization strength (a hyperparameter)
cost = original_cost + regularization_cost
Optimizer = tf.train.AdamOptimizer(0.0001).minimize(cost)
init = tf.global_variables_initializer()
#per-output mean of the signed error over the batch (an error metric, not a true accuracy)
accuracy = tf.reduce_mean((y - EN1_a_fc2), 0)                       #outsize = 6
testaccuracy = np.zeros([27,6], dtype=np.float32)
#save the model
saver = tf.train.Saver()
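
All three examples rely on helper functions (weight_variable, bias_variable, conv2d / nl.conv_layer, nl.max_pool_22_layer) whose definitions are not shown; Example #1 calls them unqualified, while Examples #2 and #3 go through a module named nl. The sketch below is a reconstruction, not the original code: stride-1 VALID convolution and 2*2/stride-2 pooling are implied by the outsize comments (e.g. 15 -> 12 under a 4*4 kernel, then 12 -> 6 after pooling), whereas the initializers are assumptions in the common TF 1.x tutorial style.

## plausible helper definitions (a sketch; only the shapes and padding are implied by the comments above)
import tensorflow as tf

def weight_variable(shape):
    #assumed initializer; the original nl module may use something else
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias_variable(shape):
    #assumed initializer; the original nl module may use something else
    return tf.Variable(tf.constant(0.1, shape=shape))

def conv2d(x, W):
    #stride 1 with VALID padding reproduces the commented sizes (15 -> 12 under a 4*4 kernel)
    return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='VALID')

conv_layer = conv2d                                                 #Examples #2 and #3 call this conv_layer via nl

def max_pool_22_layer(x):
    #a 2*2 window with stride 2 halves each spatial dimension (12 -> 6)
    return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID')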
Example #2
        newlandmarks[0] = (1-rx) / 2 * 39                           #map the x ratio in [-1,1] to a pixel coordinate in [0,39]
        newlandmarks[1] = (1-ry) / 2 * 39                           #map the y ratio in [-1,1] to a pixel coordinate in [0,39]
        #the first dimension indexes samples; each slice holds one greyscale image
        x_test[14390+i*10+j,:,:] = imagematrix
        y_test[14390+i*10+j,:] = newlandmarks
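
The (1-r)/2 * 39 expression above maps a normalized ratio r in [-1,1] onto a pixel coordinate along a 39-pixel axis. A quick endpoint check (plain arithmetic, assuming nothing beyond the formula shown):

## endpoint check for the ratio-to-pixel mapping
assert (1 - 1) / 2 * 39 == 0                                        #r = +1 lands on pixel 0
assert (1 - (-1)) / 2 * 39 == 39                                    #r = -1 lands on pixel 39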

## RE31
x = tf.placeholder(tf.float32, shape=[None,15,15], name='x')        #input imagematrix_data to be fed
y = tf.placeholder(tf.float32, shape=[None,2], name='y')           #correct output to be fed
keep_prob = tf.placeholder(tf.float32, name='keep_prob')            #keep_prob parameter to be fed

x_image = tf.reshape(x, [-1,15,15,1])

## convolutional layer 1, kernel 4*4, insize 1, outsize 20
W_conv1 = nl.weight_variable([4,4,1,20])
b_conv1 = nl.bias_variable([20])
h_conv1 = nl.conv_layer(x_image, W_conv1) + b_conv1                 #outsize = batch*12*12*20
a_conv1 = tf.nn.tanh(h_conv1)                                       #outsize = batch*12*12*20

## max pooling layer 1
h_pool1 = nl.max_pool_22_layer(a_conv1)                             #outsize = batch*6*6*20
a_pool1 = tf.nn.tanh(h_pool1)                                       #outsize = batch*6*6*20

## convolutional layer 2, kernel 3*3, insize 20, outsize 40
W_conv2 = nl.weight_variable([3,3,20,40])
b_conv2 = nl.bias_variable([40])
h_conv2 = nl.conv_layer(a_pool1, W_conv2) + b_conv2                 #outsize = batch*4*4*40
a_conv2 = tf.nn.tanh(h_conv2)                                       #outsize = batch*4*4*40

## max pooling layer 2
h_pool2 = nl.max_pool_22_layer(a_conv2)                             #outsize = batch*2*2*40
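
The outsize comments above can be verified with TensorFlow's static shape inference; a quick check (the '?' is the unknown batch dimension):

## sanity-check the commented shapes
print(a_conv1.get_shape())                                          #(?, 12, 12, 20)
print(h_pool1.get_shape())                                          #(?, 6, 6, 20)
print(h_pool2.get_shape())                                          #(?, 2, 2, 40)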
Example #3
    x_test[i + 11439, :, :] = imagematrix
    y_test[i + 11439, :] = newlandmarks

## F1
x = tf.placeholder(tf.float32, shape=[None, 39, 39],
                   name='x')  #input imagematrix_data to be fed
y = tf.placeholder(tf.float32, shape=[None, 10],
                   name='y')  #correct output to be fed
keep_prob = tf.placeholder(tf.float32,
                           name='keep_prob')  #keep_prob parameter to be fed

x_image = tf.reshape(x, [-1, 39, 39, 1])

## convolutional layer 1, kernel 4*4, insize 1, outsize 20
W_conv1 = nl.weight_variable([4, 4, 1, 20])
b_conv1 = nl.bias_variable([20])
h_conv1 = nl.conv_layer(x_image, W_conv1) + b_conv1  #outsize = batch*36*36*20
a_conv1 = tf.nn.relu(h_conv1)  #outsize = batch*36*36*20

## max pooling layer 1
h_pool1 = nl.max_pool_22_layer(a_conv1)  #outsize = batch*18*18*20
a_pool1 = tf.nn.relu(h_pool1)  #outsize = batch*18*18*20

## convolutional layer 2, kernel 3*3, insize 20, outsize 40
W_conv2 = nl.weight_variable([3, 3, 20, 40])
b_conv2 = nl.bias_variable([40])
h_conv2 = nl.conv_layer(a_pool1, W_conv2) + b_conv2  #outsize = batch*16*16*40
a_conv2 = tf.nn.relu(h_conv2)  #outsize = batch*16*16*40

## max pooling layer 2
h_pool2 = nl.max_pool_22_layer(a_conv2)  #outsize = batch*8*8*40
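
None of the snippets shows the training phase itself. Below is a minimal sketch of how such a TF 1.x graph is typically driven; Optimizer and accuracy follow the pattern of Example #1, and x_train / y_train are hypothetical in-memory arrays, so none of these names are guaranteed by the original code.

## minimal training/evaluation loop (a sketch, not the original code)
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        #keep_prob = 0.5 enables dropout only while training
        sess.run(Optimizer, feed_dict={x: x_train, y: y_train, keep_prob: 0.5})
    #keep_prob = 1.0 disables dropout for evaluation
    err = sess.run(accuracy, feed_dict={x: x_test, y: y_test, keep_prob: 1.0})
    saver.save(sess, './model.ckpt')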