# Tail of a test-set-building loop: store the normalized landmark vector for
# sample i + 11439.
# NOTE(review): i, newlandmarks and y_test are defined by enclosing code not
# visible in this chunk — confirm against the surrounding loop.
y_test[i + 11439, :] = newlandmarks

## F1
# F1 network graph: 39*39 grey image in, 10 landmark coordinates out
# (shapes grounded in the placeholder definitions below).
x = tf.placeholder(tf.float32, shape=[None, 39, 39], name='x')  # input imagematrix_data to be fed
y = tf.placeholder(tf.float32, shape=[None, 10], name='y')  # correct output to be fed
keep_prob = tf.placeholder(tf.float32, name='keep_prob')  # keep_prob parameter to be fed
x_image = tf.reshape(x, [-1, 39, 39, 1])  # add the single grey channel dimension

## convolutional layer 1, kernel 4*4, insize 1, outsize 20
W_conv1 = nl.weight_variable([4, 4, 1, 20])
b_conv1 = nl.bias_variable([20])
h_conv1 = nl.conv_layer(x_image, W_conv1) + b_conv1  # outsize = batch*36*36*20
a_conv1 = tf.nn.relu(h_conv1)  # outsize = batch*36*36*20

## max pooling layer 1
h_pool1 = nl.max_pool_22_layer(a_conv1)  # outsize = batch*18*18*20
a_pool1 = tf.nn.relu(h_pool1)  # outsize = batch*18*18*20

## convolutional layer 2, kernel 3*3, insize 20, outsize 40
W_conv2 = nl.weight_variable([3, 3, 20, 40])
b_conv2 = nl.bias_variable([40])
h_conv2 = nl.conv_layer(a_pool1, W_conv2) + b_conv2  # outsize = batch*16*16*40
a_conv2 = tf.nn.relu(h_conv2)  # outsize = batch*16*16*40

## max pooling layer 2
h_pool2 = nl.max_pool_22_layer(a_conv2)  # outsize = batch*8*8*40
a_pool2 = tf.nn.relu(h_pool2)  # outsize = batch*8*8*40
# Tail of a test-set-building loop: normalize the y landmark into pixel units
# of the 39-wide frame, then store image and landmarks for sample
# 14390 + i*10 + j.
# NOTE(review): i, j, ry, imagematrix, newlandmarks, x_test and y_test come
# from enclosing loop code not visible in this chunk — confirm against caller.
newlandmarks[1] = (1 - ry) / 2 * 39  # ratio y
# one dimension which represents one grey picture, set the first dimension as index
x_test[14390 + i*10 + j, :, :] = imagematrix
y_test[14390 + i*10 + j, :] = newlandmarks

## RE31
# RE31 network graph: 15*15 grey image in, 2 coordinates out; uses tanh
# activations (unlike the relu-based F1 graphs elsewhere in this file).
x = tf.placeholder(tf.float32, shape=[None, 15, 15], name='x')  # input imagematrix_data to be fed
y = tf.placeholder(tf.float32, shape=[None, 2], name='y')  # correct output to be fed
keep_prob = tf.placeholder(tf.float32, name='keep_prob')  # keep_prob parameter to be fed
x_image = tf.reshape(x, [-1, 15, 15, 1])  # add the single grey channel dimension

## convolutional layer 1, kernel 4*4, insize 1, outsize 20
W_conv1 = nl.weight_variable([4, 4, 1, 20])
b_conv1 = nl.bias_variable([20])
h_conv1 = nl.conv_layer(x_image, W_conv1) + b_conv1  # outsize = batch*12*12*20
a_conv1 = tf.nn.tanh(h_conv1)  # outsize = batch*12*12*20

## max pooling layer 1
h_pool1 = nl.max_pool_22_layer(a_conv1)  # outsize = batch*6*6*20
a_pool1 = tf.nn.tanh(h_pool1)  # outsize = batch*6*6*20

## convolutional layer 2, kernel 3*3, insize 20, outsize 40
W_conv2 = nl.weight_variable([3, 3, 20, 40])
b_conv2 = nl.bias_variable([40])
h_conv2 = nl.conv_layer(a_pool1, W_conv2) + b_conv2  # outsize = batch*4*4*40
a_conv2 = tf.nn.tanh(h_conv2)  # outsize = batch*4*4*40

## max pooling layer 2
h_pool2 = nl.max_pool_22_layer(a_conv2)  # outsize = batch*2*2*40
a_pool2 = tf.nn.tanh(h_pool2)  # outsize = batch*2*2*40
# Tail of a test-set-building loop: map a raw landmark coordinate into the
# normalized 39-pixel frame (offset by the table value at row i+1440 plus a 5%
# margin, scaled by 1.1*height — presumably the face-box origin/size from the
# annotation spreadsheet; verify against the loop that fills test_table),
# then store image and landmarks for sample i + 1439.
# NOTE(review): k, i, rawlandmarks, test_table, height, imagematrix,
# newlandmarks, x_test and y_test are defined by enclosing code not visible here.
newlandmarks[k] = (rawlandmarks[k].value - test_table.cell(i+1440, 1).value + 0.05 * height) / (1.1 * height) * 39
# one dimension which represents one grey picture, set the first dimension as index
x_test[i + 1439, :, :] = imagematrix
y_test[i + 1439, :] = newlandmarks

## F1
# F1 network graph: 39*39 grey image in, 10 landmark coordinates out; this
# variant builds its variables inline instead of via nl.weight_variable.
x = tf.placeholder(tf.float32, shape=[None, 39, 39], name='x')  # input imagematrix_data to be fed
y = tf.placeholder(tf.float32, shape=[None, 10], name='y')  # correct output to be fed
keep_prob = tf.placeholder(tf.float32, name='keep_prob')  # keep_prob parameter to be fed
x_image = tf.reshape(x, [-1, 39, 39, 1])  # add the single grey channel dimension

## convolutional layer 1, kernel 4*4, insize 1, outsize 20
W_conv1 = tf.Variable(tf.truncated_normal(shape=[4, 4, 1, 20], stddev=0.1), name='W_conv1')
b_conv1 = tf.Variable(tf.constant(0.1, shape=[20]), name='b_conv1')
h_conv1 = nl.conv_layer(x_image, W_conv1) + b_conv1  # outsize = batch*36*36*20
a_conv1 = tf.nn.relu(h_conv1)  # outsize = batch*36*36*20

## max pooling layer 1
h_pool1 = nl.max_pool_22_layer(a_conv1)  # outsize = batch*18*18*20
a_pool1 = tf.nn.relu(h_pool1)  # outsize = batch*18*18*20

## flatten layer
x_flat = tf.reshape(a_pool1, [-1, 6480])  # outsize = batch*6480 (18*18*20 = 6480)

## fully connected layer 1
W_fc1 = tf.Variable(tf.truncated_normal(shape=[6480, 120], stddev=0.1), name='W_fc1')
b_fc1 = tf.Variable(tf.constant(0.1, shape=[120]), name='b_fc1')
h_fc1 = tf.matmul(x_flat, W_fc1) + b_fc1  # outsize = batch*120
a_fc1 = tf.nn.relu(h_fc1)  # outsize = batch*120
a_fc1_dropout = tf.nn.dropout(a_fc1, keep_prob)  # dropout layer 1