def conv_net_check3(x, keep_prob):
    layer = create_conv2d(x, 10, strides=[2, 2], w_name='W1')
    layer = tf.nn.relu(layer)
    layer = tf.nn.max_pool(layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                           padding='SAME')

    layer = create_conv2d(layer, 20, strides=[2, 2], w_name='W2')
    layer = tf.nn.relu(layer)
    layer = tf.nn.max_pool(layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                           padding='SAME')

    layer = create_conv2d(layer, 40, strides=[2, 2], w_name='W3')
    layer = tf.nn.relu(layer)
    layer = tf.nn.max_pool(layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                           padding='SAME')

    layer = create_conv2d(layer, 80, strides=[2, 2], w_name='W4')
    layer = tf.nn.relu(layer)
    layer = tf.nn.max_pool(layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                           padding='SAME')

    # Assign the dropout output; the original call discarded its result, so
    # dropout was never actually applied at this point.
    layer = tf.nn.dropout(layer, keep_prob=keep_prob)

    layer = tf.contrib.layers.flatten(layer)
    layer = tf.contrib.layers.fully_connected(layer, 400)
    layer = tf.nn.dropout(layer, keep_prob)
    layer = tf.contrib.layers.fully_connected(layer, 200)
    layer = tf.nn.dropout(layer, keep_prob)
    return tf.contrib.layers.fully_connected(layer, 10, activation_fn=None)
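# Hedged usage sketch (not part of the original script): one way the 10-class
# logits from conv_net_check3 could be wired into a softmax cross-entropy loss
# with an Adam optimizer. The cnn.* input helpers mirror those used later in
# this file; the 32x32x3 input shape and learning rate are assumptions made
# purely for illustration.
def train_ops_for_check3_sketch():
    x = cnn.neural_net_image_input((32, 32, 3))
    y = cnn.neural_net_label_input(10)
    keep_prob = cnn.neural_net_keep_prob_input()
    logits = conv_net_check3(x, keep_prob)
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
    optimizer = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(cost)
    return cost, optimizer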
def make_logits(tensor, keep_prob):
    # Uses a module-level counter so repeated graph builds get unique weight names.
    global n
    n += 1

    # Inputs
    # keep_prob = cnn.neural_net_keep_prob_input()

    # Model
    nn = cnn.create_conv2d(tensor, 128, strides=[32, 32], w_name='W1' + str(n))
    nn = tf.nn.relu(nn)
    nn = tf.nn.max_pool(nn, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                        padding='SAME')

    nn = cnn.create_conv2d(nn, 256, strides=[16, 16], w_name='W2' + str(n))
    nn = tf.nn.relu(nn)
    nn = tf.nn.max_pool(nn, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                        padding='SAME')

    # nn = cnn.create_conv2d(nn, 256, strides=[8, 8], w_name='W3' + str(n))
    # nn = tf.nn.relu(nn)
    # nn = tf.nn.max_pool(nn, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    #
    # nn = cnn.create_conv2d(nn, 512, strides=[8, 8], w_name='W4' + str(n))
    # nn = tf.nn.relu(nn)
    # nn = tf.nn.max_pool(nn, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    # tf.nn.dropout(nn, keep_prob=keep_prob)
    #
    # nn = cnn.create_conv2d(nn, 512, strides=[6, 6], w_name='W5' + str(n))
    # nn = tf.nn.relu(nn)
    # nn = tf.nn.max_pool(nn, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    #
    # tf.nn.dropout(nn, keep_prob=keep_prob)
    #
    # nn = cnn.create_conv2d(nn, 1024, strides=[6, 6], w_name='W6' + str(n))
    # nn = tf.nn.relu(nn)
    # nn = tf.nn.max_pool(nn, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Assign the dropout output; the original call discarded its result.
    nn = tf.nn.dropout(nn, keep_prob=keep_prob)

    # nn = cnn.create_conv2d(nn, 1024, strides=[3, 3], w_name='W7' + str(n))
    # nn = tf.nn.relu(nn)
    # nn = tf.nn.max_pool(nn, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    layer = tf.contrib.layers.flatten(nn)
    # layer = tf.contrib.layers.fully_connected(layer, 2048, activation_fn=tf.nn.relu)
    layer = tf.nn.dropout(layer, keep_prob=keep_prob)
    layer = tf.contrib.layers.fully_connected(layer, 1024, activation_fn=tf.nn.relu)
    # layer = tf.contrib.layers.fully_connected(layer, 100, activation_fn=tf.nn.relu)
    layer = tf.contrib.layers.fully_connected(layer, 1, activation_fn=None)
    return layer
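# Hedged sketch only: make_logits ends in a single linear unit, so a plausible
# training objective is mean squared error against a scalar target. The y
# placeholder and the learning rate below are assumptions for illustration,
# not part of the original code.
def regression_cost_sketch(x, keep_prob):
    y = tf.placeholder(tf.float32, shape=(None, 1), name='y_target')
    logits = make_logits(x, keep_prob)
    cost = tf.reduce_mean(tf.squared_difference(logits, y))
    train_op = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)
    return y, cost, train_op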
def make_logits_for_live_prediction(tensor, keep_prob):
    nn = cnn.create_conv2d(tensor, 64, strides=[4, 4], w_name='W1')
    nn = tf.nn.relu(nn)
    nn = tf.nn.max_pool(nn, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                        padding='SAME')

    nn = cnn.create_conv2d(nn, 128, strides=[4, 4], w_name='W2')
    nn = tf.nn.relu(nn)
    nn = tf.nn.max_pool(nn, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                        padding='SAME')

    nn = cnn.create_conv2d(nn, 256, strides=[2, 2], w_name='W4')
    nn = tf.nn.relu(nn)
    nn = tf.nn.max_pool(nn, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                        padding='SAME')

    nn = cnn.create_conv2d(nn, 512, strides=[2, 2], w_name='W5')
    nn = tf.nn.relu(nn)
    nn = tf.nn.max_pool(nn, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                        padding='SAME')

    # nn = cnn.create_conv2d(nn, 512, strides=[2, 2], w_name='W6')
    # nn = tf.nn.relu(nn)
    # nn = tf.nn.max_pool(nn, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    #
    # nn = cnn.create_conv2d(nn, 1024, strides=[2, 2], w_name='W7')
    # nn = tf.nn.relu(nn)
    # nn = tf.nn.max_pool(nn, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Assign the dropout output (the original call discarded it) and flatten
    # before the dense layers, matching make_logits above, so the final logits
    # come out as [batch, 1] rather than a 4-D tensor.
    nn = tf.nn.dropout(nn, keep_prob=keep_prob)
    layer = tf.contrib.layers.flatten(nn)
    layer = tf.contrib.layers.fully_connected(layer, 1024)  # activation_fn=tf.nn.relu
    layer = tf.nn.dropout(layer, keep_prob=keep_prob)
    layer = tf.contrib.layers.fully_connected(layer, 512)  # activation_fn=tf.nn.relu
    layer = tf.nn.dropout(layer, keep_prob=keep_prob)
    layer = tf.contrib.layers.fully_connected(layer, 1, activation_fn=None)
    return layer
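# Hedged sketch of how the live-prediction graph above might be evaluated:
# restore a trained checkpoint and run the logits on a single frame with
# dropout disabled (keep_prob=1.0). The checkpoint path is hypothetical, and
# the 90x120 RGB input shape is taken from the script section further below.
import numpy as np

def predict_one_frame_sketch(frame):
    x = cnn.neural_net_image_input((90, 120, 3))
    keep_prob = cnn.neural_net_keep_prob_input()
    logits = make_logits_for_live_prediction(x, keep_prob)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, './checkpoints/model.ckpt')  # hypothetical path
        return sess.run(logits,
                        feed_dict={x: frame[np.newaxis, ...], keep_prob: 1.0})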
keep_prob = cnn.neural_net_keep_prob_input()

# Model
# nn1 = cnn.create_conv2d(x, 32, strides=[8, 8], w_name='W1')
w_size, c_strides = cnn.get_weights_shape(x, 32, [8, 8])
W1 = tf.get_variable('W1', w_size,
                     initializer=tf.contrib.layers.xavier_initializer(seed=0))
Z1 = tf.nn.conv2d(x, W1, strides=c_strides, padding='SAME', name='W1_conv2d')
nn2 = tf.nn.relu(Z1)
nn3 = tf.nn.max_pool(nn2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                     padding='SAME')

nn = cnn.create_conv2d(nn3, 64, strides=[4, 4], w_name='W2')
nn = tf.nn.relu(nn)
nn = tf.nn.max_pool(nn, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                    padding='SAME')

nn = cnn.create_conv2d(nn, 128, strides=[3, 3], w_name='W3')
nn = tf.nn.relu(nn)
nn = tf.nn.max_pool(nn, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                    padding='SAME')

nn = cnn.create_conv2d(nn, 256, strides=[3, 3], w_name='W4')
nn = tf.nn.relu(nn)
# The original snippet breaks off mid-call here; the strides/padding below
# follow the same pattern as every other pooling layer in this file.
nn = tf.nn.max_pool(nn, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                    padding='SAME')
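# For reference, a minimal sketch of what cnn.create_conv2d presumably does,
# inferred from the manual W1/Z1 expansion above; the real helper in the cnn
# module may differ in details (padding, initializer seed, naming).
def create_conv2d_sketch(inputs, num_filters, strides, w_name):
    w_size, c_strides = cnn.get_weights_shape(inputs, num_filters, strides)
    W = tf.get_variable(w_name, w_size,
                        initializer=tf.contrib.layers.xavier_initializer(seed=0))
    return tf.nn.conv2d(inputs, W, strides=c_strides, padding='SAME',
                        name=w_name + '_conv2d')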
imw = 90
imh = 120
n_classes = 1
epochs = 15
batch_size = 64
keep_probability = 0.5

tf.reset_default_graph()

# Inputs
x = cnn.neural_net_image_input((imw, imh, 3))
y = cnn.neural_net_label_input(n_classes)
keep_prob = cnn.neural_net_keep_prob_input()

# Model
nn = cnn.create_conv2d(x, 64, strides=[3, 3], w_name='W1')
nn = tf.nn.relu(nn, name='W1_activated')
nn = tf.nn.max_pool(nn, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                    padding='SAME')

nn = cnn.create_conv2d(nn, 128, strides=[3, 3], w_name='W2')
nn = tf.nn.relu(nn, name='W2_activated')
nn = tf.nn.max_pool(nn, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                    padding='SAME')

nn = cnn.create_conv2d(nn, 256, strides=[2, 2], w_name='W3')
nn = tf.nn.relu(nn, name='W3_activated')