# Example #1
# (stray notebook cell output: 0)
print(Y_train.shape)  # sanity-check the training-label array (Y_train is defined elsewhere)

# TF1 session configuration: use the best-fit-with-coalescing GPU allocator.
# NOTE(review): `config` is never passed to a tf.Session in this chunk — confirm
# it is consumed later (e.g. tf.Session(config=config)).
config = tf.ConfigProto()
config.gpu_options.allocator_type = 'BFC'

# Hyperparameters for this experiment (full-size 189x252 images).
imw = 189  #600 #189
imh = 252  #800 #252
n_classes = 2
epochs = 25
batch_size = 32
keep_probability = 0.5  # dropout keep probability fed to `keep_prob` at train time

tf.reset_default_graph()  # discard any previously built default graph

# Inputs
# Placeholders built by project helpers in `cnn` (TF1 graph mode).
x = cnn.neural_net_image_input((imw, imh, 3))
y = cnn.neural_net_label_input(n_classes)
keep_prob = cnn.neural_net_keep_prob_input()

# Model
# nn1 = cnn.create_conv2d(x, 32, strides=[8, 8], w_name='W1')
# First conv layer built manually: 32 filters, stride 8x8, Xavier init (seeded
# for reproducibility), SAME padding.
w_size, c_strides = cnn.get_weights_shape(x, 32, [8, 8])
W1 = tf.get_variable('W1',
                     w_size,
                     initializer=tf.contrib.layers.xavier_initializer(seed=0))
Z1 = tf.nn.conv2d(x, W1, strides=c_strides, padding='SAME', name='W1_conv2d')

nn2 = tf.nn.relu(Z1)  # ReLU activation on the conv output
# 2x2 max-pool, stride 2.
# NOTE(review): this call is truncated in this chunk — the `padding` argument
# and closing parenthesis are missing; the file appears to be a concatenation
# of two fragments starting at the `X_dev` prints below. Recover the original
# statement before running.
nn3 = tf.nn.max_pool(nn2,
                     ksize=[1, 2, 2, 1],
                     strides=[1, 2, 2, 1],
print('X_dev', X_dev.shape)  # sanity-check dev-set arrays (defined elsewhere)
print('Y_dev', Y_dev.shape)

# TF1 session configuration: best-fit-with-coalescing GPU allocator.
# NOTE(review): `config` is never passed to a tf.Session in this chunk — confirm
# it is consumed later.
config = tf.ConfigProto()
config.gpu_options.allocator_type = 'BFC'

# Hyperparameters for this experiment (downscaled 90x120 images).
imw = 90
imh = 120
n_classes = 2
epochs = 7
batch_size = 256
keep_probability = 0.5  # dropout keep probability fed to `keep_prob` at train time

tf.reset_default_graph()  # discard any previously built default graph

# Placeholders built by project helpers in `cnn` (TF1 graph mode).
x = cnn.neural_net_image_input((imw, imh, 3), name='net')
y = cnn.neural_net_label_input(n_classes)
keep_prob = cnn.neural_net_keep_prob_input()

logits = make_simple_logits(x, keep_prob)
# Name logits Tensor, so that it can be loaded from disk after training
logits = tf.identity(logits, name='logits')

# Loss and Optimizer
# NOTE(review): tf.nn.softmax_cross_entropy_with_logits is deprecated in TF1 in
# favour of the _v2 variant; behaviour differs only when gradients should flow
# into `labels` — confirm before upgrading.
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)  # Adam with default lr=0.001
# optimizer = tf.train.GradientDescentOptimizer(3).minimize(cost)

# Accuracy
# NOTE(review): `p` picks out a single logit value ([0][0]); the accuracy
# computation evidently continues past the end of this chunk — verify there.
p = logits[0][0]