    activation=None)  # raw logits: sigmoid_cross_entropy_with_logits applies the sigmoid itself

# Crop a 4-pixel border on each side so the loss is computed only on pixels
# with a complete receptive field
n = tf.shape(deconv1)[0]
szx = tf.shape(deconv1)[1]
szy = tf.shape(deconv1)[2]
logits_cropped = tf.slice(deconv1, [0, 4, 4, 0], [n, szx - 8, szy - 8, 1])
estimated = tf.nn.sigmoid(logits_cropped, name="estimated")

# Loss (computed on the raw logits, not on the sigmoid output)
logits_resized = tf.reshape(logits_cropped,
                            [-1, patch_size_label * patch_size_label])
labels_resized = tf.reshape(y, [-1, patch_size_label * patch_size_label])
labels_resized = tf.cast(labels_resized, tf.float32)
loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_resized,
                                            logits=logits_resized))

# Optimizer
train_op = tf.compat.v1.train.AdamOptimizer(
    lr, name="optimizer").minimize(loss)

# Initializer, saver, session
init = tf.compat.v1.global_variables_initializer()
saver = tf.compat.v1.train.Saver(max_to_keep=20)
sess = tf.compat.v1.Session()
sess.run(init)

# Export a SavedModel
create_savedmodel(sess, ["x:0", "y:0", "is_training:0"], ["estimated:0"],
                  params.outdir)
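# --- A minimal training-step sketch, not part of the original script:
# everything below is illustrative. It assumes the "x", "y" and "is_training"
# placeholders exported above, 4 input channels (an assumption), and input
# patches 8 pixels larger than patch_size_label to compensate for the crop.
import numpy as np

psz = patch_size_label + 8  # 4-pixel crop removed on each side
batch_x = np.random.rand(8, psz, psz, 4).astype(np.float32)
batch_y = np.random.randint(
    0, 2, size=(8, patch_size_label, patch_size_label, 1)).astype(np.int32)
_, loss_value = sess.run(
    [train_op, loss],
    feed_dict={"x:0": batch_x, "y:0": batch_y, "is_training:0": True})
print("loss:", loss_value)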
# Placeholders
x = tf.compat.v1.placeholder(tf.float32, [None, None, None, 4], name="x")
y = tf.compat.v1.placeholder(tf.int32, [None, None, None, 1], name="y")
lr = tf.compat.v1.placeholder_with_default(
    tf.constant(0.0002, dtype=tf.float32, shape=[]), shape=[], name="lr")

# Model output
y_estimated, y_label = my_model(x)

# Loss function
cost = tf.compat.v1.losses.sparse_softmax_cross_entropy(
    labels=tf.reshape(y, [-1, 1]),
    logits=tf.reshape(y_estimated, [-1, params.nclasses]))

# Optimizer
optimizer = tf.compat.v1.train.AdamOptimizer(
    learning_rate=lr, name="optimizer").minimize(cost)

# Initializer, saver, session
init = tf.compat.v1.global_variables_initializer()
saver = tf.compat.v1.train.Saver(max_to_keep=20)
sess = tf.compat.v1.Session()
sess.run(init)

# Create a SavedModel
create_savedmodel(sess, ["x:0", "y:0"], ["features:0", "prediction:0"],
                  params.outdir)
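# --- A minimal training-loop sketch, not part of the original script. The
# arrays "x_train" (N x psz x psz x 4, float32) and "y_train" (N x 1 x 1 x 1,
# int32 class indices) are hypothetical names, assumed to be prepared
# elsewhere. The learning rate keeps its 0.0002 default when "lr:0" is not
# fed.
batch_size = 32
for epoch in range(10):
    for i in range(0, len(x_train), batch_size):
        _, cost_value = sess.run(
            [optimizer, cost],
            feed_dict={"x:0": x_train[i:i + batch_size],
                       "y:0": y_train[i:i + batch_size]})
    print("epoch", epoch, "cost:", cost_value)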
                              params.n_dims, params.n_timestamps)
testPrediction = tf.argmax(pred_full, 1, name="prediction")

# Losses: the full network plus two auxiliary branches, weighted 0.3 each
loss_full = tf.compat.v1.losses.sparse_softmax_cross_entropy(
    labels=tf.reshape(y, [-1, 1]),
    logits=tf.reshape(pred_full, [-1, params.nclasses]))
loss_c1 = tf.compat.v1.losses.sparse_softmax_cross_entropy(
    labels=tf.reshape(y, [-1, 1]),
    logits=tf.reshape(pred_c1, [-1, params.nclasses]))
loss_c2 = tf.compat.v1.losses.sparse_softmax_cross_entropy(
    labels=tf.reshape(y, [-1, 1]),
    logits=tf.reshape(pred_c2, [-1, params.nclasses]))
cost = loss_full + (0.3 * loss_c1) + (0.3 * loss_c2)

# Optimizer
optimizer = tf.compat.v1.train.AdamOptimizer(
    learning_rate=learning_rate, name="optimizer").minimize(cost)

# Accuracy: y holds sparse class indices (see the losses above), so compare
# the predicted class against the labels directly; tf.argmax over a [-1, 1]
# label tensor would always return 0
correct = tf.equal(tf.argmax(pred_full, 1),
                   tf.cast(tf.reshape(y, [-1]), tf.int64))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float64))

# Initializer, saver, session
init = tf.compat.v1.global_variables_initializer()
saver = tf.compat.v1.train.Saver(max_to_keep=20)
sess = tf.compat.v1.Session()
sess.run(init)

create_savedmodel(sess, ["x_cnn:0", "x_rnn:0", "y:0"], ["prediction:0"],
                  params.outdir)
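# --- A minimal inference sketch, not part of the original script: restore
# the SavedModel written by create_savedmodel and run the "prediction"
# output. The serving tag and the input shapes (image patches for the CNN
# branch, a n_timestamps x n_dims sequence for the RNN branch, 16x16x4
# patches) are assumptions here.
import numpy as np

graph = tf.Graph()
with graph.as_default():
    with tf.compat.v1.Session(graph=graph) as test_sess:
        tf.compat.v1.saved_model.loader.load(
            test_sess, [tf.compat.v1.saved_model.tag_constants.SERVING],
            params.outdir)
        x_cnn_batch = np.random.rand(4, 16, 16, 4).astype(np.float32)
        x_rnn_batch = np.random.rand(
            4, params.n_timestamps, params.n_dims).astype(np.float32)
        pred = test_sess.run("prediction:0",
                             feed_dict={"x_cnn:0": x_cnn_batch,
                                        "x_rnn:0": x_rnn_batch})
        print("predicted classes:", pred)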