def test_should_not_add_dropout_not_in_training_mode_using_constant(self):
    """Dropout must be inactive when ``is_training`` is the constant ``False``."""
    with tf.Graph().as_default():
        inputs = tf.ones((1, 8, 8, 3))
        outputs = create_encoder_decoder(
            inputs,
            [5, 10],  # encoder layer specs
            [(5, 0.5), (3, 0.0)],  # decoder layer specs: (filters, dropout)
            is_training=False)
        with tf.Session() as session:
            session.run(tf.global_variables_initializer())
            # Without dropout the graph is deterministic, so two separate
            # evaluations are expected to be the same.
            first_run = session.run(outputs)
            second_run = session.run(outputs)
            assert_all_close(first_run, second_run)
def test_should_add_dropout_in_training_mode_using_placeholder(self):
    """Dropout must be active when ``is_training`` is a placeholder fed ``True``."""
    with tf.Graph().as_default():
        training_flag = tf.placeholder(tf.bool)
        inputs = tf.ones((1, 8, 8, 3))
        outputs = create_encoder_decoder(
            inputs,
            [5, 10],  # encoder layer specs
            [(5, 0.5), (3, 0.0)],  # decoder layer specs: (filters, dropout)
            is_training=training_flag)
        with tf.Session() as session:
            session.run(tf.global_variables_initializer())
            feed = {training_flag: True}
            # With dropout enabled each evaluation samples a fresh dropout
            # mask, so two runs are expected to differ.
            first_run = session.run(outputs, feed_dict=feed)
            second_run = session.run(outputs, feed_dict=feed)
            assert_all_not_close(first_run, second_run)
def test_should_allow_undefined_batch_size(self):
    """The model must build and run with a dynamic (None) batch dimension."""
    with tf.Graph().as_default():
        dynamic_shape = [None, 8, 8, 3]
        inputs = tf.placeholder(tf.float32, dynamic_shape)
        outputs = create_encoder_decoder(
            inputs,
            [5, 10],  # encoder layer specs
            [(5, 0.5), (3, 0.0)],  # decoder layer specs: (filters, dropout)
            is_training=False)
        # The static output shape should keep the batch dimension undefined.
        assert outputs.get_shape().as_list() == dynamic_shape
        with tf.Session() as session:
            session.run(tf.global_variables_initializer())
            # Feed a concrete batch of one; dropout is off, so repeated
            # evaluations must agree.
            feed = {inputs: np.ones([1] + dynamic_shape[1:])}
            first_run = session.run(outputs, feed_dict=feed)
            second_run = session.run(outputs, feed_dict=feed)
            assert_all_close(first_run, second_run)