def _process_attributes(images, vocab, tagger):
    """Processes a list of images.

    Args:
        images: a list containing BestFirstMetadata objects.
        vocab: a Vocabulary object.
        tagger: a Tagger object from nltk.

    Returns:
        a list of AttributeMetadata objects.
    """
    attribute_map = get_visual_attributes()
    attribute_images = []
    for image in images:
        caption = image.captions[0]
        attributes = attribute_map.sentence_to_attributes(caption)
        attribute_images.append(AttributeMetadata(
            image_id=image.image_id,
            filename=image.filename,
            captions=image.captions,
            image_features=image.image_features,
            object_features=image.object_features,
            running_ids=image.running_ids,
            running_ids_splits=image.running_ids_splits,
            word_ids=image.word_ids,
            pointer_ids=image.pointer_ids,
            attributes=attributes))
    return attribute_images
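
# The attribute extraction above delegates to the repository's visual
# attribute vocabulary. As a rough sketch, `sentence_to_attributes` might
# match caption tokens against a fixed attribute word list (an assumption;
# the real class may also handle plurals and multi-word attributes):

import numpy as np

class SketchAttributeMap(object):
    """Hypothetical stand-in for the object from get_visual_attributes()."""

    def __init__(self, attribute_words):
        self.reverse_vocab = list(attribute_words)
        self._word_to_id = {w: i for i, w in enumerate(self.reverse_vocab)}

    def sentence_to_attributes(self, sentence):
        """Returns a multi-hot indicator vector over the attribute words."""
        indicators = np.zeros([len(self.reverse_vocab)], dtype=np.float32)
        for token in sentence.lower().split():
            if token in self._word_to_id:
                indicators[self._word_to_id[token]] = 1.0
        return indicators

# Example: only "dog" and "red" are in the attribute vocabulary.
sketch_map = SketchAttributeMap(["dog", "red", "running"])
assert sketch_map.sentence_to_attributes("a red dog").tolist() == [1.0, 1.0, 0.0]
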
def __init__(self):
    """Creates handles to the TensorFlow computational graph."""
    # TensorFlow ops for JPEG decoding.
    self.encoded_jpeg = tf.placeholder(dtype=tf.string)
    self.decoded_jpeg = tf.image.decode_jpeg(self.encoded_jpeg, channels=3)
    self.decoded_jpeg = tf.image.resize_images(
        self.decoded_jpeg, [FLAGS.image_height, FLAGS.image_width])
    # Create the model to extract image boxes.
    self.box_extractor = BoxExtractor(get_faster_rcnn_config(),
                                      trainable=False)
    self.image_tensor = tf.placeholder(
        tf.float32, name='image_tensor',
        shape=[None, FLAGS.image_height, FLAGS.image_width, 3])
    self.boxes, self.scores, self.cropped_images = self.box_extractor(
        self.image_tensor)
    # Create a single TensorFlow Session for all image decoding calls.
    self.sess = tf.Session()
    self.rcnn_saver = tf.train.Saver(var_list=self.box_extractor.variables)
    self.rcnn_saver.restore(self.sess, get_faster_rcnn_checkpoint())
    self.lock = threading.Lock()
    self.attribute_map = get_visual_attributes()
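
# The single Session plus `threading.Lock` above lets many worker threads
# share one copy of the Faster R-CNN weights. A minimal sketch of the call
# pattern, assuming `decoder` is an instance of the enclosing class (the
# function name here is illustrative, not from the source):

def process_jpeg_sketch(decoder, encoded_jpeg_bytes):
    """Decodes one JPEG and extracts boxes under the shared lock."""
    with decoder.lock:
        # Decode and resize the raw bytes to the model's input size.
        pixels = decoder.sess.run(
            decoder.decoded_jpeg,
            feed_dict={decoder.encoded_jpeg: encoded_jpeg_bytes})
        # Add a batch dimension and run the box extractor.
        return decoder.sess.run(
            [decoder.boxes, decoder.scores, decoder.cropped_images],
            feed_dict={decoder.image_tensor: pixels[None]})
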
def __init__(self):
    """Creates handles to the TensorFlow computational graph."""
    # TensorFlow ops for JPEG decoding.
    self.encoded_jpeg = tf.placeholder(dtype=tf.string)
    self.decoded_jpeg = tf.image.decode_jpeg(self.encoded_jpeg, channels=3)
    self.decoded_jpeg = tf.image.resize_images(
        self.decoded_jpeg, [FLAGS.image_height, FLAGS.image_width])
    # Create the model to extract image boxes.
    self.box_extractor = BoxExtractor(get_faster_rcnn_config(),
                                      trainable=False)
    self.image_tensor = tf.placeholder(
        tf.float32, name='image_tensor',
        shape=[None, FLAGS.image_height, FLAGS.image_width, 3])
    self.boxes, self.scores, self.cropped_images = self.box_extractor(
        self.image_tensor)
    # Create the model to extract the image features.
    self.feature_extractor = FeatureExtractor(is_training=False,
                                              global_pool=False)
    # Compute the mean ResNet-101 features.
    self.image_features = tf.reduce_mean(
        self.feature_extractor(self.image_tensor), [1, 2])
    feature_batch = tf.shape(self.image_features)[0]
    feature_depth = tf.shape(self.image_features)[1]
    self.object_features = tf.reduce_mean(
        self.feature_extractor(self.cropped_images), [1, 2])
    self.object_features = tf.reshape(
        self.object_features, [feature_batch, 100, feature_depth])
    # Create a single TensorFlow Session for all image decoding calls.
    self.sess = tf.Session()
    rcnn_saver = tf.train.Saver(var_list=self.box_extractor.variables)
    resnet_saver = tf.train.Saver(var_list=self.feature_extractor.variables)
    rcnn_saver.restore(self.sess, get_faster_rcnn_checkpoint())
    resnet_saver.restore(self.sess, get_resnet_v2_101_checkpoint())
    self.lock = threading.Lock()
    self.attribute_map = get_visual_attributes()
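
# The reshape above assumes exactly 100 cropped boxes per image: after mean
# pooling, the per-crop ResNet features are shaped [batch * 100, depth] and
# fold back into [batch, 100, depth]. A self-contained NumPy sketch of that
# shape arithmetic (the depth of 2048 is the usual ResNet-101 output size):

import numpy as np

def fold_object_features(crop_features, num_boxes=100):
    """Folds [batch * num_boxes, depth] features into
    [batch, num_boxes, depth], mirroring the tf.reshape above."""
    total, depth = crop_features.shape
    assert total % num_boxes == 0, "crops must be grouped per image"
    return crop_features.reshape([total // num_boxes, num_boxes, depth])

# Example: 2 images x 100 boxes, 2048-d features.
folded = fold_object_features(np.zeros([200, 2048], dtype=np.float32))
assert folded.shape == (2, 100, 2048)
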
def main(unused_argv):
    vocab, pretrained_matrix = load_glove(vocab_size=100000,
                                          embedding_size=300)
    attribute_map = get_visual_attributes()
    attribute_embeddings_map = np.random.normal(0, 0.1, [1000, 2048])
    with tf.Graph().as_default():
        (image_id, mean_features, object_features, input_seq, target_seq,
         indicator) = import_mscoco(
            mode="train", batch_size=FLAGS.batch_size,
            num_epochs=FLAGS.num_epochs, is_mini=FLAGS.is_mini)
        up_down_cell = UpDownCell(300, num_image_features=4096)
        attribute_image_captioner = AttributeImageCaptioner(
            up_down_cell, vocab, pretrained_matrix,
            attribute_map, attribute_embeddings_map)
        attribute_detector = AttributeDetector(1000)
        _, top_k_attributes = attribute_detector(mean_features)
        logits, ids = attribute_image_captioner(
            top_k_attributes,
            lengths=tf.reduce_sum(indicator, axis=1),
            mean_image_features=mean_features,
            mean_object_features=object_features,
            seq_inputs=input_seq)
        tf.losses.sparse_softmax_cross_entropy(
            target_seq, logits, weights=indicator)
        loss = tf.losses.get_total_loss()
        global_step = tf.train.get_or_create_global_step()
        optimizer = tf.train.AdamOptimizer()
        learning_step = optimizer.minimize(
            loss, var_list=attribute_image_captioner.variables,
            global_step=global_step)
        captioner_saver = tf.train.Saver(
            var_list=attribute_image_captioner.variables + [global_step])
        attribute_detector_saver = tf.train.Saver(
            var_list=attribute_detector.variables)
        captioner_ckpt, captioner_ckpt_name = get_up_down_attribute_checkpoint()
        (attribute_detector_ckpt,
         attribute_detector_ckpt_name) = get_attribute_detector_checkpoint()
        with tf.Session() as sess:
            sess.run(tf.variables_initializer(optimizer.variables()))
            if captioner_ckpt is not None:
                captioner_saver.restore(sess, captioner_ckpt)
            else:
                sess.run(tf.variables_initializer(
                    attribute_image_captioner.variables + [global_step]))
            if attribute_detector_ckpt is not None:
                attribute_detector_saver.restore(sess, attribute_detector_ckpt)
            else:
                sess.run(tf.variables_initializer(
                    attribute_detector.variables))
            captioner_saver.save(sess, captioner_ckpt_name,
                                 global_step=global_step)
            last_save = time.time()
            for i in itertools.count():
                time_start = time.time()
                try:
                    _target, _ids, _loss, _learning_step = sess.run(
                        [target_seq, ids, loss, learning_step])
                except tf.errors.OutOfRangeError:
                    # The input pipeline is exhausted after num_epochs.
                    break
                iteration = sess.run(global_step)
                print(PRINT_STRING.format(
                    iteration, _loss,
                    list_of_ids_to_string(_ids[0, :].tolist(), vocab),
                    list_of_ids_to_string(_target[0, :].tolist(), vocab),
                    FLAGS.batch_size / (time.time() - time_start)))
                new_save = time.time()
                if new_save - last_save > 3600:  # save the model every hour
                    captioner_saver.save(sess, captioner_ckpt_name,
                                         global_step=global_step)
                    last_save = new_save
            captioner_saver.save(sess, captioner_ckpt_name,
                                 global_step=global_step)
            print("Finished training.")
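
# The training loop above prints decoded captions with a repository helper.
# A minimal sketch of what `list_of_ids_to_string` might do, assuming
# `vocab.id_to_word` maps an integer id back to a token and that id 0 is
# padding (both assumptions; the real helper may also strip start/end
# tokens):

def list_of_ids_to_string_sketch(ids, vocab):
    """Joins word ids into a whitespace-separated caption string."""
    words = []
    for token_id in ids:
        if token_id == 0:  # assumed padding id
            break
        words.append(vocab.id_to_word(token_id))
    return " ".join(words)
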
from detailed_captioning.utils import get_val_annotations_file
from detailed_captioning.inputs.spatial_image_features_only import (
    import_mscoco)


PRINT_STRING = """({3:.2f} img/sec) iteration: {0:05d}
    caption: {1}
    label: {2}"""


tf.logging.set_verbosity(tf.logging.INFO)
tf.flags.DEFINE_integer("batch_size", 1, "")
tf.flags.DEFINE_integer("beam_size", 3, "")
tf.flags.DEFINE_boolean("is_mini", False, "")
tf.flags.DEFINE_string("mode", "eval", "")
FLAGS = tf.flags.FLAGS


if __name__ == "__main__":

    vocab, pretrained_matrix = load_glove(vocab_size=100000,
                                          embedding_size=300)
    attribute_map = get_visual_attributes()
    attribute_embeddings_map = np.random.normal(0, 0.1, [1000, 2048])

    with tf.Graph().as_default():

        (image_id, spatial_features, input_seq, target_seq,
         indicator) = import_mscoco(
            mode=FLAGS.mode, batch_size=FLAGS.batch_size,
            num_epochs=1, is_mini=FLAGS.is_mini)
        visual_sentinel_cell = VisualSentinelCell(300,
                                                  num_image_features=2048)
        attribute_image_captioner = AttributeImageCaptioner(
            visual_sentinel_cell, vocab, pretrained_matrix,
            attribute_map, attribute_embeddings_map)
        attribute_detector = AttributeDetector(1000)
        _, top_k_attributes = attribute_detector(
            tf.reduce_mean(spatial_features, [1, 2]))
        logits, ids = attribute_image_captioner(
def main(unused_argv):
    vocab, pretrained_matrix = load_glove(vocab_size=100000,
                                          embedding_size=300)
    attribute_map = get_visual_attributes()
    attribute_to_word_lookup_table = vocab.word_to_id(
        attribute_map.reverse_vocab)
    with tf.Graph().as_default():
        (image_id, image_features, object_features, input_seq, target_seq,
         indicator, attributes) = import_mscoco(
            mode="train", batch_size=FLAGS.batch_size,
            num_epochs=FLAGS.num_epochs, is_mini=FLAGS.is_mini)
        attribute_detector = AttributeDetector(1000)
        _, image_attributes, object_attributes = attribute_detector(
            image_features, object_features)
        grounded_attribute_cell = GroundedAttributeCell(1024)
        attribute_captioner = AttributeCaptioner(
            grounded_attribute_cell, vocab, pretrained_matrix,
            attribute_to_word_lookup_table)
        logits, ids = attribute_captioner(
            lengths=tf.reduce_sum(indicator, axis=1),
            mean_image_features=image_features,
            mean_object_features=object_features,
            seq_inputs=input_seq,
            image_attributes=image_attributes,
            object_attributes=object_attributes)
        tf.losses.sparse_softmax_cross_entropy(
            target_seq, logits, weights=indicator)
        loss = tf.losses.get_total_loss()
        global_step = tf.train.get_or_create_global_step()
        # Decay the learning rate by 0.8x every three passes over the
        # training set (586363 examples per epoch).
        learning_rate = tf.train.exponential_decay(
            5e-4, global_step, 3 * 586363 // FLAGS.batch_size, 0.8,
            staircase=True)
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        learning_step = optimizer.minimize(
            loss, var_list=attribute_captioner.variables,
            global_step=global_step)
        detector_saver = tf.train.Saver(
            var_list=attribute_detector.variables + [global_step])
        detector_ckpt, detector_ckpt_name = get_attribute_detector_checkpoint()
        captioner_saver = tf.train.Saver(
            var_list=attribute_captioner.variables + [global_step])
        captioner_ckpt, captioner_ckpt_name = get_grounded_attribute_checkpoint()
        with tf.Session() as sess:
            sess.run(tf.variables_initializer(optimizer.variables()))
            if detector_ckpt is not None:
                detector_saver.restore(sess, detector_ckpt)
            else:
                sess.run(tf.variables_initializer(
                    attribute_detector.variables + [global_step]))
            if captioner_ckpt is not None:
                captioner_saver.restore(sess, captioner_ckpt)
            else:
                sess.run(tf.variables_initializer(
                    attribute_captioner.variables + [global_step]))
            captioner_saver.save(sess, captioner_ckpt_name,
                                 global_step=global_step)
            last_save = time.time()
            for i in itertools.count():
                time_start = time.time()
                try:
                    _target, _ids, _loss, _learning_step = sess.run(
                        [target_seq, ids, loss, learning_step])
                except tf.errors.OutOfRangeError:
                    # The input pipeline is exhausted after num_epochs.
                    break
                iteration = sess.run(global_step)
                print(PRINT_STRING.format(
                    iteration, _loss,
                    list_of_ids_to_string(_ids[0, :].tolist(), vocab),
                    list_of_ids_to_string(_target[0, :].tolist(), vocab),
                    FLAGS.batch_size / (time.time() - time_start)))
                new_save = time.time()
                if new_save - last_save > 3600:  # save the model every hour
                    captioner_saver.save(sess, captioner_ckpt_name,
                                         global_step=global_step)
                    last_save = new_save
            captioner_saver.save(sess, captioner_ckpt_name,
                                 global_step=global_step)
            print("Finished training.")
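
# For reference, the staircase schedule above holds the learning rate
# constant within each window of 3 * 586363 // batch_size steps and then
# multiplies it by 0.8. A self-contained check of the window boundaries
# (batch size 32 is an illustrative choice, not from the source):

def decayed_learning_rate(step, batch_size=32, base_rate=5e-4, decay=0.8,
                          examples_per_epoch=586363):
    """Mirrors tf.train.exponential_decay with staircase=True."""
    decay_steps = 3 * examples_per_epoch // batch_size  # 54971 for batch 32
    return base_rate * decay ** (step // decay_steps)

assert abs(decayed_learning_rate(0) - 5e-4) < 1e-12       # window 0
assert abs(decayed_learning_rate(54970) - 5e-4) < 1e-12   # still window 0
assert abs(decayed_learning_rate(54971) - 4e-4) < 1e-12   # decayed once
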
def main(unused_argv):

    def _is_valid_num_shards(num_shards):
        """Returns True if num_shards is compatible with FLAGS.num_threads."""
        return (num_shards < FLAGS.num_threads or
                not num_shards % FLAGS.num_threads)

    assert _is_valid_num_shards(FLAGS.train_shards), (
        "Please make the FLAGS.num_threads commensurate with "
        "FLAGS.train_shards")
    assert _is_valid_num_shards(FLAGS.val_shards), (
        "Please make the FLAGS.num_threads commensurate with "
        "FLAGS.val_shards")
    assert _is_valid_num_shards(FLAGS.test_shards), (
        "Please make the FLAGS.num_threads commensurate with "
        "FLAGS.test_shards")

    # Load the visual attribute vocabulary.
    attribute_map = get_visual_attributes()

    if not tf.gfile.IsDirectory(FLAGS.output_dir):
        tf.gfile.MakeDirs(FLAGS.output_dir)

    # Load image metadata from caption files.
    mscoco_train_dataset = _load_and_process_metadata(
        FLAGS.train_captions_file, FLAGS.train_image_dir)
    mscoco_val_dataset = _load_and_process_metadata(
        FLAGS.val_captions_file, FLAGS.val_image_dir)

    # Redistribute the MSCOCO data as follows:
    #   train_dataset = 99% of mscoco_train_dataset.
    #   val_dataset = 1% of mscoco_train_dataset (for validation during
    #     training).
    #   test_dataset = 100% of mscoco_val_dataset (for final evaluation).
    train_cutoff = int(0.99 * len(mscoco_train_dataset))
    train_dataset = mscoco_train_dataset[:train_cutoff]
    val_dataset = mscoco_train_dataset[train_cutoff:]
    test_dataset = mscoco_val_dataset

    # If needed, crop the dataset to make it smaller.
    max_train_size = len(train_dataset)
    if FLAGS.train_dataset_size < max_train_size:
        # Shuffle the ordering of images. Make the randomization repeatable.
        random.seed(12345)
        random.shuffle(train_dataset)
        train_dataset = train_dataset[:FLAGS.train_dataset_size]

    max_val_size = len(val_dataset)
    if FLAGS.val_dataset_size < max_val_size:
        # Shuffle the ordering of images. Make the randomization repeatable.
        random.seed(12345)
        random.shuffle(val_dataset)
        val_dataset = val_dataset[:FLAGS.val_dataset_size]

    max_test_size = len(test_dataset)
    if FLAGS.test_dataset_size < max_test_size:
        # Shuffle the ordering of images. Make the randomization repeatable.
        random.seed(12345)
        random.shuffle(test_dataset)
        test_dataset = test_dataset[:FLAGS.test_dataset_size]

    # Create the model to extract image features.
    image_tensor = tf.placeholder(
        tf.float32, name='image_tensor',
        shape=[None, FLAGS.image_height, FLAGS.image_width, 3])
    feature_extractor = FeatureExtractor(is_training=False,
                                         global_pool=False)
    # Compute the mean ResNet-101 features.
    image_features = tf.reduce_mean(feature_extractor(image_tensor), [1, 2])

    with tf.Session() as sess:
        resnet_saver = tf.train.Saver(var_list=feature_extractor.variables)
        resnet_saver.restore(sess, get_resnet_v2_101_checkpoint())
        lock = threading.Lock()

        def run_model_fn(images):
            # Serialize access to the shared Session across worker threads.
            with lock:
                return sess.run(image_features,
                                feed_dict={image_tensor: images})

        _process_dataset("train", train_dataset, attribute_map,
                         FLAGS.train_shards, run_model_fn)
        _process_dataset("val", val_dataset, attribute_map,
                         FLAGS.val_shards, run_model_fn)
        _process_dataset("test", test_dataset, attribute_map,
                         FLAGS.test_shards, run_model_fn)
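
# The shard assertions above accept a shard count that is either smaller
# than the thread count or an exact multiple of it, so every thread can be
# handed the same number of whole shards. A self-contained check of that
# predicate (the thread and shard counts here are illustrative):

def _is_valid_num_shards_sketch(num_shards, num_threads):
    return num_shards < num_threads or not num_shards % num_threads

assert _is_valid_num_shards_sketch(256, 8)      # 32 shards per thread
assert _is_valid_num_shards_sketch(4, 8)        # fewer shards than threads
assert not _is_valid_num_shards_sketch(12, 8)   # 12 % 8 != 0, uneven split
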
def main(unused_argv):
    attribute_map = get_visual_attributes()
    with tf.Graph().as_default():
        (image_id, image_features, object_features, input_seq, target_seq,
         indicator, attributes) = import_mscoco(
            mode="train", batch_size=FLAGS.batch_size,
            num_epochs=FLAGS.num_epochs, is_mini=FLAGS.is_mini)
        attribute_detector = AttributeDetector(1000)
        logits, image_detections, object_detections = attribute_detector(
            image_features, object_features)
        tf.losses.sigmoid_cross_entropy(attributes, logits)
        loss = tf.losses.get_total_loss()
        global_step = tf.train.get_or_create_global_step()
        optimizer = tf.train.AdamOptimizer()
        learning_step = optimizer.minimize(
            loss, var_list=attribute_detector.variables,
            global_step=global_step)
        captioner_saver = tf.train.Saver(
            var_list=attribute_detector.variables + [global_step])
        captioner_ckpt, captioner_ckpt_name = get_attribute_detector_checkpoint()
        with tf.Session() as sess:
            sess.run(tf.variables_initializer(optimizer.variables()))
            if captioner_ckpt is not None:
                captioner_saver.restore(sess, captioner_ckpt)
            else:
                sess.run(tf.variables_initializer(
                    attribute_detector.variables + [global_step]))
            captioner_saver.save(sess, captioner_ckpt_name,
                                 global_step=global_step)
            last_save = time.time()
            for i in itertools.count():
                time_start = time.time()
                try:
                    _attributes, _detections, _loss, _ = sess.run(
                        [attributes, image_detections, loss, learning_step])
                except tf.errors.OutOfRangeError:
                    # The input pipeline is exhausted after num_epochs.
                    break
                iteration = sess.run(global_step)
                ground_truth_ids = np.where(_attributes[0, :] > 0.5)
                print(PRINT_STRING.format(
                    FLAGS.batch_size / (time.time() - time_start),
                    iteration, _loss,
                    str(attribute_map.id_to_word(_detections[0, :].tolist())),
                    str(attribute_map.id_to_word(
                        ground_truth_ids[0].tolist()))))
                new_save = time.time()
                if new_save - last_save > 3600:  # save the model every hour
                    captioner_saver.save(sess, captioner_ckpt_name,
                                         global_step=global_step)
                    last_save = new_save
            captioner_saver.save(sess, captioner_ckpt_name,
                                 global_step=global_step)
            print("Finished training.")
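
# The detector above is trained as a multi-label classifier: `attributes`
# is a multi-hot vector over the 1000 attribute words, so sigmoid cross
# entropy treats each attribute as an independent binary prediction. A
# minimal NumPy illustration of the ground-truth extraction in the loop:

import numpy as np

attributes_row = np.zeros([1000], dtype=np.float32)
attributes_row[[3, 17, 42]] = 1.0  # attributes present in the caption
ground_truth_ids = np.where(attributes_row > 0.5)
assert ground_truth_ids[0].tolist() == [3, 17, 42]
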
from detailed_captioning.inputs.captions_and_attributes import import_mscoco


PRINT_STRING = """({3:.2f} img/sec) iteration: {0:05d}
    caption: {1}
    label: {2}"""


tf.logging.set_verbosity(tf.logging.INFO)
tf.flags.DEFINE_integer("batch_size", 1, "")
tf.flags.DEFINE_integer("beam_size", 3, "")
tf.flags.DEFINE_boolean("is_mini", False, "")
tf.flags.DEFINE_string("mode", "eval", "")
FLAGS = tf.flags.FLAGS


if __name__ == "__main__":

    vocab, pretrained_matrix = load_glove(vocab_size=100000,
                                          embedding_size=300)
    attribute_map = get_visual_attributes()
    attribute_to_word_lookup_table = vocab.word_to_id(
        attribute_map.reverse_vocab)

    with tf.Graph().as_default():

        (image_id, image_features, object_features, input_seq, target_seq,
         indicator, attributes) = import_mscoco(
            mode=FLAGS.mode, batch_size=FLAGS.batch_size,
            num_epochs=1, is_mini=FLAGS.is_mini)
        attribute_detector = AttributeDetector(1000)
        _, image_attributes, object_attributes = attribute_detector(
            image_features, object_features)
        grounded_attribute_cell = GroundedAttributeCell(1024)
        attribute_captioner = AttributeCaptioner(
            grounded_attribute_cell, vocab, pretrained_matrix,
            attribute_to_word_lookup_table, trainable=False,
            beam_size=FLAGS.beam_size)