# Beispiel #1 (pastebin artifact header, commented out so the file parses)
# 0
            # NOTE(review): this fragment begins mid-`with tf.Session(...)`
            # block (the header is above this view). It is a near-duplicate of
            # the session block below, differing only in running `ids` instead
            # of `word_ids` — confirm which tensor is intended and whether one
            # copy should be removed.
            captioner_saver.restore(sess, captioner_ckpt)
            used_ids = set()    # image ids already emitted (dedupe across batches)
            json_dump = []      # accumulates {"image_id": ..., "caption": ...} records

            # Run the graph until the input pipeline is exhausted.
            for i in itertools.count():
                time_start = time.time()
                try:
                    _ids, _target_seq, _image_id = sess.run(
                        [ids, target_seq, image_id])
                # NOTE(review): bare `except` also swallows KeyboardInterrupt
                # and real errors; presumably meant tf.errors.OutOfRangeError.
                except:
                    break
                # Decode beam index 0 as the predicted caption; decode the
                # target id sequence as the reference label.
                the_captions = recursive_ids_to_string(_ids[:, 0, :].tolist(),
                                                       vocab)
                the_labels = recursive_ids_to_string(
                    _target_seq[:, :].tolist(), vocab)
                the_image_ids = _image_id.tolist()
                # Keep the first caption seen for each image id; `y` (the
                # label) is unpacked but never used in this loop.
                for j, x, y in zip(the_image_ids, the_captions, the_labels):
                    if not j in used_ids:
                        used_ids.add(j)
                        json_dump.append({"image_id": j, "caption": x})
                # Log step index, a sample caption/label pair, and throughput
                # in examples per second.
                print(
                    PRINT_STRING.format(
                        i, the_captions[0], the_labels[0],
                        FLAGS.batch_size / (time.time() - time_start)))

            print("Finishing evaluating.")
            # Score the collected captions against the annotations file that
            # matches the current mode (train/eval use the train annotations).
            evaluate(FLAGS.mode, json_dump,
                     captioner_ckpt_name.replace("model.ckpt", ""),
                     (get_train_annotations_file() if FLAGS.mode
                      in ["train", "eval"] else get_val_annotations_file()))
        with tf.Session() as sess:
            # Restore the trained captioner; a checkpoint must have been found.
            assert captioner_ckpt is not None, "no captioner checkpoint found"
            captioner_saver.restore(sess, captioner_ckpt)
            used_ids = set()    # image ids already emitted (dedupe across batches)
            json_dump = []      # accumulates {"image_id": ..., "caption": ...} records

            # Run the graph until the input pipeline is exhausted.
            for i in itertools.count():
                time_start = time.time()
                try:
                    _ids, _target_seq, _image_id = sess.run(
                        [word_ids, target_seq, image_id])
                # TF input pipelines signal exhaustion with OutOfRangeError;
                # catch only that instead of a bare `except`, which would also
                # swallow KeyboardInterrupt and genuine bugs.
                except tf.errors.OutOfRangeError:
                    break
                # Decode beam index 0 as the predicted caption; decode the
                # target id sequence as the reference label.
                the_captions = recursive_ids_to_string(
                    _ids[:, 0, :].tolist(), vocab)
                the_labels = recursive_ids_to_string(
                    _target_seq[:, :].tolist(), vocab)
                the_image_ids = _image_id.tolist()
                # Keep the first caption seen for each image id; the decoded
                # label is not needed here.
                for j, x, _ in zip(the_image_ids, the_captions, the_labels):
                    if j not in used_ids:
                        used_ids.add(j)
                        json_dump.append({"image_id": j, "caption": x})
                # Log step index, a sample caption/label pair, and throughput
                # in examples per second.
                print(PRINT_STRING.format(
                    i, the_captions[0], the_labels[0],
                    FLAGS.batch_size / (time.time() - time_start)))

            print("Finishing evaluating.")
            # Score the collected captions against the annotations file that
            # matches the current mode (train/eval use the train annotations).
            evaluate(
                FLAGS.mode,
                json_dump,
                captioner_ckpt_name.replace("model.ckpt", ""),
                (get_train_annotations_file() if FLAGS.mode in ["train", "eval"]
                    else get_val_annotations_file()))