Example #1
def initImg2TxtWorker(listenQ, sendQ, checkpointPath, vocabularyPath):
    from Demo.Img2Txt.im2txt.im2txt.inference_utils import caption_generator
    from Demo.Img2Txt.im2txt.im2txt.inference_utils import vocabulary
    from Demo.Img2Txt.im2txt.im2txt import inference_wrapper
    from Demo.Img2Txt.im2txt.im2txt import configuration

    import tensorflow as tf
    print("Starting Img2Txt model...")
    print("Using checkpoint:", checkpointPath)
    # Build the inference graph.
    g = tf.Graph()
    with g.as_default():
        model = inference_wrapper.InferenceWrapper()
        restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
                                                   checkpointPath)
    g.finalize()

    vocab = vocabulary.Vocabulary(vocabularyPath)  # Init the vocabulary
    with tf.Session(graph=g) as sess:
        restore_fn(sess)  # Load the model from checkpoint.

        # Prepare the caption generator. Here we are implicitly using the default
        # beam search parameters. See caption_generator.py for a description of the
        # available beam search parameters.
        generator = caption_generator.CaptionGenerator(model, vocab)
        img2TxtPredictionCycle(sess, generator, vocab, listenQ, sendQ)
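This worker is meant to run in its own process: it pulls images from listenQ and pushes generated captions to sendQ through img2TxtPredictionCycle. A minimal launch sketch, assuming standard multiprocessing queues; the checkpoint and vocabulary paths below are placeholders, not the project's real files.

from multiprocessing import Process, Queue

listenQ, sendQ = Queue(), Queue()   # images in, captions out
worker = Process(
    target=initImg2TxtWorker,
    args=(listenQ, sendQ,
          "/path/to/model.ckpt-2000000",   # placeholder checkpoint path
          "/path/to/word_counts.txt"))     # placeholder vocabulary file
worker.start()
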
Example #2
def run():
    """Runs evaluation in a loop, and logs summaries to TensorBoard."""
    # Create the evaluation directory if it doesn't exist.
    eval_dir = FLAGS.eval_dir
    if not tf.gfile.IsDirectory(eval_dir):
        tf.logging.info("Creating eval directory: %s", eval_dir)
        tf.gfile.MakeDirs(eval_dir)

    g = tf.Graph()
    with g.as_default():
        # Build the model for evaluation.
        model_config = configuration.ModelConfig()
        model_config.input_file_pattern = FLAGS.input_file_pattern
        model = show_and_tell_model.ShowAndTellModel(model_config, mode="eval")
        model.build()

        # Create the Saver to restore model Variables.
        saver = tf.train.Saver()

        # Create the summary operation and the summary writer.
        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(eval_dir)

        g.finalize()

        # Run a new evaluation run every eval_interval_secs.
        while True:
            start = time.time()
            tf.logging.info(
                "Starting evaluation at " +
                time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()))
            run_once(model, saver, summary_writer, summary_op)
            time_to_next_eval = start + FLAGS.eval_interval_secs - time.time()
            if time_to_next_eval > 0:
                time.sleep(time_to_next_eval)
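The evaluation loop reads its settings from TensorFlow command-line flags. A sketch of the flag definitions it assumes; defaults and help strings are illustrative, the real script may define them differently.

FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("input_file_pattern", "",
                       "File pattern of sharded TFRecord input files.")
tf.flags.DEFINE_string("eval_dir", "",
                       "Directory to write event logs and summaries to.")
tf.flags.DEFINE_integer("eval_interval_secs", 600,
                        "Seconds between evaluation runs.")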
Example #3
def mkPredictor():
    import cv2
    import tensorflow as tf
    from Demo.Img2Txt.im2txt.im2txt.inference_utils import caption_generator
    from Demo.Img2Txt.im2txt.im2txt.inference_utils import vocabulary
    from Demo.Img2Txt.im2txt.im2txt import inference_wrapper
    from Demo.Img2Txt.im2txt.im2txt import configuration

    # Init the vocabulary
    vocab = vocabulary.Vocabulary(VOCABULARY_PATH)

    # Build the graph
    g = tf.Graph()
    with g.as_default():
        model = inference_wrapper.InferenceWrapper()
        restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
                                                   CHECKPOINT_PATH)
    g.finalize()

    # Prepare a session object
    sess = tf.Session(graph=g)
    restore_fn(sess)  # Load the model from checkpoint.
    generator = caption_generator.CaptionGenerator(model, vocab)

    def predict(image):
        image = cv2.imencode('.jpg', image)[1].tobytes()
        for caption in generator.beam_search(sess, image):
            # Ignore begin and end words.
            sentence = [vocab.id_to_word(w) for w in caption.sentence[1:-1]]
            yield " ".join(sentence)

    return predict
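A usage sketch for the returned closure; "example.jpg" is a placeholder path, and the frame is assumed to be a BGR ndarray as produced by OpenCV.

import cv2

predict = mkPredictor()
frame = cv2.imread("example.jpg")   # placeholder image path
for caption in predict(frame):
    print(caption)                  # one sentence per beam-search hypothesis
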
Example #4
def main():
    # Build the inference graph.
    g = tf.Graph()
    with g.as_default():
        model = inference_wrapper.InferenceWrapper()
        restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
                                                   Constants.CHECKPOINT_PATH)
    g.finalize()

    vocab = vocabulary.Vocabulary(Constants.VOCABULARY_PATH)
    filenames = []

    for file_pattern in IMAGE_FILE.split(","):
        filenames.extend(tf.gfile.Glob(file_pattern))

    tf.logging.info("Running caption generation on %d files matching %s",
                    len(filenames), IMAGE_FILE)

    with tf.Session(graph=g) as sess:
        # Load the model from checkpoint.
        restore_fn(sess)

        # Prepare the caption generator. Here we are implicitly using the default
        # beam search parameters. See caption_generator.py for a description of the
        # available beam search parameters.
        generator = caption_generator.CaptionGenerator(model, vocab)

        for filename in filenames:
            with open(filename, 'rb') as f:
                image = f.read()

            captions = generator.beam_search(sess, image)
            print("Captions for image %s:" % os.path.basename(filename))
            for i, caption in enumerate(captions):
                # Ignore begin and end words.
                sentence = [
                    vocab.id_to_word(w) for w in caption.sentence[1:-1]
                ]
                sentence = " ".join(sentence)
                print("  %d) %s (p=%f)" %
                      (i, sentence, math.exp(caption.logprob)))
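This script relies on a few module-level names that are not shown in the snippet: IMAGE_FILE (comma-separated glob patterns of input images) and a Constants object holding the checkpoint and vocabulary paths. A sketch of plausible definitions; the values are placeholders and Constants could just as well be a separate module.

IMAGE_FILE = "images/*.jpg,extra/photo.png"          # comma-separated glob patterns

class Constants:
    CHECKPOINT_PATH = "/path/to/model.ckpt-2000000"  # placeholder
    VOCABULARY_PATH = "/path/to/word_counts.txt"     # placeholder
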
Example #5
def setUp(self):
    super(ShowAndTellModelTest, self).setUp()
    self._model_config = configuration.ModelConfig()
Example #6
def main(unused_argv):
    assert FLAGS.input_file_pattern, "--input_file_pattern is required"
    assert FLAGS.train_dir, "--train_dir is required"

    model_config = configuration.ModelConfig()
    model_config.input_file_pattern = FLAGS.input_file_pattern
    model_config.inception_checkpoint_file = FLAGS.inception_checkpoint_file
    training_config = configuration.TrainingConfig()

    # Create training directory.
    train_dir = FLAGS.train_dir
    if not tf.gfile.IsDirectory(train_dir):
        tf.logging.info("Creating training directory: %s", train_dir)
        tf.gfile.MakeDirs(train_dir)

    # Build the TensorFlow graph.
    g = tf.Graph()
    with g.as_default():
        # Build the model.
        model = show_and_tell_model.ShowAndTellModel(
            model_config, mode="train", train_inception=FLAGS.train_inception)
        model.build()

        # Set up the learning rate.
        learning_rate_decay_fn = None
        if FLAGS.train_inception:
            learning_rate = tf.constant(
                training_config.train_inception_learning_rate)
        else:
            learning_rate = tf.constant(training_config.initial_learning_rate)
            if training_config.learning_rate_decay_factor > 0:
                num_batches_per_epoch = (
                    training_config.num_examples_per_epoch /
                    model_config.batch_size)
                decay_steps = int(num_batches_per_epoch *
                                  training_config.num_epochs_per_decay)

                def _learning_rate_decay_fn(learning_rate, global_step):
                    return tf.train.exponential_decay(
                        learning_rate,
                        global_step,
                        decay_steps=decay_steps,
                        decay_rate=training_config.learning_rate_decay_factor,
                        staircase=True)

                learning_rate_decay_fn = _learning_rate_decay_fn

        # Set up the training ops.
        train_op = tf.contrib.layers.optimize_loss(
            loss=model.total_loss,
            global_step=model.global_step,
            learning_rate=learning_rate,
            optimizer=training_config.optimizer,
            clip_gradients=training_config.clip_gradients,
            learning_rate_decay_fn=learning_rate_decay_fn)

        # Set up the Saver for saving and restoring model checkpoints.
        saver = tf.train.Saver(
            max_to_keep=training_config.max_checkpoints_to_keep)

    # Run training.
    tf.contrib.slim.learning.train(train_op,
                                   train_dir,
                                   log_every_n_steps=FLAGS.log_every_n_steps,
                                   graph=g,
                                   global_step=model.global_step,
                                   number_of_steps=FLAGS.number_of_steps,
                                   init_fn=model.init_fn,
                                   saver=saver)
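The training entry point is driven by TensorFlow flags and is normally invoked through tf.app.run(). A sketch of the flag definitions it assumes; defaults and help strings are illustrative.

FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("input_file_pattern", "",
                       "File pattern of sharded TFRecord input files.")
tf.flags.DEFINE_string("inception_checkpoint_file", "",
                       "Path to a pretrained Inception v3 checkpoint.")
tf.flags.DEFINE_string("train_dir", "",
                       "Directory for saving and loading model checkpoints.")
tf.flags.DEFINE_boolean("train_inception", False,
                        "Whether to fine-tune the Inception v3 weights.")
tf.flags.DEFINE_integer("number_of_steps", 1000000, "Number of training steps.")
tf.flags.DEFINE_integer("log_every_n_steps", 1,
                        "Frequency at which loss and global step are logged.")

if __name__ == "__main__":
    tf.app.run()   # parses flags and dispatches to main(unused_argv)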