Code Example #1
File: evaluate.py  Project: ziyubiti/models
def run():
    """Runs evaluation in a loop, and logs summaries to TensorBoard."""
    # Create the evaluation directory if it doesn't exist.
    eval_dir = FLAGS.eval_dir
    if not tf.gfile.IsDirectory(eval_dir):
        tf.logging.info("Creating eval directory: %s", eval_dir)
        tf.gfile.MakeDirs(eval_dir)

    g = tf.Graph()
    with g.as_default():
        # Build the model for evaluation.
        model_config = configuration.ModelConfig()
        model_config.input_file_pattern = FLAGS.input_file_pattern
        model = show_and_tell_model.ShowAndTellModel(model_config, mode="eval")
        model.build()

        # Create the summary operation and the summary writer.
        # Note: these are the pre-TF-1.0 API names; in TF 1.x they became
        # tf.summary.merge_all and tf.summary.FileWriter (see Code Example #4).
        summary_op = tf.merge_all_summaries()
        summary_writer = tf.train.SummaryWriter(eval_dir)

        g.finalize()

        # Run a new evaluation run every eval_interval_secs.
        while True:
            start = time.time()
            tf.logging.info(
                "Starting evaluation at " +
                time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()))
            run_once(model, summary_writer, summary_op)
            time_to_next_eval = start + FLAGS.eval_interval_secs - time.time()
            if time_to_next_eval > 0:
                time.sleep(time_to_next_eval)
Code Example #2
 def build_model(self,
                 model_config,
                 image_raw_feed=None,
                 input_feed=None,
                 mask_feed=None):
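     # Build the captioning graph in "attack" mode, wiring in the externally
     # supplied image, caption-input and mask tensors, and cache the model.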
     model = show_and_tell_model.ShowAndTellModel(model_config,
                                                  mode="attack")
     model.build(image_raw_feed, input_feed, mask_feed)
     self.model = model
     return model
Code Example #3
def run_caption(checkpoint_path, filenames, heuristic_amount):
    g = tf.Graph()
    with g.as_default():
        # Build the model for evaluation.
        model_config = configuration.ModelConfig()
        model = show_and_tell_model.ShowAndTellModel(model_config,
                                                     mode="inference")
        model.build()
        # Create the Saver to restore model Variables.
        saver = tf.train.Saver()
        g.finalize()

    def _restore_fn(sess):
        tf.logging.info("Loading model from checkpoint: %s", checkpoint_path)
        saver.restore(sess, checkpoint_path)
        tf.logging.info("Successfully loaded checkpoint: %s",
                        os.path.basename(checkpoint_path))

    restore_fn = _restore_fn

    # Create the vocabulary.
    vocab = glove.load(model_config.config)[0]

    run_results = []
    with tf.Session(graph=g) as sess:
        # Load the model from checkpoint.
        restore_fn(sess)

        # Prepare the list of image bytes for evaluation.
        images = []
        for filename in filenames:
            with tf.gfile.GFile(filename, "rb") as f:
                images.append(f.read())
        captions = [
            sess.run(model.final_seqs,
                     feed_dict={
                         "image_feed:0": img,
                         model.heuristic_temperature: heuristic_amount
                     }) for img in images
        ]

        for i, caption in enumerate(captions):
            run_results.append({"filename": filenames[i], "captions": []})
            for j in range(caption.shape[1]):
                # Ignore begin and end words.
                c = caption[0, j, :].tolist()
                sentence = [vocab.id_to_word(w) for w in c[:-1]]
                sentence = " ".join(sentence)
                run_results[i]["captions"].append(sentence)

    return run_results
Code Example #4
def run():
    """Runs evaluation in a loop, and logs summaries to TensorBoard."""
    # Create the evaluation directory if it doesn't exist.
    eval_dir = FLAGS.eval_dir
    if not tf.gfile.IsDirectory(eval_dir):
        tf.logging.info("Creating eval directory: %s", eval_dir)
        tf.gfile.MakeDirs(eval_dir)

    g = tf.Graph()
    with g.as_default():
        # Build the model for evaluation.
        model_config = configuration.ModelConfig()
        model_config.input_file_pattern = FLAGS.input_file_pattern
        model_config.CNN_name = FLAGS.CNN_name
        # If FLAGS.batch_size is set, the runtime value takes precedence;
        # otherwise the value from configuration.py is used.
        if FLAGS.batch_size > 0:
            model_config.batch_size = FLAGS.batch_size

        model = show_and_tell_model.ShowAndTellModel(model_config, mode="eval")
        model.build()

        # Create the Saver to restore model Variables.
        saver = tf.train.Saver()

        # Create the summary operation and the summary writer.
        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(eval_dir)

        g.finalize()

        eval_once = (FLAGS.eval_interval_secs == 0)
        # Run a new evaluation run every eval_interval_secs.
        while True:
            start = time.time()
            tf.logging.info(
                "Starting evaluation at " +
                time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()))
            run_once(model, saver, summary_writer, summary_op)
            if eval_once:
                tf.logging.info("Just eval once at this time!")
                break
            time_to_next_eval = start + FLAGS.eval_interval_secs - time.time()
            if time_to_next_eval > 0:
                time.sleep(time_to_next_eval)
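Note: neither evaluation loop above shows run_once, which performs the actual work of a single evaluation pass. The following is a minimal sketch of what such a function typically looks like in im2txt-style evaluation code: restore the newest checkpoint, average the per-word cross-entropy over the evaluation batches, and write a TensorBoard summary. It assumes FLAGS comes from the surrounding evaluate.py; the flag names (FLAGS.checkpoint_dir, FLAGS.num_eval_examples) and the model attributes holding the per-word losses are assumptions modeled on the im2txt reference implementation, not necessarily this project's code.

import math

import numpy as np
import tensorflow as tf


def run_once(model, saver, summary_writer, summary_op):
    """Restores the latest checkpoint, computes perplexity, writes summaries."""
    model_path = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
    if not model_path:
        tf.logging.info("Skipping evaluation: no checkpoint found in %s",
                        FLAGS.checkpoint_dir)
        return

    with tf.Session() as sess:
        # Load the model from the newest checkpoint.
        tf.logging.info("Loading model from checkpoint: %s", model_path)
        saver.restore(sess, model_path)
        global_step = sess.run(model.global_step)

        # Start the queue runners that feed the evaluation batches.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        # Accumulate the per-word cross-entropy over the evaluation set.
        sum_losses = 0.0
        sum_weights = 0.0
        num_batches = FLAGS.num_eval_examples // model.config.batch_size
        for _ in range(num_batches):
            losses, weights = sess.run(
                [model.target_cross_entropy_losses,
                 model.target_cross_entropy_loss_weights])
            sum_losses += np.sum(losses * weights)
            sum_weights += np.sum(weights)
        perplexity = math.exp(sum_losses / sum_weights)
        tf.logging.info("Perplexity = %f (global step %d)", perplexity,
                        global_step)

        # Write the TensorBoard summaries for this evaluation run.
        summary_writer.add_summary(sess.run(summary_op), global_step)

        coord.request_stop()
        coord.join(threads, stop_grace_period_secs=10)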
Code Example #5
File: inference_wrapper.py  Project: hologerry/im2txt
 def build_model(self, model_config):
     model = show_and_tell_model.ShowAndTellModel(model_config,
                                                  mode="inference")
     model.build()
     return model
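For context, this build_model is normally invoked through im2txt's InferenceWrapper and then driven by a beam-search caption generator, roughly as in im2txt's run_inference.py. Below is a minimal sketch of that usage; the checkpoint, vocabulary and image paths are placeholders, and the import layout assumes the tensorflow/models im2txt package.

import math

import tensorflow as tf

from im2txt import configuration
from im2txt import inference_wrapper
from im2txt.inference_utils import caption_generator
from im2txt.inference_utils import vocabulary

# Placeholder paths; substitute a real checkpoint, vocabulary file and image.
CHECKPOINT_PATH = "/path/to/model.ckpt-2000000"
VOCAB_FILE = "/path/to/word_counts.txt"
IMAGE_FILE = "/path/to/image.jpg"

g = tf.Graph()
with g.as_default():
    model = inference_wrapper.InferenceWrapper()
    restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
                                               CHECKPOINT_PATH)
g.finalize()

# Vocabulary for mapping word ids back to words.
vocab = vocabulary.Vocabulary(VOCAB_FILE)

with tf.Session(graph=g) as sess:
    # Load the model from checkpoint.
    restore_fn(sess)

    # Beam-search caption generator built on top of the wrapped model.
    generator = caption_generator.CaptionGenerator(model, vocab)

    with tf.gfile.GFile(IMAGE_FILE, "rb") as f:
        image = f.read()

    for i, caption in enumerate(generator.beam_search(sess, image)):
        # Strip the begin and end words before joining the sentence.
        words = [vocab.id_to_word(w) for w in caption.sentence[1:-1]]
        print("%d) %s (p=%f)" % (i, " ".join(words), math.exp(caption.logprob)))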
Code Example #6
def main(unused_argv):
    assert FLAGS.input_file_pattern, "--input_file_pattern is required"
    assert FLAGS.train_dir, "--train_dir is required"

    model_config = configuration.ModelConfig()
    model_config.input_file_pattern = FLAGS.input_file_pattern
    model_config.inception_checkpoint_file = FLAGS.inception_checkpoint_file
    training_config = configuration.TrainingConfig()

    # Create training directory.
    train_dir = FLAGS.train_dir
    if not tf.gfile.IsDirectory(train_dir):
        tf.logging.info("Creating training directory: %s", train_dir)
        tf.gfile.MakeDirs(train_dir)

    # Build the TensorFlow graph.
    g = tf.Graph()
    with g.as_default():
        # Build the model.
        model = show_and_tell_model.ShowAndTellModel(
            model_config, mode="train", train_inception=FLAGS.train_inception)
        model.build()

        # Set up the learning rate.
        learning_rate_decay_fn = None
        if FLAGS.train_inception:
            learning_rate = tf.constant(
                training_config.train_inception_learning_rate)
        else:
            learning_rate = tf.constant(training_config.initial_learning_rate)
            if training_config.learning_rate_decay_factor > 0:
                num_batches_per_epoch = (
                    training_config.num_examples_per_epoch /
                    model_config.batch_size)
                decay_steps = int(num_batches_per_epoch *
                                  training_config.num_epochs_per_decay)

                def _learning_rate_decay_fn(learning_rate, global_step):
                    return tf.train.exponential_decay(
                        learning_rate,
                        global_step,
                        decay_steps=decay_steps,
                        decay_rate=training_config.learning_rate_decay_factor,
                        staircase=True)

                learning_rate_decay_fn = _learning_rate_decay_fn

        # Set up the training ops.
        train_op = tf.contrib.layers.optimize_loss(
            loss=model.total_loss,
            global_step=model.global_step,
            learning_rate=learning_rate,
            optimizer=training_config.optimizer,
            clip_gradients=training_config.clip_gradients,
            learning_rate_decay_fn=learning_rate_decay_fn)

        # Set up the Saver for saving and restoring model checkpoints.
        saver = tf.train.Saver(
            max_to_keep=training_config.max_checkpoints_to_keep)

    # Run training.
    tf.contrib.slim.learning.train(train_op,
                                   train_dir,
                                   log_every_n_steps=FLAGS.log_every_n_steps,
                                   graph=g,
                                   global_step=model.global_step,
                                   number_of_steps=FLAGS.number_of_steps,
                                   init_fn=model.init_fn,
                                   saver=saver)
Code Example #7
 def build_model(self, model_config):
   model = show_and_tell_model.ShowAndTellModel(model_config, mode="saliency")
   model.config.batch_size = self.batch_size
   model.build()
   return model
Code Example #8
def main(unused_argv):
    assert FLAGS.input_file_pattern, "--input_file_pattern is required"
    assert FLAGS.train_dir, "--train_dir is required"

    model_config = configuration.ModelConfig()
    model_config.input_file_pattern = FLAGS.input_file_pattern
    model_config.image_keys = [model_config.image_feature_name]

    # Make sure we have the right batch size.
    if FLAGS.train_inception:
        assert FLAGS.batch_size == 8
    else:
        assert FLAGS.batch_size == 32
    if FLAGS.two_input_queues:
        FLAGS.batch_size = int(FLAGS.batch_size / 2)
    model_config.batch_size = FLAGS.batch_size

    # Batch sizes asserted above; set config fields if training with blocked images.
    if FLAGS.blocked_image:
        assert FLAGS.blocked_input_file_pattern, "--blocked_input_file_pattern is required if you would like to train with blocked images"
        model_config.blocked_input_file_pattern = FLAGS.blocked_input_file_pattern
        model_config.image_keys.append(model_config.blocked_image_feature_name)
    if FLAGS.two_input_queues:
        assert FLAGS.input_file_pattern2, "--input_file_pattern2 is required if you would like to train with two input queues"
        model_config.blocked_input_file_pattern = FLAGS.input_file_pattern2
        model_config.image_keys.append(model_config.image_feature_name)
    model_config.inception_checkpoint_file = FLAGS.inception_checkpoint_file
    training_config = configuration.TrainingConfig()

    # Create training directory.
    train_dir = FLAGS.train_dir
    if not tf.gfile.IsDirectory(train_dir):
        tf.logging.info("Creating training directory: %s", train_dir)
        tf.gfile.MakeDirs(train_dir)
    print('graph')
    #go from flags to dict
    g = tf.Graph()
    with g.as_default():
        # Build the model.
        # TensorFlow changes the internal structure of FLAGS between releases;
        # in newer releases FLAGS.__flags maps flag names to flag objects
        # rather than to raw values, so unwrap them here. *sigh*
        if not isinstance(FLAGS.__flags['init_from'], str):
            flag_dict = {}
            for key in FLAGS.__flags.keys():
                flag_dict[key] = FLAGS.__flags[key].value
        else:
            flag_dict = FLAGS.__flags

        model = show_and_tell_model.ShowAndTellModel(
            model_config,
            mode="train",
            train_inception=FLAGS.train_inception,
            flags=flag_dict
        )  # Just pass in all the flags, since threading them through individually gets annoying.
        model.build()

        # Set up the learning rate.
        learning_rate_decay_fn = None
        if FLAGS.train_inception:
            learning_rate = tf.constant(
                training_config.train_inception_learning_rate)
        else:
            learning_rate = tf.constant(training_config.initial_learning_rate)
            if training_config.learning_rate_decay_factor > 0:
                num_batches_per_epoch = (
                    training_config.num_examples_per_epoch /
                    model_config.batch_size)
                decay_steps = int(num_batches_per_epoch *
                                  training_config.num_epochs_per_decay)

                def _learning_rate_decay_fn(learning_rate, global_step):
                    return tf.train.exponential_decay(
                        learning_rate,
                        global_step,
                        decay_steps=decay_steps,
                        decay_rate=training_config.learning_rate_decay_factor,
                        staircase=True)

                learning_rate_decay_fn = _learning_rate_decay_fn

        # Set up the training ops.
        train_op = tf.contrib.layers.optimize_loss(
            loss=model.total_loss,
            global_step=model.global_step,
            learning_rate=learning_rate,
            optimizer=training_config.optimizer,
            clip_gradients=training_config.clip_gradients,
            learning_rate_decay_fn=learning_rate_decay_fn)

        # Set up the Saver for saving and restoring model checkpoints.
        saver = tf.train.Saver(
            max_to_keep=training_config.max_checkpoints_to_keep)

        if FLAGS.init_from:
            inception_restore = model.init_fn

            def restore_full_model(sess):
                print("restoring full model")
                inception_restore(sess)
                saver.restore(sess, FLAGS.init_from)

            model.init_fn = restore_full_model
    print('train')
    # Run training.
    if FLAGS.debug:
        tf.contrib.slim.learning.train(
            train_op,
            train_dir,
            log_every_n_steps=FLAGS.log_every_n_steps,
            graph=g,
            global_step=model.global_step,
            number_of_steps=FLAGS.number_of_steps,
            init_fn=model.init_fn,
            saver=saver,
            session_wrapper=tf_debug.LocalCLIDebugWrapperSession)
    else:
        tf.contrib.slim.learning.train(
            train_op,
            train_dir,
            log_every_n_steps=FLAGS.log_every_n_steps,
            graph=g,
            global_step=model.global_step,
            number_of_steps=FLAGS.number_of_steps,
            init_fn=model.init_fn,
            saver=saver)
Code Example #9
def run_caption(checkpoint_path, attrigram_checkpoint_path, filenames,
                divisions):
    g = tf.Graph()
    with g.as_default():
        # Build the model for evaluation.
        model_config = configuration.ModelConfig()
        model = show_and_tell_model.ShowAndTellModel(model_config,
                                                     mode="inference")
        model.use_style = True
        model.build()
        # Create the Saver to restore model Variables.
        saver = tf.train.Saver(var_list=model.model_variables)
        g.finalize()

    def _restore_fn(sess):
        tf.logging.info("Loading model from checkpoint: %s", checkpoint_path)
        saver.restore(sess, checkpoint_path)
        tf.logging.info("Successfully loaded checkpoint: %s",
                        os.path.basename(checkpoint_path))

    restore_fn = _restore_fn

    # Create the vocabulary.
    vocab = glove.load(model_config.config)[0]

    run_results = []
    with tf.Session(graph=g) as sess:
        # Load the model from checkpoint.
        restore_fn(sess)

        # Run the attribute probabilities
        ps = run_attributes.run_attributes(attrigram_checkpoint_path,
                                           filenames, divisions)

        # Prepare the list of image bytes for evaluation.
        images = []
        for filename in filenames:
            with tf.gfile.GFile(filename, "rb") as f:
                images.append(f.read())

        captions = []
        for img, p in zip(images, ps):
            input_feed = {
                "image_feed:0": img,
                model.attribute_probabilities: [p["probabilities"]]
            }
            sess.run(model.assign_initial_states, feed_dict=input_feed)

            # The original caption
            img_progress = [sess.run(model.style_seqs, feed_dict=input_feed)]

            # Perform style transfer
            for x in range(10):
                sess.run(model.descend_style, feed_dict=input_feed)
                caption = sess.run(model.style_seqs, feed_dict=input_feed)
                img_progress.append(caption)
            captions.append(img_progress)

        for i, caption in enumerate(captions):
            run_results.append({"filename": filenames[i], "captions": []})
            for flow in caption:
                for j in range(flow.shape[0]):
                    # Ignore begin and end words.
                    c = flow[j, 0, :].tolist()
                    sentence = [vocab.id_to_word(w) for w in c[:-1]]
                    sentence = " ".join(sentence)
                    run_results[i]["captions"].append(sentence)

    return run_results
Code Example #10
tf.logging.set_verbosity(tf.logging.INFO)

assert FLAGS.input_file_pattern, "--input_file_pattern is required"
assert FLAGS.train_dir, "--train_dir is required"

model_config = configuration.ModelConfig()
model_config.input_file_pattern = FLAGS.input_file_pattern
model_config.inception_checkpoint_file = FLAGS.inception_checkpoint_file

training_config = configuration.TrainingConfig()

g = tf.Graph()
with g.as_default():
    # build the model
    model = show_and_tell_model.ShowAndTellModel(
        model_config, mode='train', train_inception=FLAGS.train_inception)
    model.build()

    # set up the learning rate
    learning_rate_decay_fn = None
    if FLAGS.train_inception:
        learning_rate = tf.constant(
            training_config.train_inception_learning_rate)
    else:
        learning_rate = tf.constant(training_config.initial_learning_rate)

        if training_config.learning_rate_decay_factor > 0:
            num_batches_per_epoch = training_config.num_examples_per_epoch / model_config.batch_size
            decay_steps = int(num_batches_per_epoch *
                              training_config.num_epochs_per_decay)
Code Example #11
def train(number_of_steps):
    model_config = configuration.ModelConfig()
    model_config.input_file_pattern = FLAGS.input_file_pattern
    training_config = configuration.TrainingConfig()
    model_config.inception_checkpoint_file = FLAGS.inception_checkpoint_file

    # Create training directory.
    train_dir = FLAGS.train_dir
    # if not tf.gfile.IsDirectory(train_dir):
    #   tf.logging.info("Creating training directory: %s", train_dir)
    #   tf.gfile.MakeDirs(train_dir)

    # Build the TensorFlow graph.
    g = tf.Graph()
    with g.as_default():
        # Build the model.
        model = show_and_tell_model.ShowAndTellModel(
            model_config, mode="train", train_inception=FLAGS.train_inception)
        model.build()

        # Set up the learning rate.
        learning_rate_decay_fn = None
        if FLAGS.train_inception:
            print(
                "The inception weights are fine-tuned together with weights in the LSTM units and word embeddings."
            )
            learning_rate = tf.constant(
                training_config.train_inception_learning_rate)
        else:
            print(
                "The inception weights are frozen. Only weights in the LSTMs "
                "and word embeddings are randomly initialized and trained.")
            learning_rate = tf.constant(training_config.initial_learning_rate)
            if training_config.learning_rate_decay_factor > 0:
                num_batches_per_epoch = (
                    training_config.num_examples_per_epoch /
                    model_config.batch_size)
                decay_steps = int(num_batches_per_epoch *
                                  training_config.num_epochs_per_decay)

                def _learning_rate_decay_fn(learning_rate, global_step):
                    return tf.train.exponential_decay(
                        learning_rate,
                        global_step,
                        decay_steps=decay_steps,
                        decay_rate=training_config.learning_rate_decay_factor,
                        staircase=True)

                learning_rate_decay_fn = _learning_rate_decay_fn

        # Set up the training ops.
        train_op = tf.contrib.layers.optimize_loss(
            loss=model.total_loss,
            global_step=model.global_step,
            learning_rate=learning_rate,
            optimizer=training_config.optimizer,
            clip_gradients=training_config.clip_gradients,
            learning_rate_decay_fn=learning_rate_decay_fn)

        saver = tf.train.Saver(keep_checkpoint_every_n_hours=0.5)
        # saver = tf.train.Saver(max_to_keep=training_config.max_checkpoints_to_keep)

    # Run training.
    tf.contrib.slim.learning.train(train_op,
                                   train_dir,
                                   log_every_n_steps=FLAGS.log_every_n_steps,
                                   graph=g,
                                   global_step=model.global_step,
                                   number_of_steps=number_of_steps,
                                   init_fn=model.init_fn,
                                   saver=saver)
Code Example #12
 def build_model_eval(self, model_config):
     model = show_and_tell_model.ShowAndTellModel(model_config, mode="eval")
     model.build()
     self.model = model
     return model
Code Example #13
 def build_model(self, model_config):
     model = show_and_tell_model.ShowAndTellModel(model_config,
                                                  mode="inference")
     model.build()
     self.global_stepp = model.global_step
     return model