Example #1
def main(unused_argv):
    # Set up the model config.
    model_config = configuration.model_config(
        input_file_pattern=FLAGS.input_file_pattern,
        input_queue_capacity=FLAGS.num_eval_examples,
        shuffle_input_data=False)
    if FLAGS.model_config_overrides:
        model_config.parse_json(FLAGS.model_config_overrides)
    config_json = json.dumps(model_config.values(), indent=2)
    tf.logging.info("model_config: %s", config_json)

    with tf.Graph().as_default():
        # Build the model for evaluation.
        model = skip_thoughts_model.SkipThoughtsModel(model_config,
                                                      mode="eval")
        model.build()

        evaluation.evaluate_repeatedly(
            model=model,
            checkpoint_dir=FLAGS.checkpoint_dir,
            eval_dir=FLAGS.eval_dir,
            num_eval_examples=FLAGS.num_eval_examples,
            min_global_step_for_perplexity=FLAGS.min_global_step,
            master=FLAGS.master,
            eval_interval_secs=FLAGS.eval_interval_secs)
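The evaluation entry points above read their settings from command-line flags. A minimal sketch of the flag definitions they assume (flag names are taken from the FLAGS references in the code; the defaults are placeholders):

import tensorflow as tf

# Sketch only: names mirror the FLAGS attributes used above; defaults are guesses.
tf.flags.DEFINE_string("input_file_pattern", None,
                       "File pattern of sharded TFRecord files of evaluation examples.")
tf.flags.DEFINE_string("checkpoint_dir", None, "Directory containing model checkpoints.")
tf.flags.DEFINE_string("eval_dir", None, "Directory to write event logs to.")
tf.flags.DEFINE_string("master", "", "Name of the TensorFlow master to use.")
tf.flags.DEFINE_string("model_config_overrides", "",
                       "JSON overrides for the model configuration.")
tf.flags.DEFINE_integer("num_eval_examples", 50000, "Number of examples per evaluation run.")
tf.flags.DEFINE_integer("min_global_step", 100,
                        "Minimum global step before computing perplexity.")
tf.flags.DEFINE_integer("eval_interval_secs", 600, "Seconds between evaluation runs.")

FLAGS = tf.flags.FLAGS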
Example #2
    def build_graph_from_config(self, model_config, checkpoint_path):
        """Builds the inference graph from a configuration object.

        Args:
            model_config: Object containing configuration for building the model.
            checkpoint_path: Checkpoint file or a directory containing a checkpoint
                file.

        Returns:
            restore_fn: A function such that restore_fn(sess) loads model variables
                from the checkpoint file.
        """
        tf.logging.info("Building model.")
        model = skip_thoughts_model.SkipThoughtsModel(model_config,
                                                      mode="encode")
        model.build()
        variables = tf.global_variables()
        variables_to_restore = []
        restore_names = get_trainable_vars_fromchpt(checkpoint_path)

        for v in variables:
            if v.name in restore_names:
                variables_to_restore += [v]
                print(v.name, v.name in restore_names)

        saver = tf.train.Saver(variables_to_restore)

        return self._create_restore_fn(checkpoint_path, saver)
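As the docstring states, the returned restore_fn is applied to a live session. A minimal usage sketch (the InferenceWrapper instance, config call, and checkpoint path below are hypothetical placeholders):

import tensorflow as tf

wrapper = InferenceWrapper()  # hypothetical class containing the method above

g = tf.Graph()
with g.as_default():
    restore_fn = wrapper.build_graph_from_config(
        configuration.model_config(), "/tmp/skip_thoughts/model.ckpt-500000")
g.finalize()

with tf.Session(graph=g) as sess:
    restore_fn(sess)  # loads the selected variables from the checkpoint
    # The graph built in "encode" mode is now ready to encode sentences.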
Example #3
def main(unused_argv):
    if not FLAGS.input_file_pattern:
        raise ValueError("--input_file_pattern is required.")
    if not FLAGS.checkpoint_dir:
        raise ValueError("--checkpoint_dir is required.")
    if not FLAGS.eval_dir:
        raise ValueError("--eval_dir is required.")

    # Create the evaluation directory if it doesn't exist.
    eval_dir = FLAGS.eval_dir
    if not tf.gfile.IsDirectory(eval_dir):
        tf.logging.info("Creating eval directory: %s", eval_dir)
        tf.gfile.MakeDirs(eval_dir)

    g = tf.Graph()
    with g.as_default():
        # Build the model for evaluation.
        model_config = configuration.model_config(
            input_file_pattern=FLAGS.input_file_pattern,
            input_queue_capacity=FLAGS.num_eval_examples,
            shuffle_input_data=False)
        model = skip_thoughts_model.SkipThoughtsModel(model_config,
                                                      mode="eval")
        model.build()

        losses = tf.concat(model.target_cross_entropy_losses, 0)
        weights = tf.concat(model.target_cross_entropy_loss_weights, 0)

        # Create the Saver to restore model Variables.
        saver = tf.train.Saver()

        # Create the summary operation and the summary writer.
        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(eval_dir)

        g.finalize()

        # Run a new evaluation run every eval_interval_secs.
        while True:
            start = time.time()
            tf.logging.info(
                "Starting evaluation at " +
                time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()))
            run_once(model, losses, weights, saver, summary_writer, summary_op)
            time_to_next_eval = start + FLAGS.eval_interval_secs - time.time()
            if time_to_next_eval > 0:
                time.sleep(time_to_next_eval)
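run_once is not shown in this example. A plausible sketch, assuming it restores the latest checkpoint and reports perplexity from the concatenated losses and weights (the batch count, summary tag, and model.config attribute are assumptions):

import math

import numpy as np
import tensorflow as tf


def run_once(model, losses, weights, saver, summary_writer, summary_op):
    """Sketch: restore the newest checkpoint and log evaluation perplexity."""
    checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
    if not checkpoint_path:
        tf.logging.info("No checkpoint found in %s", FLAGS.checkpoint_dir)
        return

    with tf.Session() as sess:
        saver.restore(sess, checkpoint_path)
        global_step = tf.train.global_step(sess, model.global_step)

        # Input queues must be running to pull evaluation batches.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        sum_losses, sum_weights = 0.0, 0.0
        num_batches = int(math.ceil(
            FLAGS.num_eval_examples / float(model.config.batch_size)))
        for _ in range(num_batches):
            batch_losses, batch_weights = sess.run([losses, weights])
            sum_losses += np.sum(batch_losses * batch_weights)
            sum_weights += np.sum(batch_weights)
        perplexity = math.exp(sum_losses / sum_weights)
        tf.logging.info("Step %d: perplexity = %.4f", global_step, perplexity)

        # summary_op is accepted for interface parity; this sketch writes a
        # hand-built summary instead.
        summary = tf.Summary()
        summary.value.add(tag="perplexity", simple_value=perplexity)
        summary_writer.add_summary(summary, global_step)

        coord.request_stop()
        coord.join(threads, stop_grace_period_secs=10)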
Example #4
    def build_graph_from_config(self, model_config, checkpoint_path):
        """Builds the inference graph from a configuration object.

        Args:
            model_config: Object containing configuration for building the model.
            checkpoint_path: Checkpoint file or a directory containing a checkpoint
                file.

        Returns:
            restore_fn: A function such that restore_fn(sess) loads model variables
                from the checkpoint file.
        """
        tf.logging.info("Building model.")
        model = skip_thoughts_model.SkipThoughtsModel(model_config, mode="encode")
        model.build()
        saver = tf.train.Saver()

        return self._create_restore_fn(checkpoint_path, saver)
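Both build_graph_from_config variants delegate to self._create_restore_fn, which is defined on the wrapper's base class and not shown here. A sketch of what such a helper typically does (resolving a checkpoint directory to its newest checkpoint before building the closure; assumes os and tf are imported):

    def _create_restore_fn(self, checkpoint_path, saver):
        """Sketch: wraps saver.restore for a checkpoint file or directory."""
        if tf.gfile.IsDirectory(checkpoint_path):
            latest = tf.train.latest_checkpoint(checkpoint_path)
            if not latest:
                raise ValueError("No checkpoint file found in: %s" % checkpoint_path)
            checkpoint_path = latest

        def _restore_fn(sess):
            tf.logging.info("Loading model from checkpoint: %s", checkpoint_path)
            saver.restore(sess, checkpoint_path)
            tf.logging.info("Successfully loaded checkpoint: %s",
                            os.path.basename(checkpoint_path))

        return _restore_fn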
Example #5
def main(unused_argv):
    if not FLAGS.input_file_pattern:
        raise ValueError("--input_file_pattern is required.")
    if not FLAGS.train_dir:
        raise ValueError("--train_dir is required.")

    model_config = configuration.model_config(
        input_file_pattern=FLAGS.input_file_pattern)
    training_config = configuration.training_config()

    tf.logging.info("Building training graph.")
    g = tf.Graph()
    with g.as_default():
        model = skip_thoughts_model.SkipThoughtsModel(model_config,
                                                      mode="train")
        model.build()

        learning_rate = _setup_learning_rate(training_config,
                                             model.global_step)
        optimizer = tf.train.AdamOptimizer(learning_rate)

        train_tensor = tf.contrib.slim.learning.create_train_op(
            total_loss=model.total_loss,
            optimizer=optimizer,
            global_step=model.global_step,
            clip_gradient_norm=training_config.clip_gradient_norm)

        saver = tf.train.Saver()

    tf.contrib.slim.learning.train(
        train_op=train_tensor,
        logdir=FLAGS.train_dir,
        graph=g,
        global_step=model.global_step,
        number_of_steps=training_config.number_of_steps,
        save_summaries_secs=training_config.save_summaries_secs,
        saver=saver,
        save_interval_secs=training_config.save_model_secs)
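Several of these training examples call a local _setup_learning_rate helper. A sketch of the usual skip-thoughts version, assuming the training config exposes learning_rate, learning_rate_decay_factor, and learning_rate_decay_steps:

def _setup_learning_rate(config, global_step):
    """Sketch: constant learning rate with optional exponential decay."""
    if config.learning_rate_decay_factor > 0:
        learning_rate = tf.train.exponential_decay(
            learning_rate=float(config.learning_rate),
            global_step=global_step,
            decay_steps=config.learning_rate_decay_steps,
            decay_rate=config.learning_rate_decay_factor,
            staircase=False)
    else:
        learning_rate = tf.constant(config.learning_rate)
    return learning_rate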
Example #6
def main(unused_argv):
    if not FLAGS.train_dir:
        raise ValueError("--train_dir is required.")

    #read_vocab(FLAGS.vocab)
    model_config = configuration.model_config()
    training_config = configuration.training_config()
    ################ define discriminator model ################
    disc_model = Discriminator(sequence_length=MAXLEN,
                               num_classes=1,
                               vocab_size=model_config.vocab_size,
                               embedding_size=model_config.word_embedding_dim,
                               filter_sizes=[1, 2, 3, 4, 5, 7, 10],
                               num_filters=[100, 100, 100, 100, 100, 100, 100])

    ################# define training model #################
    model = skip_thoughts_model.SkipThoughtsModel(model_config, mode="train")
    model.build()
    learning_rate = _setup_learning_rate(training_config, model.global_step)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    variables_to_train = [v for v in tf.trainable_variables()]
    variables_to_restore = [
        v for v in tf.all_variables() if ('discriminator' not in v.name)
    ]

    print(len(variables_to_train))
    train_tensor = tf.contrib.slim.learning.create_train_op(
        total_loss=model.total_loss,
        optimizer=optimizer,
        clip_gradient_norm=training_config.clip_gradient_norm,
        variables_to_train=variables_to_train)

    ######################define target lstm ####################
    #target_lstm = skip_thoughts_model.TargetLSTM(config=model_config)
    #synthesized = True
    target_lstm = None
    synthesized = False
    ################ define testing model ################
    #model_config_test = configuration.model_config()
    #model_test = skip_thoughts_model.SkipThoughtsModel(model_config_test, mode="eval")
    #model_test.build(is_testing=True)

    ################ define savers ################
    reloader = tf.train.Saver(var_list=variables_to_restore)
    reloader_all = tf.train.Saver()
    saver = tf.train.Saver(max_to_keep=1000)
    gpu_config = tf.ConfigProto(gpu_options=tf.GPUOptions(
        per_process_gpu_memory_fraction=1.0, allow_growth=True),
                                allow_soft_placement=True,
                                log_device_placement=False)

    init_op = tf.global_variables_initializer()
    sess = tf.Session(config=gpu_config)
    run_metadata = tf.RunMetadata()
    sess.run(init_op,
             options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE,
                                   output_partition_graphs=True),
             run_metadata=run_metadata)
    with open("/tmp/meta.txt", 'w') as f:
        f.write(str(run_metadata))

    if FLAGS.reload_model:
        reloader.restore(sess, FLAGS.reload_model)
    if FLAGS.reload_model_all:
        reloader_all.restore(sess, FLAGS.reload_model_all)

    ################ load training data ##############
    train_data_loader = DataLoader(128)
    train_data_loader.load(FLAGS.train_corpus_en, FLAGS.train_corpus_fr)

    total_loss_sup_list = []
    total_loss_rl_list = []
    bleu_list = []
    fake_list, real_list, neglikely_list = [], [], []

    outf = open(os.path.join(FLAGS.train_dir, 'log.txt'), 'a')
    logf = open(os.path.join(FLAGS.train_dir, 'debug_log.txt'), 'w')

    ############### run training and testing #############
    for i in xrange(1000000):
        model_prefix = ""
        if i < FLAGS.pretrain_G_steps:
            model_prefix = "preG_"
            np_global_step, total_loss_sup, total_loss_rl, avg_bleu, avg_fake, avg_real, avg_neglikely = my_train_step(
                sess,
                train_tensor,
                model,
                train_data_loader,
                logf,
                train_sup=True,
                train_rl=False,
                disc_model=disc_model,
                adjustD=False,
                adjustG=True,
                given_num=MAXLEN)

        elif i < FLAGS.pretrain_G_steps + FLAGS.pretrain_D_steps:
            model_prefix = "preD_"
            np_global_step, total_loss_sup, total_loss_rl, avg_bleu, avg_fake, avg_real, avg_neglikely = my_train_step(
                sess,
                train_tensor,
                model,
                train_data_loader,
                logf,
                train_sup=False,
                train_rl=True,
                disc_model=disc_model,
                adjustD=True,
                adjustG=False,
                given_num=0)

        elif FLAGS.mixer_period and FLAGS.mixer_step and FLAGS.mixer_period > 0:
            gn = default_given_num - (
                i - FLAGS.pretrain_G_steps - FLAGS.pretrain_D_steps
            ) // FLAGS.mixer_period * FLAGS.mixer_step
            if gn < 0: gn = 0
            model_prefix = "mixGN" + str(gn) + "_"
            if i % 10 == 0:
                adjustD = FLAGS.adjustD
            else:
                adjustD = False
            if i % 200 == 0:
                print("gn=", gn)
            np_global_step, total_loss_sup, total_loss_rl, avg_bleu, avg_fake, avg_real, avg_neglikely = my_train_step(
                sess, train_tensor, model, train_data_loader, logf,
                train_sup=False, train_rl=True, disc_model=disc_model,
                adjustD=adjustD, adjustG=FLAGS.adjustG, given_num=gn)

        else:
            model_prefix = ""
            np_global_step, total_loss_sup, total_loss_rl, avg_bleu, avg_fake, avg_real, avg_neglikely = my_train_step(
                sess,
                train_tensor,
                model,
                train_data_loader,
                logf,
                train_sup=False,
                train_rl=True,
                disc_model=disc_model,
                adjustD=FLAGS.adjustD,
                adjustG=FLAGS.adjustG)

        total_loss_sup_list.append(total_loss_sup)
        total_loss_rl_list.append(total_loss_rl)
        fake_list.append(avg_fake)
        real_list.append(avg_real)
        bleu_list.append(avg_bleu)
        neglikely_list.append(avg_neglikely)

        if np_global_step % 2000 == 0:
            saver.save(
                sess,
                os.path.join(FLAGS.train_dir,
                             model_prefix + "model-" + str(np_global_step)))
        if np_global_step % 20 == 0:
            # my_test_step(sess, model_test, FLAGS.test_result+'-'+str(np_global_step))
            print(np_global_step, np.mean(total_loss_sup_list),
                  np.mean(total_loss_rl_list))
            print(np.mean(bleu_list), np.mean(fake_list), np.mean(real_list))
            print(np.mean(neglikely_list))
            outf.write(
                str(np_global_step) + " " + str(np.mean(total_loss_sup_list)) +
                " " + str(np.mean(total_loss_rl_list)) + " " +
                str(np.mean(bleu_list)) + " " + str(np.mean(fake_list)) + " " +
                str(np.mean(real_list)) + " " + str(np.mean(neglikely_list)) +
                "\n")
            total_loss_sup_list, total_loss_rl_list, bleu_list, fake_list, real_list, neglikely_list = [],[],[],[],[],[]
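The third branch above anneals given_num by mixer_step every mixer_period iterations after pretraining (a MIXER-style schedule). A small standalone illustration with hypothetical values:

def given_num_schedule(i, default_given_num, pretrain_steps, mixer_period, mixer_step):
    # Same arithmetic as above; pretrain_steps = pretrain_G_steps + pretrain_D_steps.
    gn = default_given_num - (i - pretrain_steps) // mixer_period * mixer_step
    return max(gn, 0)

# Hypothetical values: with default_given_num=30, mixer_period=5000 and mixer_step=3,
# the first 5000 post-pretraining steps use given_num=30, the next 5000 use 27, and so on.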
Example #7
def main(unused_argv):
    # Create training directory if it doesn't already exist.
    if not tf.gfile.IsDirectory(FLAGS.train_dir):
        tf.logging.info("Creating training directory: %s", FLAGS.train_dir)
        tf.gfile.MakeDirs(FLAGS.train_dir)

    # Set up the model config.
    model_config = configuration.model_config(
        input_file_pattern=FLAGS.input_file_pattern)
    if FLAGS.model_config_overrides:
        model_config.parse_json(FLAGS.model_config_overrides)
    _log_config(model_config, "model_config")

    # Set up the training config.
    training_config = configuration.training_config()
    if FLAGS.training_config_overrides:
        training_config.parse_json(FLAGS.training_config_overrides)
    _log_config(training_config, "training_config")

    tf.logging.info("Building training graph.")
    g = tf.Graph()
    with g.as_default(), g.device(
            tf.train.replica_device_setter(FLAGS.ps_tasks)):
        # Build the model.
        model = skip_thoughts_model.SkipThoughtsModel(model_config,
                                                      mode="train")
        model.build()

        _log_variable_device_placement()

        hooks = [
            # Stop training if loss is NaN.
            tf.train.NanTensorHook(model.total_loss),
            # Log every training step.
            tf.train.LoggingTensorHook(
                {
                    "global_step": model.global_step,
                    "total_loss": model.total_loss
                },
                every_n_iter=1)
        ]

        # Set up the learning rate and optimizer.
        learning_rate = training.create_learning_rate(training_config,
                                                      model.global_step)
        optimizer = training.create_optimizer(training_config, learning_rate)

        # Set up distributed sync or async training.
        is_chief = (FLAGS.task == 0)
        if FLAGS.sync_replicas:
            optimizer = tf.train.SyncReplicasOptimizer(
                optimizer,
                replicas_to_aggregate=FLAGS.replicas_to_aggregate,
                total_num_replicas=FLAGS.total_num_replicas)
            hooks.append(optimizer.make_session_run_hook(is_chief))
        else:
            # Startup delay for non-chief asynchronous workers.
            if not is_chief and training_config.startup_delay_steps:
                hooks.append(
                    tf.train.GlobalStepWaiterHook(
                        training_config.startup_delay_steps))

        train_tensor = training.create_train_op(training_config, optimizer,
                                                model)
        keep_every_n = training_config.keep_checkpoint_every_n_hours
        saver = tf.train.Saver(
            max_to_keep=training_config.max_checkpoints_to_keep,
            keep_checkpoint_every_n_hours=keep_every_n,
            save_relative_paths=True)
        scaffold = tf.train.Scaffold(saver=saver)

        # Possibly set a step limit.
        if training_config.number_of_steps:
            hooks.append(
                tf.train.StopAtStepHook(
                    last_step=training_config.number_of_steps))

        # Create the TensorFlow session.
        with tf.train.MonitoredTrainingSession(
                master=FLAGS.master,
                is_chief=is_chief,
                checkpoint_dir=FLAGS.train_dir,
                scaffold=scaffold,
                hooks=hooks,
                save_checkpoint_secs=training_config.save_model_secs,
                save_summaries_steps=None,
                save_summaries_secs=training_config.save_summaries_secs
        ) as sess:

            # Run training.
            while not sess.should_stop():
                sess.run(train_tensor)
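_log_config and _log_variable_device_placement are local helpers that are not shown in this example; _log_config presumably mirrors the JSON logging used in Example #1, roughly:

def _log_config(config, name):
    # Sketch: dump the configuration values as JSON to the logs (assumes json is imported).
    tf.logging.info("%s: %s", name, json.dumps(config.values(), indent=2))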
Example #8
def main(unused_argv):
  if not FLAGS.input_file_pattern:
    raise ValueError("--input_file_pattern is required.")
  if not FLAGS.train_dir:
    raise ValueError("--train_dir is required.")

  model_config = configuration.model_config(
      input_file_pattern=FLAGS.input_file_pattern, bidirectional_encoder=True)
  training_config = configuration.training_config()

  tf.logging.info("Building training graph.")
  g = tf.Graph()
  with g.as_default():
    model = skip_thoughts_model.SkipThoughtsModel(model_config, mode="train")
    model.build()
     
    encoder_variables = [v for v in tf.global_variables()
                         if v.name.startswith("encoder") and "Adam" not in v.name]
    embedding_variables = [v for v in tf.global_variables()
                           if v.name.startswith("word_embedding") and "Adam" not in v.name]
    print([v.name for v in (encoder_variables + embedding_variables)])

    learning_rate = _setup_learning_rate(training_config, model.global_step)
    optimizer = tf.train.AdamOptimizer(learning_rate)

    
    encoder_mult = 0.1
    embedding_mult = 0.01
    multiply = dict([(v, encoder_mult) for v in encoder_variables] +
                    [(v, embedding_mult) for v in embedding_variables])

    train_tensor = tf.contrib.slim.learning.create_train_op(
        total_loss=model.total_loss,
        optimizer=optimizer,
        gradient_multipliers=multiply,
        global_step=model.global_step,
        clip_gradient_norm=training_config.clip_gradient_norm)

    saver = tf.train.Saver()
    model_path = tf.train.latest_checkpoint(FLAGS.train_dir)
    
    pretrain_saver = tf.train.Saver(encoder_variables+embedding_variables)

  print(model_path)
  if model_path:
    def restore_fn(sess):
      tf.logging.info("Restoring SA&T variables from checkpoint file")
      saver.restore(sess, model_path)
  else:
    def restore_fn(sess):
      tf.logging.info(
        "Restoring SA&T variables from pretrained model")
      #saver.restore(sess, "/home/ubuntu/code/A_skip_thoughts_2/skip_thoughts/model/backup/run1/model.ckpt-2111")
      pretrain_saver.restore(sess, "/home/ubuntu/code/pretrained/bi/model.ckpt-500008")
  
  tf.contrib.slim.learning.train(
      train_op=train_tensor,
      logdir=FLAGS.train_dir,
      graph=g,
      global_step=model.global_step,
      number_of_steps=training_config.number_of_steps,
      save_summaries_secs=training_config.save_summaries_secs,
      saver=saver,
      save_interval_secs=training_config.save_model_secs,
      init_fn=restore_fn)
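For reference, the gradient_multipliers argument makes slim scale each listed variable's gradient before the optimizer step, which is how the encoder and word-embedding weights receive smaller effective learning rates here. Conceptually (a simplified sketch, not slim's actual implementation, and ignoring IndexedSlices gradients):

grads_and_vars = optimizer.compute_gradients(model.total_loss)
scaled = [(None if g is None else g * multiply.get(v, 1.0), v)
          for g, v in grads_and_vars]
train_op = optimizer.apply_gradients(scaled, global_step=model.global_step)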
Example #9
def main(unused_argv):
    if not FLAGS.input_file_pattern:
        raise ValueError("--input_file_pattern is required.")
    if not FLAGS.train_dir:
        raise ValueError("--train_dir is required.")

    model_config = configuration.model_config(
        input_file_pattern=FLAGS.input_file_pattern)
    training_config = configuration.training_config()

    tf.logging.info("Building training graph.")
    g = tf.Graph()
    with g.as_default():
        grads_tower = []
        for dev_ind in range(4):
            with tf.device('/gpu:%d' % dev_ind):
                model = skip_thoughts_model.SkipThoughtsModel(model_config,
                                                              mode="train")
                model.build()

                learning_rate = _setup_learning_rate(training_config,
                                                     model.global_step)
                optimizer = tf.train.AdamOptimizer(learning_rate)

                total_loss = model.total_loss
                # Update ops use GraphKeys.UPDATE_OPS collection if update_ops is None.
                update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))

                # Make sure update_ops are computed before total_loss.
                if update_ops:
                    with ops.control_dependencies(update_ops):
                        barrier = control_flow_ops.no_op(name='update_barrier')
                    total_loss = control_flow_ops.with_dependencies([barrier],
                                                                    total_loss)

                variables_to_train = tf_variables.trainable_variables()

                assert variables_to_train

                gate_gradients = tf_optimizer.Optimizer.GATE_OP
                # Create the gradients. Note that apply_gradients adds the gradient
                # computation to the current graph.
                grads = optimizer.compute_gradients(
                    total_loss,
                    variables_to_train,
                    gate_gradients=gate_gradients,
                    aggregation_method=None,
                    colocate_gradients_with_ops=False)

                grads = tf.contrib.slim.learning.clip_gradient_norms(
                    grads, training_config.clip_gradient_norm)

                grads_tower.append(grads)

        avg_grads = average_gradients.average_gradients(grads_tower)
        # Create gradient updates.
        grad_updates = optimizer.apply_gradients(avg_grads,
                                                 global_step=model.global_step)

        with ops.name_scope('train_op'):
            # Make sure total_loss is valid.
            total_loss = array_ops.check_numerics(total_loss,
                                                  'LossTensor is inf or nan')

            # Ensure the train_tensor computes grad_updates.
            train_op = control_flow_ops.with_dependencies([grad_updates],
                                                          total_loss)

        # Add the operation used for training to the 'train_op' collection
        train_ops = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
        if train_op not in train_ops:
            train_ops.append(train_op)

        saver = tf.train.Saver()

    tf.contrib.slim.learning.train(
        train_op=train_op,
        logdir=FLAGS.train_dir,
        graph=g,
        global_step=model.global_step,
        number_of_steps=training_config.number_of_steps,
        save_summaries_secs=training_config.save_summaries_secs,
        saver=saver,
        save_interval_secs=training_config.save_model_secs)
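average_gradients.average_gradients comes from a local module; the standard multi-GPU implementation (as in the TensorFlow CIFAR-10 tutorial) averages each variable's gradient across the towers. A sketch:

def average_gradients(tower_grads):
    """Sketch: averages per-variable gradients over a list of tower gradient lists."""
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # grad_and_vars is ((grad_gpu0, var), (grad_gpu1, var), ...) for one variable.
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        grad = tf.reduce_mean(tf.concat(grads, 0), 0)
        # The variable is shared across towers, so take it from the first tower.
        average_grads.append((grad, grad_and_vars[0][1]))
    return average_grads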
Example #10
def main(unused_argv):
    if not FLAGS.input_file_pattern:
        raise ValueError("--input_file_pattern is required.")
    if not FLAGS.run_dir:
        raise ValueError("--run_dir is required.")
    if not FLAGS.decoder:
        raise ValueError("--decoder is required.")

    if not FLAGS.train_dir:
        train_dir = os.path.join(FLAGS.run_dir,
                                 'run_{t}'.format(t=time.time()))
        tf.logging.info(
            "No specified --train_dir. Creating {d}.".format(d=train_dir))
        os.makedirs(train_dir)

        write_config(train_dir=train_dir, flags=FLAGS)

    else:
        tf.logging.info("Specified --train_dir {d}; Not autocreating.".format(
            d=FLAGS.train_dir))
        train_dir = FLAGS.train_dir

    decoder_config = experiments.get_decoder_config(flags=FLAGS)
    model_config = configuration.model_config(
        input_file_pattern=FLAGS.input_file_pattern,
        vocab_size=FLAGS.vocab_size,
        batch_size=FLAGS.batch_size,
        word_embedding_dim=FLAGS.word_dim,
        pretrained_word_emb_file=FLAGS.pretrained_word_emb_file,
        word_emb_trainable=FLAGS.word_emb_trainable,
        encoder_dim=FLAGS.encoder_dim,
        skipgram_encoder=FLAGS.skipgram_encoder,
        sequence_decoder_pre=decoder_config.sequence_decoder_pre,
        sequence_decoder_cur=decoder_config.sequence_decoder_cur,
        sequence_decoder_post=decoder_config.sequence_decoder_post,
        skipgram_decoder_pre=decoder_config.skipgram_decoder_pre,
        skipgram_decoder_cur=decoder_config.skipgram_decoder_cur,
        skipgram_decoder_post=decoder_config.skipgram_decoder_post,
        share_weights_logits=FLAGS.share_weights_logits,
        normalise_decoder_losses=FLAGS.normalise_decoder_losses,
        skipgram_prefactor=FLAGS.skipgram_prefactor,
        sequence_prefactor=FLAGS.sequence_prefactor)
    training_config = configuration.training_config(
        number_of_steps=FLAGS.number_of_steps)

    tf.logging.info("Building training graph.")
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(1234)
        model = skip_thoughts_model.SkipThoughtsModel(model_config,
                                                      mode="train")
        model.build()

        learning_rate = _setup_learning_rate(training_config,
                                             model.global_step)
        optimizer = tf.train.AdamOptimizer(learning_rate)

        train_tensor = tf.contrib.slim.learning.create_train_op(
            total_loss=model.total_loss,
            optimizer=optimizer,
            global_step=model.global_step,
            clip_gradient_norm=training_config.clip_gradient_norm,
            summarize_gradients=True,
            check_numerics=True)

        saver = tf.train.Saver()

    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=FLAGS.gpu_fraction)

    tf.contrib.slim.learning.train(
        train_op=train_tensor,
        logdir=train_dir,
        graph=g,
        global_step=model.global_step,
        number_of_steps=training_config.number_of_steps,
        session_config=tf.ConfigProto(gpu_options=gpu_options),
        save_summaries_secs=training_config.save_summaries_secs,
        saver=saver,
        save_interval_secs=training_config.save_model_secs)
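write_config is a local helper used when a fresh train_dir is created; presumably it records the run's flag values there, for example (a sketch; the file name and the absl-backed flag_values_dict() call are assumptions):

def write_config(train_dir, flags):
    # Sketch: persist the run's flag values next to the checkpoints.
    config_path = os.path.join(train_dir, "flags.json")
    with open(config_path, "w") as f:
        json.dump(flags.flag_values_dict(), f, indent=2, sort_keys=True)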