Example #1
def main(unused_argv):
    if not FLAGS.input_file_pattern:
        raise ValueError("--input_file_pattern is required.")
    if not FLAGS.train_dir:
        raise ValueError("--train_dir is required.")

    model_config = configuration.model_config(
        input_file_pattern=FLAGS.input_file_pattern)
    training_config = configuration.training_config()

    tf.logging.info("Building training graph.")
    g = tf.Graph()
    with g.as_default():
        model = skip_thoughts_model.SkipThoughtsModel(model_config,
                                                      mode="train")
        model.build()

        learning_rate = _setup_learning_rate(training_config,
                                             model.global_step)
        optimizer = tf.train.AdamOptimizer(learning_rate)

        train_tensor = tf.contrib.slim.learning.create_train_op(
            total_loss=model.total_loss,
            optimizer=optimizer,
            global_step=model.global_step,
            clip_gradient_norm=training_config.clip_gradient_norm)

        saver = tf.train.Saver()

    tf.contrib.slim.learning.train(
        train_op=train_tensor,
        logdir=FLAGS.train_dir,
        graph=g,
        global_step=model.global_step,
        number_of_steps=training_config.number_of_steps,
        save_summaries_secs=training_config.save_summaries_secs,
        saver=saver,
        save_interval_secs=training_config.save_model_secs)
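This example (like the ones below) calls a _setup_learning_rate helper that is defined elsewhere in train.py. A minimal sketch, assuming the training config exposes learning_rate, learning_rate_decay_factor and learning_rate_decay_steps as in the reference skip_thoughts code:

def _setup_learning_rate(config, global_step):
    """Sets up the learning rate, with exponential decay if configured."""
    if config.learning_rate_decay_factor > 0:
        learning_rate = tf.train.exponential_decay(
            learning_rate=float(config.learning_rate),
            global_step=global_step,
            decay_steps=config.learning_rate_decay_steps,
            decay_rate=config.learning_rate_decay_factor,
            staircase=False)
    else:
        learning_rate = tf.constant(config.learning_rate)
    return learning_rate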
Example #2
File: train.py  Project: ALISCIFP/models
def main(unused_argv):
  if not FLAGS.input_file_pattern:
    raise ValueError("--input_file_pattern is required.")
  if not FLAGS.train_dir:
    raise ValueError("--train_dir is required.")

  model_config = configuration.model_config(
      input_file_pattern=FLAGS.input_file_pattern)
  training_config = configuration.training_config()

  tf.logging.info("Building training graph.")
  g = tf.Graph()
  with g.as_default():
    model = skip_thoughts_model.SkipThoughtsModel(model_config, mode="train")
    model.build()

    learning_rate = _setup_learning_rate(training_config, model.global_step)
    optimizer = tf.train.AdamOptimizer(learning_rate)

    train_tensor = tf.contrib.slim.learning.create_train_op(
        total_loss=model.total_loss,
        optimizer=optimizer,
        global_step=model.global_step,
        clip_gradient_norm=training_config.clip_gradient_norm)

    saver = tf.train.Saver()

  tf.contrib.slim.learning.train(
      train_op=train_tensor,
      logdir=FLAGS.train_dir,
      graph=g,
      global_step=model.global_step,
      number_of_steps=training_config.number_of_steps,
      save_summaries_secs=training_config.save_summaries_secs,
      saver=saver,
      save_interval_secs=training_config.save_model_secs)
Example #3
def main(unused_argv):
    if not FLAGS.train_dir:
        raise ValueError("--train_dir is required.")

    #read_vocab(FLAGS.vocab)
    model_config = configuration.model_config()
    training_config = configuration.training_config()
    ################ define discriminator model ################
    disc_model = Discriminator(sequence_length=MAXLEN,
                               num_classes=1,
                               vocab_size=model_config.vocab_size,
                               embedding_size=model_config.word_embedding_dim,
                               filter_sizes=[1, 2, 3, 4, 5, 7, 10],
                               num_filters=[100, 100, 100, 100, 100, 100, 100])

    ################# define training model #################
    model = skip_thoughts_model.SkipThoughtsModel(model_config, mode="train")
    model.build()
    learning_rate = _setup_learning_rate(training_config, model.global_step)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    variables_to_train = tf.trainable_variables()
    variables_to_restore = [
        v for v in tf.global_variables() if 'discriminator' not in v.name
    ]

    print(len(variables_to_train))
    train_tensor = tf.contrib.slim.learning.create_train_op(
        total_loss=model.total_loss,
        optimizer=optimizer,
        clip_gradient_norm=training_config.clip_gradient_norm,
        variables_to_train=variables_to_train)

    ######################define target lstm ####################
    #target_lstm = skip_thoughts_model.TargetLSTM(config=model_config)
    #synthesized = True
    target_lstm = None
    synthesized = False
    ################ define testing model ################
    #model_config_test = configuration.model_config()
    #model_test = skip_thoughts_model.SkipThoughtsModel(model_config_test, mode="eval")
    #model_test.build(is_testing=True)

    ################ define savers ################
    reloader = tf.train.Saver(var_list=variables_to_restore)
    reloader_all = tf.train.Saver()
    saver = tf.train.Saver(max_to_keep=1000)
    gpu_config = tf.ConfigProto(gpu_options=tf.GPUOptions(
        per_process_gpu_memory_fraction=1.0, allow_growth=True),
                                allow_soft_placement=True,
                                log_device_placement=False)

    init_op = tf.global_variables_initializer()
    sess = tf.Session(config=gpu_config)
    run_metadata = tf.RunMetadata()
    sess.run(init_op,
             options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE,
                                   output_partition_graphs=True),
             run_metadata=run_metadata)
    with open("/tmp/meta.txt", 'w') as f:
        f.write(str(run_metadata))

    if FLAGS.reload_model:
        reloader.restore(sess, FLAGS.reload_model)
    if FLAGS.reload_model_all:
        reloader_all.restore(sess, FLAGS.reload_model_all)

    ################ load training data ##############
    train_data_loader = DataLoader(128)
    train_data_loader.load(FLAGS.train_corpus_en, FLAGS.train_corpus_fr)

    total_loss_sup_list = []
    total_loss_rl_list = []
    bleu_list = []
    fake_list, real_list, neglikely_list = [], [], []

    outf = open(os.path.join(FLAGS.train_dir, 'log.txt'), 'a')
    logf = open(os.path.join(FLAGS.train_dir, 'debug_log.txt'), 'w')

    ############### run training and testing #############
    for i in xrange(1000000):
        model_prefix = ""
        if i < FLAGS.pretrain_G_steps:
            model_prefix = "preG_"
            np_global_step, total_loss_sup, total_loss_rl, avg_bleu, avg_fake, avg_real, avg_neglikely = my_train_step(
                sess,
                train_tensor,
                model,
                train_data_loader,
                logf,
                train_sup=True,
                train_rl=False,
                disc_model=disc_model,
                adjustD=False,
                adjustG=True,
                given_num=MAXLEN)

        elif i < FLAGS.pretrain_G_steps + FLAGS.pretrain_D_steps:
            model_prefix = "preD_"
            np_global_step, total_loss_sup, total_loss_rl, avg_bleu, avg_fake, avg_real, avg_neglikely = my_train_step(
                sess,
                train_tensor,
                model,
                train_data_loader,
                logf,
                train_sup=False,
                train_rl=True,
                disc_model=disc_model,
                adjustD=True,
                adjustG=False,
                given_num=0)

        elif FLAGS.mixer_period and FLAGS.mixer_step and FLAGS.mixer_period > 0:
            gn = default_given_num - (
                i - FLAGS.pretrain_G_steps - FLAGS.pretrain_D_steps
            ) // FLAGS.mixer_period * FLAGS.mixer_step
            if gn < 0: gn = 0
            model_prefix = "mixGN" + str(gn) + "_"
            if i % 10 == 0:
                adjustD = FLAGS.adjustD
            else:
                adjustD = False
            if i % 200 == 0:
                print("gn=", gn)
            np_global_step, total_loss_sup, total_loss_rl, avg_bleu, avg_fake, avg_real, avg_neglikely = my_train_step(
                sess, train_tensor, model, train_data_loader, logf,
                train_sup=False, train_rl=True, disc_model=disc_model,
                adjustD=adjustD, adjustG=FLAGS.adjustG, given_num=gn)

        else:
            model_prefix = ""
            np_global_step, total_loss_sup, total_loss_rl, avg_bleu, avg_fake, avg_real, avg_neglikely = my_train_step(
                sess,
                train_tensor,
                model,
                train_data_loader,
                logf,
                train_sup=False,
                train_rl=True,
                disc_model=disc_model,
                adjustD=FLAGS.adjustD,
                adjustG=FLAGS.adjustG)

        total_loss_sup_list.append(total_loss_sup)
        total_loss_rl_list.append(total_loss_rl)
        fake_list.append(avg_fake)
        real_list.append(avg_real)
        bleu_list.append(avg_bleu)
        neglikely_list.append(avg_neglikely)

        if np_global_step % 2000 == 0:
            saver.save(
                sess,
                os.path.join(FLAGS.train_dir,
                             model_prefix + "model-" + str(np_global_step)))
        if np_global_step % 20 == 0:
            # my_test_step(sess, model_test, FLAGS.test_result+'-'+str(np_global_step))
            print(np_global_step, np.mean(total_loss_sup_list),
                  np.mean(total_loss_rl_list))
            print(np.mean(bleu_list), np.mean(fake_list), np.mean(real_list))
            print(np.mean(neglikely_list))
            outf.write(
                str(np_global_step) + " " + str(np.mean(total_loss_sup_list)) +
                " " + str(np.mean(total_loss_rl_list)) + " " +
                str(np.mean(bleu_list)) + " " + str(np.mean(fake_list)) + " " +
                str(np.mean(real_list)) + " " + str(np.mean(neglikely_list)) +
                "\n")
            total_loss_sup_list, total_loss_rl_list, bleu_list, fake_list, real_list, neglikely_list = [], [], [], [], [], []
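The third branch of the loop above implements a MIXER-style curriculum: given_num, the number of ground-truth tokens fed to the generator before it starts sampling, shrinks toward 0 as training proceeds. A self-contained sketch of that schedule, with every constant below assumed purely for illustration (they are not the project's flag values):

# All values below are made up for the demonstration.
MAXLEN = 50
default_given_num = MAXLEN
pretrain_steps = 10000 + 5000      # stands in for pretrain_G_steps + pretrain_D_steps
mixer_period, mixer_step = 2000, 5

for i in (15000, 19000, 25000, 35000):
    gn = default_given_num - (i - pretrain_steps) // mixer_period * mixer_step
    gn = max(gn, 0)
    print(i, gn)  # 50, 40, 25, 0: the generator gradually takes over the whole sentence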
Example #4
def main(unused_argv):
  if not FLAGS.input_file_pattern:
    raise ValueError("--input_file_pattern is required.")
  if not FLAGS.train_dir:
    raise ValueError("--train_dir is required.")

  model_config = configuration.model_config(
      input_file_pattern=FLAGS.input_file_pattern, bidirectional_encoder=True)
  training_config = configuration.training_config()

  tf.logging.info("Building training graph.")
  g = tf.Graph()
  with g.as_default():
    model = skip_thoughts_model.SkipThoughtsModel(model_config, mode="train")
    model.build()
     
    encoder_variables = [v for v in tf.global_variables()
                         if v.name.startswith("encoder") and "Adam" not in v.name]
    embedding_variables = [v for v in tf.global_variables()
                           if v.name.startswith("word_embedding") and "Adam" not in v.name]
    print([v.name for v in (encoder_variables + embedding_variables)])

    learning_rate = _setup_learning_rate(training_config, model.global_step)
    optimizer = tf.train.AdamOptimizer(learning_rate)

    
    encoder_mult = 0.1
    embedding_mult = 0.01
    multiply = dict(
        [(v, encoder_mult) for v in encoder_variables] +
        [(v, embedding_mult) for v in embedding_variables])

    train_tensor = tf.contrib.slim.learning.create_train_op(
        total_loss=model.total_loss,
        optimizer=optimizer,
        gradient_multipliers=multiply,
        global_step=model.global_step,
        clip_gradient_norm=training_config.clip_gradient_norm)

    saver = tf.train.Saver()
    model_path = tf.train.latest_checkpoint(FLAGS.train_dir)
    
    pretrain_saver = tf.train.Saver(encoder_variables+embedding_variables)

  print(model_path)
  if model_path:
    def restore_fn(sess):
      tf.logging.info("Restoring SA&T variables from checkpoint file")
      saver.restore(sess, model_path)
  else:
    def restore_fn(sess):
      tf.logging.info(
        "Restoring SA&T variables from pretrained model")
      #saver.restore(sess, "/home/ubuntu/code/A_skip_thoughts_2/skip_thoughts/model/backup/run1/model.ckpt-2111")
      pretrain_saver.restore(sess, "/home/ubuntu/code/pretrained/bi/model.ckpt-500008")
  
  tf.contrib.slim.learning.train(
      train_op=train_tensor,
      logdir=FLAGS.train_dir,
      graph=g,
      global_step=model.global_step,
      number_of_steps=training_config.number_of_steps,
      save_summaries_secs=training_config.save_summaries_secs,
      saver=saver,
      save_interval_secs=training_config.save_model_secs,
      init_fn=restore_fn)
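Example #4 fine-tunes a pretrained bidirectional encoder and slows down its updates through gradient_multipliers: encoder gradients are scaled by 0.1 and word-embedding gradients by 0.01 before being applied. A small stand-alone illustration of that effect (the variables and optimizer here are invented for the demo; this is not the slim implementation itself):

import tensorflow as tf

w_encoder = tf.Variable(1.0, name="encoder_w")            # stands in for an encoder variable
w_embedding = tf.Variable(1.0, name="word_embedding_w")   # stands in for an embedding variable
loss = tf.square(w_encoder) + tf.square(w_embedding)

multipliers = {w_encoder: 0.1, w_embedding: 0.01}
optimizer = tf.train.GradientDescentOptimizer(1.0)
grads_and_vars = optimizer.compute_gradients(loss, [w_encoder, w_embedding])
# Rescale each gradient by its per-variable multiplier before applying it,
# which is conceptually what create_train_op does with gradient_multipliers.
scaled = [(g * multipliers.get(v, 1.0), v) for g, v in grads_and_vars]
train_op = optimizer.apply_gradients(scaled)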
Example #5
def main(unused_argv):
    if not FLAGS.input_file_pattern:
        raise ValueError("--input_file_pattern is required.")
    if not FLAGS.train_dir:
        raise ValueError("--train_dir is required.")

    model_config = configuration.model_config(
        input_file_pattern=FLAGS.input_file_pattern)
    training_config = configuration.training_config()

    tf.logging.info("Building training graph.")
    g = tf.Graph()
    with g.as_default():
        grads_tower = []
        for dev_ind in range(4):
            with tf.device('/gpu:%d' % dev_ind):
                model = skip_thoughts_model.SkipThoughtsModel(model_config,
                                                              mode="train")
                model.build()

                learning_rate = _setup_learning_rate(training_config,
                                                     model.global_step)
                optimizer = tf.train.AdamOptimizer(learning_rate)

                total_loss = model.total_loss
                # Update ops use GraphKeys.UPDATE_OPS collection if update_ops is None.
                update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))

                # Make sure update_ops are computed before total_loss.
                if update_ops:
                    with ops.control_dependencies(update_ops):
                        barrier = control_flow_ops.no_op(name='update_barrier')
                    total_loss = control_flow_ops.with_dependencies([barrier],
                                                                    total_loss)

                variables_to_train = tf_variables.trainable_variables()

                assert variables_to_train

                gate_gradients = tf_optimizer.Optimizer.GATE_OP
                # Create the gradients. Note that apply_gradients adds the gradient
                # computation to the current graph.
                grads = optimizer.compute_gradients(
                    total_loss,
                    variables_to_train,
                    gate_gradients=gate_gradients,
                    aggregation_method=None,
                    colocate_gradients_with_ops=False)

                grads = tf.contrib.slim.learning.clip_gradient_norms(
                    grads, training_config.clip_gradient_norm)

                grads_tower.append(grads)

        avg_grads = average_gradients.average_gradients(grads_tower)
        # Create gradient updates.
        grad_updates = optimizer.apply_gradients(avg_grads,
                                                 global_step=model.global_step)

        with ops.name_scope('train_op'):
            # Make sure total_loss is valid.
            total_loss = array_ops.check_numerics(total_loss,
                                                  'LossTensor is inf or nan')

            # Ensure the train_tensor computes grad_updates.
            train_op = control_flow_ops.with_dependencies([grad_updates],
                                                          total_loss)

        # Add the operation used for training to the 'train_op' collection
        train_ops = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
        if train_op not in train_ops:
            train_ops.append(train_op)

        saver = tf.train.Saver()

    tf.contrib.slim.learning.train(
        train_op=train_op,
        logdir=FLAGS.train_dir,
        graph=g,
        global_step=model.global_step,
        number_of_steps=training_config.number_of_steps,
        save_summaries_secs=training_config.save_summaries_secs,
        saver=saver,
        save_interval_secs=training_config.save_model_secs)
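Example #5 builds one tower per GPU and averages the per-tower gradients with an average_gradients module that is not shown here. A minimal sketch in the style of the standard TensorFlow multi-GPU tutorials (an assumption, not the project's actual module):

def average_gradients(tower_grads):
    """Averages gradients across towers.

    Args:
      tower_grads: one list of (gradient, variable) pairs per tower.
    Returns:
      A single list of (gradient, variable) pairs averaged over all towers.
    """
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        mean_grad = tf.reduce_mean(tf.concat(grads, 0), 0)
        average_grads.append((mean_grad, grad_and_vars[0][1]))
    return average_grads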
Example #6
def main(unused_argv):
    if not FLAGS.input_file_pattern:
        raise ValueError("--input_file_pattern is required.")
    if not FLAGS.run_dir:
        raise ValueError("--run_dir is required.")
    if not FLAGS.decoder:
        raise ValueError("--decoder is required.")

    if not FLAGS.train_dir:
        train_dir = os.path.join(FLAGS.run_dir,
                                 'run_{t}'.format(t=time.time()))
        tf.logging.info(
            "No specified --train_dir. Creating {d}.".format(d=train_dir))
        os.makedirs(train_dir)

        write_config(train_dir=train_dir, flags=FLAGS)

    else:
        tf.logging.info("Specified --train_dir {d}; Not autocreating.".format(
            d=FLAGS.train_dir))
        train_dir = FLAGS.train_dir

    decoder_config = experiments.get_decoder_config(flags=FLAGS)
    model_config = configuration.model_config(
        input_file_pattern=FLAGS.input_file_pattern,
        vocab_size=FLAGS.vocab_size,
        batch_size=FLAGS.batch_size,
        word_embedding_dim=FLAGS.word_dim,
        pretrained_word_emb_file=FLAGS.pretrained_word_emb_file,
        word_emb_trainable=FLAGS.word_emb_trainable,
        encoder_dim=FLAGS.encoder_dim,
        skipgram_encoder=FLAGS.skipgram_encoder,
        sequence_decoder_pre=decoder_config.sequence_decoder_pre,
        sequence_decoder_cur=decoder_config.sequence_decoder_cur,
        sequence_decoder_post=decoder_config.sequence_decoder_post,
        skipgram_decoder_pre=decoder_config.skipgram_decoder_pre,
        skipgram_decoder_cur=decoder_config.skipgram_decoder_cur,
        skipgram_decoder_post=decoder_config.skipgram_decoder_post,
        share_weights_logits=FLAGS.share_weights_logits,
        normalise_decoder_losses=FLAGS.normalise_decoder_losses,
        skipgram_prefactor=FLAGS.skipgram_prefactor,
        sequence_prefactor=FLAGS.sequence_prefactor)
    training_config = configuration.training_config(
        number_of_steps=FLAGS.number_of_steps)

    tf.logging.info("Building training graph.")
    g = tf.Graph()
    with g.as_default():
        tf.set_random_seed(1234)
        model = skip_thoughts_model.SkipThoughtsModel(model_config,
                                                      mode="train")
        model.build()

        learning_rate = _setup_learning_rate(training_config,
                                             model.global_step)
        optimizer = tf.train.AdamOptimizer(learning_rate)

        train_tensor = tf.contrib.slim.learning.create_train_op(
            total_loss=model.total_loss,
            optimizer=optimizer,
            global_step=model.global_step,
            clip_gradient_norm=training_config.clip_gradient_norm,
            summarize_gradients=True,
            check_numerics=True)

        saver = tf.train.Saver()

    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=FLAGS.gpu_fraction)

    tf.contrib.slim.learning.train(
        train_op=train_tensor,
        logdir=train_dir,
        graph=g,
        global_step=model.global_step,
        number_of_steps=training_config.number_of_steps,
        session_config=tf.ConfigProto(gpu_options=gpu_options),
        save_summaries_secs=training_config.save_summaries_secs,
        saver=saver,
        save_interval_secs=training_config.save_model_secs)
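Example #6 records the run configuration with a write_config helper right after creating the timestamped train_dir (see the call near the top of the example). That helper is not shown; a plausible minimal version, assuming an absl-style FLAGS object, would simply serialise the flag values so the run can be reproduced later:

import json
import os

def write_config(train_dir, flags):
    """Hypothetical sketch: dump the flag values used for this run."""
    with open(os.path.join(train_dir, "config.json"), "w") as f:
        json.dump(flags.flag_values_dict(), f, indent=2, default=str)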