Code example #1
0
 def testPtbRawData(self):
     """Smoke test: ptb_raw_data should return a 4-tuple for a valid dir."""
     temp_dir = tf.test.get_temp_dir()
     # Write identical dummy data for each of the three expected splits.
     for split in ("train", "valid", "test"):
         split_path = os.path.join(temp_dir, "ptb.%s.txt" % split)
         with tf.gfile.GFile(split_path, "w") as handle:
             handle.write(self._string_data)
     result = reader.ptb_raw_data(temp_dir)
     self.assertEqual(len(result), 4)
Code example #2
0
def main():
    """Demo entry point: load the PTB dataset from a "Dataset" directory
    under the current working directory, pinning op placement to GPU 1."""
    print("hola mundo")
    # Imports kept function-local to match the original script's style.
    import os
    import tensorflow as tf
    with tf.device('/gpu:1'):
        # FIX: the original built the path as getcwd() + "\Dataset". "\D" is
        # an invalid escape sequence (SyntaxWarning since Python 3.6, an
        # error in newer versions) and a backslash separator is Windows-only.
        # os.path.join is portable and involves no escaping.
        current_path = os.path.join(os.getcwd(), "Dataset")
        train_data, valid_data, test_data, vocabulary = reader.ptb_raw_data(
            current_path)
        print("el fin")
Code example #3
0
def main(_):
    """Train and evaluate the PTB model once per dropout keep-probability.

    For each value in conf.keep_probs this builds Train/Valid/Test graphs
    sharing a single "Model" variable scope, trains for
    config.max_max_epoch epochs under a tf.train.Supervisor, prints
    perplexity/accuracy statistics, and optionally saves the model to
    FLAGS.save_path.

    Args:
        _: unused positional argument (tf.app.run passes argv here).

    Raises:
        ValueError: if --data_path is unset, --num_gpus exceeds the GPUs
            present, or num_gpus > 1 on TensorFlow < 1.1.0.
    """
    # Sweep over dropout keep probabilities; each iteration trains a model.
    for keep in conf.keep_probs:
        # Mutate the shared config so every model built below sees this
        # keep probability.
        conf.keep_prob = keep
        print("keep_prob:" + str(conf.keep_prob))
        if not FLAGS.data_path:
            raise ValueError("Must set --data_path to data directory")
        gpus = [
            x.name for x in device_lib.list_local_devices()
            if x.device_type == "GPU"
        ]
        if FLAGS.num_gpus > len(gpus):
            raise ValueError(
                "Your machine has only %d gpus "
                "which is less than the requested --num_gpus=%d." %
                (len(gpus), FLAGS.num_gpus))

        # NOTE(review): this reader variant yields five values; dict_emb is
        # presumably a pretrained-embedding lookup — confirm in reader.
        raw_data = reader.ptb_raw_data(FLAGS.data_path)
        train_data, valid_data, test_data, _, dict_emb = raw_data

        config = get_config()
        # FLAGS.save_path = conf.src_path + "/Res/w2v_1w_400d_att_prePro"
        eval_config = get_config()
        # Evaluation scores one sample at a time.
        eval_config.batch_size = 1
        # eval_config.num_steps = 1

        with tf.Graph().as_default() as graph:

            # TensorBoard graph (earlier experiment, kept for reference):
            # train_input = PTBInput(config=config, data=train_data, name="TrainInput")
            # m = PTBModel(is_training=True, config=config, input_=train_input, dict_emb=dict_emb)
            # file_writer = tf.summary.FileWriter(FLAGS.save_path, graph=graph)
            # file_writer.close()

            initializer = tf.random_uniform_initializer(
                -config.init_scale, config.init_scale)

            with tf.name_scope("Train"):
                train_input = PTBInput(config=config,
                                       data=train_data,
                                       name="TrainInput")
                with tf.variable_scope("Model",
                                       reuse=None,
                                       initializer=initializer):
                    m = PTBModel(is_training=True,
                                 config=config,
                                 input_=train_input,
                                 dict_emb=dict_emb)
                    # Dump the graph definition for TensorBoard.
                    file_writer = tf.summary.FileWriter(FLAGS.save_path,
                                                        graph=graph)
                    file_writer.close()
                tf.summary.scalar("Training Loss", m.cost)
                tf.summary.scalar("Learning Rate", m.lr)
                tf.summary.scalar("Training allCount", m.allCount)
                tf.summary.scalar("Training rightCount0", m.rightCountTopK[0])
                tf.summary.scalar("Training rightCount1", m.rightCountTopK[1])
                tf.summary.scalar("Training rightCount2", m.rightCountTopK[2])

            with tf.name_scope("Valid"):
                valid_input = PTBInput(config=config,
                                       data=valid_data,
                                       name="ValidInput")
                # reuse=True: share the Train model's variables.
                with tf.variable_scope("Model",
                                       reuse=True,
                                       initializer=initializer):
                    mvalid = PTBModel(is_training=False,
                                      config=config,
                                      input_=valid_input,
                                      dict_emb=dict_emb)
                tf.summary.scalar("Validation Loss", mvalid.cost)
                tf.summary.scalar("Validation allCount", mvalid.allCount)
                tf.summary.scalar("Validation rightCount0",
                                  mvalid.rightCountTopK[0])
                tf.summary.scalar("Validation rightCount1",
                                  mvalid.rightCountTopK[1])
                tf.summary.scalar("Validation rightCount2",
                                  mvalid.rightCountTopK[2])

            with tf.name_scope("Test"):
                test_input = PTBInput(config=eval_config,
                                      data=test_data,
                                      name="TestInput")
                with tf.variable_scope("Model",
                                       reuse=True,
                                       initializer=initializer):
                    mtest = PTBModel(is_training=False,
                                     config=eval_config,
                                     input_=test_input,
                                     dict_emb=dict_emb)
                tf.summary.scalar("Test allCount", mtest.allCount)
                tf.summary.scalar("Test rightCount0", mtest.rightCountTopK[0])
                tf.summary.scalar("Test rightCount1", mtest.rightCountTopK[1])
                tf.summary.scalar("Test rightCount2", mtest.rightCountTopK[2])

            models = {"Train": m, "Valid": mvalid, "Test": mtest}
            # Export ops under per-model names so they survive metagraph
            # export/import.
            for name, model in models.items():
                model.export_ops(name)
            metagraph = tf.train.export_meta_graph()
            # NOTE(review): lexicographic string comparison of version
            # numbers is fragile (e.g. "1.10.0" vs "1.2.0"); kept as-is.
            if tf.__version__ < "1.1.0" and FLAGS.num_gpus > 1:
                raise ValueError(
                    "num_gpus > 1 is not supported for TensorFlow versions "
                    "below 1.1.0")
            soft_placement = False
            if FLAGS.num_gpus > 1:
                soft_placement = True
                util.auto_parallel(metagraph, m)

        # with tf.Graph().as_default():

        # tf.train.import_meta_graph(metagraph)
            for model in models.values():
                model.import_ops()
            sv = tf.train.Supervisor(logdir=FLAGS.save_path,
                                     save_summaries_secs=3)
            config_proto = tf.ConfigProto(
                allow_soft_placement=soft_placement)  # configure the session
            with sv.managed_session(
                    config=config_proto
            ) as session:  # restores from a checkpoint in logdir if present, else initializes
                for i in range(config.max_max_epoch):
                    # Exponential LR decay once past the first max_epoch epochs.
                    lr_decay = config.lr_decay**max(i + 1 - config.max_epoch,
                                                    0.0)
                    m.assign_lr(session, config.learning_rate * lr_decay)

                    print("Epoch: %d Learning rate: %.3f" %
                          (i + 1, session.run(m.lr)))
                    train_perplexity, train_rightCountTopK, train_allCount , train_acc \
                      = run_epoch(session, m, eval_op=m.train_op,verbose=True)
                    print(
                        "Epoch: %d Train Perplexity: %.3f train_allCount: %s train_acc: %s rightCountTopK : %s"
                        % (i + 1, train_perplexity, train_allCount, train_acc,
                           train_rightCountTopK))
                    valid_perplexity, val_rightCountTopK, valid_allCount, valid_acc = run_epoch(
                        session, mvalid)
                    print(
                        "Epoch: %d Valid Perplexity: %.3f valid_allCount: %s valid_acc: %s rightCountTopK : %s"
                        % (i + 1, valid_perplexity, valid_allCount, valid_acc,
                           val_rightCountTopK))

                test_perplexity, test_rightCountTopK, test_allCount, test_acc = run_epoch(
                    session, mtest)
                print(
                    "Test Perplexity: %.3f test_allCount: %.3f test_acc: %s rightCountTopK : %s"
                    % (test_perplexity, test_allCount, test_acc,
                       test_rightCountTopK))

                if FLAGS.save_path:
                    print("Saving model to %s." % FLAGS.save_path)
                    sv.saver.save(session,
                                  FLAGS.save_path,
                                  global_step=sv.global_step)
        print("Finished!")
        # NOTE(review): reuses the loop variable name `i` for the timestamp.
        i = datetime.datetime.now()
        print("当前的日期和时间是 %s" % i)  # "The current date and time is %s"
Code example #4
0
        state = vals["final_state"]

        costs += cost
        iters += model.input.num_steps

        if verbose and step % (model.input.epoch_size // 10) == 10:
            print("%.3f perplexity:%.3f speed:%.0f wps" %
                  (step * 1.0 / model.input.epoch_size, np.exp(
                      costs / iters), iters * model.input.batch_size /
                   (time.time() - start_time)))

    return np.exp(costs / iters)


#  Load the pre-extracted PTB dataset: word-id sequences for the three
#  splits, plus a fourth element that is discarded here.
raw_data = reader.ptb_raw_data('simple-examples/data/')
train_data, valid_data, test_data, _ = raw_data

# Training config; evaluation copies it but scores one token of one
# sample at a time.
config = SmallConfig()
eval_config = SmallConfig()
eval_config.batch_size = 1
eval_config.num_steps = 1

#创建默认的Graph,创建模型
with tf.Graph().as_default():
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)

    with tf.name_scope("Train"):
        train_input = PTBInput(config=config,
                               data=train_data,
Code example #5
0
def main(_):
    """Train and evaluate a PTB language model end-to-end.

    Builds Train/Valid/Test graphs sharing one "Model" variable scope,
    exports the metagraph (optionally rewritten by util.auto_parallel for
    multi-GPU), re-imports it into a fresh graph, trains under a
    tf.train.Supervisor, and writes the final perplexities to log.txt in a
    run-specific save path.

    Args:
        _: unused positional argument (tf.app.run passes argv here).

    Raises:
        ValueError: if --data_path is unset, --num_gpus exceeds the GPUs
            present, or num_gpus > 1 on TensorFlow < 1.1.0.
    """
    # Encode run hyperparameters into the checkpoint/log directory name.
    save_path = FLAGS.save_path + '/' + FLAGS.rnn_mode + '_g' + str(
        FLAGS.g) + '_lr' + str(FLAGS.lr)
    if not FLAGS.data_path:
        raise ValueError("Must set --data_path to PTB data directory")
    gpus = [
        x.name for x in device_lib.list_local_devices()
        if x.device_type == "GPU"
    ]
    if FLAGS.num_gpus > len(gpus):
        raise ValueError("Your machine has only %d gpus "
                         "which is less than the requested --num_gpus=%d." %
                         (len(gpus), FLAGS.num_gpus))

    raw_data = reader.ptb_raw_data(FLAGS.data_path)
    train_data, valid_data, test_data, _ = raw_data

    config = get_config()
    eval_config = get_config()
    # Evaluation uses batch size 1 but keeps the training sequence length.
    eval_config.batch_size = 1
    eval_config.num_steps = config.num_steps  # originally is 1

    with tf.Graph().as_default():
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)

        with tf.name_scope("Train"):
            train_input = PTBInput(config=config,
                                   data=train_data,
                                   name="TrainInput")
            with tf.variable_scope("Model",
                                   reuse=None,
                                   initializer=initializer):
                m = PTBModel(is_training=True,
                             config=config,
                             input_=train_input)
            tf.summary.scalar("Training Loss", m.cost)
            tf.summary.scalar("Learning Rate", m.lr)

        with tf.name_scope("Valid"):
            valid_input = PTBInput(config=config,
                                   data=valid_data,
                                   name="ValidInput")
            # reuse=True: share the Train model's variables.
            with tf.variable_scope("Model",
                                   reuse=True,
                                   initializer=initializer):
                mvalid = PTBModel(is_training=False,
                                  config=config,
                                  input_=valid_input)
            tf.summary.scalar("Validation Loss", mvalid.cost)

        with tf.name_scope("Test"):
            test_input = PTBInput(config=eval_config,
                                  data=test_data,
                                  name="TestInput")
            with tf.variable_scope("Model",
                                   reuse=True,
                                   initializer=initializer):
                mtest = PTBModel(is_training=False,
                                 config=eval_config,
                                 input_=test_input)
            tf.summary.scalar("Test Loss", mtest.cost)

        # Export ops under per-model names so they survive metagraph
        # export/import below.
        models = {"Train": m, "Valid": mvalid, "Test": mtest}
        for name, model in models.items():
            model.export_ops(name)
        metagraph = tf.train.export_meta_graph()
        # NOTE(review): lexicographic string comparison of version numbers
        # is fragile (e.g. "1.10.0" vs "1.2.0"); kept as-is.
        if tf.__version__ < "1.1.0" and FLAGS.num_gpus > 1:
            raise ValueError(
                "num_gpus > 1 is not supported for TensorFlow versions "
                "below 1.1.0")
        soft_placement = False
        if FLAGS.num_gpus > 1:
            soft_placement = True
            util.auto_parallel(metagraph, m)

    # Fresh graph: re-import the (possibly parallelized) metagraph and train.
    with tf.Graph().as_default():
        tf.train.import_meta_graph(metagraph)
        for model in models.values():
            model.import_ops()
        sv = tf.train.Supervisor(logdir=save_path)
        config_proto = tf.ConfigProto(allow_soft_placement=soft_placement)
        with sv.managed_session(config=config_proto) as session:
            # session = tf_debug.LocalCLIDebugWrapperSession(session)
            for i in range(config.max_max_epoch):
                # Exponential LR decay once past the first max_epoch epochs.
                lr_decay = config.lr_decay**max(i + 1 - config.max_epoch, 0.0)
                m.assign_lr(session, config.learning_rate * lr_decay)

                print("Epoch: %d Learning rate: %.3f" %
                      (i + 1, session.run(m.lr)))
                train_perplexity = run_epoch(session,
                                             m,
                                             eval_op=m.train_op,
                                             verbose=True)
                print("Epoch: %d Train Perplexity: %.3f" %
                      (i + 1, train_perplexity))
                valid_perplexity = run_epoch(session, mvalid)
                print("Epoch: %d Valid Perplexity: %.3f" %
                      (i + 1, valid_perplexity))

            test_perplexity = run_epoch(session, mtest)
            print("Test Perplexity: %.3f" % test_perplexity)

            # Persist final metrics alongside the checkpoints.
            with open(save_path + "/log.txt", "w+") as myfile:
                myfile.write("\nlearning rate: " + str(FLAGS.lr) + '\n')
                myfile.write("final Train Perplexity: " +
                             str(train_perplexity) + '\n')
                myfile.write("final Valid Perplexity: " +
                             str(valid_perplexity) + '\n')
                myfile.write("final Test Perplexity: " + str(test_perplexity) +
                             '\n')

            if save_path:
                print("Saving model to %s." % save_path)
                sv.saver.save(session, save_path, global_step=sv.global_step)
Code example #6
0
File: ptb_word_lm.py  Project: theofpa/ran
def main(init_scale, learning_rate, max_grad_norm, num_layers, num_steps,
         hidden_size, max_epoch, max_max_epoch, keep_prob, lr_decay,
         batch_size, vocab_size, use_tanh, data_path, _run):
    """Train and evaluate the PTB model; returns the test perplexity.

    NOTE(review): the signature looks like Sacred config injection
    (`_run.log_scalar` is used below) — of the injected hyperparameters
    only `data_path` and `_run` are read in this body; all model settings
    come from my_config_object(). Confirm the unused parameters are
    intentional. Also note the `lr_decay` parameter is shadowed by a local
    assignment inside the training loop.

    Returns:
        The final test-set perplexity (float).
    """
    #  if not FLAGS.data_path:
    #    raise ValueError("Must set --data_path to PTB data directory")

    raw_data = reader.ptb_raw_data(data_path)
    train_data, valid_data, test_data, _ = raw_data

    config = my_config_object()
    eval_config = my_config_object()
    # Evaluation scores one token of one sample at a time.
    eval_config.batch_size = 1
    eval_config.num_steps = 1

    with tf.Graph().as_default():
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)

        with tf.name_scope("Train"):
            train_input = PTBInput(config=config,
                                   data=train_data,
                                   name="TrainInput")
            with tf.variable_scope("Model",
                                   reuse=None,
                                   initializer=initializer):
                m = PTBModel(is_training=True,
                             config=config,
                             input_=train_input)
            tf.summary.scalar("Training Loss", m.cost)
            tf.summary.scalar("Learning Rate", m.lr)

        with tf.name_scope("Valid"):
            valid_input = PTBInput(config=config,
                                   data=valid_data,
                                   name="ValidInput")
            # reuse=True: share the Train model's variables.
            with tf.variable_scope("Model",
                                   reuse=True,
                                   initializer=initializer):
                mvalid = PTBModel(is_training=False,
                                  config=config,
                                  input_=valid_input)
            tf.summary.scalar("Validation Loss", mvalid.cost)

        with tf.name_scope("Test"):
            test_input = PTBInput(config=eval_config,
                                  data=test_data,
                                  name="TestInput")
            with tf.variable_scope("Model",
                                   reuse=True,
                                   initializer=initializer):
                mtest = PTBModel(is_training=False,
                                 config=eval_config,
                                 input_=test_input)

        sv = tf.train.Supervisor(logdir=FLAGS.save_path)
        with sv.managed_session() as session:
            for i in range(config.max_max_epoch):
                # Exponential LR decay once past the first max_epoch epochs
                # (shadows the injected `lr_decay` parameter).
                lr_decay = config.lr_decay**max(i + 1 - config.max_epoch, 0.0)
                m.assign_lr(session, config.learning_rate * lr_decay)

                print("Epoch: %d Learning rate: %.3f" %
                      (i + 1, session.run(m.lr)))
                train_perplexity = run_epoch(session,
                                             m,
                                             eval_op=m.train_op,
                                             verbose=True)
                print("Epoch: %d Train Perplexity: %.3f" %
                      (i + 1, train_perplexity))
                # Log per-epoch metrics to the experiment tracker.
                _run.log_scalar("training.perplexity", train_perplexity, i + 1)
                valid_perplexity = run_epoch(session, mvalid)
                print("Epoch: %d Valid Perplexity: %.3f" %
                      (i + 1, valid_perplexity))
                _run.log_scalar("validation.perplexity", valid_perplexity,
                                i + 1)

            test_perplexity = run_epoch(session, mtest)
            print("Test Perplexity: %.3f" % test_perplexity)

            if FLAGS.save_path:
                print("Saving model to %s." % FLAGS.save_path)
                sv.saver.save(session,
                              FLAGS.save_path,
                              global_step=sv.global_step)

            return test_perplexity