Example #1
    def test_toy_model(self):
        if self.run_epoch is None or self.score_dataset is None:
            self.skipTest("RunEpochTester: run_epoch_fn and score_dataset_fn "
                          "must be provided.")

        self.assertIsNotNone(self.run_epoch)
        self.assertIsNotNone(self.score_dataset)

        with tf.Session(graph=self.lm.graph) as sess:
            tf.set_random_seed(42)
            sess.run(tf.global_variables_initializer())
            bi = utils.rnnlm_batch_generator(self.train_ids, batch_size=5, max_time=10)
            self.run_epoch(self.lm,
                           sess,
                           bi,
                           learning_rate=0.01,
                           train=True,
                           verbose=True,
                           tick_s=1.0)
            train_loss = self.score_dataset(self.lm,
                                            sess,
                                            self.train_ids,
                                            name="Train set")
            test_loss = self.score_dataset(self.lm,
                                           sess,
                                           self.test_ids,
                                           name="Test set")
        # This is a *really* simple dataset, so you should have no trouble
        # getting almost perfect scores.
        self.assertIsNotNone(train_loss)
        self.assertIsNotNone(test_loss)
        self.assertLessEqual(train_loss, 0.1)
        self.assertLessEqual(test_loss, 0.2)
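
For reference, a tester like this is usually driven from a notebook by constructing the test case and injecting the student-written functions before running it. A minimal, hedged sketch follows; the rnnlm_test module name and the injectCode helper are assumptions inferred from the skip message's run_epoch_fn/score_dataset_fn parameters.

import unittest
import rnnlm_test  # assumed module that defines RunEpochTester

th = rnnlm_test.RunEpochTester("test_toy_model")
th.setUp()
th.injectCode(run_epoch, score_dataset)  # assumed helper that supplies the two functions
unittest.TextTestRunner(verbosity=2).run(th)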
Example #2
def score_dataset(lm, session, ids, name="Data"):
    # For scoring, we can use larger batches to speed things up.
    bi = utils.rnnlm_batch_generator(ids, batch_size=100, max_time=100)
    cost = run_epoch(lm, session, bi, 
                     learning_rate=0.0, train=False, 
                     verbose=False, tick_s=3600)
    print("{:s}: avg. loss: {:.03f}  (perplexity: {:.02f})".format(name, cost, np.exp(cost)))
    return cost
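
A hedged usage sketch: score_dataset expects an already-built language model and an open session on its graph, so it is typically called right after (or between) training epochs. The lm, train_ids, and test_ids names below are assumed to come from the surrounding training code, as in the other examples on this page.

with tf.Session(graph=lm.graph) as session:
    session.run(tf.global_variables_initializer())
    # ... run one or more training epochs here, e.g. via run_epoch(...) ...
    train_loss = score_dataset(lm, session, train_ids, name="Train set")
    test_loss = score_dataset(lm, session, test_ids, name="Test set")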
Example #3
    def test_toy_model(self):
        if self.run_epoch is None or self.score_dataset is None:
            self.skipTest("RunEpochTester: run_epoch_fn and score_dataset_fn "
                          "must be provided.")

        self.assertIsNotNone(self.run_epoch)
        self.assertIsNotNone(self.score_dataset)

        with tf.Session(graph=self.lm.graph) as sess:
            tf.set_random_seed(42)
            sess.run(tf.global_variables_initializer())
            bi = utils.rnnlm_batch_generator(self.train_ids, batch_size=5, max_time=10)
            self.run_epoch(self.lm, sess, bi, learning_rate=0.3,
                           train=True, verbose=True, tick_s=1.0)
            train_loss = self.score_dataset(self.lm, sess, self.train_ids,
                                            name="Train set")
            test_loss = self.score_dataset(self.lm, sess, self.test_ids,
                                           name="Test set")
        # This is a *really* simple dataset, so you should have no trouble
        # getting almost perfect scores.
        self.assertIsNotNone(train_loss)
        self.assertIsNotNone(test_loss)
        self.assertLessEqual(train_loss, 0.1)
        self.assertLessEqual(test_loss, 0.2)
Example #4
def run_training(train_ids,
                 test_ids,
                 model_params,
                 max_time=100,
                 batch_size=256,
                 learning_rate=0.002,
                 num_epochs=20,
                 tf_savedir="/tmp/artificial_hotel_reviews/a4_model"):
    #V = len(words_to_ids.keys())
    # Training parameters
    ## add parameter sets for each attack/defense configuration
    #max_time = 25
    #batch_size = 100
    #learning_rate = 0.01
    #num_epochs = 10

    # Model parameters
    #model_params = dict(V=vocab.size,
    #H=200,
    #softmax_ns=200,
    #num_layers=2)
    #model_params = dict(V=len(words_to_ids.keys()),
    #H=1024,
    #softmax_ns=len(words_to_ids.keys()),
    #num_layers=2)
    #model_params = dict(V=V, H=H, softmax_ns=softmax_ns, num_layers=num_layers)

    #TF_SAVEDIR = "/tmp/artificial_hotel_reviews/a4_model"
    TF_SAVEDIR = tf_savedir
    checkpoint_filename = os.path.join(TF_SAVEDIR, "rnnlm")
    trained_filename = os.path.join(TF_SAVEDIR, "rnnlm_trained")

    # Will print status every this many seconds
    #print_interval = 5
    print_interval = 30

    lm = rnnlm.RNNLM(**model_params)
    lm.BuildCoreGraph()
    lm.BuildTrainGraph()

    # Explicitly add global initializer and variable saver to LM graph
    with lm.graph.as_default():
        initializer = tf.global_variables_initializer()
        saver = tf.train.Saver()

    # Clear old log directory
    shutil.rmtree(TF_SAVEDIR, ignore_errors=True)
    if not os.path.isdir(TF_SAVEDIR):
        os.makedirs(TF_SAVEDIR)

    with tf.Session(graph=lm.graph) as session:
        # Seed RNG for repeatability
        tf.set_random_seed(42)

        session.run(initializer)

        #check trainable variables
        #variables_names = [v.name for v in tf.trainable_variables()]
        #values = session.run(variables_names)
        #for k, v in zip(variables_names, values):
        #print("Variable: ", k)
        #print("Shape: ", v.shape)
        #print(v)

        for epoch in range(1, num_epochs + 1):
            t0_epoch = time.time()
            bi = utils.rnnlm_batch_generator(train_ids, batch_size, max_time)
            print("[epoch {:d}] Starting epoch {:d}".format(epoch, epoch))
            # Run a training epoch.
            run_epoch(lm,
                      session,
                      batch_iterator=bi,
                      train=True,
                      verbose=True,
                      tick_s=10,
                      learning_rate=learning_rate)

            print("[epoch {:d}] Completed in {:s}".format(
                epoch, utils.pretty_timedelta(since=t0_epoch)))

            # Save a checkpoint
            saver.save(session, checkpoint_filename, global_step=epoch)

            ##
            # score_dataset will run a forward pass over the entire dataset
            # and report perplexity scores. This can be slow (around 1/2 to
            # 1/4 as long as a full epoch), so you may want to comment it out
            # to speed up training on a slow machine. Be sure to run it at the
            # end to evaluate your score.
            #print("[epoch {:d}]".format(epoch), end=" ")
            #score_dataset(lm, session, train_ids, name="Train set")
            print("[epoch {:d}]".format(epoch), end=" ")
            score_dataset(lm, session, test_ids, name="Test set")
            print("")

        # Save final model
        saver.save(session, trained_filename)
        return trained_filename
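
A hedged sketch of calling run_training: the words_to_ids vocabulary mapping and the hyperparameter values are assumptions, with model_params mirroring the commented-out configurations inside the function above.

# words_to_ids: assumed dict mapping vocabulary words to integer ids
model_params = dict(V=len(words_to_ids),           # vocabulary size
                    H=1024,                        # hidden state size
                    softmax_ns=len(words_to_ids),  # sampled-softmax candidates
                    num_layers=2)
trained_filename = run_training(train_ids,
                                test_ids,
                                model_params=model_params,
                                max_time=100,
                                batch_size=256,
                                learning_rate=0.002,
                                num_epochs=20,
                                tf_savedir="/tmp/artificial_hotel_reviews/a4_model")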