Code example #1
    def test_save_load(self):
        # Data
        x_batch = np.random.randint(0, VOCABULARY_SIZE,
                                    [BATCH_SIZE, SEQUENCE_LENGTH])
        y_batch = np.eye(2)[np.ones(BATCH_SIZE, dtype=np.int32)]

        _, checkpoint_path = tempfile.mkstemp()

        with self.graph.as_default(), self.sess.as_default():
            rnn = self._build_classifier()
            self.sess.run(tf.initialize_all_variables())
            saver = tf.train.Saver()
            feed_dict = {rnn.input_x: x_batch, rnn.input_y: y_batch}
            loss1 = self.sess.run(rnn.total_loss, feed_dict)
            saver.save(self.sess, checkpoint_path)

        # Save model hyperparameters
        f, path = tempfile.mkstemp()
        rnn.save_to_file(path)

        # Rebuild the model from the saved hyperparameters in a fresh graph,
        # restore the checkpoint, and verify the loss is unchanged.
        with tf.Graph().as_default(), tf.Session() as sess:
            rnn2 = RNNClassifier.load_from_file(path)
            x = tf.placeholder(tf.int32, [BATCH_SIZE, SEQUENCE_LENGTH])
            y = tf.placeholder(tf.float32, [BATCH_SIZE, 2])
            rnn2.build_graph(x, y)
            saver = tf.train.Saver()
            saver.restore(sess, checkpoint_path)
            feed_dict = {rnn2.input_x: x_batch, rnn2.input_y: y_batch}
            loss2 = sess.run(rnn2.total_loss, feed_dict)

        self.assertEqual(loss1, loss2)
Code example #2
    def test_save_load(self):
        # Data
        x_batch = np.random.randint(0, VOCABULARY_SIZE, [BATCH_SIZE, SEQUENCE_LENGTH])
        y_batch = np.eye(2)[np.ones(BATCH_SIZE, dtype=np.int32)]

        _, checkpoint_path = tempfile.mkstemp()

        with self.graph.as_default(), self.sess.as_default():
            rnn = self._build_classifier()
            self.sess.run(tf.initialize_all_variables())
            saver = tf.train.Saver()
            feed_dict = {rnn.input_x: x_batch, rnn.input_y: y_batch}
            loss1 = self.sess.run(rnn.total_loss, feed_dict)
            saver.save(self.sess, checkpoint_path)

        # Save model hyperparameters
        f, path = tempfile.mkstemp()
        rnn.save_to_file(path)

        # Rebuild the model from the saved hyperparameters in a fresh graph,
        # restore the checkpoint, and verify the loss is unchanged.
        with tf.Graph().as_default(), tf.Session() as sess:
            rnn2 = RNNClassifier.load_from_file(path)
            x = tf.placeholder(tf.int32, [BATCH_SIZE, SEQUENCE_LENGTH])
            y = tf.placeholder(tf.float32, [BATCH_SIZE, 2])
            rnn2.build_graph(x, y)
            saver = tf.train.Saver()
            saver.restore(sess, checkpoint_path)
            feed_dict = {rnn2.input_x: x_batch, rnn2.input_y: y_batch}
            loss2 = sess.run(rnn2.total_loss, feed_dict)

        self.assertEqual(loss1, loss2)
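
Both variants of the test only exercise the round trip through RNNClassifier.save_to_file and RNNClassifier.load_from_file; the implementation of that pair is not part of these excerpts. As a rough sketch of what it could look like, assuming the classifier keeps its constructor arguments in a dict (the hyperparameters attribute below is hypothetical, not the library's documented API):

import pickle


class RNNClassifier(object):
    def __init__(self, **hyperparameters):
        # Keep the constructor arguments so they can be serialized later.
        self.hyperparameters = hyperparameters

    def save_to_file(self, path):
        # Only hyperparameters are written here; the trained weights live in
        # the checkpoint produced by tf.train.Saver in the tests above.
        with open(path, "wb") as f:
            pickle.dump(self.hyperparameters, f)

    @classmethod
    def load_from_file(cls, path):
        # Returns an un-built classifier; build_graph() and saver.restore()
        # still have to be called afterwards, as the tests do.
        with open(path, "rb") as f:
            return cls(**pickle.load(f))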
Code example #3
    def _build_classifier(self):
        with self.graph.as_default(), self.sess.as_default():
            # Fix the graph-level seed so variable initialization is reproducible.
            tf.set_random_seed(42)
            x = tf.placeholder(tf.int32, [BATCH_SIZE, SEQUENCE_LENGTH])
            y = tf.placeholder(tf.float32, [BATCH_SIZE, 2])
            rnn = RNNClassifier(sequence_length=SEQUENCE_LENGTH,
                                vocabulary_size=VOCABULARY_SIZE,
                                num_classes=2,
                                batch_size=BATCH_SIZE,
                                backprop_truncate_after=SEQUENCE_LENGTH,
                                embedding_dim=128,
                                hidden_dim=128,
                                affine_dim=128,
                                cell_class="LSTM",
                                num_layers=2,
                                dropout_keep_prob_embedding=1.0,
                                dropout_keep_prob_affine=1.0,
                                dropout_keep_prob_cell_input=1.0,
                                dropout_keep_prob_cell_output=1.0)
            rnn.build_graph(x, y)
            return rnn
Code example #4
    def _build_classifier(self):
        with self.graph.as_default(), self.sess.as_default():
            # Fix the graph-level seed so variable initialization is reproducible.
            tf.set_random_seed(42)
            x = tf.placeholder(tf.int32, [BATCH_SIZE, SEQUENCE_LENGTH])
            y = tf.placeholder(tf.float32, [BATCH_SIZE, 2])
            rnn = RNNClassifier(
                sequence_length=SEQUENCE_LENGTH,
                vocabulary_size=VOCABULARY_SIZE,
                num_classes=2,
                batch_size=BATCH_SIZE,
                backprop_truncate_after=SEQUENCE_LENGTH,
                embedding_dim=128,
                hidden_dim=128,
                affine_dim=128,
                cell_class="LSTM",
                num_layers=2,
                dropout_keep_prob_embedding=1.0,
                dropout_keep_prob_affine=1.0,
                dropout_keep_prob_cell_input=1.0,
                dropout_keep_prob_cell_output=1.0,
            )
            rnn.build_graph(x, y)
            return rnn
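
Both helpers assume a test fixture that already owns self.graph and self.sess, which the excerpts do not show. A minimal setUp along these lines (the constant values are illustrative, not taken from the original test suite) would make the snippets above runnable inside a unittest.TestCase:

import tempfile
import unittest

import numpy as np
import tensorflow as tf

BATCH_SIZE = 4
SEQUENCE_LENGTH = 16
VOCABULARY_SIZE = 100


class RNNClassifierTest(unittest.TestCase):
    def setUp(self):
        # A private graph and session per test keeps variables created in
        # one test from leaking into the next.
        self.graph = tf.Graph()
        self.sess = tf.Session(graph=self.graph)

    def tearDown(self):
        self.sess.close()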
Code example #5
import pprint

import tensorflow as tf

from tfmodels.data.mr import MRData
from tfmodels.data.imdb import IMDBData
from tfmodels.data.ymrjp import YMRJPData
from datetime import datetime
from tfmodels.models.rnn.rnn_classifier import RNNClassifier, RNNClassifierTrainer, RNNClassifierEvaluator

pp = pprint.PrettyPrinter(indent=2)

# Pick training data
tf.flags.DEFINE_boolean("data_mr", False, "Dataset: MR")
tf.flags.DEFINE_boolean("data_sst", False, "Dataset: SST")
tf.flags.DEFINE_boolean("data_ymrjp", False,
                        "Dataset: Yahoo Movie Reviews (Japanese)")

# Classifier parameters
RNNClassifier.add_flags()

# Training parameters
tf.flags.DEFINE_integer("random_state", 42,
                        "Random state initialization for reproducibility")
tf.flags.DEFINE_integer("max_sequence_length", 512,
                        "Examples will be padded/truncated to this length")
tf.flags.DEFINE_integer("num_epochs", 20, "Number of training epochs")
tf.flags.DEFINE_integer(
    "evaluate_every", 25,
    "Evaluate model on dev set after this number of steps")
tf.flags.DEFINE_integer(
    "checkpoint_every", 100,
    "Save a model checkpoint after this number of steps")

# Session Parameters
Code example #6
import pprint

import tensorflow as tf

import tfmodels.data.utils
from tfmodels.data.mr import MRData
from tfmodels.data.imdb import IMDBData
from tfmodels.data.ymrjp import YMRJPData
from datetime import datetime
from tfmodels.models.rnn.rnn_classifier import RNNClassifier, RNNClassifierTrainer, RNNClassifierEvaluator

pp = pprint.PrettyPrinter(indent=2)

# Pick training data
tf.flags.DEFINE_boolean("data_mr", False, "Dataset: MR")
tf.flags.DEFINE_boolean("data_sst", False, "Dataset: SST")
tf.flags.DEFINE_boolean("data_ymrjp", False, "Dataset: Yahoo Movie Reviews (Japanese)")

# Classifier parameters
RNNClassifier.add_flags()

# Training parameters
tf.flags.DEFINE_integer("random_state", 42, "Random state initialization for reproducibility")
tf.flags.DEFINE_integer("max_sequence_length", 512, "Examples will be padded/truncated to this length")
tf.flags.DEFINE_integer("num_epochs", 20, "Number of training epochs")
tf.flags.DEFINE_integer("evaluate_every", 25, "Evaluate model on dev set after this number of steps")
tf.flags.DEFINE_integer("checkpoint_every", 100, "Evaluate model on dev set after this number of steps")

# Session Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", False, "Allow soft device placement (e.g. no GPU)")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")

# Print parameters
FLAGS = tf.flags.FLAGS
FLAGS.batch_size  # Accessing a flag value forces tf.flags to parse the command-line arguments
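
After parsing, the flag values are typically pretty-printed into the training log, and the two session flags feed straight into the session configuration. A sketch of that pattern (FLAGS.__flags matches the old tf.flags API used in this script; newer TensorFlow releases expose the parsed flags differently):

# Dump all parsed hyperparameters into the training log.
pp.pprint(FLAGS.__flags)

# The two session flags map directly onto the session configuration.
session_conf = tf.ConfigProto(
    allow_soft_placement=FLAGS.allow_soft_placement,
    log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)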