Code Example #1
    def test_save_load(self):
        # Data
        x_batch = np.random.randint(0, VOCABULARY_SIZE, [BATCH_SIZE, SEQUENCE_LENGTH])
        y_batch = np.eye(2)[np.ones(BATCH_SIZE, dtype=np.int32)]

        _, checkpoint_path = tempfile.mkstemp()

        with self.graph.as_default(), self.sess.as_default():
            # Build the model, run one forward pass, and checkpoint the variables
            cnn = self._build_classifier()
            self.sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver()
            feed_dict = {cnn.input_x: x_batch, cnn.input_y: y_batch}
            loss1 = self.sess.run(cnn.total_loss, feed_dict)
            saver.save(self.sess, checkpoint_path)

        # Save the model hyperparameters (the weights live in the checkpoint)
        _, path = tempfile.mkstemp()
        cnn.save_to_file(path)

        # Rebuild the model in a fresh graph from the saved hyperparameters
        with tf.Graph().as_default(), tf.Session() as sess:
            cnn2 = CNNClassifier.load_from_file(path)
            x = tf.placeholder(tf.int32, [BATCH_SIZE, SEQUENCE_LENGTH])
            y = tf.placeholder(tf.float32, [BATCH_SIZE, 2])
            cnn2.build_graph(x, y)
            saver = tf.train.Saver()
            saver.restore(sess, checkpoint_path)
            feed_dict = {cnn2.input_x: x_batch, cnn2.input_y: y_batch}
            loss2 = sess.run(cnn2.total_loss, feed_dict)

        self.assertEqual(loss1, loss2)
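
The test only passes if save_to_file / load_from_file round-trip the constructor hyperparameters; the TensorFlow variables themselves travel through the checkpoint. Those methods are not shown in this listing, but a minimal sketch of such a pair, assuming the hyperparameters are kept in a plain dict (an assumption, not the actual implementation), could look like this:

import json

class PersistableModel(object):
    """Hypothetical mixin: persist constructor kwargs as JSON (sketch only)."""

    def __init__(self, **hyperparameters):
        self.hyperparameters = hyperparameters

    def save_to_file(self, path):
        # Only hyperparameters are written; weights are saved by tf.train.Saver.
        with open(path, "w") as f:
            json.dump(self.hyperparameters, f)

    @classmethod
    def load_from_file(cls, path):
        with open(path) as f:
            return cls(**json.load(f))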
Code Example #2
    def _build_classifier(self):
        with self.graph.as_default(), self.sess.as_default():
            # Fix the graph-level seed so repeated builds are reproducible
            tf.set_random_seed(42)
            x = tf.placeholder(tf.int32, [None, SEQUENCE_LENGTH])
            y = tf.placeholder(tf.float32, [None, 2])
            cnn = CNNClassifier(
                sequence_length=SEQUENCE_LENGTH,
                vocabulary_size=VOCABULARY_SIZE,
                num_classes=2,
                embedding_dim=64,
                filter_sizes="2,3,4",
                num_filters=10,
                use_highway=False,
                dropout_keep_prob_embedding=1.0,
                dropout_keep_prob_features=1.0)
            cnn.build_graph(x, y)
            return cnn
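
Both test snippets assume the fixture has already created self.graph and self.sess; that setup is not part of this listing. A plausible, purely hypothetical fixture for a plain unittest.TestCase, giving each test its own graph and session, would be:

    def setUp(self):
        # Hypothetical setUp (not shown in the original listing):
        # one fresh graph and session shared by the helpers above.
        self.graph = tf.Graph()
        self.sess = tf.Session(graph=self.graph)

    def tearDown(self):
        self.sess.close()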
Code Example #3
import pprint
import tensorflow as tf
import tfmodels.data.utils
from tfmodels.data.mr import MRData
from tfmodels.data.imdb import IMDBData
from tfmodels.data.ymrjp import YMRJPData
from datetime import datetime
from tfmodels.models.cnn.cnn_classifier import CNNClassifier, CNNClassifierTrainer, CNNClassifierEvaluator

pp = pprint.PrettyPrinter(indent=2)

# Pick training data
tf.flags.DEFINE_boolean("data_mr", False, "Dataset: MR")
tf.flags.DEFINE_boolean("data_sst", False, "Dataset: SST")
tf.flags.DEFINE_boolean("data_ymrjp", False, "Dataset: Yahoo Movie Reviews (Japanese)")

# Classifier parameters
CNNClassifier.add_flags()

# Training parameters
tf.flags.DEFINE_integer("batch_size", 64, "Size for one batch of training/dev examples")
tf.flags.DEFINE_integer("random_state", 42, "Random state initialization for reproducibility")
tf.flags.DEFINE_integer("max_sequence_length", 512, "Examples will be padded/truncated to this length")
tf.flags.DEFINE_integer("num_epochs", 20, "Number of training epochs")
tf.flags.DEFINE_integer("evaluate_every", 50, "Evaluate model on dev set after this number of steps")
tf.flags.DEFINE_integer("checkpoint_every", 50, "Evaluate model on dev set after this number of steps")

# Session Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", False, "Allow soft device placement (e.g. no GPU)")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")

# Print parameters
FLAGS = tf.flags.FLAGS
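
The listing ends right after grabbing the FLAGS object. Purely as an illustration (none of the following is from the original script), a continuation could run the remaining setup inside a main() launched by tf.app.run, which parses the flags, then echo a few parameters and map the dataset booleans onto the imported data classes:

def main(_):
    # Illustrative continuation (an assumption, not part of the original script).
    print("Run started at {}".format(datetime.now().isoformat()))
    pp.pprint({
        "batch_size": FLAGS.batch_size,
        "num_epochs": FLAGS.num_epochs,
        "max_sequence_length": FLAGS.max_sequence_length,
    })

    # Pick a dataset class from the boolean flags (hypothetical mapping).
    if FLAGS.data_mr:
        data_cls = MRData
    elif FLAGS.data_ymrjp:
        data_cls = YMRJPData
    else:
        data_cls = IMDBData
    # ... build the dataset, classifier, trainer and evaluator here.

if __name__ == "__main__":
    tf.app.run(main)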