Example #1
    def __init__(self,
                 model_params,
                 logdir,
                 tasks,
                 task_types,
                 train=True,
                 verbosity=None):
        """Constructs the computational graph.

    Args:
      train: whether model is in train mode
      model_params: dictionary of model parameters
      logdir: Location to save data

    This function constructs the computational graph for the model. It relies
    subclassed methods (build/cost) to construct specific graphs.
    """
        self.graph = tf.Graph()
        self.model_params = model_params
        self.logdir = logdir
        self.tasks = tasks
        self.task_types = task_types
        self.num_tasks = len(task_types)
        self.verbosity = verbosity

        # Lazily created by _get_shared_session().
        self._shared_session = None

        # Guard variable to make sure we don't Restore() this model
        # from a disk checkpoint more than once.
        self._restored_model = False

        # Cache of TensorFlow scopes, to prevent '_1' appended scope names
        # when subclass-overridden methods use the same scopes.
        self._name_scopes = {}

        # Path to save checkpoint files, which matches the
        # replicated supervisor's default path.
        self._save_path = os.path.join(logdir, 'model.ckpt')

        with self.graph.as_default():
            model_ops.set_training(train)
            self.placeholder_root = 'placeholders'
            with tf.name_scope(self.placeholder_root) as scope:
                self.placeholder_scope = scope

        self.setup()
        if train:
            self.add_training_cost()
            self.merge_updates()
        else:
            self.add_output_ops()  # add softmax heads
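The docstring above delegates the model-specific graph to subclassed build/cost methods. A minimal sketch of that contract, with a hypothetical concrete subclass (the class names and signatures below are illustrative assumptions, not taken from the source):

    class ExampleModel(Model):  # `Model` stands in for the class defined above.

        def build(self):
            # Subclasses construct the model-specific forward graph here.
            raise NotImplementedError

        def cost(self, output, labels):
            # Subclasses construct the model-specific loss term here.
            raise NotImplementedError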
Example #2
  def _CheckBatchNormalization(self, features, convolution, mean, variance,
                               mask=None):
    model_ops.set_training(True)
    epsilon = 0.001
    with self.test_session() as sess:
      features_t = tf.constant(features, dtype=tf.float32)
      batch_norm_t = model_ops.BatchNormalize(
          features_t, convolution=convolution, epsilon=epsilon, mask=mask)
      sess.run(tf.initialize_all_variables())
      batch_norm, beta, gamma = sess.run(
          [batch_norm_t] + tf.trainable_variables())
      expected = gamma * (features - mean) / np.sqrt(variance + epsilon) + beta
      self.assertAllClose(batch_norm, np.ma.filled(expected, 0),
                          rtol=1e-5, atol=1e-5)
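A hedged usage sketch for the helper above: compute the expected statistics with NumPy and let the helper compare them against BatchNormalize. The test name, input shape, and the choice of the batch axis for the non-convolutional case are assumptions:

  def testBatchNormalizationSketch(self):
    # Illustrative inputs: for convolution=False the expected statistics
    # are assumed to be taken over the batch axis.
    features = np.random.random((8, 4))
    self._CheckBatchNormalization(features, convolution=False,
                                  mean=features.mean(axis=0),
                                  variance=features.var(axis=0))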
Example #3
  def __init__(self, model_params, logdir, task_types, train=True,
               verbosity=None):
    """Constructs the computational graph.

    Args:
      train: whether model is in train mode
      model_params: dictionary of model parameters
      logdir: Location to save data

    This function constructs the computational graph for the model. It relies
    subclassed methods (build/cost) to construct specific graphs.
    """
    self.graph = tf.Graph() 
    self.model_params = model_params
    self.logdir = logdir
    self.task_types = task_types
    self.num_tasks = len(task_types)
    self.verbosity = verbosity

    # Lazily created by _get_shared_session().
    self._shared_session = None

    # Guard variable to make sure we don't Restore() this model
    # from a disk checkpoint more than once.
    self._restored_model = False

    # Cache of TensorFlow scopes, to prevent '_1' appended scope names
    # when subclass-overridden methods use the same scopes.
    self._name_scopes = {}

    # Path to save checkpoint files, which matches the
    # replicated supervisor's default path.
    self._save_path = os.path.join(logdir, 'model.ckpt')

    with self.graph.as_default():
      model_ops.set_training(train)
      self.placeholder_root = 'placeholders'
      with tf.name_scope(self.placeholder_root) as scope:
        self.placeholder_scope = scope
        self.valid = tf.placeholder(tf.bool,
                                    shape=[model_params["batch_size"]],
                                    name='valid')

    self.setup()
    if train:
      self.add_training_cost()
      self.merge_updates()
    else:
      self.add_output_ops()  # add softmax heads
Example #4
    def testBatchNormalizationInference(self):
        # create a simple batch-normalized model
        model_ops.set_training(True)
        epsilon = 0.001
        decay = 0.95
        checkpoint = os.path.join(self.root, 'my-checkpoint')
        with self.test_session() as sess:
            features = np.random.random((2, 3, 2, 3))
            features_t = tf.constant(features, dtype=tf.float32)
            # create variables for beta, gamma, and moving mean and variance
            model_ops.BatchNormalize(features_t,
                                     convolution=False,
                                     epsilon=epsilon,
                                     decay=decay)
            sess.run(tf.initialize_all_variables())
            updates = tf.group(
                *tf.get_default_graph().get_collection('updates'))
            sess.run(updates)  # update moving mean and variance
            expected_mean, expected_variance, _, _ = tf.all_variables()
            expected_mean = expected_mean.eval()
            expected_variance = expected_variance.eval()

            # save a checkpoint
            saver = tf.train.Saver()
            saver.save(sess, checkpoint)

        super(ModelOpsTest, self).setUp()  # reset the default graph

        # check that the moving mean and variance are used for evaluation
        # get a new set of features to verify that the correct mean and var are used
        model_ops.set_training(False)
        with self.test_session() as sess:
            new_features = np.random.random((2, 3, 2, 3))
            new_features_t = tf.constant(new_features, dtype=tf.float32)
            batch_norm_t = model_ops.BatchNormalize(new_features_t,
                                                    convolution=False,
                                                    epsilon=epsilon,
                                                    decay=decay)
            saver = tf.train.Saver()
            saver.restore(sess, checkpoint)
            batch_norm, mean, variance, beta, gamma = sess.run(
                [batch_norm_t] + tf.all_variables())
            self.assertAllClose(mean, expected_mean)
            self.assertAllClose(variance, expected_variance)
            expected = (gamma *
                        (new_features - mean) / np.sqrt(variance + epsilon) +
                        beta)
            self.assertAllClose(batch_norm, expected)
Example #5
  def testBatchNormalizationInference(self):
    # create a simple batch-normalized model
    model_ops.set_training(True)
    epsilon = 0.001
    decay = 0.95
    checkpoint = os.path.join(self.root, 'my-checkpoint')
    with self.test_session() as sess:
      features = np.random.random((2, 3, 2, 3))
      features_t = tf.constant(features, dtype=tf.float32)
      # create variables for beta, gamma, and moving mean and variance
      model_ops.BatchNormalize(
          features_t, convolution=False, epsilon=epsilon, decay=decay)
      sess.run(tf.initialize_all_variables())
      updates = tf.group(*tf.get_default_graph().get_collection('updates'))
      sess.run(updates)  # update moving mean and variance
      expected_mean, expected_variance, _, _ = tf.all_variables()
      expected_mean = expected_mean.eval()
      expected_variance = expected_variance.eval()

      # save a checkpoint
      saver = tf.train.Saver()
      saver.save(sess, checkpoint)

    super(ModelOpsTest, self).setUp()  # reset the default graph

    # check that the moving mean and variance are used for evaluation
    # get a new set of features to verify that the correct mean and var are used
    model_ops.set_training(False)
    with self.test_session() as sess:
      new_features = np.random.random((2, 3, 2, 3))
      new_features_t = tf.constant(new_features, dtype=tf.float32)
      batch_norm_t = model_ops.BatchNormalize(
          new_features_t, convolution=False, epsilon=epsilon, decay=decay)
      saver = tf.train.Saver()
      saver.restore(sess, checkpoint)
      batch_norm, mean, variance, beta, gamma = sess.run(
          [batch_norm_t] + tf.all_variables())
      self.assertAllClose(mean, expected_mean)
      self.assertAllClose(variance, expected_variance)
      expected = (gamma * (new_features - mean) /
                  np.sqrt(variance + epsilon) + beta)
      self.assertAllClose(batch_norm, expected)
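For reference, the decay argument above drives an exponential moving average of the batch statistics, which is what the inference path reads back from the checkpoint. A minimal sketch of that update rule (the exact form inside model_ops.BatchNormalize is an assumption):

  def moving_average_update(moving, batch_stat, decay=0.95):
    # Assumed update: keep `decay` of the running value, blend in the rest.
    return decay * moving + (1 - decay) * batch_stat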
Example #6
    def _CheckBatchNormalization(self,
                                 features,
                                 convolution,
                                 mean,
                                 variance,
                                 mask=None):
        model_ops.set_training(True)
        epsilon = 0.001
        with self.test_session() as sess:
            features_t = tf.constant(features, dtype=tf.float32)
            batch_norm_t = model_ops.BatchNormalize(features_t,
                                                    convolution=convolution,
                                                    epsilon=epsilon,
                                                    mask=mask)
            sess.run(tf.initialize_all_variables())
            batch_norm, beta, gamma = sess.run([batch_norm_t] +
                                               tf.trainable_variables())
            expected = gamma * (features - mean) / np.sqrt(variance +
                                                           epsilon) + beta
            self.assertAllClose(batch_norm,
                                np.ma.filled(expected, 0),
                                rtol=1e-5,
                                atol=1e-5)
Example #7
  def GetModel(self, train=True):
    model_ops.set_training(train)

    # dummy variable for testing Restore
    tf.Variable(tf.constant(10.0, shape=[1]), name='v0')
Example #8
    def GetModel(self, train=True):
        model_ops.set_training(train)

        # dummy variable for testing Restore
        tf.Variable(tf.constant(10.0, shape=[1]), name='v0')
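The comment above mentions testing Restore. A hypothetical sketch of how the dummy variable could be exercised with the same pre-1.0 TensorFlow checkpoint API used in these examples (the test name, checkpoint path, and reliance on tf.all_variables() ordering are assumptions):

    def testSaveAndRestoreSketch(self):
        # Hypothetical usage: save v0, perturb it, then restore the saved value.
        checkpoint = os.path.join(self.root, 'restore-test')
        with self.test_session() as sess:
            self.GetModel(train=True)
            sess.run(tf.initialize_all_variables())
            saver = tf.train.Saver()
            saver.save(sess, checkpoint)
            v0 = tf.all_variables()[0]  # assumes v0 is the first variable
            sess.run(v0.assign([-1.0]))
            saver.restore(sess, checkpoint)
            self.assertAllClose(v0.eval(), [10.0])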