Example #1
  def testPartitionedVariable(self):
    save_path = os.path.join(self.get_temp_dir(), 'partitioned_variable')
    var_name = 'my_partitioned_var'

    g1 = tf.Graph()
    with g1.as_default():

      def initializer1(shape, dtype, partition_info):
        _ = partition_info  # Not used for creation.
        return tf.constant(True, dtype, shape)

      partitioned_var1 = tf.create_partitioned_variables(
          [1 << 3, 10], [4, 1], initializer1, dtype=tf.bool, name=var_name)

      with self.test_session(graph=g1) as session:
        with tf.device('/cpu:0'):
          tf.global_variables_initializer().run()
          pv1 = session.run(partitioned_var1)
          save = tf.train.Saver(partitioned_var1)
          save.save(session, save_path)

    g2 = tf.Graph()
    with g2.as_default():
      initializer2 = initializers.restore_initializer(save_path, var_name, '')
      partitioned_var2 = tf.create_partitioned_variables(
          [1 << 3, 10], [4, 1], initializer2, dtype=tf.bool, name=var_name)
      with self.test_session(graph=g2) as session:
        tf.global_variables_initializer().run()
        pv2 = session.run(partitioned_var2)

    self.assertAllEqual(pv1, pv2)
  def testPartitionedVariable(self):
    save_path = os.path.join(self.get_temp_dir(), 'partitioned_variable')
    var_name = 'my_partitioned_var'

    g1 = tf.Graph()
    with g1.as_default():

      def initializer1(shape, dtype, partition_info):
        _ = partition_info  # Not used for creation.
        return tf.constant(True, dtype, shape)

      partitioned_var1 = tf.create_partitioned_variables(
          [1 << 3, 10], [4, 1], initializer1, dtype=tf.bool, name=var_name)

      with self.test_session(graph=g1) as session:
        with tf.device('/cpu:0'):
          tf.global_variables_initializer().run()
          pv1 = session.run(partitioned_var1)
          save = tf.train.Saver(partitioned_var1)
          save.save(session, save_path)

    g2 = tf.Graph()
    with g2.as_default():
      initializer2 = initializers.restore_initializer(save_path, var_name, '')
      partitioned_var2 = tf.create_partitioned_variables(
          [1 << 3, 10], [4, 1], initializer2, dtype=tf.bool, name=var_name)
      with self.test_session(graph=g2) as session:
        tf.global_variables_initializer().run()
        pv2 = session.run(partitioned_var2)

    self.assertAllEqual(pv1, pv2)
 def testRandomInitializer(self):
     # Sanity check that the slices use different seeds when a random
     # initializer function is used.
     with self.test_session():
         var0, var1 = tf.create_partitioned_variables([20, 12], [1, 2], tf.random_uniform_initializer())
         tf.global_variables_initializer().run()
         val0, val1 = var0.eval().flatten(), var1.eval().flatten()
         self.assertTrue(np.linalg.norm(val0 - val1) > 1e-6)
     # Negative test that proves that slices have the same values if
     # the random initializer uses a seed.
     with self.test_session():
         var0, var1 = tf.create_partitioned_variables([20, 12], [1, 2], tf.random_uniform_initializer(seed=201))
         tf.global_variables_initializer().run()
         val0, val1 = var0.eval().flatten(), var1.eval().flatten()
         self.assertAllClose(val0, val1)
Example #4
  def testInitFromPartitionVar(self):
    checkpoint_dir = self.get_temp_dir()
    with self.test_session() as session:
      v1 = _create_partition_checkpoints(session, checkpoint_dir)

    # New graph and session.
    with tf.Graph().as_default() as g:
      with self.test_session(graph=g) as session:
        with tf.variable_scope("some_scope"):
          # TODO(ipolosukhin): Enable this when get_variable partitioning works.
          # Currently get_variable with partitioner doesn't return Variable,
          # but returns a concat op.
#           my1 = tf.get_variable(
#               "my1", [100, 100],
#               partitioner=tf.variable_axis_size_partitioner(axis=0,
#                                                          max_shard_bytes=100))
          my1 = tf.create_partitioned_variables(
              shape=[100, 100], slicing=[5, 1], name="my1",
              initializer=tf.truncated_normal_initializer(0.5))

        checkpoints.init_from_checkpoint(checkpoint_dir, {
            "some_scope/my1": "var1",
        })

        session.run(tf.initialize_all_variables())
        my1_values = session.run(my1)
        self.assertAllEqual(my1_values, v1)
Example #5
    def testInitFromPartitionVar(self):
        checkpoint_dir = self.get_temp_dir()
        with self.test_session() as session:
            v1 = _create_partition_checkpoints(session, checkpoint_dir)

        # New graph and session.
        with tf.Graph().as_default() as g:
            with self.test_session(graph=g) as session:
                with tf.variable_scope("some_scope"):
                    # TODO(ipolosukhin): Enable this when get_variable partitioning works.
                    # Currently get_variable with partitioner doesn't return Variable,
                    # but returns a concat op.
                    #           my1 = tf.get_variable(
                    #               "my1", [100, 100],
                    #               partitioner=tf.variable_axis_size_partitioner(axis=0,
                    #                                                          max_shard_bytes=100))
                    my1 = tf.create_partitioned_variables(
                        shape=[100, 100],
                        slicing=[5, 1],
                        name="my1",
                        initializer=tf.truncated_normal_initializer(0.5))

                checkpoints.init_from_checkpoint(checkpoint_dir, {
                    "some_scope/my1": "var1",
                })

                session.run(tf.initialize_all_variables())
                my1_values = session.run(my1)
                self.assertAllEqual(my1_values, v1)
 def testRandomInitUnevenPartitions(self):
     with self.test_session():
         rnd = tf.Variable(tf.random_uniform([20, 43], dtype=tf.float64))
         var_lists = [
             tf.create_partitioned_variables(rnd.get_shape(), [1, i], rnd.initialized_value()) for i in xrange(1, 10)
         ]
         tf.global_variables_initializer().run()
         rnd_val = rnd.eval()
         # Only check the slice save specs for the first 5 partitionings.
         save_specs = [
             # One slice
             ["20 43 0,20:0,43"],
             # Two slices
             ["20 43 0,20:0,22", "20 43 0,20:22,21"],
             # Three slices
             ["20 43 0,20:0,15", "20 43 0,20:15,14", "20 43 0,20:29,14"],
             # Four slices
             ["20 43 0,20:0,11", "20 43 0,20:11,11", "20 43 0,20:22,11", "20 43 0,20:33,10"],
             # Five slices
             ["20 43 0,20:0,9", "20 43 0,20:9,9", "20 43 0,20:18,9", "20 43 0,20:27,8", "20 43 0,20:35,8"],
         ]
         for i, vs in enumerate(var_lists):
             var_val = tf.concat(1, vs).eval()
             self.assertAllClose(rnd_val, var_val)
             self.assertEqual([tf.float64] * len(vs), [v.dtype.base_dtype for v in vs])
             if i < len(save_specs):
                 self._TestSaveSpec(vs, save_specs[i])
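
A note on the save-spec strings checked via _TestSaveSpec above: on one reading (not stated in the tests themselves), each spec lists the full variable shape followed by one "offset,length" pair per dimension, so "20 43 0,20:22,21" describes a slice of a 20x43 variable covering rows 0..19 and columns 22..42. A small hypothetical helper illustrating that reading:

# Hypothetical helper, not part of the original tests: parse a save-spec string
# such as "20 43 0,20:22,21" into (full_shape, [(offset, length), ...]).
def parse_save_spec(spec):
    shape_str, slices_str = spec.rsplit(" ", 1)
    full_shape = [int(dim) for dim in shape_str.split()]
    dim_slices = [tuple(int(x) for x in part.split(","))
                  for part in slices_str.split(":")]
    return full_shape, dim_slices

print(parse_save_spec("20 43 0,20:22,21"))  # ([20, 43], [(0, 20), (22, 21)])
print(parse_save_spec("4 0,1"))             # ([4], [(0, 1)])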
Example #7
 def testRandomInitializer(self):
     # Sanity check that the slices use different seeds when a random
     # initializer function is used.
     with self.test_session():
         var0, var1 = tf.create_partitioned_variables(
             [20, 12], [1, 2], tf.random_uniform_initializer())
         tf.initialize_all_variables().run()
         val0, val1 = var0.eval().flatten(), var1.eval().flatten()
         self.assertTrue(np.linalg.norm(val0 - val1) > 1e-6)
     # Negative test that proves that slices have the same values if
     # the random initializer uses a seed.
     with self.test_session():
         var0, var1 = tf.create_partitioned_variables(
             [20, 12], [1, 2], tf.random_uniform_initializer(seed=201))
         tf.initialize_all_variables().run()
         val0, val1 = var0.eval().flatten(), var1.eval().flatten()
         self.assertAllClose(val0, val1)
 def testDegenerate(self):
     with self.test_session():
         rnd = tf.Variable(tf.random_uniform([10, 43]))
         vs = tf.create_partitioned_variables(rnd.get_shape(), [1, 1], rnd.initialized_value())
         tf.global_variables_initializer().run()
         val = tf.concat(0, vs).eval()
         rnd = rnd.eval()
         self.assertAllClose(rnd, val)
         self._TestSaveSpec(vs, ["10 43 0,10:0,43"])
 def testVecConstantInit(self):
   with self.test_session():
     rnd_par = tf.constant([1, 2, 3, 4])
     vs = tf.create_partitioned_variables([4], [4], rnd_par)
     tf.initialize_all_variables().run()
     val = tf.concat(0, vs).eval()
     rnd = rnd_par.eval()
     self.assertAllClose(rnd, val)
     self.assertEqual([tf.int32] * 4, [v.dtype.base_dtype for v in vs])
     self._TestSaveSpec(vs, ["4 0,1", "4 1,1", "4 2,1", "4 3,1"])
Example #10
 def testConstantInit(self):
     with self.test_session():
         rnd_par = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
         vs = tf.create_partitioned_variables([2, 4], [1, 2], rnd_par)
         tf.initialize_all_variables().run()
         val = tf.concat(1, vs).eval()
         rnd = rnd_par.eval()
         self.assertAllClose(rnd, val)
         self.assertEqual([tf.int32] * 2, [v.dtype.base_dtype for v in vs])
         self._TestSaveSpec(vs, ["2 4 0,2:0,2", "2 4 0,2:2,2"])
Example #11
 def testVecConstantInit(self):
     with self.test_session():
         rnd_par = tf.constant([1, 2, 3, 4])
         vs = tf.create_partitioned_variables([4], [4], rnd_par)
         tf.initialize_all_variables().run()
         val = tf.concat(0, vs).eval()
         rnd = rnd_par.eval()
         self.assertAllClose(rnd, val)
         self.assertEqual([tf.int32] * 4, [v.dtype.base_dtype for v in vs])
         self._TestSaveSpec(vs, ["4 0,1", "4 1,1", "4 2,1", "4 3,1"])
Example #12
 def testDegenerate(self):
     with self.test_session():
         rnd = tf.Variable(tf.random_uniform([10, 43]))
         vs = tf.create_partitioned_variables(rnd.get_shape(), [1, 1],
                                              rnd.initialized_value())
         tf.initialize_all_variables().run()
         val = tf.concat(0, vs).eval()
         rnd = rnd.eval()
         self.assertAllClose(rnd, val)
         self._TestSaveSpec(vs, ["10 43 0,10:0,43"])
 def testConstantInit(self):
   with self.test_session():
     rnd_par = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
     vs = tf.create_partitioned_variables([2, 4], [1, 2], rnd_par)
     tf.initialize_all_variables().run()
     val = tf.concat(1, vs).eval()
     rnd = rnd_par.eval()
     self.assertAllClose(rnd, val)
     self.assertEqual([tf.int32] * 2, [v.dtype.base_dtype for v in vs])
     self._TestSaveSpec(vs, ["2 4 0,2:0,2", "2 4 0,2:2,2"])
 def testIotaInitializer(self):
     self.assertAllClose([0.0, 1.0, 2.0, 3.0], _IotaInitializer([4]))
     self.assertAllClose([[0.0, 1.0], [0.0, 10.0], [0.0, 100.0], [0.0, 1000.0]], _IotaInitializer([4, 2]))
     with self.test_session():
         vs = tf.create_partitioned_variables([13, 5], [3, 1], _IotaInitializer)
         tf.global_variables_initializer().run()
         slice0 = _IotaInitializer([5, 5])
         slice1 = _IotaInitializer([4, 5])
         slice2 = _IotaInitializer([4, 5])
         val = tf.concat(0, vs).eval()
         self.assertAllClose(slice0 + slice1 + slice2, val)
         self._TestSaveSpec(vs, ["13 5 0,5:0,5", "13 5 5,4:0,5", "13 5 9,4:0,5"])
Example #15
    def _random_weights(self, size=50, num_shards=1):
        assert size > 0
        assert num_shards > 0
        assert num_shards <= size

        embedding_weights = tf.create_partitioned_variables(
            shape=[size],
            slicing=[num_shards],
            initializer=tf.truncated_normal_initializer(mean=0.0,
                                                        stddev=1.0,
                                                        dtype=tf.float32))
        for w in embedding_weights:
            w.initializer.run()
        return embedding_weights
Example #16
  def _random_weights(self, size=50, num_shards=1):
    assert size > 0
    assert num_shards > 0
    assert num_shards <= size

    embedding_weights = tf.create_partitioned_variables(
        shape=[size],
        slicing=[num_shards],
        initializer=tf.truncated_normal_initializer(mean=0.0,
                                                    stddev=1.0,
                                                    dtype=tf.float32))
    for w in embedding_weights:
      w.initializer.run()
    return embedding_weights
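
For context, a minimal sketch (not from the original tests) of how weights partitioned this way are typically consumed, here via tf.nn.embedding_lookup; the shape, slicing, ids and partition_strategy below are illustrative assumptions:

import tensorflow as tf

# Illustrative only: a 50x8 embedding matrix split into 5 contiguous row shards.
shards = tf.create_partitioned_variables(
    shape=[50, 8], slicing=[5, 1],
    initializer=tf.truncated_normal_initializer(stddev=1.0))
ids = tf.constant([0, 7, 49])
# embedding_lookup accepts the list of shard variables directly;
# "div" matches the contiguous row partitioning created above.
embedded = tf.nn.embedding_lookup(shards, ids, partition_strategy="div")

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(embedded).shape)  # (3, 8)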
 def testLargePartitionedVariables(self):
     save_path = os.path.join(self.get_temp_dir(), "large_variable")
     var_name = "my_var"
     # Saving large partition variable.
     with tf.Session("", graph=tf.Graph()) as sess:
         with tf.device("/cpu:0"):
             # Create a partitioned variable which is larger than int32 size but
             # split into smaller sized variables.
             init = lambda shape, dtype: tf.constant(True, dtype, shape)
             partitioned_var = tf.create_partitioned_variables(
                 [1 << 31], [4], init, dtype=tf.bool, name=var_name)
             tf.initialize_all_variables().run()
             save = tf.train.Saver(partitioned_var)
             val = save.save(sess, save_path)
             self.assertEqual(save_path, val)
 def testLargePartitionedVariables(self):
   save_path = os.path.join(self.get_temp_dir(), "large_variable")
   var_name = "my_var"
   # Saving large partition variable.
   with tf.Session("", graph=tf.Graph()) as sess:
     with tf.device("/cpu:0"):
       # Create a partitioned variable which is larger than int32 size but
       # split into smaller sized variables.
       init = lambda shape, dtype: tf.constant(True, dtype, shape)
       partitioned_var = tf.create_partitioned_variables(
           [1 << 31], [4], init, dtype=tf.bool, name=var_name)
       tf.initialize_all_variables().run()
       save = tf.train.Saver(partitioned_var)
       val = save.save(sess, save_path)
       self.assertEqual(save_path, val)
 def testSliceSizeOne(self):
     with self.test_session():
         rnd = tf.Variable(tf.random_uniform([10, 43]))
         vs = tf.create_partitioned_variables(rnd.get_shape(), [10, 1],
                                              rnd.initialized_value())
         tf.global_variables_initializer().run()
         val = tf.concat_v2(vs, 0).eval()
         rnd = rnd.eval()
         self.assertAllClose(rnd, val)
         self._TestSaveSpec(vs, [
             "10 43 0,1:0,43", "10 43 1,1:0,43", "10 43 2,1:0,43",
             "10 43 3,1:0,43", "10 43 4,1:0,43", "10 43 5,1:0,43",
             "10 43 6,1:0,43", "10 43 7,1:0,43", "10 43 8,1:0,43",
             "10 43 9,1:0,43"
         ])
Example #20
 def testIotaInitializer(self):
     self.assertAllClose([0., 1., 2., 3.], _IotaInitializer([4]))
     self.assertAllClose([[0., 1.], [0., 10.], [0., 100.], [0., 1000.]],
                         _IotaInitializer([4, 2]))
     with self.test_session():
         vs = tf.create_partitioned_variables([13, 5], [3, 1],
                                              _IotaInitializer)
         tf.initialize_all_variables().run()
         slice0 = _IotaInitializer([5, 5])
         slice1 = _IotaInitializer([4, 5])
         slice2 = _IotaInitializer([4, 5])
         val = tf.concat(0, vs).eval()
         self.assertAllClose(slice0 + slice1 + slice2, val)
         self._TestSaveSpec(
             vs, ["13 5 0,5:0,5", "13 5 5,4:0,5", "13 5 9,4:0,5"])
def _create_partition_checkpoints(sess, checkpoint_dir):
  checkpoint_prefix = os.path.join(checkpoint_dir, "model")
  checkpoint_state_name = "checkpoint"
  # TODO(ipolosukhin): Enable this when get_variable partitioning works.
#   v1 = tf.get_variable("var1", [100, 100],
#                        partitioner=tf.variable_axis_size_partitioner(axis=0,
#                                                         max_shard_bytes=512))
  v1 = tf.create_partitioned_variables(
      shape=[100, 100], slicing=[5, 1], name="var1",
      initializer=tf.truncated_normal_initializer(0.5))
  sess.run(tf.initialize_all_variables())
  v1_value = sess.run(v1)
  saver = tf.train.Saver()
  saver.save(sess, checkpoint_prefix, global_step=0,
             latest_filename=checkpoint_state_name)
  return v1_value
Example #22
def _create_partition_checkpoints(sess, checkpoint_dir):
  checkpoint_prefix = os.path.join(checkpoint_dir, "model")
  checkpoint_state_name = "checkpoint"
  # TODO(ipolosukhin): Enable this when get_variable partitioning works.
#   v1 = tf.get_variable("var1", [100, 100],
#                        partitioner=tf.variable_axis_size_partitioner(axis=0,
#                                                         max_shard_bytes=512))
  v1 = tf.create_partitioned_variables(
      shape=[100, 100], slicing=[5, 1], name="var1",
      initializer=tf.truncated_normal_initializer(0.5))
  sess.run(tf.initialize_all_variables())
  v1_value = sess.run(v1)
  saver = tf.train.Saver()
  saver.save(sess, checkpoint_prefix, global_step=0,
             latest_filename=checkpoint_state_name)
  return v1_value
Example #23
  def _random_weights(self, vocab_size=4, embed_dim=4, num_shards=1):
    assert vocab_size > 0
    assert embed_dim > 0
    assert num_shards > 0
    assert num_shards <= vocab_size

    embedding_weights = tf.create_partitioned_variables(
        shape=[vocab_size, embed_dim],
        slicing=[num_shards, 1],
        initializer=tf.truncated_normal_initializer(mean=0.0,
                                                    stddev=1.0 /
                                                    math.sqrt(vocab_size),
                                                    dtype=tf.float32))
    for w in embedding_weights:
      w.initializer.run()
    embedding_weights = [w.eval() for w in embedding_weights]
    return embedding_weights
Example #24
    def _random_weights(self, vocab_size=4, embed_dim=4, num_shards=1):
        assert vocab_size > 0
        assert embed_dim > 0
        assert num_shards > 0
        assert num_shards <= vocab_size

        embedding_weights = tf.create_partitioned_variables(
            shape=[vocab_size, embed_dim],
            slicing=[num_shards, 1],
            initializer=tf.truncated_normal_initializer(mean=0.0,
                                                        stddev=1.0 /
                                                        math.sqrt(vocab_size),
                                                        dtype=tf.float32))
        for w in embedding_weights:
            w.initializer.run()
        embedding_weights = [w.eval() for w in embedding_weights]
        return embedding_weights
Example #25
 def testRandomInitValue(self):
     with self.test_session():
         rnd = tf.Variable(tf.random_uniform([200, 40]))
         vs = tf.create_partitioned_variables(rnd.get_shape(), [1, 10],
                                              rnd.initialized_value())
         tf.initialize_all_variables().run()
         val = tf.concat(1, vs).eval()
         rnd = rnd.eval()
         self.assertAllClose(rnd, val)
         self.assertEqual([tf.float32] * 10,
                          [v.dtype.base_dtype for v in vs])
         self._TestSaveSpec(vs, [
             "200 40 0,200:0,4", "200 40 0,200:4,4", "200 40 0,200:8,4",
             "200 40 0,200:12,4", "200 40 0,200:16,4", "200 40 0,200:20,4",
             "200 40 0,200:24,4", "200 40 0,200:28,4", "200 40 0,200:32,4",
             "200 40 0,200:36,4"
         ])
Example #26
 def testName(self):
     with self.test_session():
         rnd_par = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
         with tf.variable_scope("hi"):
             vs1 = tf.create_partitioned_variables([2, 4], [1, 2], rnd_par)
             vs2 = tf.create_partitioned_variables([2, 4], [1, 2], rnd_par)
         tf.initialize_all_variables().run()
         var1_name = vs1[0]._save_slice_info.full_name
         var2_name = vs2[0]._save_slice_info.full_name
         self.assertEqual("hi/PartitionedVariable", var1_name)
         self.assertEqual("hi/PartitionedVariable_1", var2_name)
         self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
         self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
         self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
         self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
     # Test same variable.
     with self.test_session():
         rnd_par = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
         with tf.variable_scope("hola") as vs:
             vs1 = tf.create_partitioned_variables([2, 4], [1, 2],
                                                   rnd_par,
                                                   dtype=tf.int32)
         with tf.variable_scope(vs, reuse=True):
             vs2 = tf.create_partitioned_variables([2, 4], [1, 2],
                                                   rnd_par,
                                                   dtype=tf.int32)
         tf.initialize_all_variables().run()
         var1_name = vs1[0]._save_slice_info.full_name
         var2_name = vs2[0]._save_slice_info.full_name
         self.assertEqual("hola/PartitionedVariable", var1_name)
         self.assertEqual("hola/PartitionedVariable", var2_name)
         self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
         self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
         self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
         self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
     # Test name_scope
     with self.test_session():
         rnd_par = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
         with tf.name_scope("ola"):
             vs1 = tf.create_partitioned_variables([2, 4], [1, 2], rnd_par)
             vs2 = tf.create_partitioned_variables([2, 4], [1, 2], rnd_par)
         tf.initialize_all_variables().run()
         var1_name = vs1[0]._save_slice_info.full_name
         var2_name = vs2[0]._save_slice_info.full_name
         # Currently, the name scope 'ola' has no effect.
         self.assertEqual("PartitionedVariable", var1_name)
         self.assertEqual("PartitionedVariable_1", var2_name)
         self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
         self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
         self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
         self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
 def testSliceSizeOne(self):
   with self.test_session():
     rnd = tf.Variable(tf.random_uniform([10, 43]))
     vs = tf.create_partitioned_variables(
         rnd.get_shape(), [10, 1], rnd.initialized_value())
     tf.initialize_all_variables().run()
     val = tf.concat(0, vs).eval()
     rnd = rnd.eval()
     self.assertAllClose(rnd, val)
     self._TestSaveSpec(vs, ["10 43 0,1:0,43",
                             "10 43 1,1:0,43",
                             "10 43 2,1:0,43",
                             "10 43 3,1:0,43",
                             "10 43 4,1:0,43",
                             "10 43 5,1:0,43",
                             "10 43 6,1:0,43",
                             "10 43 7,1:0,43",
                             "10 43 8,1:0,43",
                             "10 43 9,1:0,43"])
 def testName(self):
   with self.test_session():
     rnd_par = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
     with tf.variable_scope("hi"):
       vs1 = tf.create_partitioned_variables([2, 4], [1, 2], rnd_par)
       vs2 = tf.create_partitioned_variables([2, 4], [1, 2], rnd_par)
     tf.initialize_all_variables().run()
     var1_name = vs1[0]._save_slice_info.full_name
     var2_name = vs2[0]._save_slice_info.full_name
     self.assertEqual("hi/PartitionedVariable", var1_name)
     self.assertEqual("hi/PartitionedVariable_1", var2_name)
     self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
     self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
     self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
     self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
   # Test same variable.
   with self.test_session():
     rnd_par = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
     with tf.variable_scope("hola") as vs:
       vs1 = tf.create_partitioned_variables(
           [2, 4], [1, 2], rnd_par, dtype=tf.int32)
     with tf.variable_scope(vs, reuse=True):
       vs2 = tf.create_partitioned_variables(
           [2, 4], [1, 2], rnd_par, dtype=tf.int32)
     tf.initialize_all_variables().run()
     var1_name = vs1[0]._save_slice_info.full_name
     var2_name = vs2[0]._save_slice_info.full_name
     self.assertEqual("hola/PartitionedVariable", var1_name)
     self.assertEqual("hola/PartitionedVariable", var2_name)
     self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
     self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
     self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
     self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
   # Test name_scope
   with self.test_session():
     rnd_par = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
     with tf.name_scope("ola"):
       vs1 = tf.create_partitioned_variables([2, 4], [1, 2], rnd_par)
       vs2 = tf.create_partitioned_variables([2, 4], [1, 2], rnd_par)
     tf.initialize_all_variables().run()
     var1_name = vs1[0]._save_slice_info.full_name
     var2_name = vs2[0]._save_slice_info.full_name
     # Currently, the name scope 'ola' has no effect.
     self.assertEqual("PartitionedVariable", var1_name)
     self.assertEqual("PartitionedVariable_1", var2_name)
     self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
     self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
     self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
     self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
 def testRandomInitValue(self):
   with self.test_session():
     rnd = tf.Variable(tf.random_uniform([200, 40]))
     vs = tf.create_partitioned_variables(
         rnd.get_shape(), [1, 10], rnd.initialized_value())
     tf.initialize_all_variables().run()
     val = tf.concat(1, vs).eval()
     rnd = rnd.eval()
     self.assertAllClose(rnd, val)
     self.assertEqual([tf.float32] * 10, [v.dtype.base_dtype for v in vs])
     self._TestSaveSpec(vs, ["200 40 0,200:0,4",
                             "200 40 0,200:4,4",
                             "200 40 0,200:8,4",
                             "200 40 0,200:12,4",
                             "200 40 0,200:16,4",
                             "200 40 0,200:20,4",
                             "200 40 0,200:24,4",
                             "200 40 0,200:28,4",
                             "200 40 0,200:32,4",
                             "200 40 0,200:36,4"])
Example #30
 def testRandomInitUnevenPartitions(self):
   with self.test_session():
     rnd = tf.Variable(
         tf.random_uniform([20, 43], dtype=tf.float64))
     var_lists = [
         tf.create_partitioned_variables(
             rnd.get_shape(), [1, i],
             rnd.initialized_value())
         for i in xrange(1, 10)]
     tf.global_variables_initializer().run()
     rnd_val = rnd.eval()
      # Only check the slice save specs for the first 5 partitionings.
     save_specs = [
         # One slice
         ["20 43 0,20:0,43"],
         # Two slices
         ["20 43 0,20:0,22",
          "20 43 0,20:22,21"],
         # Three slices
         ["20 43 0,20:0,15",
          "20 43 0,20:15,14",
          "20 43 0,20:29,14"],
         # Four slices
         ["20 43 0,20:0,11",
          "20 43 0,20:11,11",
          "20 43 0,20:22,11",
          "20 43 0,20:33,10"],
         # Five slices
         ["20 43 0,20:0,9",
          "20 43 0,20:9,9",
          "20 43 0,20:18,9",
          "20 43 0,20:27,8",
          "20 43 0,20:35,8"]]
     for i, vs in enumerate(var_lists):
       var_val = tf.concat(1, vs).eval()
       self.assertAllClose(rnd_val, var_val)
       self.assertEqual(
           [tf.float64] * len(vs), [v.dtype.base_dtype for v in vs])
       if i < len(save_specs):
         self._TestSaveSpec(vs, save_specs[i])
Example #31
  def _random_ids_and_weights(self, vocab_size=4, embed_dim=4, num_shards=1):
    assert vocab_size > 0
    assert embed_dim > 0
    assert num_shards > 0
    assert num_shards <= vocab_size

    embedding_weights = tf.create_partitioned_variables(
        shape=[vocab_size, embed_dim],
        slicing=[num_shards, 1],
        initializer=tf.truncated_normal_initializer(
            mean=0.0,
            stddev=1.0 / math.sqrt(vocab_size),
            dtype=tf.float32))
    for w in embedding_weights:
      w.initializer.run()
    embedding_weights = [w.eval() for w in embedding_weights]

    # Each row demonstrates a test case:
    #   Row 0: multiple valid ids, 1 invalid id, weighted mean
    #   Row 1: all ids are invalid (leaving no valid ids after pruning)
    #   Row 2: no ids to begin with
    #   Row 3: single id
    #   Row 4: all ids have <=0 weight
    indices = [[0, 0], [0, 1], [0, 2], [1, 0], [3, 0], [4, 0], [4, 1]]
    ids = [0, 1, -1, -1, 2, 0, 1]
    weights = [1.0, 2.0, 1.0, 1.0, 3.0, 0.0, -0.5]
    shape = [5, 4]

    sparse_ids = tf.SparseTensor(
        tf.constant(indices, tf.int64),
        tf.constant(ids, tf.int64),
        tf.constant(shape, tf.int64))

    sparse_weights = tf.SparseTensor(
        tf.constant(indices, tf.int64),
        tf.constant(weights, tf.float32),
        tf.constant(shape, tf.int64))

    return embedding_weights, sparse_ids, sparse_weights
 def testSomeErrors(self):
   with self.test_session():
     rnd = tf.Variable(tf.random_uniform([10, 43]))
     with self.assertRaises(ValueError):
       tf.create_partitioned_variables([10], [1, 1], rnd.initialized_value())
     with self.assertRaises(ValueError):
       tf.create_partitioned_variables([10, 20], [1], rnd.initialized_value())
     with self.assertRaises(ValueError):
       tf.create_partitioned_variables([10, 43], [1], rnd.initialized_value())
     with self.assertRaises(ValueError):
       tf.create_partitioned_variables(
           [10, 43], [1, 2, 3], rnd.initialized_value())
     with self.assertRaises(ValueError):
       tf.create_partitioned_variables(
           [10, 43], [11, 1], rnd.initialized_value())
     with self.assertRaises(ValueError):
       tf.create_partitioned_variables(
           [10, 43], [20, 1], rnd.initialized_value())
     with self.assertRaises(ValueError):
       tf.create_partitioned_variables(
           [10, 43], [1, 50], rnd.initialized_value())
Example #33
import math

# Note: `indices`, `ids` and `weights` are assumed to be defined as in the
# earlier safe_embedding_lookup_sparse example (the same sparse test data).
shape = [5, 4]

sparse_ids = tf.SparseTensor(
    tf.constant(indices, tf.int64), tf.constant(ids, tf.int64),
    tf.constant(shape, tf.int64))

sparse_weights = tf.SparseTensor(
    tf.constant(indices, tf.int64), tf.constant(weights, tf.float32),
    tf.constant(shape, tf.int64))

vocab_size = 4
embed_dim = 4
num_shards = 1
embedding_weights = tf.create_partitioned_variables(
    shape=[vocab_size, embed_dim],
    slicing=[num_shards, 1],
    initializer=tf.truncated_normal_initializer(
        mean=0.0, stddev=1.0 / math.sqrt(vocab_size), dtype=tf.float32))
for w in embedding_weights:
    w.initializer.run()
embedding_weights = [w.eval() for w in embedding_weights]

embedding_lookup_result = (tf.contrib.layers.safe_embedding_lookup_sparse(
    embedding_weights, sparse_ids, sparse_weights).eval())



############# Performance profiler: tfprof
# Install the tfprof command-line tool with the following command:
bazel build -c opt tensorflow/contrib/tfprof/...
# Look up the help documentation with the following command:
Example #34
# use the metrics-dict to construct the validation_monitor
validation_monitor = tf.contrib.learn.monitors.ValidationMonitor(
    iris_test.data,
    iris_test.target,  # provide the data and target used to evaluate the model
    every_n_steps=50,  # run this monitor every 50 steps
    metrics=validation_metrics,
    early_stopping_metric='loss',  # early stopping depending on the 'loss' metric
    early_stopping_metric_minimize=True,  # if True, we should minimize the early_stopping_metric
    early_stopping_rounds=200)
# next, we construct a DNNClassifier
# with 3 hidden layers of 10, 15 and 10 units respectively;
# note that we can attach multiple monitors to track different metrics
classifier = tf.contrib.learn.DNNClassifier(
    feature_columns=feature_columns,
    hidden_units=[10, 15, 10],
    n_classes=3,
    model_dir='',
    config=tf.contrib.learn.RunConfig(save_checkpoints_secs=2))
classifier.fit(x=iris_train.data,
               y=iris_train.target,
               steps=10000,
               monitors=[validation_monitor])
accuracy_score = classifier.evaluate(x=iris_test.data,
                                     y=iris_test.target)['accuracy']
# the result keys correspond to the metrics in the validation_metrics dict.

tf.create_partitioned_variables()
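
The bare call above is incomplete as written; for reference, a minimal sketch of a complete call in the same style as the earlier examples (the shape, slicing and name are illustrative assumptions, not from the original):

weights = tf.create_partitioned_variables(
    shape=[100, 100],    # full shape of the logical variable
    slicing=[5, 1],      # split axis 0 into 5 shards; axis 1 is not split
    initializer=tf.truncated_normal_initializer(0.5),
    name="example_var")  # illustrative name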
Example #35
 def testSomeErrors(self):
     with self.test_session():
         rnd = tf.Variable(tf.random_uniform([10, 43]))
         with self.assertRaises(ValueError):
             tf.create_partitioned_variables([10], [1, 1],
                                             rnd.initialized_value())
         with self.assertRaises(ValueError):
             tf.create_partitioned_variables([10, 20], [1],
                                             rnd.initialized_value())
         with self.assertRaises(ValueError):
             tf.create_partitioned_variables([10, 43], [1],
                                             rnd.initialized_value())
         with self.assertRaises(ValueError):
             tf.create_partitioned_variables([10, 43], [1, 2, 3],
                                             rnd.initialized_value())
         with self.assertRaises(ValueError):
             tf.create_partitioned_variables([10, 43], [11, 1],
                                             rnd.initialized_value())
         with self.assertRaises(ValueError):
             tf.create_partitioned_variables([10, 43], [20, 1],
                                             rnd.initialized_value())
         with self.assertRaises(ValueError):
             tf.create_partitioned_variables([10, 43], [1, 50],
                                             rnd.initialized_value())