Пример #1
0
 def testUnknown(self):
     """An uninitialized ref variable has no statically-known value."""
     # The raw variable op produces a tensor whose value cannot be folded.
     unknown_var = gen_state_ops.variable(
         shape=[3, 4, 7], dtype=dtypes.float32, name="tf_val",
         container="", shared_name="")
     # constant_value returns None when the tensor is not a constant.
     self.assertIs(None, tensor_util.constant_value(unknown_var))
Пример #2
0
 def testUnknown(self):
   """constant_value yields None for a variable whose value is unknown."""
   unfoldable = gen_state_ops.variable(shape=[3, 4, 7],
                                       dtype=dtypes.float32,
                                       name="tf_val",
                                       container="",
                                       shared_name="")
   self.assertIs(None, tensor_util.constant_value(unfoldable))
Пример #3
0
def variable_op(shape, dtype, name="Variable", set_shape=True, container="",
                shared_name=""):
  """Deprecated. Use variable_op_v2 instead.

  Creates a (ref-type) variable op.

  Args:
    shape: Shape of the variable. Replaced by an unknown shape when
      `set_shape` is False.
    dtype: Data type of the variable.
    name: Optional name for the op.
    set_shape: If True, also set the static shape of the returned tensor.
    container: Optional container name passed to the underlying op.
    shared_name: Optional shared name passed to the underlying op.

  Returns:
    The ref tensor produced by the variable op.
  """
  if not set_shape:
    shape = tensor_shape.unknown_shape()
  ret = gen_state_ops.variable(shape=shape, dtype=dtype, name=name,
                               container=container, shared_name=shared_name)
  # TODO(mrry): Move this to where it is used, so we can get rid of this op
  #   wrapper?
  if set_shape:
    ret.set_shape(shape)
  return ret
Пример #4
0
def variable_op(shape, dtype, name="Variable", set_shape=True, container="",
                shared_name=""):
  """Deprecated. Use variable_op_v2 instead.

  Creates a (ref-type) variable op.

  Args:
    shape: Shape of the variable. Replaced by an unknown shape when
      `set_shape` is False.
    dtype: Data type of the variable.
    name: Optional name for the op.
    set_shape: If True, also set the static shape of the returned tensor.
    container: Optional container name passed to the underlying op.
    shared_name: Optional shared name passed to the underlying op.

  Returns:
    The ref tensor produced by the variable op.
  """
  if not set_shape:
    shape = tensor_shape.unknown_shape()
  ret = gen_state_ops.variable(shape=shape, dtype=dtype, name=name,
                               container=container, shared_name=shared_name)
  # TODO(mrry): Move this to where it is used, so we can get rid of this op
  #   wrapper?
  if set_shape:
    ret.set_shape(shape)
  return ret
Пример #5
0
  def testTwoDeviceFunctions(self):
    """A variable is placed by the device function active at creation time."""

    def make_var(var_name):
      # Helper: create a ref variable under whatever device scope is active.
      return gen_state_ops.variable(
          shape=[1],
          dtype=dtypes.float32,
          name=var_name,
          container="",
          shared_name="")

    with ops.Graph().as_default() as g:
      var_0 = make_var("var_0")
      with g.device(TestDeviceFuncPinVariableToCpu):
        var_1 = make_var("var_1")
      var_2 = make_var("var_2")
      var_3 = make_var("var_3")
      with g.device(TestDeviceFuncPinVariableToCpu):
        var_4 = make_var("var_4")
        with g.device("/device:GPU:0"):
          var_5 = make_var("var_5")
        var_6 = make_var("var_6")

    self.assertDeviceEqual(var_0.device, None)
    self.assertDeviceEqual(var_1.device, "/device:CPU:0")
    self.assertDeviceEqual(var_2.device, None)
    self.assertDeviceEqual(var_3.device, None)
    self.assertDeviceEqual(var_4.device, "/device:CPU:0")
    self.assertDeviceEqual(var_5.device, "/device:GPU:0")
    self.assertDeviceEqual(var_6.device, "/device:CPU:0")
Пример #6
0
  def testTwoDeviceFunctions(self):
    """Nested device scopes override an enclosing device function."""

    def _new_var(name):
      # All test variables share the same shape/dtype; only the name varies.
      return gen_state_ops.variable(
          shape=[1],
          dtype=dtypes.float32,
          name=name,
          container="",
          shared_name="")

    with ops.Graph().as_default() as g:
      var_0 = _new_var("var_0")
      with g.device(test_device_func_pin_variable_to_cpu):
        var_1 = _new_var("var_1")
      var_2 = _new_var("var_2")
      var_3 = _new_var("var_3")
      with g.device(test_device_func_pin_variable_to_cpu):
        var_4 = _new_var("var_4")
        with g.device("/device:GPU:0"):
          var_5 = _new_var("var_5")
        var_6 = _new_var("var_6")

    # Variables created under the CPU-pinning function land on CPU; the
    # explicit GPU scope wins for var_5; the rest keep no device.
    expected_devices = [
        (var_0, None),
        (var_1, "/device:CPU:0"),
        (var_2, None),
        (var_3, None),
        (var_4, "/device:CPU:0"),
        (var_5, "/device:GPU:0"),
        (var_6, "/device:CPU:0"),
    ]
    for var, device in expected_devices:
      self.assertDeviceEqual(var.device, device)
 def testDecay(self):
   """natural_exp_decay matches lr * exp(-step / k * decay_rate)."""
   initial_lr = 0.1
   k = 10
   decay_rate = 0.96
   step = gen_state_ops.variable(
       shape=[], dtype=dtypes.int32, name="step", container="", shared_name="")
   assign_step = state_ops.assign(step, 0)
   increment_step = state_ops.assign_add(step, 1)
   decayed_lr = learning_rate_decay.natural_exp_decay(initial_lr, step,
                                                      k, decay_rate)
   with self.test_session():
     assign_step.op.run()
     for current_step in range(k + 1):
       # Reference value computed directly with the math library.
       expected = initial_lr * math.exp(-current_step / k * decay_rate)
       self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
       increment_step.op.run()
 def testStaircase(self):
   """Staircase exponential decay only changes at whole decay periods."""
   with self.test_session():
     step = gen_state_ops.variable(shape=[], dtype=dtypes.int32,
                                   name="step", container="", shared_name="")
     set_step_1 = state_ops.assign(step, 1)
     set_step_2 = state_ops.assign(step, 2)
     set_step_100 = state_ops.assign(step, 100)
     decayed_lr = learning_rate_decay.exponential_decay(.1, step, 3, 0.96,
                                                        staircase=True)
     # Steps 1 and 2 are inside the first period of 3: rate is unchanged.
     set_step_1.op.run()
     self.assertAllClose(decayed_lr.eval(), .1, 1e-6)
     set_step_2.op.run()
     self.assertAllClose(decayed_lr.eval(), .1, 1e-6)
     # Step 100 has completed 100 // 3 full decay periods.
     set_step_100.op.run()
     self.assertAllClose(decayed_lr.eval(), .1 * 0.96 ** (100 // 3), 1e-6)
Пример #9
0
 def testAverageVariablesDeviceAssignment(self):
     """EMA shadow variables are created on their source variable's device."""
     with ops.device("/job:dev_v0"):
         v0 = variables.Variable(10.0, name="v0")
     with ops.device("/job:dev_v1"):
         # Raw ref-variable op; its static shape must be set explicitly.
         v1 = gen_state_ops.variable(shape=[1],
                                     dtype=dtypes.float32,
                                     name="v1",
                                     container="",
                                     shared_name="")
         v1.set_shape([1])
     tensor2 = v0 + v1
     ema = moving_averages.ExponentialMovingAverage(0.25, name="foo_avg")
     with ops.device("/job:default"):
         ema.apply([v0, v1, tensor2])
     # Averages of variables follow the variable device, not apply()'s.
     self.assertDeviceEqual("/job:dev_v0", ema.average(v0).device)
     self.assertDeviceEqual("/job:dev_v1", ema.average(v1).device)
     # However, the colocation property is maintained.
     self.assertEqual([b"loc:@v1"], ema.average(v1).op.colocation_groups())
     # The average of a plain tensor uses the device active during apply().
     self.assertDeviceEqual("/job:default", ema.average(tensor2).device)
 def testStaircase(self):
   """Staircase inverse-time decay uses the floored step / k ratio."""
   initial_lr = 0.1
   k = 10
   decay_rate = 0.96
   step = gen_state_ops.variable(
       shape=[], dtype=dtypes.int32, name="step", container="", shared_name="")
   assign_step = state_ops.assign(step, 0)
   increment_step = state_ops.assign_add(step, 1)
   decayed_lr = learning_rate_decay.inverse_time_decay(
       initial_lr, step, k, decay_rate, staircase=True)
   with self.test_session():
     assign_step.op.run()
     for current_step in range(k + 1):
       # With staircase=True the decay factor uses floor(step / k).
       expected = initial_lr / (1 + decay_rate * (current_step // k))
       self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
       increment_step.op.run()
 def testAverageVariablesDeviceAssignment(self):
   with ops.device("/job:dev_v0"):
     v0 = variables.Variable(10.0, name="v0")
   with ops.device("/job:dev_v1"):
     v1 = gen_state_ops.variable(
         shape=[1],
         dtype=dtypes.float32,
         name="v1",
         container="",
         shared_name="")
     v1.set_shape([1])
   tensor2 = v0 + v1
   ema = moving_averages.ExponentialMovingAverage(0.25, name="foo_avg")
   with ops.device("/job:default"):
     ema.apply([v0, v1, tensor2])
   self.assertDeviceEqual("/job:dev_v0", ema.average(v0).device)
   self.assertDeviceEqual("/job:dev_v1", ema.average(v1).device)
   # However, the colocation property is maintained.
   self.assertEqual([b"loc:@v1"], ema.average(v1).op.colocation_groups())
   self.assertDeviceEqual("/job:default", ema.average(tensor2).device)
Пример #12
0
 def testContainer(self):
   """ops.container scopes nest, and an explicit container wins over scope."""
   with ops.Graph().as_default():
     v0 = variables.Variable([0])
     with ops.container("l1"):
       v1 = variables.Variable([1])
       with ops.container("l2"):
         v2 = variables.Variable([2])
         # Passing container= explicitly overrides the enclosing "l2" scope.
         special_v = gen_state_ops.variable(shape=[1],
                                            dtype=dtypes.float32,
                                            name="VariableInL3",
                                            container="l3",
                                            shared_name="")
       v3 = variables.Variable([3])
     v4 = variables.Variable([4])
   # Each op records the container that was active when it was created.
   expectations = [("", v0), ("l1", v1), ("l2", v2), ("l3", special_v),
                   ("l1", v3), ("", v4)]
   for expected_container, var in expectations:
     self.assertEqual(compat.as_bytes(expected_container),
                      var.op.get_attr("container"))
 def testContainer(self):
   with ops.Graph().as_default():
     v0 = variables.Variable([0])
     with ops.container("l1"):
       v1 = variables.Variable([1])
       with ops.container("l2"):
         v2 = variables.Variable([2])
         special_v = gen_state_ops.variable(
             shape=[1],
             dtype=dtypes.float32,
             name="VariableInL3",
             container="l3",
             shared_name="")
       v3 = variables.Variable([3])
     v4 = variables.Variable([4])
   self.assertEqual(compat.as_bytes(""), v0.op.get_attr("container"))
   self.assertEqual(compat.as_bytes("l1"), v1.op.get_attr("container"))
   self.assertEqual(compat.as_bytes("l2"), v2.op.get_attr("container"))
   self.assertEqual(compat.as_bytes("l3"), special_v.op.get_attr("container"))
   self.assertEqual(compat.as_bytes("l1"), v3.op.get_attr("container"))
   self.assertEqual(compat.as_bytes(""), v4.op.get_attr("container"))
Пример #14
0
 def testRefDtype(self):
     """Casting a ref tensor to its base dtype drops the ref-ness."""
     with context.graph_mode(), self.cached_session():
         ref_tensor = gen_state_ops.variable(shape=[1], dtype=dtypes.float32)
         casted = math_ops.cast(ref_tensor, dtypes.float32)
         # The raw variable op yields float32_ref; cast produces plain float32.
         self.assertEqual(ref_tensor.dtype, dtypes.float32_ref)
         self.assertEqual(casted.dtype, dtypes.float32)