Example #1
def test_validating_dataset_input_tensors_with_dtype_mismatch(self):
  with self.cached_session():
    strategy = mirrored_strategy.MirroredStrategy(
        ['/device:GPU:0', '/device:CPU:0'])
    a = constant_op.constant([1, 2], shape=(1, 2), dtype=dtypes.int32)
    b = constant_op.constant([1, 2], shape=(1, 2), dtype=dtypes.float64)
    x = values.DistributedValues({
        '/device:CPU:0': a,
        '/device:GPU:0': b
    })
    y = values.DistributedValues({
        '/device:CPU:0': a,
        '/device:GPU:0': a
    })
    with strategy.scope():
      # Device and dtype details are omitted from the expected error message,
      # since the order of the devices and their corresponding input tensor
      # dtypes is not deterministic across runs.
      with self.assertRaisesRegexp(
          ValueError, 'Input tensor dtypes do not match for '
          'distributed tensor inputs '
          'DistributedValues:.+'):
        distributed_training_utils.validate_distributed_dataset_inputs(
            strategy, x, y)
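Here `x` mixes an `int32` tensor on one device with a `float64` tensor on the other, so `validate_distributed_dataset_inputs` is expected to reject it with a `ValueError`; `y`, which carries the same tensor on both devices, is well-formed.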
Example #2
def testCanonicalization(self):
  canonical_cpu = ["/job:localhost/replica:0/task:0/device:CPU:0"]
  v = values.DistributedValues({"": 42})
  self.assertEqual(canonical_cpu, list(v._index.keys()))
  v = values.DistributedValues({"/device:CPU:0": 42})
  self.assertEqual(canonical_cpu, list(v._index.keys()))
  v = values.DistributedValues({"/cpu:0": 42})
  self.assertEqual(canonical_cpu, list(v._index.keys()))
  v = values.DistributedValues({"/CPU:0": 42})
  self.assertEqual(canonical_cpu, list(v._index.keys()))
  # A lowercase device type in the fully qualified form is malformed.
  with self.assertRaises(AssertionError):
    v = values.DistributedValues({"/device:cpu:0": 42})
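Every accepted spelling of the local CPU ("", "/cpu:0", "/CPU:0", "/device:CPU:0") collapses to one canonical key. A minimal sketch of the same canonicalization, assuming TensorFlow's internal device_util helper (an internal module whose exact path has moved between releases, so treat the import as an assumption):

from tensorflow.python.distribute import device_util  # internal, version-dependent

# All of these spellings canonicalize to the same fully qualified name;
# a lowercase device type such as "/device:cpu:0" trips an assertion instead.
for d in ["", "/device:CPU:0", "/cpu:0", "/CPU:0"]:
  print(device_util.canonicalize(d))
# -> /job:localhost/replica:0/task:0/device:CPU:0, four times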
Example #3
def testGetEager(self):
  with ops.device("/device:CPU:0"):
    one = constant_op.constant(1)
    two = constant_op.constant(2)
    v = values.DistributedValues({"/device:CPU:0": one, "/device:GPU:0": two})
    self.assertEqual(two, v.get("/device:GPU:0"))
    self.assertEqual(one, v.get())  # no argument: use the current device
    # Requesting a device that has no entry raises ValueError.
    with self.assertRaises(ValueError):
      self.assertIsNone(v.get("/device:GPU:2"))
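For intuition, here is a minimal pure-Python analogue of the structure these lookups exercise. `SimpleDistributedValues` is a hypothetical stand-in for illustration only, and the current device is passed explicitly instead of being read from the enclosing `ops.device` scope:

class SimpleDistributedValues(object):
  """Maps canonical device strings to per-device components."""

  def __init__(self, index):
    self._index = dict(index)

  def get(self, device=None):
    # The real class infers the device from the surrounding scope;
    # this sketch simply defaults to the local CPU.
    device = device or "/device:CPU:0"
    if device not in self._index:
      raise ValueError("Device %s not found in %s" % (device, list(self._index)))
    return self._index[device]

v = SimpleDistributedValues({"/device:CPU:0": 1, "/device:GPU:0": 2})
assert v.get("/device:GPU:0") == 2
assert v.get() == 1  # falls back to the "current" (CPU) device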
Example #4
def testGetGraph(self):
  with context.graph_mode(), \
      ops.Graph().as_default(), \
      ops.device("/device:CPU:0"):
    one = constant_op.constant(1)
    two = constant_op.constant(2)
    v = values.DistributedValues({"/device:CPU:0": one, "/device:GPU:0": two})
    self.assertEqual(two, v.get("/device:GPU:0"))
    self.assertEqual(one, v.get())
    with self.assertRaises(ValueError):
      self.assertIsNone(v.get("/device:GPU:2"))
Example #5
def testIsTensorLikeWithAConstant(self):
  with context.graph_mode(), \
      ops.Graph().as_default(), \
      ops.device("/device:CPU:0"):
    one = constant_op.constant(1)
    two = 2.0  # a plain Python float, not a tensor
    v = values.DistributedValues({"/device:CPU:0": one, "/device:GPU:0": two})
    self.assertEqual(two, v.get("/device:GPU:0"))
    self.assertEqual(one, v.get())
    self.assertFalse(v.is_tensor_like)
    self.assertFalse(tensor_util.is_tensor(v))
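Because `two` is a plain Python float rather than a tensor, the aggregate reports `is_tensor_like` as `False` and `tensor_util.is_tensor` rejects it, even though `get()` still returns the float.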
Example #6
def testNonMatchingVariableCreation(self):
  self._skip_eager_if_gpus_less_than(1)

  def model_fn(name):
    v = variable_scope.variable(1.0, name=name)
    distribute_lib.get_tower_context().merge_call(lambda _: _)
    return v

  dist = mirrored_strategy.MirroredStrategy(
      ["/device:GPU:0", "/device:CPU:0"])

  with dist.scope():
    # Each tower receives a different variable name.
    names = values.DistributedValues({
        "/device:CPU:0": "foo",
        "/device:GPU:0": "bar"
    })
    with self.assertRaises(RuntimeError):
      _ = dist.call_for_each_tower(model_fn, names, run_concurrently=False)
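Because `model_fn` creates its variable under a different name on each tower ("foo" on CPU, "bar" on GPU), the strategy cannot reconcile the per-device variables into a single mirrored variable, and `call_for_each_tower` raises `RuntimeError`.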