예제 #1
0
def get_device_map_from(destinations):
  """Infer a `(device_map, logical_device)` pair from `destinations`.

  Args:
    destinations: a `DistributedValues`/`LogicalDeviceSpec` (which already
      carry a device map), a device string, or any object exposing a
      `.device` attribute (e.g. a tensor or variable).

  Returns:
    A tuple of (`DeviceMap`, logical device index).
  """
  # Distributed containers already know their own device placement.
  if isinstance(destinations,
                (value_lib.DistributedValues, value_lib.LogicalDeviceSpec)):
    return destinations.device_map, destinations.logical_device
  # Otherwise wrap the single device (string or tensor-like) in a map;
  # logical device 0 is the only choice for a single device.
  if isinstance(destinations, six.string_types):
    resolved = device_util.resolve(destinations)
  else:
    resolved = destinations.device
  return value_lib.SingleDeviceMap(resolved), 0
예제 #2
0
 def __init__(self, container_strategy, device):
   """Create the extended strategy pinned to a single `device`."""
   super(OneDeviceExtended, self).__init__(container_strategy)
   self._device = device
   self._default_device = device
   # Input pipelines are always fed from the host CPU, regardless of the
   # compute device.
   self._input_device = device_util.canonicalize("/device:CPU:0")
   pairs = [(self._input_device, [self._device])]
   single_map = values.SingleDeviceMap(device)
   self._input_workers = input_lib.InputWorkers(single_map, pairs)
예제 #3
0
 def testFunctionCanReturnPerReplica(self):
     """tf.function round-trips a PerReplica without aliasing it."""
     identity = def_function.function(lambda v: v)
     per_replica = values.PerReplica(values.SingleDeviceMap("CPU"),
                                     (constant_op.constant(1.),))
     result = identity(per_replica)
     # The output is a fresh object, but component-wise equal.
     self.assertIsNot(per_replica, result)
     for got, want in zip(result._to_components(),
                          per_replica._to_components()):
         self.assertAllEqual(got, want)
     self.assertEqual(per_replica._component_metadata(),
                      result._component_metadata())
예제 #4
0
 def testShapeInvariantToComponentsExplicitShape(self):
     """An explicit shape invariant is replicated to every component."""
     first = constant_op.constant([1., 1., 1.])
     second = constant_op.constant([2., 2., 2.])
     replica_value = values.PerReplica(
         values.SingleDeviceMap("CPU"), (first, second))
     invariant = [None]
     # Two components -> the invariant is repeated once per component.
     self.assertEqual(
         replica_value._shape_invariant_to_components(shape=invariant),
         (invariant, invariant))
예제 #5
0
 def __init__(self, container_strategy, device):
     """Create the extended strategy pinned to a single resolved `device`.

     Args:
       container_strategy: the owning distribution strategy object.
       device: device string; resolved to its canonical form on entry.
     """
     super(OneDeviceExtended, self).__init__(container_strategy)
     self._device = device_util.resolve(device)
     # Input pipelines run on the CPU of the same host/job as `device`.
     suffix_loc = self._device.rfind("/")
     self._input_device = self._device[:suffix_loc] + "/device:CPU:0"
     worker_device_pairs = [(self._input_device, [self._device])]
     # BUG FIX: build the device map from the *resolved* device so it is
     # consistent with `worker_device_pairs` above; the raw `device`
     # argument may differ from its canonical/resolved form.
     device_map = values.SingleDeviceMap(self._device)
     self._input_workers = input_lib.InputWorkers(device_map,
                                                  worker_device_pairs)
예제 #6
0
  def testTypeSpec(self):
    """PerReplica exposes a TypeSpec mirroring its values and devices."""
    single_map = values.SingleDeviceMap("CPU")
    replica_value = values.PerReplica(single_map,
                                      (constant_op.constant(1.),))

    type_spec = replica_value._type_spec
    expected_specs = (tensor_spec.TensorSpec([], dtypes.float32),)
    self.assertEqual(type_spec._value_specs, expected_specs)
    self.assertEqual(type_spec._device_map, replica_value.device_map)
    self.assertEqual(type_spec._logical_device,
                     replica_value.logical_device)
예제 #7
0
    def testCondWithValuesNotConvertibleToTensor(self):
        """cond() rejects PerReplica values for which no TypeSpec exists."""
        single_map = values.SingleDeviceMap("CPU")
        # Python sets are not convertible to tensors, so no TypeSpec can
        # be built for either branch value.
        true_branch = values.PerReplica(single_map, (set(["a"]),))
        false_branch = values.PerReplica(single_map, (set(["b", "c"]),))
        predicate = array_ops.placeholder(dtypes.bool, [])

        with self.assertRaisesRegex(TypeError,
                                    "Could not build a TypeSpec for"):
            control_flow_ops.cond(predicate, lambda: true_branch,
                                  lambda: false_branch)
예제 #8
0
    def testFetchOnFrozenGraph(self):
        """A TPUMirroredVariable can be fetched after the graph is frozen."""
        with context.graph_mode():
            mirrored = values.TPUMirroredVariable(
                strategy=None,
                device_map=values.SingleDeviceMap("/cpu:0"),
                values=[variables_lib.Variable(42.)],
                aggregation=None)

            self.evaluate(variables_lib.global_variables_initializer())
            # Finalizing proves that fetching adds no new ops to the graph.
            ops.get_default_graph().finalize()
            self.assertEqual(42., self.evaluate(mirrored))
예제 #9
0
  def testTypeSpecRoundTrip(self):
    """A PerReplica survives a to-components/from-components round trip."""
    single_map = values.SingleDeviceMap("CPU")
    original = values.PerReplica(single_map, (constant_op.constant(1.),))

    type_spec = original._type_spec
    rebuilt = type_spec._from_components(type_spec._to_components(original))

    # Device placement and payload are both preserved.
    self.assertEqual(original.device_map, rebuilt.device_map)
    self.assertEqual(original.logical_device, rebuilt.logical_device)
    self.assertAllEqual(original.values, rebuilt.values)
예제 #10
0
  def testCondWithValuesConvertibleToTensor(self):
    """cond() selects the correct PerReplica branch for tensor-like values."""
    single_map = values.SingleDeviceMap("CPU")
    true_branch = values.PerReplica(single_map, ("a",))
    false_branch = values.PerReplica(single_map, ("b",))
    predicate = array_ops.placeholder_with_default(True, [])

    chosen = control_flow_ops.cond(
        predicate, lambda: true_branch, lambda: false_branch)

    # Predicate defaults to True, so the first branch must win.
    self.assertEqual(true_branch.device_map, chosen.device_map)
    self.assertEqual(true_branch.logical_device, chosen.logical_device)
    self.assertLen(chosen.values, 1)
    self.assertAllEqual(chosen.values[0], "a")
def _make_tensor_into_per_replica(input_tensor):
  """Converts a single tensor into a PerReplica object."""
  if isinstance(input_tensor, (tuple, list)):
    raise ValueError("Cannot convert `input_tensor` to a `PerReplica` object, "
                     "got %r but expected a object that is not a tuple or list."
                     % (input_tensor,))
  if isinstance(input_tensor, value_lib.PerReplica):
    return input_tensor

  try:
    device = input_tensor.device
  except AttributeError:
    raise ValueError("Cannot convert `input_tensor` to a `PerReplica` object "
                     "because it doesn't have device set.")

  device_map = value_lib.SingleDeviceMap(device)
  return value_lib.PerReplica(device_map, (input_tensor,))
예제 #12
0
    def test_supports_distributed_variables(self):
        """Module tracks mirrored, TPU-mirrored and aggregating variables."""
        single_map = distributed_values.SingleDeviceMap("/CPU:0")
        mirrored = distributed_values.MirroredVariable(
            None, single_map, [variables.Variable(1.)],
            variables.VariableAggregation.SUM)
        tpu_mirrored = distributed_values.TPUMirroredVariable(
            strategy=None,
            device_map=single_map,
            values=[variables.Variable(42.)],
            aggregation=None)
        aggregating = distributed_values.AggregatingVariable(
            strategy=None, v=variables.Variable(1.), aggregation=None)

        container = module.Module()
        container.a = mirrored
        container.b = tpu_mirrored
        container.c = aggregating
        # All three distributed-variable flavors appear in `.variables`.
        self.assertEqual(container.variables,
                         (mirrored, tpu_mirrored, aggregating))
예제 #13
0
 def testIsGraphTensor(self):
   """_is_graph_tensor is True exactly when not executing eagerly."""
   replica_value = values.PerReplica(values.SingleDeviceMap("CPU"),
                                     (constant_op.constant(1.),))
   expected = not context.executing_eagerly()
   self.assertEqual(replica_value._is_graph_tensor, expected)
예제 #14
0
 def testShapeInvariantToComponents(self):
   """Default shape invariant is each component's own static shape."""
   first = constant_op.constant(1.)
   second = constant_op.constant(2.)
   replica_value = values.PerReplica(
       values.SingleDeviceMap("CPU"), (first, second))
   self.assertEqual(replica_value._shape_invariant_to_components(),
                    (first.shape, second.shape))