def testFunctionWithResourcesOnDifferentDevices(self):
    if not test_util.is_gpu_available():
      self.skipTest("No GPUs available.")

    with ops.device("/cpu:0"):
      v_cpu_zero = resource_variable_ops.ResourceVariable(
          [0.0, 1.0, 2.0], name="v_cpu_zero")

    with ops.device("/cpu:1"):
      v_cpu_one = resource_variable_ops.ResourceVariable(
          [0.0, 1.0, 2.0], name="v_cpu_one")

    with ops.device("/gpu:0"):
      v_gpu = resource_variable_ops.ResourceVariable(
          [0.0, 1.0, 2.0], name="v_gpu")

    def sum_gather():
      cpu_result = math_ops.reduce_sum(array_ops.gather(v_cpu_zero, [1, 2]))
      also_cpu_result = math_ops.reduce_sum(array_ops.gather(v_cpu_one, [1, 2]))
      gpu_result = math_ops.reduce_sum(array_ops.gather(v_gpu, [1, 2]))
      return cpu_result, also_cpu_result, gpu_result

    defined = function.Defun()(sum_gather)
    with self.test_session(
        config=config_pb2.ConfigProto(
            allow_soft_placement=False,
            log_device_placement=True,
            device_count={"CPU": 2})) as sess:
      self.evaluate(variables.global_variables_initializer())
      expected = self.evaluate(sum_gather())
      result = sess.run(
          functional_ops.partitioned_call(
              args=defined.captured_inputs, f=defined))
      self.assertAllEqual(expected, result)
  def testShardsRunOnRequestedDevices(self):
    config = config_pb2.ConfigProto(device_count={"CPU": 4})

    @function.Defun()
    def Body():
      # Serialize DT_RESOURCE handles as DT_STRINGs, which encode the device on
      # which the resource was created, so that we can verify that ops were
      # actually run on the requested devices.
      #
      # TODO(akshayka): Provide a cleaner, more idiomatic API for obtaining the
      # name of the device on which a resource lives / for determining the
      # device on which an op ran.
      handles = []
      for device_name in ("/cpu:0", "/cpu:1", "/cpu:2"):
        with ops.device(device_name):
          handles.append(
              iterator_ops.Iterator.from_structure(
                  (dtypes.float32,)).string_handle())
      return tuple(handles)

    with self.test_session(config=config, use_gpu=True) as sess:
      outputs = sess.run(functional_ops.partitioned_call(args=[], f=Body))
    # Each serialized handle should name the device it was created on.
    self.assertIn(compat.as_bytes("CPU:0"), outputs[0])
    self.assertIn(compat.as_bytes("CPU:1"), outputs[1])
    self.assertIn(compat.as_bytes("CPU:2"), outputs[2])
  # Example no. 3 (scraper artifact "Exemplo n.º 3 / 0" — commented out to keep the file parseable)
  def testShardsRunOnRequestedDevices(self):
    config = config_pb2.ConfigProto(device_count={"CPU": 4})

    @function.Defun()
    def Body():
      # Serialize DT_RESOURCE handles as DT_STRINGs, which encode the device on
      # which the resource was created, so that we can verify that ops were
      # actually run on the requested devices.
      #
      # TODO(akshayka): Provide a cleaner, more idiomatic API for obtaining the
      # name of the device on which a resource lives / for determining the
      # device on which an op ran.
      def handle_on(device_name):
        # Helper: build an iterator on `device_name` and serialize its handle.
        with ops.device(device_name):
          return iterator_ops.Iterator.from_structure(
              (dtypes.float32,)).string_handle()

      return handle_on("/cpu:0"), handle_on("/cpu:1"), handle_on("/cpu:2")

    with self.test_session(config=config, use_gpu=True) as sess:
      outputs = sess.run(functional_ops.partitioned_call(args=[], f=Body))
    # The serialized handles should each mention their creation device.
    self.assertIn(compat.as_bytes("CPU:0"), outputs[0])
    self.assertIn(compat.as_bytes("CPU:1"), outputs[1])
    self.assertIn(compat.as_bytes("CPU:2"), outputs[2])
  # Example no. 4 (scraper artifact "Exemplo n.º 4 / 0" — commented out to keep the file parseable)
  def testFunctionWithResourcesOnDifferentDevices(self):
    if not test_util.is_gpu_available():
      self.skipTest("No GPUs available.")

    # One resource variable per device; the function below touches all three.
    with ops.device("/cpu:0"):
      v_cpu_zero = resource_variable_ops.ResourceVariable(
          [0.0, 1.0, 2.0], name="v_cpu_zero")
    with ops.device("/cpu:1"):
      v_cpu_one = resource_variable_ops.ResourceVariable(
          [0.0, 1.0, 2.0], name="v_cpu_one")
    with ops.device("/gpu:0"):
      v_gpu = resource_variable_ops.ResourceVariable(
          [0.0, 1.0, 2.0], name="v_gpu")

    def sum_gather():
      # A scalar reduction of a two-element gather from each variable.
      return tuple(
          math_ops.reduce_sum(array_ops.gather(v, [1, 2]))
          for v in (v_cpu_zero, v_cpu_one, v_gpu))

    defined = function.Defun()(sum_gather)
    session_config = config_pb2.ConfigProto(
        allow_soft_placement=False,
        log_device_placement=True,
        device_count={"CPU": 2})
    with self.test_session(config=session_config) as sess:
      sess.run(variables.global_variables_initializer())
      # Compare the partitioned call against a direct run of the function.
      expected = sess.run(sum_gather())
      result = sess.run(
          functional_ops.partitioned_call(
              args=defined.captured_inputs, f=defined))
      self.assertAllEqual(expected, result)
  def testExecutorTypeAttrExecutorNotFound(self):

    @function.Defun(dtypes.int32)
    def AddFive(x):
      return x + 5

    call_op = functional_ops.partitioned_call(
        args=[constant_op.constant([1, 2, 3], dtype=dtypes.int32)],
        f=AddFive,
        executor_type="NON_EXISTENT_EXECUTOR")
    # Requesting an unknown executor must raise NotFoundError naming it.
    with self.assertRaisesRegex(errors.NotFoundError, "NON_EXISTENT_EXECUTOR"):
      self.evaluate(call_op)
  def testExecutorTypeAttrExecutorNotFound(self):

    @function.Defun(dtypes.int32)
    def AddFive(x):
      return x + 5

    op = functional_ops.partitioned_call(
        args=[constant_op.constant([1, 2, 3], dtype=dtypes.int32)],
        f=AddFive,
        executor_type="NON_EXISTENT_EXECUTOR")
    # Use assertRaisesRegex: assertRaisesRegexp is a deprecated alias (removed
    # in Python 3.12), and the other copy of this test already uses the
    # modern name.
    with self.assertRaisesRegex(errors.NotFoundError,
                                "NON_EXISTENT_EXECUTOR"):
      self.evaluate(op)
  def testBasicNoDeviceAnnotations(self):

    @function.Defun(*[dtypes.float32] * 2)
    def Body(x, y):
      # (x + x) + (y + y): with x = 1, y = 2 the result is 6.
      doubled_x = x + x
      doubled_y = y + y
      return doubled_x + doubled_y

    inputs = [constant_op.constant(1.), constant_op.constant(2.)]
    output, = self.evaluate(
        functional_ops.partitioned_call(args=inputs, f=Body))
    self.assertEqual(output, 6.)
  # Example no. 8 (scraper artifact "Exemplo n.º 8 / 0" — commented out to keep the file parseable)
  def testBasicNoDeviceAnnotations(self):

    @function.Defun(*[dtypes.float32] * 2)
    def Body(x, y):
      # Doubles each input, then sums: 2*1 + 2*2 = 6 below.
      a = x + x
      b = y + y
      return a + b

    call = functional_ops.partitioned_call(
        args=[constant_op.constant(1.),
              constant_op.constant(2.)], f=Body)
    output, = self.evaluate(call)
    self.assertEqual(output, 6.)
  def testAssignAddResourceVariable(self):
    # The variable starts at 1.0; the captured function bumps it by 1.0.
    v = resource_variable_ops.ResourceVariable(1.0)

    @function.Defun()
    def AssignAdd():
      v.assign_add(1.0)

    call_op = functional_ops.partitioned_call(
        args=AssignAdd.captured_inputs, f=AssignAdd)
    self.evaluate(variables.global_variables_initializer())
    self.evaluate(call_op)
    # The side effect of the call should be visible through read_value.
    self.assertEqual(self.evaluate(v.read_value()), 2.0)
  # Example no. 10 (scraper artifact "Exemplo n.º 10 / 0" — commented out to keep the file parseable)
  def testAssignAddResourceVariable(self):
    # Mutating a captured resource variable through a partitioned call.
    v = resource_variable_ops.ResourceVariable(1.0)

    @function.Defun()
    def AssignAdd():
      v.assign_add(1.0)

    increment_op = functional_ops.partitioned_call(
        args=AssignAdd.captured_inputs, f=AssignAdd)
    self.evaluate(variables.global_variables_initializer())
    self.evaluate(increment_op)
    value = self.evaluate(v.read_value())
    # 1.0 initial + 1.0 from the call.
    self.assertEqual(value, 2.0)
  # Example no. 11 (scraper artifact "Exemplo n.º 11 / 0" — commented out to keep the file parseable)
  def testBasicMultiDeviceGPU(self):
    # Skip (rather than silently pass via `return`) when no GPU is present, so
    # the test runner reports it as skipped; this matches the pattern used by
    # testFunctionWithResourcesOnDifferentDevices.
    if not test_util.is_gpu_available():
      self.skipTest("No GPUs available.")

    @function.Defun(*[dtypes.float32] * 2)
    def Body(x, y):
      # Compute the partial sums on the GPU, combine them on the CPU.
      with ops.device("/gpu:0"):
        a = x + x
        b = y + y
      with ops.device("/cpu:0"):
        c = a + b
        return c

    # With x = 1 and y = 2: (1+1) + (2+2) = 6.
    output, = self.evaluate(
        functional_ops.partitioned_call(
            args=[constant_op.constant(1.),
                  constant_op.constant(2.)], f=Body))
    self.assertEqual(output, 6.)
  # Example no. 12 (scraper artifact "Exemplo n.º 12 / 0" — commented out to keep the file parseable)
  def testBasicMultiDeviceGPU(self):
    # A bare `return` here would make the test count as passed on CPU-only
    # machines; skipTest surfaces the missing-GPU condition honestly.
    if not test_util.is_gpu_available():
      self.skipTest("No GPUs available.")

    @function.Defun(*[dtypes.float32] * 2)
    def Body(x, y):
      # Partial sums on the GPU, final combine on the CPU.
      with ops.device("/gpu:0"):
        a = x + x
        b = y + y
      with ops.device("/cpu:0"):
        c = a + b
        return c

    # (1+1) + (2+2) = 6 for the constants below.
    output, = self.evaluate(
        functional_ops.partitioned_call(
            args=[constant_op.constant(1.),
                  constant_op.constant(2.)], f=Body))
    self.assertEqual(output, 6.)
  # Example no. 13 (scraper artifact "Exemplo n.º 13 / 0" — commented out to keep the file parseable)
  def testBasicMultiDevice(self):
    config = config_pb2.ConfigProto(device_count={"CPU": 3})

    @function.Defun(*[dtypes.float32] * 2)
    def Body(x, y):
      # Spread the arithmetic over three CPUs. With x = 1, y = 2:
      # doubled = 2, shifted = 4, combined = 6, and the sum is 12.
      with ops.device("/cpu:0"):
        doubled = x + x
      with ops.device("/cpu:1"):
        shifted = doubled + y
      with ops.device("/cpu:2"):
        combined = doubled + shifted
      return doubled + shifted + combined

    with self.test_session(config=config):
      output, = functional_ops.partitioned_call(
          args=[constant_op.constant(1.),
                constant_op.constant(2.)], f=Body)
      self.assertEqual(output.eval(), 12.)
  # Example no. 14 (scraper artifact "Exemplo n.º 14 / 0" — commented out to keep the file parseable)
  def testBasicMultiDevice(self):
    config = config_pb2.ConfigProto(device_count={"CPU": 3})

    @function.Defun(*[dtypes.float32] * 2)
    def Body(x, y):
      # Three ops pinned to three distinct CPU devices; with x = 1 and y = 2
      # the intermediates are a = 2, b = 4, c = 6, so the total is 12.
      with ops.device("/cpu:0"):
        a = x + x
      with ops.device("/cpu:1"):
        b = a + y
      with ops.device("/cpu:2"):
        c = a + b
      return a + b + c

    with self.test_session(config=config):
      result, = functional_ops.partitioned_call(
          args=[constant_op.constant(1.), constant_op.constant(2.)], f=Body)
      self.assertEqual(result.eval(), 12.)
  # Example no. 15 (scraper artifact "Exemplo n.º 15 / 0" — commented out to keep the file parseable)
    def testFunctionWithResourcesOnDifferentDevices(self):
        # TODO(akshayka): Remove the `skipTest` once we can whitelist ops as
        # safe to be invoked with resources on different devices.
        self.skipTest("The Placer disallows ops with resource inputs "
                      "on different devices.")

        def make_variable(device_name, var_name):
            # Helper: a three-element resource variable on `device_name`.
            with ops.device(device_name):
                return resource_variable_ops.ResourceVariable(
                    [0.0, 1.0, 2.0], name=var_name)

        v_cpu_zero = make_variable("/cpu:0", "v_cpu_zero")
        v_cpu_one = make_variable("/cpu:1", "v_cpu_one")
        v_gpu = make_variable("/gpu:0", "v_gpu")

        def sum_gather():
            # Reduce a two-element gather from each variable to a scalar.
            return tuple(
                math_ops.reduce_sum(array_ops.gather(v, [1, 2]))
                for v in (v_cpu_zero, v_cpu_one, v_gpu))

        defined = function.Defun()(sum_gather)
        session_config = config_pb2.ConfigProto(
            allow_soft_placement=False,
            log_device_placement=True,
            device_count={"CPU": 2})
        with self.test_session(config=session_config) as sess:
            sess.run(variables.global_variables_initializer())
            # The partitioned call should agree with a direct evaluation.
            expected = sess.run(sum_gather())
            result = sess.run(
                functional_ops.partitioned_call(args=defined.captured_inputs,
                                                f=defined))
            self.assertAllEqual(expected, result)