def testSoftPlacement(self):
  """Toggling soft placement changes whether a GPU-pinned function op falls back to CPU."""
  self.assertEqual(config.get_soft_device_placement(), True)

  @def_function.function
  def mod():
    # Pin to GPU; mod has no GPU kernel, so placement depends on the
    # soft-placement setting at trace/run time.
    with ops.device('/device:GPU:0'):
      lhs = constant_op.constant(1.0)
      rhs = constant_op.constant(1.0)
      return math_ops.mod(lhs, rhs)

  # Since soft placement is enabled, the mod operation should work with CPU
  mod()

  config.set_soft_device_placement(False)
  self.assertEqual(config.get_soft_device_placement(), False)
  self.assertEqual(config.get_soft_device_placement(),
                   context.context().soft_device_placement)

  # Since soft placement is disabled, the mod operation should fail on GPU
  with self.assertRaises(errors.InvalidArgumentError):
    mod()

  config.set_soft_device_placement(True)
  self.assertEqual(config.get_soft_device_placement(), True)
  self.assertEqual(config.get_soft_device_placement(),
                   context.context().soft_device_placement)

  # Since soft placement is re-enabled, the mod operation should work with CPU
  mod()
def setUp(self):
  """Resets the eager context and forces hard (non-soft) device placement."""
  super(HardDevicePlacementTest, self).setUp()
  # Fresh context so per-test configuration below takes effect.
  context._reset_context()
  config.set_soft_device_placement(enabled=False)
  context.context().log_device_placement = True
  # Sanity-check that both views of the setting agree.
  self.assertEqual(config.get_soft_device_placement(), False)
  self.assertEqual(context.context().soft_device_placement, False)
def test_spmd_with_summary(self):
  """Runs a summary-writing step function under an SPMD TPU strategy.

  Verifies that a replicated `tf.summary` write inside `strategy.run` works
  with SPMD enabled and that the step counter advances on every replica.
  """
  original_device_placement = config.get_soft_device_placement()
  config.set_soft_device_placement(True)
  # Bug fix: restore the global soft-placement setting even when the test
  # body raises; the original only restored it on the success path, leaking
  # the changed setting into subsequent tests in the same process.
  try:
    strategy, _ = get_tpu_strategy(enable_spmd=True)
    summary_dir = self.get_temp_dir()
    writer = summary_ops.create_file_writer_v2(summary_dir)

    with strategy.scope():
      step = variables.Variable(0, dtype=dtypes.int64)

    @def_function.function
    def run():
      with writer.as_default():
        summary_ops.scalar("result", step * 2, step=step)
        step.assign_add(1)

    for _ in range(10):
      strategy.run(run, args=())

    # Each replica's component variable must have counted all 10 steps.
    for val in step.values:
      for var in val.variables:
        self.assertAllEqual(10, var)
  finally:
    config.set_soft_device_placement(original_device_placement)
def setUp(self):
  """Re-creates the eager context with soft device placement disabled."""
  super(HardDevicePlacementTest, self).setUp()
  # Drop the existing context and re-enable eager execution so the
  # configuration below applies to a fresh context.
  context._context = None
  ops.enable_eager_execution_internal()
  config.set_soft_device_placement(enabled=False)
  context.context().log_device_placement = True
  # Both the public config API and the context must report hard placement.
  self.assertEqual(config.get_soft_device_placement(), False)
  self.assertEqual(context.context().soft_device_placement, False)
def testEnableSoftPlacement(self):
  """Soft placement toggles freely before context init, then becomes fixed."""
  self.assertEqual(config.get_soft_device_placement(), False)

  config.set_soft_device_placement(True)
  self.assertEqual(config.get_soft_device_placement(), True)
  self.assertEqual(
      config.get_soft_device_placement(),
      context.context().soft_device_placement)

  config.set_soft_device_placement(False)
  self.assertEqual(config.get_soft_device_placement(), False)
  self.assertEqual(
      config.get_soft_device_placement(),
      context.context().soft_device_placement)

  # Running any op initializes the context; after that the setting is
  # locked and attempts to change it must raise.
  constant_op.constant(1)

  with self.assertRaises(RuntimeError):
    config.set_soft_device_placement(True)
  with self.assertRaises(RuntimeError):
    config.set_soft_device_placement(False)
def testSoftPlacement(self):
  """GPU->CPU fallback for `mod` in both pure-eager and function modes."""
  # Default differs by mode: soft placement is on for eager, off for graph.
  if context.executing_eagerly():
    self.assertTrue(config.get_soft_device_placement())
  else:
    self.assertFalse(config.get_soft_device_placement())

  def mod():
    with ops.device('/device:GPU:0'):
      x = constant_op.constant(1.0)
      y = constant_op.constant(1.0)
      return math_ops.mod(x, y)

  config.set_soft_device_placement(True)
  self.assertEqual(config.get_soft_device_placement(), True)
  self.assertEqual(config.get_soft_device_placement(),
                   context.context().soft_device_placement)

  # Since soft placement is enabled, the mod operation should fallback to CPU
  # with pure eager execution as well as functions
  mod()
  def_function.function(mod)()

  config.set_soft_device_placement(False)
  self.assertEqual(config.get_soft_device_placement(), False)
  self.assertEqual(config.get_soft_device_placement(),
                   context.context().soft_device_placement)

  # Since soft placement is disabled, the mod operation should fail on GPU
  # with pure eager execution as well as functions
  with self.assertRaises(errors.InvalidArgumentError):
    mod()
  with self.assertRaises(errors.InvalidArgumentError):
    def_function.function(mod)()
def testSoftPlacement(self):
  """Soft placement governs fallback for an attr-carrying test op on GPU."""
  if context.executing_eagerly():
    self.assertTrue(config.get_soft_device_placement())
  else:
    self.assertFalse(config.get_soft_device_placement())

  def test_attr():
    # test_attr has no GPU kernel, so this placement only succeeds when
    # soft placement lets it fall back to CPU.
    with ops.device('/device:GPU:0'):
      return test_ops.test_attr(T=dtypes.float32, name='test_attr')

  config.set_soft_device_placement(True)
  self.assertEqual(config.get_soft_device_placement(), True)
  self.assertEqual(config.get_soft_device_placement(),
                   context.context().soft_device_placement)

  # Since soft placement is enabled, the test_attr operation should fallback
  # to CPU with pure eager execution as well as functions
  test_attr()
  def_function.function(test_attr)()

  config.set_soft_device_placement(False)
  self.assertEqual(config.get_soft_device_placement(), False)
  self.assertEqual(config.get_soft_device_placement(),
                   context.context().soft_device_placement)

  # Since soft placement is disabled, the test_attr operation should fail on
  # GPU with pure eager execution as well as functions
  with self.assertRaises(errors.InvalidArgumentError):
    test_attr()
  with self.assertRaises(errors.InvalidArgumentError):
    def_function.function(test_attr)()
def testSoftPlacement(self):
  """A traced function with a GPU-pinned `mod` obeys the soft-placement flag."""
  if context.executing_eagerly():
    self.assertTrue(config.get_soft_device_placement())
  else:
    self.assertFalse(config.get_soft_device_placement())

  @def_function.function
  def mod():
    with ops.device('/device:GPU:0'):
      numerator = constant_op.constant(1.0)
      denominator = constant_op.constant(1.0)
      return math_ops.mod(numerator, denominator)

  config.set_soft_device_placement(True)
  self.assertEqual(config.get_soft_device_placement(), True)
  self.assertEqual(
      config.get_soft_device_placement(),
      context.context().soft_device_placement)

  # Since soft placement is enabled, the mod operation should work with CPU
  mod()

  config.set_soft_device_placement(False)
  self.assertEqual(config.get_soft_device_placement(), False)
  self.assertEqual(
      config.get_soft_device_placement(),
      context.context().soft_device_placement)

  # Since soft placement is disabled, the mod operation should fail on GPU
  with self.assertRaises(errors.InvalidArgumentError):
    mod()
def setUp(self):
  """Resets the context, disables soft placement, and creates 2 virtual CPUs."""
  super(HardDevicePlacementTest, self).setUp()
  # Start from a clean context so the settings below stick.
  context._reset_context()
  config.set_soft_device_placement(enabled=False)
  context.context().log_device_placement = True
  cpus = context.context().list_physical_devices('CPU')
  # Set 2 virtual CPUs
  context.context().set_logical_device_configuration(
      cpus[0],
      [
          context.LogicalDeviceConfiguration(),
          context.LogicalDeviceConfiguration(),
      ])
  self.assertEqual(config.get_soft_device_placement(), False)
  self.assertEqual(context.context().soft_device_placement, False)
def _recreate_variable(self, proto):
  """Recreates an uninitialized variable described by a SavedVariable proto.

  Returns a `(variable, setattr)` pair: the restored-but-uninitialized
  variable plus the setter used to attach it to its parent object.
  """
  name = proto.name or None
  dbg_name = name if name is not None else "<variable loaded from saved model>"
  synchronization, aggregation, trainable = (
      variables.validate_synchronization_aggregation_trainable(
          proto.synchronization, proto.aggregation, proto.trainable,
          name=dbg_name))

  def uninitialized_variable_creator(next_creator, **kwargs):
    """A variable creator that creates uninitialized variables."""
    del next_creator
    return resource_variable_ops.UninitializedVariable(**kwargs)

  def _build_variable():
    # Single creation path shared by the device-pinned and default branches.
    return variables.Variable(
        shape=proto.shape,
        dtype=proto.dtype,
        name=name,
        trainable=trainable,
        synchronization=synchronization,
        aggregation=aggregation), setattr

  # Create a variable_creator_scope that creates uninitialized variables with
  # a lower priority such that a potential distributed variable_creator_scope
  # can take precedence.
  with ops.get_default_graph()._variable_creator_scope(  # pylint: disable=protected-access
      uninitialized_variable_creator, priority=50):
    saved_device = proto.device
    load_with_device = (
        self._save_options.experimental_variable_policy
        ._save_variable_devices() and config.get_soft_device_placement() and
        saved_device)
    if load_with_device:
      with ops.device(saved_device):
        return _build_variable()
    return _build_variable()