def test_graph_mode_isolation(self):
  with context.graph_mode():
    # Even if we've (accidentally) called IsolateTest in Graph mode, it
    # should provide Eager isolation.
    with test_util.IsolateTest():
      with context.eager_mode():
        first_container_variable = resource_variable_ops.ResourceVariable(
            name="first_container_variable", initial_value=1)
    with context.eager_mode():
      with self.assertRaises(ValueError):
        first_container_variable.read_value()
def compute_output_shape(self, input_shape):
  if self._output_shape is None:
    # Make use of existing autocomputation but provide Lambda-specific
    # error message. This is always safe to run even when the outer context
    # is Graph mode because Lambda layers don't have side effects such as
    # `add_loss`.
    with context.eager_mode():
      try:
        return super(Lambda, self).compute_output_shape(input_shape)
      except NotImplementedError:
        raise NotImplementedError(
            'We could not automatically infer the shape of the Lambda\'s '
            'output. Please specify `output_shape` for this Lambda.')

  if callable(self._output_shape):
    output_shapes = self._output_shape(input_shape)
    return tf_utils.convert_shapes(output_shapes, to_tuples=False)

  # Output shapes are passed directly and don't include batch dimension.
  input_tensor_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
  batch_size = nest.flatten(input_tensor_shape)[0][0] if input_shape else None

  def _add_batch(shape):
    return tensor_shape.TensorShape([batch_size] + shape.as_list())

  output_shapes = tf_utils.convert_shapes(self._output_shape, to_tuples=False)
  return nest.map_structure(_add_batch, output_shapes)
def decorated(self, **kwargs):
  """A wrapped test method that treats some arguments in a special way."""
  mode = kwargs.pop("mode", "graph")
  distribution = kwargs.get("distribution", None)
  required_tpu = kwargs.pop("required_tpu", False)
  required_gpus = kwargs.pop("required_gpus", None)

  if distribution:
    assert required_gpus is None, (
        "Do not use `required_gpus` and `distribution` together.")
    assert required_tpu is False, (
        "Do not use `required_tpu` and `distribution` together.")
    required_gpus = distribution.required_gpus
    required_tpu = distribution.required_tpu

  if required_tpu and not TPU_TEST:
    self.skipTest("Test requires a TPU, but it's not available.")
  if not required_tpu and TPU_TEST:
    self.skipTest("Test that doesn't require a TPU.")

  if not required_gpus:
    if GPU_TEST:
      self.skipTest("Test that doesn't require GPUs.")
  elif context.num_gpus() < required_gpus:
    self.skipTest(
        "{} GPUs are not available for this test. {} GPUs are available".
        format(required_gpus, context.num_gpus()))

  # At this point, `kwargs` doesn't have `required_gpus` or `required_tpu`
  # that the user might have specified; `mode` was popped above as well, so
  # the test receives only the arguments it explicitly declares.
  requested_arguments = tf_inspect.getfullargspec(test_method).args
  missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
      set(requested_arguments + ["mode"]))
  if missing_arguments:
    raise ValueError("The test is missing arguments {} .".format(
        missing_arguments))

  kwargs_to_pass = {}
  for arg in requested_arguments:
    if arg == "self":
      kwargs_to_pass[arg] = self
    else:
      kwargs_to_pass[arg] = kwargs[arg]

  if mode == "eager":
    with ops.Graph().as_default(), context.eager_mode():
      if distribution:
        kwargs_to_pass["distribution"] = distribution.strategy
      test_method(**kwargs_to_pass)
  elif mode == "graph":
    with ops.Graph().as_default(), context.graph_mode():
      if distribution:
        kwargs_to_pass["distribution"] = distribution.strategy
      test_method(**kwargs_to_pass)
  else:
    raise ValueError(
        "'mode' has to be either 'eager' or 'graph' and not {}".format(mode))
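# A usage sketch, not from the original source: assuming the enclosing module
# exposes `generate`/`combine` helpers (as in TensorFlow's distribute
# combinations) that call `decorated` with the keyword arguments it pops
# above, a parameterized test might look like this. The decorator names and
# argument values are illustrative assumptions.
#
#   @combinations.generate(combinations.combine(
#       mode=["graph", "eager"], required_gpus=[0, 1]))
#   def testWithAndWithoutGpus(self):
#     self.assertTrue(True)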
def testDatasetEagerIteration(self, execution_mode):
  with context.eager_mode(), context.execution_mode(execution_mode):
    val = 0
    dataset = dataset_ops.Dataset.range(10)
    for foo in dataset:
      self.assertEqual(val, foo.numpy())
      val += 1
def test_unique_name_raise_error_in_eager(self):
  with context.eager_mode():
    # NOTE: "exeuction" [sic] matches the misspelling in the library's error
    # message; correcting it here would break the regex match.
    with self.assertRaisesRegexp(
        ValueError,
        "unique_name_ cannot be used when eager exeuction is enabled."):
      template.make_template(
          "_", variable_scoped_function, unique_name_="s1")
def testAnonymousVarsInInit(self):

  class Model(training.Model):

    def __init__(self):
      super(Model, self).__init__()
      self.w = resource_variable_ops.ResourceVariable(0.0)
      self.b = resource_variable_ops.ResourceVariable(0.0)
      self.vars = [self.w, self.b]

    def call(self, x):
      return x * self.w + self.b

  with context.eager_mode():
    model = Model()
    optimizer = adam.AdamOptimizer(learning_rate=0.05)
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    checkpoint = util.Checkpoint(model=model, optimizer=optimizer)
    for _ in range(2):
      checkpoint.save(checkpoint_prefix)
      with backprop.GradientTape() as tape:
        loss = (constant_op.constant(1.)
                - model(constant_op.constant(1.))) ** 2
      grad = tape.gradient(loss, model.vars)
      optimizer.apply_gradients(
          [(g, v) for g, v in zip(grad, model.vars)])
@contextlib.contextmanager
def _enter_graph(g):
  if context.executing_eagerly():
    with g.as_default(), context.eager_mode():
      yield
  else:
    with g.as_default():
      yield
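# A minimal usage sketch, not from the original source: `_enter_graph` lets a
# thread build ops into a captured graph without dropping out of eager mode.
# `ops` is assumed to be tensorflow.python.framework.ops, as elsewhere here.
my_graph = ops.Graph()  # hypothetical graph captured from the parent thread
with _enter_graph(my_graph):
  pass  # Ops created here land in `my_graph`; eager mode stays active.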
def test_callable_evaluate(self):

  def model():
    return resource_variable_ops.ResourceVariable(
        name="same_name", initial_value=1) + 1

  with context.eager_mode():
    self.assertEqual(2, self.evaluate(model))
def test_alpha_share_layer(self):
  """Test invoking AlphaShareLayer in eager mode."""
  with context.eager_mode():
    with tfe.IsolateTest():
      batch_size = 10
      length = 6
      input1 = np.random.rand(batch_size, length).astype(np.float32)
      input2 = np.random.rand(batch_size, length).astype(np.float32)
      layer = layers.AlphaShareLayer()
      result = layer(input1, input2)
      assert input1.shape == result[0].shape
      assert input2.shape == result[1].shape

      # Creating a second layer should produce different results, since it
      # has different random weights.
      layer2 = layers.AlphaShareLayer()
      result2 = layer2(input1, input2)
      assert not np.allclose(result[0], result2[0])
      assert not np.allclose(result[1], result2[1])

      # But evaluating the first layer again should produce the same result
      # as before.
      result3 = layer(input1, input2)
      assert np.allclose(result[0], result3[0])
      assert np.allclose(result[1], result3[1])
def test_flatten(self):
  """Test invoking Flatten in eager mode."""
  with context.eager_mode():
    with tfe.IsolateTest():
      input = np.random.rand(5, 10, 4).astype(np.float32)
      result = layers.Flatten()(input)
      assert result.shape == (5, 40)
def test_max_pool_3d(self):
  """Test invoking MaxPool3D in eager mode."""
  with context.eager_mode():
    with tfe.IsolateTest():
      input = np.random.rand(2, 4, 6, 8, 2).astype(np.float32)
      result = layers.MaxPool3D()(input)
      assert result.shape == (2, 2, 3, 4, 2)
def test_vina_free_energy(self):
  """Test invoking VinaFreeEnergy in eager mode."""
  with context.eager_mode():
    with tfe.IsolateTest():
      n_atoms = 5
      m_nbrs = 1
      ndim = 3
      nbr_cutoff = 1
      start = 0
      stop = 4
      X = np.random.rand(n_atoms, ndim).astype(np.float32)
      Z = np.random.randint(0, 2, (n_atoms)).astype(np.float32)
      layer = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff,
                                    start, stop)
      result = layer(X, Z)
      assert len(layer.variables) == 6
      assert result.shape == tuple()

      # Creating a second layer should produce different results, since it
      # has different random weights.
      layer2 = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff,
                                     start, stop)
      result2 = layer2(X, Z)
      assert not np.allclose(result, result2)

      # But evaluating the first layer again should produce the same result
      # as before.
      result3 = layer(X, Z)
      assert np.allclose(result, result3)
def test_conv_3d_transpose(self):
  """Test invoking Conv3DTranspose in eager mode."""
  with context.eager_mode():
    with tfe.IsolateTest():
      length = 4
      width = 5
      depth = 6
      in_channels = 2
      filters = 3
      kernel_size = 2
      stride = 2
      batch_size = 10
      input = np.random.rand(batch_size, length, width, depth,
                             in_channels).astype(np.float32)
      layer = layers.Conv3DTranspose(
          filters, kernel_size=kernel_size, stride=stride)
      result = layer(input)
      assert result.shape == (batch_size, length * stride, width * stride,
                              depth * stride, filters)
      assert len(layer.variables) == 2

      # Creating a second layer should produce different results, since it
      # has different random weights.
      layer2 = layers.Conv3DTranspose(
          filters, kernel_size=kernel_size, stride=stride)
      result2 = layer2(input)
      assert not np.allclose(result, result2)

      # But evaluating the first layer again should produce the same result
      # as before.
      result3 = layer(input)
      assert np.allclose(result, result3)
def test_max_pool_1d(self):
  """Test invoking MaxPool1D in eager mode."""
  with context.eager_mode():
    with tfe.IsolateTest():
      input = np.random.rand(4, 6, 8).astype(np.float32)
      result = layers.MaxPool1D(strides=2)(input)
      assert result.shape == (4, 3, 8)
def test_time_series_dense(self):
  """Test invoking TimeSeriesDense in eager mode."""
  with context.eager_mode():
    with tfe.IsolateTest():
      in_dim = 2
      out_dim = 3
      n_steps = 6
      batch_size = 10
      input = np.random.rand(batch_size, n_steps, in_dim).astype(np.float32)
      layer = layers.TimeSeriesDense(out_dim)
      result = layer(input)
      assert result.shape == (batch_size, n_steps, out_dim)
      assert len(layer.variables) == 2

      # Creating a second layer should produce different results, since it
      # has different random weights.
      layer2 = layers.TimeSeriesDense(out_dim)
      result2 = layer2(input)
      assert not np.allclose(result, result2)

      # But evaluating the first layer again should produce the same result
      # as before.
      result3 = layer(input)
      assert np.allclose(result, result3)
def test_constant(self):
  """Test invoking Constant in eager mode."""
  with context.eager_mode():
    with tfe.IsolateTest():
      value = np.random.rand(5, 4).astype(np.float32)
      result = layers.Constant(value)()
      assert np.array_equal(result, value)
def test_conv_1d(self):
  """Test invoking Conv1D in eager mode."""
  with context.eager_mode():
    with tfe.IsolateTest():
      width = 5
      in_channels = 2
      filters = 3
      kernel_size = 2
      batch_size = 10
      input = np.random.rand(batch_size, width, in_channels).astype(
          np.float32)
      layer = layers.Conv1D(filters, kernel_size)
      result = layer(input)
      self.assertEqual(result.shape[0], batch_size)
      self.assertEqual(result.shape[2], filters)
      assert len(layer.variables) == 2

      # Creating a second layer should produce different results, since it
      # has different random weights.
      layer2 = layers.Conv1D(filters, kernel_size)
      result2 = layer2(input)
      assert not np.allclose(result, result2)

      # But evaluating the first layer again should produce the same result
      # as before.
      result3 = layer(input)
      assert np.allclose(result, result3)
def test_squeeze(self):
  """Test invoking Squeeze in eager mode."""
  with context.eager_mode():
    with tfe.IsolateTest():
      input = np.random.rand(5, 1, 4).astype(np.float32)
      result = layers.Squeeze()(input)
      assert result.shape == (5, 4)
def testEagerIteratorAsync(self):
  with context.eager_mode(), context.execution_mode(context.ASYNC):
    val = 0
    dataset = dataset_ops.Dataset.range(10)
    for foo in dataset:
      self.assertEqual(val, foo.numpy())
      val += 1
def test_cast(self):
  """Test invoking Cast in eager mode."""
  with context.eager_mode():
    with tfe.IsolateTest():
      input = np.random.rand(5, 3)
      result = layers.Cast(dtype=tf.float32)(input)
      assert result.dtype == tf.float32
def _process_asset(trackable_asset, asset_info, resource_map):
  """Add `trackable_asset` to `asset_info` and `resource_map`."""
  original_variable = trackable_asset.asset_path
  with context.eager_mode():
    original_path = original_variable.numpy()
  path = builder_impl.get_asset_filename_to_add(
      asset_filepath=original_path,
      asset_filename_map=asset_info.asset_filename_map)
  # TODO(andresp): Instead of mapping 1-1 between trackable asset
  # and asset in the graph def consider deduping the assets that
  # point to the same file.
  asset_path_initializer = array_ops.placeholder(
      shape=original_variable.shape,
      dtype=dtypes.string,
      name="asset_path_initializer")
  asset_variable = resource_variable_ops.ResourceVariable(
      asset_path_initializer)

  asset_info.asset_filename_map[path] = original_path
  asset_def = meta_graph_pb2.AssetFileDef()
  asset_def.filename = path
  asset_def.tensor_info.name = asset_path_initializer.name
  asset_info.asset_defs.append(asset_def)
  asset_info.asset_initializers_by_resource[original_variable.handle] = (
      asset_variable.initializer)
  asset_info.asset_index[trackable_asset] = len(asset_info.asset_defs) - 1

  resource_map[original_variable.handle] = asset_variable.handle
def test_sparse_categorical_accuracy_float_eager(self):
  """Tests that floats passed in via Eager return results. See b/113504761."""
  with context.eager_mode():
    metric = metrics.sparse_categorical_accuracy
    y_true = np.arange(6, dtype=np.float32).reshape([6, 1])
    y_pred = np.arange(36).reshape([6, 6])
    self.assertAllEqual(metric(y_true, y_pred), [0., 0., 0., 0., 0., 1.])
def testAssignDifferentShapesEager(self):
  with context.eager_mode():
    with variable_scope.variable_scope("foo"):
      var = variable_scope.get_variable("x", shape=[1, 1],
                                        dtype=dtypes.float32)
      assign = var.assign(np.zeros(shape=[2, 2]))
      self.evaluate(assign)
def test_lstm(self):
  """Test invoking LSTM in eager mode."""
  with context.eager_mode():
    with tfe.IsolateTest():
      batch_size = 10
      n_hidden = 7
      in_channels = 4
      n_steps = 6
      input = np.random.rand(batch_size, n_steps, in_channels).astype(
          np.float32)
      layer = layers.LSTM(n_hidden, batch_size)
      result, state = layer(input)
      assert result.shape == (batch_size, n_steps, n_hidden)
      assert len(layer.variables) == 2

      # Creating a second layer should produce different results, since it
      # has different random weights.
      layer2 = layers.LSTM(n_hidden, batch_size)
      result2, state2 = layer2(input)
      assert not np.allclose(result, result2)

      # But evaluating the first layer again should produce the same result
      # as before.
      result3, state3 = layer(input)
      assert np.allclose(result, result3)

      # But if we specify a different starting state, that should produce a
      # different result.
      result4, state4 = layer(input, initial_state=state3)
      assert not np.allclose(result, result4)
def decorated(self, **kwargs):
  """Runs the decorated test method in both graph and eager modes."""
  with context.graph_mode():
    with self.test_session(graph, config, use_gpu, force_gpu):
      f(self, **kwargs)

  if reset_test:
    # This decorator runs the wrapped test twice.
    # Reset the test environment between runs.
    self.tearDown()
    self.setUp()

  def run_eager_mode(self, **kwargs):
    if force_gpu:
      gpu_name = gpu_device_name()
      if not gpu_name:
        gpu_name = "/device:GPU:0"
      with context.device(gpu_name):
        f(self, **kwargs)
    elif use_gpu:
      # TODO(xpan): Support softplacement and gpu by default when available.
      f(self, **kwargs)
    else:
      with context.device("/device:CPU:0"):
        f(self, **kwargs)

  if assert_no_eager_garbage:
    run_eager_mode = assert_no_new_tensors(
        assert_no_garbage_created(run_eager_mode))

  with context.eager_mode():
    with IsolateTest():
      run_eager_mode(self, **kwargs)
def testSlotsUniqueEager(self):
  with context.eager_mode():
    v1 = variables.Variable(1.)
    v2 = variables.Variable(1.)

    opt = rmsprop.RMSprop(1., momentum=0., centered=False)
    opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
    # There should be an iterations variable, plus one unique slot variable
    # each for v1 and v2.
    self.assertEqual(3, len(set(opt.variables())))
    self.assertEqual(
        self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))

    opt = rmsprop.RMSprop(learning_rate=1., momentum=0.2, centered=False)
    opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
    # There should be an iterations variable, plus two unique slot variables
    # each for v1 and v2.
    self.assertEqual(5, len(set(opt.variables())))
    self.assertEqual(
        self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))

    opt = rmsprop.RMSprop(learning_rate=1., momentum=0.2, centered=True)
    opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
    # There should be an iterations variable, plus three unique slot
    # variables each for v1 and v2.
    self.assertEqual(7, len(set(opt.variables())))
    self.assertEqual(
        self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))
def testIteratorResourceCleanup(self):
  filename = os.path.join(self.get_temp_dir(), "text.txt")
  with open(filename, "wt") as f:
    for i in range(3):
      f.write("%d\n" % (i,))
  with context.eager_mode():
    first_iterator = iter(readers.TextLineDataset(filename))
    self.assertEqual(b"0", next(first_iterator).numpy())
    second_iterator = iter(readers.TextLineDataset(filename))
    self.assertEqual(b"0", next(second_iterator).numpy())
    # Eager kernel caching is based on op attributes, which includes the
    # Dataset's output shape. Create a different kernel to test that they
    # don't create resources with the same names.
    different_kernel_iterator = iter(
        readers.TextLineDataset(filename).repeat().batch(16))
    self.assertEqual([16], next(different_kernel_iterator).shape)
    # Remove our references to the Python Iterator objects, which (assuming
    # no reference cycles) is enough to trigger DestroyResourceOp and close
    # the partially-read files.
    del first_iterator
    del second_iterator
    del different_kernel_iterator
    if not psutil_import_succeeded:
      self.skipTest(
          "psutil is required to check that we've closed our files.")
    open_files = psutil.Process().open_files()
    self.assertNotIn(filename, [open_file.path for open_file in open_files])
def decorated(self):
  """Runs the decorated test method in both graph and eager modes."""
  with context.graph_mode():
    with self.test_session(graph, config, use_gpu, force_gpu):
      f(self)

  def run_eager_mode():
    if force_gpu:
      gpu_name = gpu_device_name()
      if not gpu_name:
        gpu_name = "/device:GPU:0"
      with context.device(gpu_name):
        f(self)
    elif use_gpu:
      # TODO(xpan): Support softplacement and gpu by default when available.
      f(self)
    else:
      with context.device("/device:CPU:0"):
        f(self)

  with context.eager_mode():
    if graph is None:
      run_eager_mode()
    else:
      with graph.as_default():
        run_eager_mode()
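# A usage sketch, not from the original source: the two `decorated` variants
# above are inner functions of a decorator factory in the style of
# TensorFlow's test_util.run_in_graph_and_eager_modes, so a test written
# against them would look roughly like this (decorator name assumed).
#
#   @run_in_graph_and_eager_modes()
#   def testAdd(self):
#     self.assertAllEqual(2., self.evaluate(constant_op.constant(1.) + 1.))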
def test_transpose(self):
  """Test invoking Transpose in eager mode."""
  with context.eager_mode():
    with tfe.IsolateTest():
      input = np.random.rand(5, 10, 4).astype(np.float32)
      result = layers.Transpose((1, 2, 0))(input)
      assert result.shape == (10, 4, 5)
@classmethod
def from_saved_model(cls, saved_model_dir, signature_keys=None, tags=None):
  """Creates a TFLiteConverter object from a SavedModel directory.

  Args:
    saved_model_dir: SavedModel directory to convert.
    signature_keys: List of keys identifying SignatureDef containing inputs
      and outputs. Elements should not be duplicated. By default the
      `signatures` attribute of the MetaGraphDef is used. (default
      saved_model.signatures)
    tags: Set of tags identifying the MetaGraphDef within the SavedModel to
      analyze. All tags in the tag set must be present. (default
      set(SERVING))

  Returns:
    TFLiteConverter object.

  Raises:
    ValueError: Invalid signature keys.
  """
  # Ensures any graphs created in Eager mode are able to run. This is
  # required in order to create a tf.estimator.Exporter that exports a
  # TFLite model.
  with context.eager_mode():
    saved_model = _load(saved_model_dir, tags)
  if not signature_keys:
    signature_keys = saved_model.signatures

  funcs = []
  for key in signature_keys:
    if key not in saved_model.signatures:
      raise ValueError("Invalid signature key '{}' found. Valid keys are "
                       "'{}'.".format(key, ",".join(saved_model.signatures)))
    funcs.append(saved_model.signatures[key])

  return cls(funcs, saved_model)
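# A minimal usage sketch (hypothetical SavedModel path); `convert()` is the
# standard TFLiteConverter entry point for producing the flatbuffer model.
converter = TFLiteConverter.from_saved_model("/tmp/my_saved_model")
tflite_model = converter.convert()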
def testEagerBool(self):
  with context.eager_mode():
    v = resource_variable_ops.ResourceVariable(False, name="bool_test")
    self.assertAllEqual(bool(v), False)
def testBasicCallableParams(self):
  with context.eager_mode():
    self.doTestBasic(use_resource=True, use_callable_params=True)
def testEagerInitializedValue(self):
  with context.eager_mode():
    variable = resource_variable_ops.ResourceVariable(1.0, name="eager-init")
    self.assertAllEqual(variable.numpy(), 1.0)
    self.assertAllEqual(variable.initialized_value().numpy(), 1.0)
def get_stats(model_name):
    pipeline_config = ('saved_models/inference_models/' + model_name +
                       '/pipeline.config')
    model_dir = 'saved_models/inference_models/' + model_name + '/checkpoint/'

    # Load pipeline config and build a detection model.
    configs = config_util.get_configs_from_pipeline_file(pipeline_config)
    model_config = configs['model']
    detection_model = model_builder.build(
        model_config=model_config, is_training=False)

    # Restore checkpoint.
    ckpt = tf.compat.v2.train.Checkpoint(model=detection_model)
    ckpt.restore(os.path.join(model_dir, 'ckpt-0')).expect_partial()

    detect_fn = get_model_detection_function(detection_model)

    image_np = load_image_into_numpy_array("image.png")
    input_tensor = tf.convert_to_tensor(
        np.expand_dims(image_np, 0), dtype=tf.float32)

    # Number of parameters.
    variables = tf.train.list_variables(model_dir)
    total_parameters = 0
    for variable in variables:
        # shape is an array of tf.Dimension
        shape = variable[1]
        variable_parameters = 1
        for dim in shape:
            variable_parameters *= dim
        total_parameters += variable_parameters

    # Memory usage.
    with context.eager_mode():
        context.enable_run_metadata()
        detections, predictions_dict, shapes = detect_fn(input_tensor)
        opts = tf.compat.v1.profiler.ProfileOptionBuilder.time_and_memory()
        profiler = tf.compat.v1.profiler.Profiler()
        metadata = context.export_run_metadata()
        profiler.add_step(0, metadata)
        context.disable_run_metadata()
        tm = profiler.profile_graph(opts)
        memory = tm.total_requested_bytes

    # Number of flops.
    full_model = detect_fn.get_concrete_function(
        image=tf.TensorSpec(input_tensor.shape, input_tensor.dtype))
    frozen_func = convert_variables_to_constants_v2(full_model)
    # frozen_func.graph.as_graph_def()
    # layers = [op.name for op in frozen_func.graph.get_operations()]
    stats = tf.compat.v1.profiler.profile(
        graph=frozen_func.graph,
        run_meta=metadata,
        cmd='op',
        options=tf.compat.v1.profiler.ProfileOptionBuilder.float_operation())
    flops = stats.total_float_ops

    stats = {
        'model_name': model_name,
        'parameters': total_parameters,
        'flops': flops,
        'memory': memory
    }
    return stats
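# A minimal usage sketch (hypothetical model name; assumes the
# saved_models/inference_models/<model_name>/ layout used above exists):
stats = get_stats('my_detector')
print(stats['parameters'], stats['flops'], stats['memory'])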
def test_permits_extra_non_trainable_variables_eager(self):
  with context.eager_mode():
    tmpl = template.make_template("s", function_with_side_create,
                                  trainable=False)
    self.assertEqual(tmpl(name="1"), tmpl(name="2"))
def _v2_function_and_kerastensors_test(f, test_or_class, *args, **kwargs):
  with context.eager_mode():
    with testing_utils.run_eagerly_scope(False):
      with testing_utils.use_keras_tensors_scope(True):
        f(test_or_class, *args, **kwargs)
def framework_iterator(config=None,
                       frameworks=("tf", "eager", "torch"),
                       session=False):
    """A generator that allows for looping through n frameworks for testing.

    Provides the correct config entries ("use_pytorch" and "eager") as well
    as the correct eager/non-eager contexts for tf.

    Args:
        config (Optional[dict]): An optional config dict to alter in place
            depending on the iteration.
        frameworks (Tuple[str]): A list/tuple of the frameworks to be tested.
            Allowed are: "tf", "eager", and "torch".
        session (bool): If True, enter a tf.Session() and yield that as well
            in the tf-case (otherwise, yield (fw, None)).

    Yields:
        str: If `session` is False: The current framework ("tf", "eager",
            "torch") used.
        Tuple[str, Union[None, tf.Session]]: If `session` is True: A tuple
            of the current fw and the tf.Session if fw="tf".
    """
    config = config or {}
    frameworks = [frameworks] if isinstance(frameworks, str) else frameworks

    for fw in frameworks:
        # Skip non-installed frameworks.
        if fw == "torch" and not torch:
            logger.warning(
                "framework_iterator skipping torch (not installed)!")
            continue
        if fw != "torch" and not tf:
            logger.warning("framework_iterator skipping {} (tf not "
                           "installed)!".format(fw))
            continue
        elif fw == "eager" and not eager_mode:
            logger.warning("framework_iterator skipping eager (could not "
                           "import `eager_mode` from tensorflow.python)!")
            continue
        assert fw in ["tf", "eager", "torch", None]

        # Do we need a test session?
        sess = None
        if fw == "tf" and session is True:
            sess = tf.Session()
            sess.__enter__()

        print("framework={}".format(fw))
        config["eager"] = fw == "eager"
        config["use_pytorch"] = fw == "torch"

        eager_ctx = None
        if fw == "eager":
            eager_ctx = eager_mode()
            eager_ctx.__enter__()
            assert tf.executing_eagerly()
        elif fw == "tf":
            assert not tf.executing_eagerly()

        yield fw if session is False else (fw, sess)

        # Exit any context we may have entered.
        if eager_ctx:
            eager_ctx.__exit__(None, None, None)
        elif sess:
            sess.__exit__(None, None, None)
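# A minimal usage sketch, mirroring the docstring: loop over the installed
# frameworks and run the same check under each, with `config` mutated in
# place on every iteration.
config = {}
for fw in framework_iterator(config, frameworks=("tf", "eager", "torch")):
    print("running test with framework={}, config={}".format(fw, config))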
def testMapDtypeEager(self):
  with context.eager_mode():
    dtype = map_fn.map_fn(lambda x: constant_op.constant(""),
                          constant_op.constant([]),
                          dtype=dtypes.string).dtype
    self.assertEqual(dtype, dtypes.string)
def _v2_function_test(f, test_or_class, *args, **kwargs):
  with context.eager_mode():
    with testing_utils.run_eagerly_scope(False):
      f(test_or_class, *args, **kwargs)
def testScatterSubStateOps(self):
  with context.eager_mode():
    v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="sub")
    state_ops.scatter_sub(v, [1], [3])
    self.assertAllEqual([1.0, -1.0], v.numpy())
def testScatterUpdateCast(self):
  with context.eager_mode():
    v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="update")
    state_ops.scatter_update(v, [1], [3])
    self.assertAllEqual([1.0, 3.0], v.numpy())
def testEagerNameNotIdentity(self):
  with context.eager_mode():
    v0 = resource_variable_ops.ResourceVariable(1.0, name="a")
    v1 = resource_variable_ops.ResourceVariable(2.0, name="a")
    self.assertAllEqual(v0.numpy(), 1.0)
    self.assertAllEqual(v1.numpy(), 2.0)
def testEagerNameNotNeeded(self):
  with context.eager_mode():
    v0 = resource_variable_ops.ResourceVariable(1.0)
    self.assertAllEqual(v0.numpy(), 1.0)
def testCountUpToFunction(self):
  with context.eager_mode():
    v = resource_variable_ops.ResourceVariable(0, name="upto")
    self.assertAllEqual(state_ops.count_up_to(v, 1), 0)
    with self.assertRaises(errors.OutOfRangeError):
      state_ops.count_up_to(v, 1)
def testGPUInt64(self):
  if not context.context().num_gpus():
    return
  with context.eager_mode(), context.device("gpu:0"):
    v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.int64)
    self.assertAllEqual(1, v.numpy())
def testEagerNoUseResource(self):
  with context.eager_mode():
    v = variables.Variable(1.0)
    self.assertTrue(isinstance(v, resource_variable_ops.ResourceVariable))
def testHandleNumpy(self):
  with context.eager_mode():
    with self.assertRaises(ValueError):
      resource_variable_ops.ResourceVariable(
          1.0, name="handle-numpy").handle.numpy()
def decorated(self, **kwargs):
  """A wrapped test method that treats some arguments in a special way."""
  mode = kwargs.pop("mode", "graph")
  distribution = kwargs.get("distribution", None)
  required_tpu = kwargs.pop("required_tpu", False)
  required_gpus = kwargs.pop("required_gpus", None)

  if distribution:
    assert required_gpus is None, (
        "Do not use `required_gpus` and `distribution` together.")
    assert required_tpu is False, (
        "Do not use `required_tpu` and `distribution` together.")
    required_gpus = distribution.required_gpus
    required_tpu = distribution.required_tpu

  if required_tpu and not TPU_TEST:
    self.skipTest("Test requires a TPU, but it's not available.")
  if not required_tpu and TPU_TEST:
    self.skipTest("Test that doesn't require a TPU.")

  if not required_gpus:
    if GPU_TEST:
      self.skipTest("Test that doesn't require GPUs.")
  elif context.num_gpus() < required_gpus:
    # TODO(priyag): Consider allowing tests in graph mode using soft
    # placement.
    self.skipTest(
        "{} GPUs are not available for this test. {} GPUs are available"
        .format(required_gpus, context.num_gpus()))

  # At this point, `kwargs` doesn't have `required_gpus` or `required_tpu`
  # that the user might have specified; `mode` was popped above as well, so
  # the test receives only the arguments it explicitly declares.
  requested_arguments = tf_inspect.getfullargspec(test_method).args
  missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
      set(requested_arguments + ["mode"]))
  if missing_arguments:
    raise ValueError(
        "The test is missing arguments {} .".format(missing_arguments))

  kwargs_to_pass = {}
  for arg in requested_arguments:
    if arg == "self":
      kwargs_to_pass[arg] = self
    else:
      kwargs_to_pass[arg] = kwargs[arg]

  if mode == "eager":
    with context.eager_mode():
      if distribution:
        kwargs_to_pass["distribution"] = distribution.strategy
      test_method(**kwargs_to_pass)
  elif mode == "graph":
    with ops.Graph().as_default(), context.graph_mode():
      if distribution:
        kwargs_to_pass["distribution"] = distribution.strategy
      test_method(**kwargs_to_pass)
  else:
    raise ValueError(
        "'mode' has to be either 'eager' or 'graph' and not {}".format(mode))
def testUnprintableHandle(self):
  with context.eager_mode():
    handle = resource_variable_ops.var_handle_op(
        dtype=dtypes.int32, shape=[1], name="foo")
    self.assertIn("<unprintable>", str(handle))
    self.assertIn("<unprintable>", repr(handle))