Example #1
 def testOptionsProtoRoundTrip(self):
     options = options_lib.Options()
     options.autotune.enabled = True
     options.autotune.cpu_budget = 10
     options.autotune.ram_budget = 20
     options.deterministic = True
     options.experimental_external_state_policy = (
         options_lib.ExternalStatePolicy.FAIL)
     options.experimental_distribute.auto_shard_policy = (
         options_lib.AutoShardPolicy.DATA)
     options.experimental_distribute.num_devices = 1000
     options.experimental_optimization.apply_default_optimizations = True
     options.experimental_optimization.filter_fusion = True
     options.experimental_optimization.map_and_batch_fusion = True
     options.experimental_optimization.map_and_filter_fusion = True
     options.experimental_optimization.map_fusion = True
     options.experimental_optimization.map_parallelization = True
     options.experimental_optimization.noop_elimination = True
     options.experimental_optimization.parallel_batch = True
     options.experimental_optimization.shuffle_and_repeat_fusion = True
     options.experimental_slack = True
     options.threading.max_intra_op_parallelism = 30
     options.threading.private_threadpool_size = 40
     pb = options._to_proto()
     result = options_lib.Options()
     result._from_proto(pb)
     self.assertEqual(options, result)
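The same knobs are available through the public tf.data API. Below is a minimal sketch (assuming TensorFlow 2.x) that sets a few of the options above and attaches them to a dataset; the proto round trip itself goes through the private _to_proto()/_from_proto() helpers exercised by the test and has no public equivalent.

import tensorflow as tf  # assumes TensorFlow 2.x

options = tf.data.Options()
options.autotune.enabled = True
options.deterministic = True
options.threading.max_intra_op_parallelism = 30
options.threading.private_threadpool_size = 40

dataset = tf.data.Dataset.range(10).with_options(options)
# options() reflects the options attached to the dataset (and its inputs).
print(dataset.options().autotune.enabled)  # True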
Example #2
 def testOptionsHaveDefaults(self):
     options1 = options_lib.Options()
     options2 = options_lib.Options()
     self.assertIsNot(options1.experimental_optimization,
                      options2.experimental_optimization)
     self.assertIsNot(options1.threading, options2.threading)
     self.assertEqual(options1.experimental_optimization,
                      options_lib.OptimizationOptions())
     self.assertEqual(options1.threading, options_lib.ThreadingOptions())
Example #3
 def testExperimentalThreadingOptionsOverride(self):
   options = options_lib.Options()
   self.assertEqual(options.threading, options.experimental_threading)
   options.threading.max_intra_op_parallelism = 20
   options.experimental_threading.max_intra_op_parallelism = 40
   pb = options._to_proto()
   result = options_lib.Options()
   result._from_proto(pb)
   self.assertEqual(result.experimental_threading.max_intra_op_parallelism,
                    result.threading.max_intra_op_parallelism)
Example #4
 def testOptionsMergeOptionsFromMultipleInputs(self):
     options1 = options_lib.Options()
     options1.autotune.enabled = True
     options2 = options_lib.Options()
     options2.deterministic = True
     ds1 = dataset_ops.Dataset.range(0).with_options(options1)
     ds2 = dataset_ops.Dataset.range(0).with_options(options2)
     ds = dataset_ops.Dataset.zip((ds1, ds2))
     options = self._get_options(ds)
     self.assertTrue(options.autotune.enabled)
     self.assertTrue(options.deterministic)
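The _get_options() helper above is internal to the test suite. A rough public-API equivalent of the same merge behavior (assuming TensorFlow 2.x) is Dataset.options(), which reflects options set on the dataset and on its inputs:

import tensorflow as tf  # assumes TensorFlow 2.x

opts_a = tf.data.Options()
opts_a.autotune.enabled = True
opts_b = tf.data.Options()
opts_b.deterministic = True

ds = tf.data.Dataset.zip((tf.data.Dataset.range(1).with_options(opts_a),
                          tf.data.Dataset.range(1).with_options(opts_b)))
merged = ds.options()  # merged from the dataset and both zip inputs
print(merged.autotune.enabled, merged.deterministic)  # True True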
Example #5
 def testExperimentalDeterministicOverride(self):
   options = options_lib.Options()
   self.assertEqual(options.deterministic, options.experimental_deterministic)
   options.experimental_deterministic = False
   pb = options._to_proto()
   result = options_lib.Options()
   result._from_proto(pb)
   self.assertFalse(result.deterministic)
   self.assertEqual(result.deterministic, result.experimental_deterministic)
   result.experimental_deterministic = True
   self.assertTrue(result.deterministic)
   self.assertEqual(result.deterministic, result.experimental_deterministic)
Example #6
 def testMutatingOptionsRaiseValueError(self):
     ds = dataset_ops.Dataset.range(0)
     options1 = options_lib.Options()
     options1.experimental_slack = True
     options2 = options_lib.Options()
     options2.autotune.enabled = True
     ds = ds.with_options(options1)
     ds = ds.map(lambda x: 2 * x)
     ds = ds.with_options(options2)
     dataset_options = ds.options()
     with self.assertRaises(ValueError):
         dataset_options.deterministic = True
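The options object returned by Dataset.options() is an immutable snapshot, which is why the assignment above raises ValueError. A minimal sketch of the supported pattern (assuming TensorFlow 2.x): build a fresh Options object and apply it with another with_options() call.

import tensorflow as tf  # assumes TensorFlow 2.x

ds = tf.data.Dataset.range(1)
# ds.options().deterministic = True   # would raise ValueError, as in the test
new_options = tf.data.Options()
new_options.deterministic = True
ds = ds.with_options(new_options)     # the supported way to change an option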
Example #7
 def testOptionsTwiceDifferentOptions(self):
     options1 = options_lib.Options()
     options1.autotune.enabled = True
     options2 = options_lib.Options()
     options2.deterministic = False
     ds = dataset_ops.Dataset.range(0)
     ds = ds.with_options(options1)
     ds = ds.with_options(options2)
     options = self._get_options(ds)
     self.assertTrue(options.autotune.enabled)
     # Explicitly check that flag is False since assertFalse allows None
     self.assertIs(options.deterministic, False)
Example #8
 def testOptionsTwiceSameOption(self):
   if sys.version_info >= (3, 8) and platform.system() == "Windows":
     # TODO(b/165013260): Fix this
     self.skipTest("Test is currently broken on Windows with Python 3.8")
   options1 = options_lib.Options()
   options1.autotune.enabled = False
   options2 = options_lib.Options()
   options2.autotune.enabled = True
   ds = dataset_ops.Dataset.range(0)
   ds = ds.with_options(options1)
   ds = ds.with_options(options2)
   self.assertTrue(self._get_options(ds).autotune.enabled)
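When successive with_options() calls set the same option to different values, the value from the most recently applied Options object is the one observed, which is what the assertion above checks. A minimal public-API sketch, assuming TensorFlow 2.x:

import tensorflow as tf  # assumes TensorFlow 2.x

first = tf.data.Options()
first.autotune.enabled = False
second = tf.data.Options()
second.autotune.enabled = True

ds = tf.data.Dataset.range(1).with_options(first).with_options(second)
print(ds.options().autotune.enabled)  # True: the later setting wins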
Example #9
    def testOptimizationInjectPrefetch(self, existing_prefetch, autotune,
                                       set_env):
        if set_env:
            os.environ["TF_DATA_EXPERIMENT_OPT_IN"] = "inject_prefetch"
            os.environ["TF_JOB_NAME"] = "test_job"

        dataset = dataset_ops.Dataset.range(5)
        dataset = dataset.map(lambda x: x + 1,
                              num_parallel_calls=dataset_ops.AUTOTUNE)
        if existing_prefetch:
            dataset = dataset.prefetch(1)
        if autotune and set_env and not existing_prefetch:
            dataset = dataset.apply(testing.assert_next(["Prefetch", "Root"]))
        else:
            dataset = dataset.apply(testing.assert_next(["Root"]))

        options = options_lib.Options()
        options.autotune.enabled = autotune
        dataset = dataset.with_options(options)

        self.assertDatasetProduces(dataset, expected_output=list(range(1, 6)))

        if set_env:
            del os.environ["TF_DATA_EXPERIMENT_OPT_IN"]
            del os.environ["TF_JOB_NAME"]
Example #10
 def testThreadingOptionsBackwardCompatibility(self):
     opts = options_lib.Options()
     opts.threading.max_intra_op_parallelism = 20
     self.assertEqual(opts.experimental_threading.max_intra_op_parallelism,
                      20)
     opts.experimental_threading.private_threadpool_size = 80
     self.assertEqual(opts.threading.private_threadpool_size, 80)
Example #11
  def testStatefulExternalPolicy(self):
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    dataset = dataset_ops.Dataset.range(4)

    def fn(x):
      return x * x

    dataset = dataset.map(
        lambda x: script_ops.eager_py_func(fn, [x], dtypes.int64))

    options = options_lib.Options()
    options.experimental_external_state_policy = (
        options_lib.ExternalStatePolicy.WARN)
    dataset = dataset.with_options(options)

    iterator = iter(dataset)
    get_next = iterator.get_next
    checkpoint = trackable_utils.Checkpoint(iterator=iterator)
    self.assertEqual(0, get_next().numpy())
    self.assertEqual(1, get_next().numpy())
    save_path = checkpoint.save(checkpoint_prefix)
    self.assertEqual(4, get_next().numpy())
    self.assertEqual(9, get_next().numpy())
    checkpoint.restore(save_path).run_restore_ops()
    self.assertEqual(4, get_next().numpy())
    self.assertEqual(9, get_next().numpy())
    with self.assertRaises(errors.OutOfRangeError):
      get_next()
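A condensed public-API version of the same idea (assuming TensorFlow 2.x): with the WARN policy, a pipeline containing a py_function can still be checkpointed, and restoring resumes iteration from the saved position. The checkpoint path below is just an illustrative placeholder.

import tensorflow as tf  # assumes TensorFlow 2.x

options = tf.data.Options()
options.experimental_external_state_policy = (
    tf.data.experimental.ExternalStatePolicy.WARN)
dataset = tf.data.Dataset.range(4).map(
    lambda x: tf.py_function(lambda v: v * v, [x], tf.int64))
dataset = dataset.with_options(options)

iterator = iter(dataset)
ckpt = tf.train.Checkpoint(iterator=iterator)
print(next(iterator).numpy(), next(iterator).numpy())  # 0 1
save_path = ckpt.save("/tmp/options_example_ckpt")  # placeholder path
print(next(iterator).numpy(), next(iterator).numpy())  # 4 9
ckpt.restore(save_path)
print(next(iterator).numpy(), next(iterator).numpy())  # 4 9 again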
Example #12
    def test_stateful_ops_map_and_batch(self, use_function,
                                        use_legacy_map_and_batch):
        with test_util.deterministic_ops():

            v = variables.Variable(0.)

            def map_fn(x):
                v.assign_add(1.)
                return (x, v.read_value())

            if use_function:
                map_fn = def_function.function(map_fn)

            dataset = dataset_ops.Dataset.range(5)
            dataset = dataset.apply(testing.assert_next(["MapAndBatch"]))
            if use_legacy_map_and_batch:
                dataset = dataset.apply(
                    batching.map_and_batch(map_fn, 2, num_parallel_calls=5))
            else:
                dataset = dataset.map(map_fn, num_parallel_calls=5)
                dataset = dataset.batch(2)
            options = options_lib.Options()
            options.experimental_optimization.apply_default_optimizations = False
            options.experimental_optimization.map_and_batch_fusion = True
            dataset = dataset.with_options(options)
            self.evaluate(variables.global_variables_initializer())
            expected_output = [
                (np.array([0, 1]), np.array([1, 2])),
                (np.array([2, 3]), np.array([3, 4])),
                (np.array([4]), np.array([5])),
            ]
            self.assertDatasetProduces(dataset,
                                       expected_output=expected_output,
                                       requires_initialization=True)
Example #13
  def testDeterminism(self, local_determinism, global_determinism):
    num_elements = 1000
    batches = []
    for i in range(num_elements):
      example_i = example(features=features({
          "a": int64_feature([i]),
      }))
      batches.append([example_i.SerializeToString()])

    test_features = {"a": parsing_ops.FixedLenFeature((), dtype=dtypes.int64)}
    dataset = dataset_ops.Dataset.from_tensor_slices(batches)
    dataset = dataset.apply(
        contrib_parsing_ops.parse_example_dataset(
            test_features,
            num_parallel_calls=10,
            deterministic=local_determinism))

    opts = options_lib.Options()
    opts.deterministic = global_determinism
    dataset = dataset.with_options(opts)

    expected = list(range(num_elements))
    actual = [elem["a"][0] for elem in self.getDatasetOutput(dataset)]

    require_order = local_determinism or (local_determinism is None and
                                          global_determinism)
    if require_order:
      self.assertAllEqual(expected, actual)
    else:
      self.assertCountEqual(expected, actual)
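The require_order logic above encodes the precedence rule: a non-None per-transformation deterministic argument wins over the dataset-level option, and the global options.deterministic setting only applies where the local argument is left unset. A minimal sketch, assuming TensorFlow 2.x:

import tensorflow as tf  # assumes TensorFlow 2.x

opts = tf.data.Options()
opts.deterministic = False  # globally allow out-of-order output

ds = tf.data.Dataset.range(100)
# deterministic=True here takes precedence over the global option above.
ds = ds.map(lambda x: x + 1, num_parallel_calls=4, deterministic=True)
ds = ds.with_options(opts)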
Example #14
 def testAssertNext(self):
     dataset = dataset_ops.Dataset.from_tensors(0).apply(
         testing.assert_next(["Map"])).map(lambda x: x)
     options = options_lib.Options()
     options.experimental_optimization.apply_default_optimizations = False
     dataset = dataset.with_options(options)
     self.assertDatasetProduces(dataset, expected_output=[0])
Example #15
    def testNoopElimination(self, init_dataset_fn, transformation,
                            expected_name):
        """Runs a noop elimination test case.

    Args:
      init_dataset_fn: Function to create the initial dataset
      transformation: Transformation to apply
      expected_name: Name of the transformation if it is not eliminated
    """
        dataset = init_dataset_fn()

        if expected_name:
            dataset = dataset.apply(
                testing.assert_next([expected_name, "FiniteTake"]))
        else:
            dataset = dataset.apply(testing.assert_next(["FiniteTake"]))

        dataset = dataset.apply(transformation)
        dataset = dataset.take(1)
        options = options_lib.Options()
        options.experimental_optimization.apply_default_optimizations = False
        options.experimental_optimization.noop_elimination = True
        dataset = dataset.with_options(options)

        # Run the first iteration for the side effect of checking the assertion.
        get_next = self.getNext(dataset)
        self.evaluate(get_next())
Example #16
    def testExternalStatePolicyFail(self):
        with ops.device(self._device0):
            dataset0 = dataset_ops.Dataset.range(100).map(
                lambda _: random_ops.random_uniform(  # pylint:disable=g-long-lambda
                    [],
                    minval=1,
                    maxval=10,
                    dtype=dtypes.float32))
            opt = options_lib.Options()
            opt.experimental_external_state_policy = (
                options_lib.ExternalStatePolicy.FAIL)
            dataset0 = dataset0.with_options(opt)
        with self.assertRaises(errors.FailedPreconditionError):
            replicated_ds = distribute.replicate(
                dataset0, [self._device1, self._device2])
            dataset1 = replicated_ds[self._device1]
            dataset2 = replicated_ds[self._device2]

            with ops.device(self._device0):
                get_next0 = self.getNext(dataset0)
            with ops.device(self._device1):
                get_next1 = self.getNext(dataset1)
            with ops.device(self._device2):
                get_next2 = self.getNext(dataset2)

            for _ in range(100):
                self.evaluate(get_next0())
                self.evaluate(get_next1())
                self.evaluate(get_next2())
Example #17
    def testOptimizationWithCapturedRefVar(self, dataset_fn):
        """Tests that default optimizations are disabled with ref variables."""
        variable = variable_scope.get_variable("v",
                                               initializer=0,
                                               use_resource=False)
        assign_op = variable.assign_add(1)
        unoptimized_dataset = dataset_fn(variable)

        options = options_lib.Options()
        options.experimental_optimization.apply_default_optimizations = False
        options.experimental_optimization.noop_elimination = True
        options.experimental_optimization.map_and_batch_fusion = True
        optimized_dataset = unoptimized_dataset.with_options(options)
        optimized_it = dataset_ops.make_initializable_iterator(
            optimized_dataset)

        # Check that outputs are the same in the optimized and unoptimized cases,
        # when the variable value is changing.
        unoptimized_it = dataset_ops.make_initializable_iterator(
            unoptimized_dataset)
        with ops.control_dependencies([assign_op]):
            unoptimized_output = unoptimized_it.get_next()
            optimized_output = optimized_it.get_next()

        self.evaluate(variable.initializer)
        self.evaluate((unoptimized_it.initializer, optimized_it.initializer))
        while True:
            try:
                unoptimized, optimized = self.evaluate(
                    (unoptimized_output, optimized_output))
                self.assertEqual(unoptimized, optimized)
            except errors.OutOfRangeError:
                break
Example #18
    def _benchmark_filter_parallelization(self, chain_length,
                                          optimize_dataset):

        dataset = dataset_ops.Dataset.from_tensors(5).repeat()
        for _ in range(chain_length):
            dataset = dataset.filter(
                lambda x: math_ops.greater_equal(x - 5, 0))
        if optimize_dataset:
            options = options_lib.Options()
            options.experimental_optimization.apply_default_optimizations = False
            options.experimental_optimization.filter_parallelization = True
            dataset = dataset.with_options(options)

        opt_mark = "opt" if optimize_dataset else "noopt"
        self.run_and_report_benchmark(
            dataset=dataset,
            num_elements=100,
            iters=10,
            warmup=True,
            extras={
                "model_name": "optimize.benchmark.4",
                "parameters": "%d.%s" % (chain_length, optimize_dataset),
            },
            name="filter_parallelization_{}_chain_length_{}".format(
                opt_mark, chain_length))
Example #19
    def test_deterministic_attribute(self, local_determinism,
                                     global_determinism):
        self._set_seed()
        with test_util.deterministic_ops():

            def sleep(x):
                time.sleep(0.1)
                return x

            def map_function(x):
                if math_ops.equal(x, 0):
                    return script_ops.py_func(sleep, [x],
                                              x.dtype,
                                              stateful=False)
                else:
                    return x

            dataset = dataset_ops.Dataset.range(100)
            dataset = dataset.map(map_function,
                                  num_parallel_calls=2,
                                  deterministic=local_determinism)
            opts = options_lib.Options()
            opts.deterministic = global_determinism
            dataset = dataset.with_options(opts)

            self.assertDatasetProduces(dataset, expected_output=range(100))
Example #20
    def benchmark_map_and_batch(self):
        """Measures the performance of parallelized batching."""
        shapes = [(), (10, ), (10, 10), (10, 10, 10), (224, 224, 3)]
        batch_size_values = [1, 32, 64, 128, 1024]

        for shape in shapes:
            for batch_size in batch_size_values:

                dataset = dataset_ops.Dataset.range(1000000000)
                dense_value = random_ops.random_normal(shape=shape)

                dataset = dataset.apply(
                    batching.map_and_batch(lambda _: dense_value, batch_size))  # pylint: disable=cell-var-from-loop
                options = options_lib.Options()
                options.experimental_optimization.apply_default_optimizations = False
                dataset = dataset.with_options(options)

                self.run_and_report_benchmark(
                    dataset=dataset,
                    num_elements=batch_size,
                    iters=100,
                    warmup=True,
                    extras={
                        "model_name": "map_and_batch.benchmark.1",
                        "parameters": "%d.%s" % (batch_size, str(shape))
                    },
                    name="num_elements_%d_batch_size_%d" %
                    (np.prod(shape), batch_size))
Example #21
    def testReplicateAndShardProduceDisjointData(self, shuffle,
                                                 sharding_policy):
        dataset = dataset_ops.Dataset.list_files(self._filenames,
                                                 shuffle=shuffle)
        dataset = dataset.flat_map(core_readers.TFRecordDataset)

        graph_def = dataset._as_serialized_graph(
            strip_device_assignment=True,
            external_state_policy=options_lib.ExternalStatePolicy.WARN)

        options = options_lib.Options()
        options.experimental_distribute.auto_shard_policy = sharding_policy

        ds1 = distribute._RemoteDataset(graph_def, "/device:CPU:0",
                                        dataset.element_spec)
        ds2 = distribute._RemoteDataset(graph_def, "/device:CPU:0",
                                        dataset.element_spec)

        ds1 = ds1.with_options(options)
        ds2 = ds2.with_options(options)

        ds1 = distribute._AutoShardDataset(ds1, 2, 0)
        ds2 = distribute._AutoShardDataset(ds2, 2, 1)

        elems1 = set(self.getAllDatasetElements(ds1))
        elems2 = set(self.getAllDatasetElements(ds2))

        self.assertEmpty(elems1.intersection(elems2))
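The _RemoteDataset and _AutoShardDataset wrappers above are internal test plumbing. In user code the sharding policy is normally just set on the options, and the actual sharding is performed by tf.distribute when the dataset is distributed. A minimal sketch of setting the policy, assuming TensorFlow 2.x:

import tensorflow as tf  # assumes TensorFlow 2.x

options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = (
    tf.data.experimental.AutoShardPolicy.DATA)
dataset = tf.data.Dataset.range(8).batch(2).with_options(options)
# A tf.distribute strategy would shard this pipeline according to the DATA
# policy, e.g. via strategy.experimental_distribute_dataset(dataset).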
Example #22
    def test_text_line_dataset(self, use_function):
        self._set_seed()
        with test_util.deterministic_ops():

            def write_nums_to_file(filename, numbers):
                path = os.path.join(self.get_temp_dir(), filename)
                with open(path, "w") as f:
                    f.write("\n".join(str(n) for n in numbers))
                return path

            f1 = write_nums_to_file("f1", (1, 2, 3))
            f2 = write_nums_to_file("f2", (4, 5, 6))
            f3 = write_nums_to_file("f3", (7, 8, 9))

            def interleave_fn(filename):
                return reader_ops.TextLineDataset(filename)

            if use_function:
                interleave_fn = def_function.function(interleave_fn)

            dataset = dataset_ops.Dataset.from_tensor_slices([f1, f2, f3])
            dataset = dataset.apply(
                testing.assert_next(["ParallelInterleave"]))
            dataset = dataset.interleave(interleave_fn,
                                         cycle_length=3,
                                         num_parallel_calls=3)
            options = options_lib.Options()
            options.experimental_optimization.apply_default_optimizations = False
            dataset = dataset.with_options(options)

            self.assertDatasetProduces(
                dataset,
                expected_output=["1", "4", "7", "2", "5", "8", "3", "6", "9"])
Example #23
    def test_stateful_ops_interleave(self, use_function,
                                     use_legacy_interleave):
        with test_util.deterministic_ops():

            v = variables.Variable(0.)

            def map_fn(x):
                v.assign_add(1.)
                return (x, v.read_value())

            def interleave_fn(x):
                del x
                return dataset_ops.Dataset.range(2).map(map_fn)

            if use_function:
                map_fn = def_function.function(map_fn)
                interleave_fn = def_function.function(interleave_fn)

            dataset = dataset_ops.Dataset.range(5)
            if use_legacy_interleave:
                dataset = dataset.apply(
                    interleave_ops.parallel_interleave(interleave_fn,
                                                       cycle_length=5))
            else:
                dataset = dataset.interleave(interleave_fn,
                                             cycle_length=5,
                                             num_parallel_calls=3)
            options = options_lib.Options()
            options.experimental_optimization.apply_default_optimizations = False
            dataset = dataset.with_options(options)
            self.evaluate(variables.global_variables_initializer())
            expected_output = list(zip([0] * 5 + [1] * 5, range(1, 11)))
            self.assertDatasetProduces(dataset,
                                       expected_output=expected_output,
                                       requires_initialization=True)
Example #24
    def testFilterFusion(self, function, predicates):
        dataset = dataset_ops.Dataset.range(5).apply(
            testing.assert_next(["Map", "Filter",
                                 "MemoryCacheImpl"])).map(function)
        for predicate in predicates:
            dataset = dataset.filter(predicate)

        dataset = dataset.cache()
        options = options_lib.Options()
        options.experimental_optimization.apply_default_optimizations = False
        options.experimental_optimization.filter_fusion = True
        dataset = dataset.with_options(options)
        expected_output = []
        for x in range(5):
            r = function(x)
            filtered = False
            for predicate in predicates:
                if isinstance(r, tuple):
                    b = predicate(*r)  # Pass tuple as multiple arguments.
                else:
                    b = predicate(r)
                if not self.evaluate(b):
                    filtered = True
                    break

            if not filtered:
                expected_output.append(r)
        self.assertDatasetProduces(dataset, expected_output=expected_output)
Example #25
    def test_no_stateful_ops_interleave(self, use_function,
                                        use_legacy_interleave):
        self._set_seed()
        with test_util.deterministic_ops():

            def interleave_fn(x):
                del x
                return dataset_ops.Dataset.range(2)

            if use_function:
                interleave_fn = def_function.function(interleave_fn)

            dataset = dataset_ops.Dataset.range(5)
            if use_legacy_interleave:
                dataset = dataset.apply(
                    testing.assert_next(["LegacyParallelInterleaveV2"]))
                dataset = dataset.apply(
                    interleave_ops.parallel_interleave(interleave_fn,
                                                       cycle_length=5))
            else:
                dataset = dataset.apply(
                    testing.assert_next(["ParallelInterleave"]))
                dataset = dataset.interleave(interleave_fn,
                                             cycle_length=5,
                                             num_parallel_calls=3)
            options = options_lib.Options()
            options.experimental_optimization.apply_default_optimizations = False
            dataset = dataset.with_options(options)
            self.evaluate(variables.global_variables_initializer())
            self.assertDatasetProduces(dataset,
                                       expected_output=[0] * 5 + [1] * 5)
Example #26
    def benchmark_resample_performance(self):
        init_dist = [0.25, 0.25, 0.25, 0.25]
        target_dist = [0.0, 0.0, 0.0, 1.0]
        num_classes = len(init_dist)
        # We don't need many samples to test a Dirac-delta target distribution.
        num_samples = 1000
        data_np = np.random.choice(num_classes, num_samples, p=init_dist)
        # Prepare the dataset
        dataset = dataset_ops.Dataset.from_tensor_slices(data_np).repeat()
        # Reshape distribution via rejection sampling.
        dataset = dataset.apply(
            resampling.rejection_resample(class_func=lambda x: x,
                                          target_dist=target_dist,
                                          initial_dist=init_dist,
                                          seed=142))
        options = options_lib.Options()
        options.experimental_optimization.apply_default_optimizations = False
        dataset = dataset.with_options(options)

        wall_time = self.run_benchmark(dataset=dataset,
                                       num_elements=num_samples,
                                       iters=10,
                                       warmup=True)
        resample_time = wall_time * num_samples

        self.report_benchmark(iters=10,
                              wall_time=resample_time,
                              extras={
                                  "model_name":
                                  "rejection_resample.benchmark.1",
                                  "parameters": "%d" % num_samples,
                              },
                              name="resample_{}".format(num_samples))
Example #27
 def testNoErrorWithoutPrefetch(self):
     """The rewrite should not fail if there is no prefetch() in the pipeline."""
     dataset = dataset_ops.Dataset.range(10)
     options = options_lib.Options()
     options.experimental_slack = True
     dataset = dataset.with_options(options)
     self.assertDatasetProduces(dataset, range(10))
Example #28
    def run_benchmark(self,
                      dataset,
                      num_elements,
                      iters=1,
                      warmup=True,
                      apply_default_optimizations=False,
                      session_config=None):
        """Benchmarks the dataset.

    Runs the dataset `iters` times. In each iteration, the benchmark measures
    the time it takes to go through `num_elements` elements of the dataset.

    Args:
      dataset: Dataset to benchmark.
      num_elements: Number of dataset elements to iterate through each benchmark
        iteration.
      iters: Number of times to repeat the timing.
      warmup: If true, warms up the session caches by running an untimed run.
      apply_default_optimizations: Determines whether default optimizations
        should be applied.
      session_config: A ConfigProto protocol buffer with configuration options
        for the session. Applicable only for benchmarking in graph mode.

    Returns:
      A float, representing the per-element wall time of the dataset in seconds.
      This is the median time (with respect to `iters`) it takes for the dataset
      to go through `num_elements` elements, divided by `num_elements.`
    """

        # The options that have been applied to the dataset are preserved so that
        # they are not overwritten while benchmarking.
        options = options_lib.Options()
        options.experimental_optimization.apply_default_optimizations = (
            apply_default_optimizations)
        dataset = dataset.with_options(options)

        # NOTE: We use `dataset.skip()` to perform the iterations in C++, avoiding
        # the overhead of having to execute a TensorFlow op for each step of the
        # input pipeline. Note that this relies on the underlying implementation of
        # `skip` to execute upstream computation. If it is optimized in the future,
        # we will have to change this code.
        dataset = dataset.skip(num_elements - 1)

        if context.executing_eagerly():
            median_duration = self._run_eager_benchmark(iterable=dataset,
                                                        iters=iters,
                                                        warmup=warmup)
            return median_duration / float(num_elements)

        iterator = dataset_ops.make_initializable_iterator(dataset)
        next_element = iterator.get_next()
        op = nest.flatten(next_element)[0].op
        median_duration = self._run_graph_benchmark(
            iterable=op,
            iters=iters,
            warmup=warmup,
            session_config=session_config,
            initializer=iterator.initializer)
        return median_duration / float(num_elements)
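A hypothetical usage sketch (the benchmark method and dataset below are assumptions, not part of the original file): a benchmark method builds a pipeline, calls run_benchmark, and reports the median per-element wall time.

    def benchmark_simple_map(self):
        # Hypothetical example; assumes it lives on the same benchmark class
        # that defines run_benchmark above.
        dataset = dataset_ops.Dataset.range(10000).map(lambda x: x + 1)
        wall_time = self.run_benchmark(dataset=dataset,
                                       num_elements=10000,
                                       iters=5,
                                       warmup=True)
        self.report_benchmark(iters=5,
                              wall_time=wall_time,
                              name="simple_map_10000")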
Example #29
 def testPrefetchWithSlackOptionWithoutIterator(self):
     """Defaults to slack period of 1 without iterator."""
     dataset = dataset_ops.Dataset.range(10)
     dataset = dataset.prefetch(1)
     options = options_lib.Options()
     options.experimental_slack = True
     dataset = dataset.with_options(options)
     self.assertDatasetProduces(dataset, range(10))
Example #30
 def testOptimizationStatefulFunction(self):
     dataset = dataset_ops.Dataset.range(10).map(
         lambda _: random_ops.random_uniform([])).batch(10)
     options = options_lib.Options()
     options.experimental_optimization.apply_default_optimizations = False
     dataset = dataset.with_options(options)
     get_next = self.getNext(dataset)
     self.evaluate(get_next())