def _model_fit(self,
                 strategy,
                 steps_per_execution=1,
                 validation_data=None,
                 x=None,
                 steps_per_epoch=10,
                 run_eagerly=False,
                 with_normalization_layer=False,
                 callbacks=None,
                 use_lookup_layer=False):
    if callbacks is None:
      callbacks = []

    model, default_callbacks = self._model_compile(strategy,
                                                   steps_per_execution,
                                                   run_eagerly,
                                                   with_normalization_layer,
                                                   use_lookup_layer)
    callbacks += default_callbacks

    x = x or dataset_creator.DatasetCreator(
        self._get_dataset_fn(use_lookup_layer))
    validation_data = (
        validation_data or
        dataset_creator.DatasetCreator(self._get_dataset_fn(use_lookup_layer)))

    model.fit(
        x,
        epochs=10,
        steps_per_epoch=steps_per_epoch,
        callbacks=callbacks,
        validation_data=validation_data,
        validation_steps=steps_per_epoch)
    return model
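Example #2
A variant of the `_model_fit` helper above that builds its `dataset_fn` inline from random tensors instead of going through `self._get_dataset_fn`.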
    def _model_fit(self,
                   strategy,
                   steps_per_execution=1,
                   validation_data=None,
                   x=None,
                   steps_per_epoch=10,
                   run_eagerly=False,
                   with_normalization_layer=False,
                   callbacks=None):
        if callbacks is None:
            callbacks = []

        model, default_callbacks = self._model_compile(
            strategy, steps_per_execution, run_eagerly,
            with_normalization_layer)
        callbacks += default_callbacks

        def dataset_fn(input_context):
            del input_context
            x = tf.random.uniform((10, 10))
            y = tf.random.uniform((10, ))
            return tf.data.Dataset.from_tensor_slices(
                (x, y)).shuffle(10).repeat().batch(2)

        x = x or dataset_creator.DatasetCreator(dataset_fn)
        validation_data = (validation_data
                           or dataset_creator.DatasetCreator(dataset_fn))

        model.fit(x,
                  epochs=10,
                  steps_per_epoch=steps_per_epoch,
                  callbacks=callbacks,
                  validation_data=validation_data,
                  validation_steps=steps_per_epoch)
        return model
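Example #3
The most fully parameterized `_model_fit` variant: it adds `y`, `shuffle`, `batch_size`, `verbose`, and `jit_compile` arguments, plus a `use_dataset_creator` switch that either wraps the dataset function in a `DatasetCreator` or calls it directly.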
    def _model_fit(
        self,
        strategy,
        steps_per_execution=1,
        validation_data=None,
        x=None,
        y=None,
        shuffle=True,
        batch_size=None,
        steps_per_epoch=10,
        run_eagerly=False,
        with_normalization_layer=False,
        callbacks=None,
        use_lookup_layer=False,
        use_dataset_creator=True,
        verbose="auto",
        jit_compile=None,
    ):
        if callbacks is None:
            callbacks = []

        model, default_callbacks = self._model_compile(
            strategy,
            steps_per_execution,
            run_eagerly,
            with_normalization_layer,
            jit_compile,
        )
        callbacks += default_callbacks

        if x is None:
            if use_dataset_creator:
                x = dataset_creator.DatasetCreator(
                    self._get_dataset_fn(use_lookup_layer))
            else:
                x = self._get_dataset_fn(use_lookup_layer)(None)

        if validation_data is None:
            if use_dataset_creator:
                validation_data = dataset_creator.DatasetCreator(
                    self._get_dataset_fn(use_lookup_layer))
            else:
                validation_data = self._get_dataset_fn(use_lookup_layer)(None)

        model.fit(
            x,
            y,
            shuffle=shuffle,
            batch_size=batch_size,
            epochs=10,
            steps_per_epoch=steps_per_epoch,
            callbacks=callbacks,
            validation_data=validation_data,
            validation_steps=steps_per_epoch,
            verbose=verbose,
        )
        return model
Example #4
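Checks that `tf.distribute.InputOptions` passed to a `DatasetCreator` propagate to the `DistributedIterator` built under `ParameterServerStrategy` with a `ClusterCoordinator`.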
    def test_dataset_creator_input_options_with_cluster_coordinator(self):
        dataset_fn = lambda _: tf.data.Dataset.from_tensor_slices([1, 1])
        input_options = tf.distribute.InputOptions(
            experimental_fetch_to_device=True,
            experimental_per_replica_buffer_size=2)
        x = dataset_creator.DatasetCreator(dataset_fn,
                                           input_options=input_options)
        strategy = self._get_parameter_server_strategy()
        with strategy.scope():
            model = sequential.Sequential([core_layers.Dense(10)])
            model._cluster_coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(
                strategy)
            data_handler = data_adapter.get_data_handler(x,
                                                         steps_per_epoch=2,
                                                         model=model)

        iter_rv = iter(data_handler._dataset)._values[0]
        iter_rv._rebuild_on(model._cluster_coordinator._cluster.workers[0])
        distributed_iterator = iter_rv._get_values()

        # Ensuring the resulting `DistributedIterator` has the right options.
        self.assertTrue(
            distributed_iterator._options.experimental_fetch_to_device)
        self.assertEqual(
            distributed_iterator._options.experimental_per_replica_buffer_size,
            2)
Example #5
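An end-to-end `model.fit` with a `DatasetCreator` under `ParameterServerStrategy` on an in-process cluster; the `dataset_fn` shards its data across input pipelines and batches per replica.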
    def test_dataset_creator_usage_in_parameter_server_model_fit(self):
        cluster_def = multi_worker_test_base.create_in_process_cluster(
            num_workers=2, num_ps=1, rpc_layer="grpc")
        cluster_def["chief"] = [
            "localhost:%d" % multi_worker_test_base.pick_unused_port()
        ]
        strategy = tf.distribute.experimental.ParameterServerStrategy(
            SimpleClusterResolver(ClusterSpec(cluster_def), rpc_layer="grpc"))
        with strategy.scope():
            model = sequential.Sequential([core_layers.Dense(10)])
        model.compile(gradient_descent.SGD(), loss="mse")

        def dataset_fn(input_context):
            global_batch_size = 64
            batch_size = input_context.get_per_replica_batch_size(
                global_batch_size)
            dataset = tf.data.Dataset.from_tensors(([1.], [1.])).repeat()
            dataset = dataset.shard(input_context.num_input_pipelines,
                                    input_context.input_pipeline_id)
            dataset = dataset.batch(batch_size)
            dataset = dataset.prefetch(2)
            return dataset

        history = model.fit(dataset_creator.DatasetCreator(dataset_fn),
                            epochs=10,
                            steps_per_epoch=10,
                            verbose=0)
        self.assertLen(history.history["loss"], 10)
Example #6
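Fits a premade `LinearModel` either through a `DatasetCreator` or directly from numpy arrays or a dataset (`get_numpy`, `get_dataset`, and `INPUT_SIZE` are module-level helpers not shown here), skipping the combinations that `ParameterServerStrategy` does not support.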
 def test_linear_model(self, distribution, use_dataset_creator, data_fn):
     if (not use_dataset_creator) and isinstance(
             distribution,
             tf.distribute.experimental.ParameterServerStrategy):
         self.skipTest(
             "Parameter Server strategy requires dataset creator to be used in "
             "model.fit.")
     if (not tf.__internal__.tf2.enabled() and use_dataset_creator
             and isinstance(
                 distribution,
                 tf.distribute.experimental.ParameterServerStrategy)):
         self.skipTest(
             "Parameter Server strategy with dataset creator needs to be run when "
             "eager execution is enabled.")
     with distribution.scope():
         model = linear.LinearModel()
         opt = gradient_descent.SGD(learning_rate=0.1)
         model.compile(opt, "mse")
         if use_dataset_creator:
             x = dataset_creator.DatasetCreator(dataset_fn)
             hist = model.fit(x, epochs=3, steps_per_epoch=INPUT_SIZE)
         else:
             if data_fn == "numpy":
                 inputs, output = get_numpy()
                 hist = model.fit(inputs, output, epochs=3)
             else:
                 hist = model.fit(get_dataset(), epochs=3)
         self.assertLess(hist.history["loss"][2], 0.2)
Example #7
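A leaner `_model_fit` helper with no strategy argument; only `x` defaults to a `DatasetCreator`, and `validation_data` is passed through unchanged.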
    def _model_fit(self,
                   steps_per_execution=1,
                   validation_data=None,
                   x=None,
                   steps_per_epoch=10,
                   run_eagerly=False,
                   with_normalization_layer=False):
        model, callbacks = self._model_compile(steps_per_execution,
                                               run_eagerly,
                                               with_normalization_layer)

        def dataset_fn(input_context):
            del input_context
            x = tf.random.uniform((10, 10))
            y = tf.random.uniform((10, ))
            return tf.data.Dataset.from_tensor_slices(
                (x, y)).shuffle(10).repeat().batch(2)

        x = x or dataset_creator.DatasetCreator(dataset_fn)

        model.fit(x,
                  epochs=10,
                  steps_per_epoch=steps_per_epoch,
                  verbose=0,
                  callbacks=callbacks,
                  validation_data=validation_data)
        return model
Example #8
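Unit-tests `DatasetCreator` input validation: a non-callable argument and a callable that does not return a `Dataset` both raise `TypeError`, while a valid callable round-trips its elements.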
  def test_dataset_creator(self):
    with self.assertRaisesRegex(
        TypeError, "`dataset_fn` for `DatasetCreator` must be a `callable`."):
      dataset_creator.DatasetCreator(2)

    dataset_fn = lambda: 3
    with self.assertRaisesRegex(
        TypeError, "The `callable` provided to `DatasetCreator` must return "
        "a Dataset."):
      dataset_creator.DatasetCreator(dataset_fn)()

    dataset_fn = lambda: tf.data.Dataset.from_tensor_slices([1, 1])
    got = dataset_creator.DatasetCreator(dataset_fn)()
    self.assertEqual(
        next(iter(got)),
        next(iter(tf.data.Dataset.from_tensor_slices([1, 1]))))
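Taken together, the contract these tests exercise is small: `DatasetCreator` wraps a callable that must return a `tf.data.Dataset`, and Keras calls it lazily inside `model.fit`. Below is a minimal self-contained sketch of that contract, assuming a TF release (2.5+) where the class is exposed publicly as `tf.keras.utils.experimental.DatasetCreator`; the tests above import it via an internal `dataset_creator` module instead.

import tensorflow as tf

def dataset_fn(input_context):
    # `model.fit` calls this with a `tf.distribute.InputContext`;
    # a single-machine sketch has no use for it.
    del input_context
    x = tf.random.uniform((10, 10))
    y = tf.random.uniform((10,))
    return tf.data.Dataset.from_tensor_slices((x, y)).repeat().batch(2)

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.compile("sgd", loss="mse")
# `steps_per_epoch` is required here: the dataset is built lazily,
# so `fit` cannot infer how many batches make up an epoch.
model.fit(tf.keras.utils.experimental.DatasetCreator(dataset_fn),
          epochs=2, steps_per_epoch=5, verbose=0)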
Example #9
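The same parameterization as Example #6, applied to a `WideDeepModel` with separate optimizers for its linear and DNN components.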
    def test_wide_deep_model(self, distribution, use_dataset_creator, data_fn):
        if (not use_dataset_creator) and isinstance(
                distribution,
                tf.distribute.experimental.ParameterServerStrategy):
            self.skipTest(
                "Parameter Server strategy requires dataset creator to be used in "
                "model.fit.")
        if (not tf.__internal__.tf2.enabled() and use_dataset_creator
                and isinstance(
                    distribution,
                    tf.distribute.experimental.ParameterServerStrategy)):
            self.skipTest(
                "Parameter Server strategy with dataset creator needs to be run when "
                "eager execution is enabled.")
        with distribution.scope():
            linear_model = linear.LinearModel(units=1)
            dnn_model = sequential.Sequential([core.Dense(units=1)])
            wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
            linear_opt = gradient_descent.SGD(learning_rate=0.05)
            dnn_opt = adagrad.Adagrad(learning_rate=0.1)
            wide_deep_model.compile(optimizer=[linear_opt, dnn_opt],
                                    loss="mse")

            if use_dataset_creator:
                x = dataset_creator.DatasetCreator(dataset_fn)
                hist = wide_deep_model.fit(x,
                                           epochs=3,
                                           steps_per_epoch=INPUT_SIZE)
            else:
                if data_fn == "numpy":
                    inputs, output = get_numpy()
                    hist = wide_deep_model.fit(inputs, output, epochs=3)
                else:
                    hist = wide_deep_model.fit(get_dataset(), epochs=3)
            self.assertLess(hist.history["loss"][2], 0.2)
Example #10
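Shows that a `DatasetCreator` also works in `model.fit` without any distribution strategy.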
    def test_dataset_creator_model_fit_without_strategy(self):
        model = sequential.Sequential([core_layers.Dense(10)])
        model.compile(gradient_descent.SGD(), loss="mse")

        history = model.fit(dataset_creator.DatasetCreator(
            self._get_dataset_fn()),
                            epochs=10,
                            steps_per_epoch=10,
                            verbose=0)
        self.assertLen(history.history["loss"], 10)
Example #11
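Fits and then predicts through `DatasetCreator` inputs, asserting that predictions agree exactly where the inputs repeat and differ elsewhere.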
  def testModelFitAndPredict(self, strategy):
    def fit_dataset_fn(input_context):
      del input_context
      x = tf.random.uniform((10, 1))
      y = tf.random.uniform((10,))
      return tf.data.Dataset.from_tensor_slices(
          (x, y)).shuffle(10).repeat().batch(2)

    x = dataset_creator.DatasetCreator(fit_dataset_fn)
    validation_data = dataset_creator.DatasetCreator(fit_dataset_fn)

    model = self._model_fit(strategy, x=x, validation_data=validation_data)
    _, predictions = self._model_predict(strategy, model, steps=3)

    # Check the first (0th index), fourth (3rd index) and last predictions,
    # because the first, fourth, and last inputs to `model.predict` are the
    # same, so their predictions should match.
    self.assertTrue(all(predictions[0] == predictions[i] for i in [0, 3, 5]))

    self.assertFalse(
        all(predictions[0] == predictions[i] for i in [0, 1, 2, 4]))
Example #12
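Like Example #10, but parameterized over whether a `tf.distribute.InputOptions` instance is passed to the `DatasetCreator`.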
    def test_dataset_creator_model_fit_without_strategy(
            self, use_input_options):
        model = sequential.Sequential([core_layers.Dense(10)])
        model.compile(gradient_descent.SGD(), loss="mse")

        input_options = tf.distribute.InputOptions(
        ) if use_input_options else None
        history = model.fit(dataset_creator.DatasetCreator(
            self._get_dataset_fn(), input_options),
                            epochs=10,
                            steps_per_epoch=10,
                            verbose=0)
        self.assertLen(history.history["loss"], 10)
Example #13
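Verifies that the optimizer's accumulators are updated after `fit`, wrapping the dataset in a `DatasetCreator` only under `ParameterServerStrategy`.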
    def testGetGradientsInModelPss(self, strategy, optimizer_fn):
        with strategy.scope():
            model = self._get_model()
            optimizer = optimizer_fn()
        ds_fn = self._get_dataset_fn()
        if isinstance(strategy, tf.distribute.ParameterServerStrategy):
            ds = dataset_creator.DatasetCreator(ds_fn)
        else:
            ds = ds_fn(None)
        model.compile(loss="mse", optimizer=optimizer)
        model.fit(ds, epochs=1, steps_per_epoch=5)

        self._verify_accumulators_updated(optimizer)
Example #14
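With `steps_per_epoch=-1` and a finite (non-repeating) dataset, coordinator-based strategies are expected to raise `OutOfRangeError` or `CancelledError`, while other strategies simply run the data to exhaustion.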
    def testModelFitwithStepsPerEpochNegativeOne(self, strategy):
        def dataset_fn(input_context):
            del input_context
            x = tf.random.uniform((10, 10))
            y = tf.random.uniform((10, ))
            return (tf.data.Dataset.from_tensor_slices(
                (x, y)).shuffle(10).batch(2))

        if strategy._should_use_with_coordinator:
            with self.assertRaises(
                (tf.errors.OutOfRangeError, tf.errors.CancelledError)):
                self._model_fit(
                    strategy,
                    steps_per_epoch=-1,
                    x=dataset_creator.DatasetCreator(dataset_fn),
                    validation_data=dataset_creator.DatasetCreator(dataset_fn),
                )
        else:
            self._model_fit(
                strategy,
                steps_per_epoch=-1,
                x=dataset_creator.DatasetCreator(dataset_fn),
                validation_data=dataset_creator.DatasetCreator(dataset_fn),
            )
Example #15
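The same parameter-server `fit` as Example #5, but reusing `self._get_dataset_fn()` instead of an inline `dataset_fn`.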
    def test_dataset_creator_usage_in_parameter_server_model_fit(self):
        cluster_def = multi_worker_test_base.create_in_process_cluster(
            num_workers=2, num_ps=1, rpc_layer="grpc")
        cluster_def["chief"] = [
            "localhost:%d" % multi_worker_test_base.pick_unused_port()
        ]
        strategy = tf.distribute.experimental.ParameterServerStrategy(
            SimpleClusterResolver(ClusterSpec(cluster_def), rpc_layer="grpc"))
        with strategy.scope():
            model = sequential.Sequential([core_layers.Dense(10)])
        model.compile(gradient_descent.SGD(), loss="mse")

        history = model.fit(dataset_creator.DatasetCreator(
            self._get_dataset_fn()),
                            epochs=10,
                            steps_per_epoch=10,
                            verbose=0)
        self.assertLen(history.history["loss"], 10)
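Example #16
The `evaluate` counterpart to the `_model_fit` helpers: it compiles a model and runs `model.evaluate`, feeding either a `DatasetCreator` or the concrete dataset returned by calling `dataset_fn` directly.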
    def _model_evaluate(
        self,
        strategy,
        steps_per_execution=1,
        x=None,
        y=None,
        batch_size=None,
        steps=10,
        run_eagerly=False,
        with_normalization_layer=False,
        callbacks=None,
        use_dataset_creator=True,
    ):
        if callbacks is None:
            callbacks = []

        model, default_callbacks = self._model_compile(
            strategy,
            steps_per_execution,
            run_eagerly,
            with_normalization_layer,
        )
        callbacks += default_callbacks

        def dataset_fn(input_context):
            del input_context
            x = tf.random.uniform((10, 10))
            y = tf.random.uniform((10, 1))
            return (tf.data.Dataset.from_tensor_slices(
                (x, y)).shuffle(10).repeat().batch(8))

        if x is None:
            if use_dataset_creator:
                x = dataset_creator.DatasetCreator(dataset_fn)
            else:
                x = dataset_fn(None)

        model.evaluate(x=x,
                       y=y,
                       steps=steps,
                       callbacks=callbacks,
                       batch_size=batch_size)
        return model
Example #17
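Like Example #4, but checks `InputOptions` propagation on the `DistributedDatasetsFromFunction` produced under `MultiWorkerMirroredStrategy`.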
    def test_dataset_creator_input_options(self):
        dataset_fn = lambda _: tf.data.Dataset.from_tensor_slices([1, 1])
        input_options = tf.distribute.InputOptions(
            experimental_fetch_to_device=True,
            experimental_per_replica_buffer_size=2)
        x = dataset_creator.DatasetCreator(dataset_fn,
                                           input_options=input_options)
        with tf.distribute.MultiWorkerMirroredStrategy().scope():
            data_handler = data_adapter.get_data_handler(
                x,
                steps_per_epoch=2,
                model=sequential.Sequential([core_layers.Dense(10)]))

        # Ensuring the resulting `DistributedDatasetsFromFunction` has the right
        # options.
        self.assertTrue(
            data_handler._dataset._options.experimental_fetch_to_device)
        self.assertEqual(
            data_handler._dataset._options.
            experimental_per_replica_buffer_size, 2)
Example #18
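Runs `model.predict` through a `DatasetCreator`, with the same repeated-input assertions as Example #11.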
    def testModelPredictWithDatasetCreator(self, strategy):
        if isinstance(strategy, tf.distribute.MultiWorkerMirroredStrategy):
            self.skipTest("b/189223991")

        def _dataset_fn(input_context):
            del input_context
            x = tf.constant([[1.0], [2.0], [3.0], [1.0], [5.0], [1.0]])
            return tf.data.Dataset.from_tensor_slices(x).repeat().batch(2)

        _, predictions = self._model_predict(
            strategy,
            steps=3,
            test_data=dataset_creator.DatasetCreator(_dataset_fn),
        )

        # Check the first (0th index), fourth (3rd index) and last
        # predictions, because the first, fourth, and last inputs to
        # `model.predict` are the same, so their predictions should match.
        self.assertTrue(
            all(predictions[0] == predictions[i] for i in [0, 3, 5]))

        self.assertFalse(
            all(predictions[0] == predictions[i] for i in [0, 1, 2, 4]))