Example #1
class KerasLayerBenchmarksBackwardXLA(
        six.with_metaclass(tf.__internal__.test.ParameterizedBenchmark,
                           layer_benchmarks_test_base.LayerBenchmarksBase)):

    _benchmark_parameters = benchmark_util.generate_benchmark_params_cpu_gpu([
        ("Conv2D_small_shape", tf.keras.layers.Conv2D, {
            "filters": 1,
            "kernel_size": 1,
            "activation": "relu"
        }, {
            "input_shape": (1, 1, 1, 1)
        }, 10000),
        ("Conv2D_normal_shape", tf.keras.layers.Conv2D, {
            "filters": 1,
            "kernel_size": 1,
            "activation": "relu"
        }, {
            "input_shape": (64, 28, 28, 3)
        }, 10000),
        # TODO(b/153480400)
        # ("LSTM_small_shape", tf.keras.layers.LSTM,
        #  {"units": 1}, {"input_shape": (1, 1, 1)}, 10000),
        # ("LSTM_normal_shape", tf.keras.layers.LSTM,
        #  {"units": 4}, {"input_shape": (32, 10, 8)}, 10000),
        ("Embedding_small_shape", tf.keras.layers.Embedding, {
            "input_dim": 1,
            "output_dim": 1,
            "input_length": 1
        }, {
            "input": np.random.randint(1, size=(1, 1))
        }, 10),
        ("Embedding_normal_shape", tf.keras.layers.Embedding, {
            "input_dim": 1000,
            "output_dim": 64,
            "input_length": 10
        }, {
            "input": np.random.randint(1000, size=(32, 10))
        }, 10),
    ])

    def benchmark_layer_call_backward_with_xla(self, layer_cls, layer_args,
                                               inputs, num_iters):
        name = benchmark_util.get_benchmark_name(self._get_name())
        # TODO(b/173461426)
        if layer_cls is tf.keras.layers.Embedding and name[-1] == "GPU":
            return
        layer = layer_cls(**layer_args)
        x = _get_input_data(inputs)
        layer.call = tf.function(layer.call, jit_compile=True)

        fn = functools.partial(_layer_call_backward, layer, x)
        metadata = {"implementation": name[0] + ".layer.call.backward.xla"}
        metadata.update(_get_metadata(name))
        self.run_report(fn, num_iters, metadata)
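The helpers `_get_input_data` and `_layer_call_backward` used above are defined elsewhere in the benchmark file and are not shown here. A minimal sketch of what they could look like, inferred only from how they are called (the real implementations may differ):

import tensorflow as tf

def _get_input_data(inputs):
    # Benchmark parameters pass either a ready-made "input" array or an
    # "input_shape" from which a dummy tensor is built.
    if "input_shape" in inputs:
        return tf.ones(inputs["input_shape"])
    elif "input" in inputs:
        return inputs["input"]
    else:
        raise ValueError(
            "Expected either `input_shape` or `input` in the benchmark inputs.")

def _layer_call_backward(layer, x):
    # Run a forward pass under a gradient tape and differentiate a scalar
    # loss, so the benchmark exercises both forward and backward computation.
    with tf.GradientTape() as tape:
        y = layer(x)
        loss = tf.reduce_mean(y ** 2)
    return tape.gradient(loss, layer.trainable_variables)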
Example #2
    def test_generate_benchmark_params_cpu_gpu(self):
        adam_opt = tf.keras.optimizers.Adam()
        sgd_opt = tf.keras.optimizers.SGD()
        params = [
            ("Adam", adam_opt, 10),
            ("SGD", sgd_opt, 10),
        ]
        expected = [
            ("Adam_CPU", adam_opt, 10),
            ("SGD_CPU", sgd_opt, 10),
            ("Adam_GPU", adam_opt, 10),
            ("SGD_GPU", sgd_opt, 10),
        ]

        out = benchmark_util.generate_benchmark_params_cpu_gpu(params)
        self.assertAllEqual(out, expected)
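Judging by the expected output of this test, `generate_benchmark_params_cpu_gpu` duplicates every parameter tuple per device, suffixing the benchmark name with "_CPU" and "_GPU" and returning the CPU variants followed by the GPU variants. A sketch of that behavior (not necessarily the exact code in `benchmark_util`):

def generate_benchmark_params_cpu_gpu(params):
    # The benchmark name is the first element of each tuple; everything else
    # is passed through unchanged.
    cpu_params = [(p[0] + "_CPU",) + p[1:] for p in params]
    gpu_params = [(p[0] + "_GPU",) + p[1:] for p in params]
    return cpu_params + gpu_params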
Example #3
class KerasOptimizerBenchmark(
    tf.test.Benchmark, metaclass=ParameterizedBenchmark
):
    """Keras optimizer benchmarks."""

    # The parameter of each benchmark test is a tuple whose first element is
    # the optimizer name.
    _benchmark_parameters = benchmark_util.generate_benchmark_params_cpu_gpu(
        [
            ("Adam", tf.keras.optimizers.Adam(), 10),
            ("NonFusedAdam", adam.NonFusedAdam(), 10),
        ]
    )

    def benchmark_optimizer(self, optimizer, num_iters):
        """Optimizer benchmark with Bidirectional LSTM model on IMDB data.

        Args:
          optimizer: The optimizer instance to be benchmarked.
          num_iters: The number of iterations to run for performance
            measurement.
        """
        model, train_x, train_y = bidirect_imdb_lstm_config()
        metrics, wall_time, extras = benchmark_util.measure_performance(
            model,
            x=train_x,
            y=train_y,
            batch_size=512,
            optimizer=optimizer,
            loss="binary_crossentropy",
            metrics=["accuracy"],
        )
        name = benchmark_util.get_benchmark_name(self._get_name())
        metadata = {
            "implementation": name[0],
            "model_name": "optimizers",
            "parameters": "lstm.512",
        }
        extras.update(metadata)
        self.report_benchmark(
            iters=num_iters, wall_time=wall_time, metrics=metrics, extras=extras
        )
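`bidirect_imdb_lstm_config` is not shown in the snippet; according to the docstring it builds a Bidirectional LSTM model on IMDB data. A plausible sketch, with illustrative vocabulary and sequence-length values (the actual helper may choose different sizes):

import tensorflow as tf
from tensorflow import keras

def bidirect_imdb_lstm_config():
    # Load IMDB reviews as padded integer sequences.
    max_features, maxlen = 20000, 200  # assumed values
    (train_x, train_y), _ = keras.datasets.imdb.load_data(num_words=max_features)
    train_x = keras.preprocessing.sequence.pad_sequences(train_x, maxlen=maxlen)

    # Bidirectional LSTM binary classifier; compilation happens later inside
    # benchmark_util.measure_performance with the benchmarked optimizer.
    model = keras.Sequential([
        keras.layers.Embedding(max_features, 128, input_length=maxlen),
        keras.layers.Bidirectional(keras.layers.LSTM(64)),
        keras.layers.Dense(1, activation="sigmoid"),
    ])
    return model, train_x, train_y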
Example #4
class KerasLayerBenchmarks(
        layer_benchmarks_test_base.LayerBenchmarksBase,
        metaclass=tf.__internal__.test.ParameterizedBenchmark,
):

    # The parameter of each layer benchmark is a tuple whose first element is
    # the benchmark name. It must follow the
    # "{layer_name}_{small|normal|large}_shape" convention to stay compatible
    # with the `self.report_benchmark()` method.
    _benchmark_parameters = benchmark_util.generate_benchmark_params_cpu_gpu(
        CORE_LAYERS + CONV_LAYERS + RECURRENT_LAYERS + NORMALIZATION_LAYERS +
        REGULARIZATION_LAYERS + ATTENSION_LAYERS + POOLING_LAYERS)

    def benchmark_layer_call(self, layer_cls, layer_args, inputs, num_iters):
        layer = layer_cls(**_get_layer_args(layer_cls, layer_args))
        x = _get_input_data(inputs)

        fn = functools.partial(layer, x)
        name = benchmark_util.get_benchmark_name(self._get_name())
        metadata = {"implementation": name[0] + ".layer.call"}
        metadata.update(_get_metadata(name))
        self.run_report(fn, num_iters, metadata)

    def benchmark_layer_call_with_function(self, layer_cls, layer_args, inputs,
                                           num_iters):
        layer = layer_cls(**_get_layer_args(layer_cls, layer_args))
        x = _get_input_data(inputs)
        layer.call = tf.function(layer.call)

        fn = functools.partial(layer, x)
        name = benchmark_util.get_benchmark_name(self._get_name())
        metadata = {"implementation": name[0] + ".layer.call.function"}
        metadata.update(_get_metadata(name))
        self.run_report(fn, num_iters, metadata)

    def benchmark_layer_call_with_xla(self, layer_cls, layer_args, inputs,
                                      num_iters):
        name = benchmark_util.get_benchmark_name(self._get_name())
        # TODO(b/173461426)
        if layer_cls is tf.keras.layers.Embedding and name[-1] == "GPU":
            return
        layer = layer_cls(**_get_layer_args(layer_cls, layer_args))
        x = _get_input_data(inputs)
        layer.call = tf.function(layer.call, jit_compile=True)

        fn = functools.partial(layer, x)
        metadata = {"implementation": name[0] + ".layer.call.xla"}
        metadata.update(_get_metadata(name))
        self.run_report(fn, num_iters, metadata)

    def benchmark_layer_call_backward(self, layer_cls, layer_args, inputs,
                                      num_iters):
        layer = layer_cls(**_get_layer_args(layer_cls, layer_args))
        x = _get_input_data(inputs)

        fn = functools.partial(_layer_call_backward, layer, x)
        name = benchmark_util.get_benchmark_name(self._get_name())
        metadata = {"implementation": name[0] + ".layer.call.backward"}
        metadata.update(_get_metadata(name))
        self.run_report(fn, num_iters, metadata)

    def benchmark_layer_call_backward_with_function(self, layer_cls,
                                                    layer_args, inputs,
                                                    num_iters):
        layer = layer_cls(**_get_layer_args(layer_cls, layer_args))
        x = _get_input_data(inputs)
        layer.call = tf.function(layer.call)

        fn = functools.partial(_layer_call_backward, layer, x)
        name = benchmark_util.get_benchmark_name(self._get_name())
        metadata = {
            "implementation": name[0] + ".layer.call.backward.function"
        }
        metadata.update(_get_metadata(name))
        self.run_report(fn, num_iters, metadata)

    def benchmark_layer_call_backward_with_xla(self, layer_cls, layer_args,
                                               inputs, num_iters):
        name = benchmark_util.get_benchmark_name(self._get_name())
        # TODO(b/153480400)
        if layer_cls in [
                tf.keras.layers.LSTM,
                tf.keras.layers.Bidirectional,
                tf.keras.layers.ConvLSTM2D,
                tf.keras.layers.GRU,
                tf.keras.layers.RNN,
                tf.keras.layers.SimpleRNN,
        ]:
            return
        # TODO(b/173461426)
        if layer_cls is tf.keras.layers.Embedding and name[-1] == "GPU":
            return
        layer = layer_cls(**_get_layer_args(layer_cls, layer_args))
        x = _get_input_data(inputs)
        layer.call = tf.function(layer.call, jit_compile=True)

        fn = functools.partial(_layer_call_backward, layer, x)
        metadata = {"implementation": name[0] + ".layer.call.backward.xla"}
        metadata.update(_get_metadata(name))
        self.run_report(fn, num_iters, metadata)
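The layer groups referenced above (CORE_LAYERS, CONV_LAYERS, RECURRENT_LAYERS, NORMALIZATION_LAYERS, REGULARIZATION_LAYERS, ATTENSION_LAYERS as spelled in the source, and POOLING_LAYERS) are lists of parameter tuples in the same (benchmark_name, layer_cls, layer_args, inputs, num_iters) format used in the other examples. A hypothetical excerpt, for illustration only:

import numpy as np
import tensorflow as tf

# Illustrative entries; the real lists in the benchmark file are longer.
CORE_LAYERS = [
    ("Dense_small_shape", tf.keras.layers.Dense,
     {"units": 32, "activation": "relu"},
     {"input_shape": (32, 16)}, 100),
    ("Embedding_normal_shape", tf.keras.layers.Embedding,
     {"input_dim": 1000, "output_dim": 64, "input_length": 10},
     {"input": np.random.randint(1000, size=(32, 10))}, 100),
]

CONV_LAYERS = [
    ("Conv2D_normal_shape", tf.keras.layers.Conv2D,
     {"filters": 1, "kernel_size": 1, "activation": "relu"},
     {"input_shape": (64, 28, 28, 3)}, 100),
]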
Example #5
class KerasLayerBenchmarks(
        six.with_metaclass(tf.__internal__.test.ParameterizedBenchmark,
                           layer_benchmarks_test_base.LayerBenchmarksBase)):

    # The parameter of each layer benchmark is a tuple whose first element is
    # the benchmark name. It must follow the
    # "{layer_name}_{small|normal|large}_shape" convention to stay compatible
    # with the `self.report_benchmark()` method.
    _benchmark_parameters = benchmark_util.generate_benchmark_params_cpu_gpu([
        ("Conv2D_small_shape", tf.keras.layers.Conv2D, {
            "filters": 1,
            "kernel_size": 1,
            "activation": "relu"
        }, {
            "input_shape": (1, 1, 1, 1)
        }, 100),
        ("Conv2D_normal_shape", tf.keras.layers.Conv2D, {
            "filters": 1,
            "kernel_size": 1,
            "activation": "relu"
        }, {
            "input_shape": (64, 28, 28, 3)
        }, 100),
        ("LSTM_small_shape", tf.keras.layers.LSTM, {
            "units": 1
        }, {
            "input_shape": (1, 1, 1)
        }, 100),
        ("LSTM_normal_shape", tf.keras.layers.LSTM, {
            "units": 4
        }, {
            "input_shape": (32, 10, 8)
        }, 100),
        ("Embedding_small_shape", tf.keras.layers.Embedding, {
            "input_dim": 1,
            "output_dim": 1,
            "input_length": 1
        }, {
            "input": np.random.randint(1, size=(1, 1))
        }, 100),
        ("Embedding_normal_shape", tf.keras.layers.Embedding, {
            "input_dim": 1000,
            "output_dim": 64,
            "input_length": 10
        }, {
            "input": np.random.randint(1000, size=(32, 10))
        }, 100),
    ])

    def benchmark_layer_call(self, layer_cls, layer_args, inputs, num_iters):
        layer = layer_cls(**layer_args)
        x = _get_input_data(inputs)

        fn = functools.partial(layer, x)
        name = benchmark_util.get_benchmark_name(self._get_name())
        metadata = {"implementation": name[0] + ".layer.call"}
        metadata.update(_get_metadata(name))
        self.run_report(fn, num_iters, metadata)

    def benchmark_layer_call_with_function(self, layer_cls, layer_args, inputs,
                                           num_iters):
        layer = layer_cls(**layer_args)
        x = _get_input_data(inputs)
        layer.call = tf.function(layer.call)

        fn = functools.partial(layer, x)
        name = benchmark_util.get_benchmark_name(self._get_name())
        metadata = {"implementation": name[0] + ".layer.call.function"}
        metadata.update(_get_metadata(name))
        self.run_report(fn, num_iters, metadata)

    def benchmark_layer_call_with_xla(self, layer_cls, layer_args, inputs,
                                      num_iters):
        name = benchmark_util.get_benchmark_name(self._get_name())
        # TODO(b/173461426)
        if layer_cls is tf.keras.layers.Embedding and name[-1] == "GPU":
            return
        layer = layer_cls(**layer_args)
        x = _get_input_data(inputs)
        layer.call = tf.function(layer.call, jit_compile=True)

        fn = functools.partial(layer, x)
        metadata = {"implementation": name[0] + ".layer.call.xla"}
        metadata.update(_get_metadata(name))
        self.run_report(fn, num_iters, metadata)

    def benchmark_layer_call_backward(self, layer_cls, layer_args, inputs,
                                      num_iters):
        layer = layer_cls(**layer_args)
        x = _get_input_data(inputs)

        fn = functools.partial(_layer_call_backward, layer, x)
        name = benchmark_util.get_benchmark_name(self._get_name())
        metadata = {"implementation": name[0] + ".layer.call.backward"}
        metadata.update(_get_metadata(name))
        self.run_report(fn, num_iters, metadata)

    def benchmark_layer_call_backward_with_function(self, layer_cls,
                                                    layer_args, inputs,
                                                    num_iters):
        layer = layer_cls(**layer_args)
        x = _get_input_data(inputs)
        layer.call = tf.function(layer.call)

        fn = functools.partial(_layer_call_backward, layer, x)
        name = benchmark_util.get_benchmark_name(self._get_name())
        metadata = {
            "implementation": name[0] + ".layer.call.backward.function"
        }
        metadata.update(_get_metadata(name))
        self.run_report(fn, num_iters, metadata)

    def benchmark_layer_call_backward_with_xla(self, layer_cls, layer_args,
                                               inputs, num_iters):
        name = benchmark_util.get_benchmark_name(self._get_name())
        # TODO(b/153480400)
        if layer_cls is tf.keras.layers.LSTM:
            return
        # TODO(b/173461426)
        if layer_cls is tf.keras.layers.Embedding and name[-1] == "GPU":
            return
        layer = layer_cls(**layer_args)
        x = _get_input_data(inputs)
        layer.call = tf.function(layer.call, jit_compile=True)

        fn = functools.partial(_layer_call_backward, layer, x)
        metadata = {"implementation": name[0] + ".layer.call.backward.xla"}
        metadata.update(_get_metadata(name))
        self.run_report(fn, num_iters, metadata)
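Every benchmark above delegates timing and reporting to `self.run_report`, inherited from `layer_benchmarks_test_base.LayerBenchmarksBase`. That base class is not shown; a minimal sketch of how such a class could time the callable and forward the results to `tf.test.Benchmark.report_benchmark` (the real implementation is more elaborate and also collects metrics):

import time
import tensorflow as tf

class LayerBenchmarksBase(tf.test.Benchmark):
    """Times a callable and reports results via report_benchmark."""

    def run_report(self, fn, num_iters, metadata=None):
        fn()  # Warm-up call so tracing/XLA compilation is not measured.
        start = time.time()
        for _ in range(num_iters):
            fn()
        wall_time = (time.time() - start) / num_iters
        self.report_benchmark(
            iters=num_iters, wall_time=wall_time, extras=metadata or {})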