Example #1
    def benchmark_antirectifier_bs_512_gpu_2(self):
        """Measure performance with batch_size=512, gpu=2 and

        distribution_strategy=`mirrored`.
        """
        batch_size = 512
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.x_train,
            y=self.y_train,
            batch_size=batch_size,
            num_gpus=2,
            distribution_strategy="mirrored",
            optimizer="rmsprop",
            loss=tf.keras.losses.SparseCategoricalCrossentropy(
                from_logits=True
            ),
            metrics=["sparse_categorical_accuracy"],
        )

        metadata = benchmark_util.get_keras_examples_metadata(
            "antirectifier", batch_size
        )
        extras.update(metadata)
        self.report_benchmark(
            wall_time=wall_time, metrics=metrics, extras=extras
        )
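
Each of these benchmarks hands a `self._build_model` callable to `benchmark_util.measure_performance`, which compiles and fits the model it returns. The helper itself lives elsewhere in the benchmark class; below is a minimal, hypothetical sketch of what it might look like for the antirectifier case, assuming flattened 28x28 MNIST inputs and 10 output logits (matching `from_logits=True` above). The `Antirectifier` layer is a simplified stand-in that centers its input and concatenates the positive and negative ReLU halves.

import tensorflow as tf


class Antirectifier(tf.keras.layers.Layer):
    """Simplified antirectifier: center, then concat ReLU(x) and ReLU(-x)."""

    def call(self, inputs):
        inputs -= tf.reduce_mean(inputs, axis=-1, keepdims=True)
        return tf.concat([tf.nn.relu(inputs), tf.nn.relu(-inputs)], axis=-1)


def _build_model():
    # Flattened 28x28 MNIST digits in, 10 raw logits out (the loss above uses
    # from_logits=True), so no softmax on the final Dense layer.
    return tf.keras.Sequential([
        tf.keras.Input(shape=(784,)),
        tf.keras.layers.Dense(256),
        Antirectifier(),
        tf.keras.layers.Dense(256),
        Antirectifier(),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(10),
    ])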
Example #2
    def benchmark_cnn_cifar10_bs_1024_gpu_2(self):
        """Measure performance with batch_size=1024, gpu=2 and

        distribution_strategy=`mirrored`.
        """
        batch_size = 1024
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.x_train,
            y=self.y_train,
            batch_size=batch_size,
            num_gpus=2,
            distribution_strategy="mirrored",
            epochs=self.epochs,
            optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.0001,
                                                  decay=1e-6),
            loss="categorical_crossentropy",
            metrics=["accuracy"],
        )

        metadata = benchmark_util.get_keras_examples_metadata(
            "cnn", batch_size)
        extras.update(metadata)
        self.report_benchmark(wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
Example #3
    def benchmark_conv_mnist_bs_512_gpu_2(self):
        """Measure performance with batch_size=512, gpu=2 and

        distribution_strategy='mirrored'
        """
        batch_size = 512
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.x_train,
            y=self.y_train,
            batch_size=batch_size,
            num_gpus=2,
            distribution_strategy="mirrored",
            epochs=self.epochs,
            optimizer="adam",
            loss="categorical_crossentropy",
            metrics=["accuracy"],
        )

        metadata = benchmark_util.get_keras_examples_metadata(
            "conv", batch_size
        )
        extras.update(metadata)
        self.report_benchmark(
            wall_time=wall_time, metrics=metrics, extras=extras
        )
Example #4
    def benchmark_bidirect_lstm_imdb_bs_512_gpu_2(self):
        """Measure performance with batch_size=512, gpu=2 and

        distribution_strategy=`mirrored`.
        """
        batch_size = 512
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.imdb_x,
            y=self.imdb_y,
            batch_size=batch_size,
            num_gpus=2,
            distribution_strategy="mirrored",
            optimizer="adam",
            loss="binary_crossentropy",
            metrics=["accuracy"],
        )

        metadata = benchmark_util.get_keras_examples_metadata(
            "bidirectional_lstm", batch_size
        )
        extras.update(metadata)
        self.report_benchmark(
            wall_time=wall_time, metrics=metrics, extras=extras
        )
Example #5
    def benchmark_custom_training_mnist_bs_512(self):
        """Measure performance with batch_size=512 and run_iters=5."""
        batch_size = 512
        run_iters = 5
        train_dataset = self.train_dataset.shuffle(
            buffer_size=1024).batch(batch_size)

        # Instantiate a loss function.
        loss_fn = tf.keras.losses.CategoricalCrossentropy(
            reduction=tf.keras.losses.Reduction.NONE)
        # Instantiate an optimizer to train the model.
        optimizer = tf.keras.optimizers.Adam()
        model = self._build_model()

        metrics, wall_time = self.measure_performance(
            model,
            train_dataset,
            loss_fn,
            optimizer,
            batch_size,
            run_iters,
            self.epochs,
        )
        extras = benchmark_util.get_keras_examples_metadata(
            "conv", batch_size, ".keras.ctl_graph")
        self.report_benchmark(iters=run_iters,
                              wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
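
Unlike the `measure_performance` examples above, this custom-training-loop benchmark builds the loss function and optimizer itself and passes them to a `measure_performance` method defined on the benchmark class, where the actual loop lives. As a hedged sketch, a single graph-compiled training step consistent with `Reduction.NONE` (per-example losses averaged explicitly before taking gradients) might look like the following; `train_step` is a hypothetical helper, not part of `benchmark_util`.

import tensorflow as tf


@tf.function
def train_step(model, optimizer, loss_fn, x, y):
    with tf.GradientTape() as tape:
        logits = model(x, training=True)
        # Reduction.NONE returns one loss value per example; reduce manually.
        loss = tf.reduce_mean(loss_fn(y, logits))
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss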
Example #6
  def benchmark_bidirect_lstm_imdb_bs_512(self):
    """Measure performance with batch_size=512."""
    batch_size = 512
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.imdb_x,
        y=self.imdb_y,
        batch_size=batch_size,
        optimizer='adam',
        loss='binary_crossentropy',
        metrics=['accuracy'])

    metadata = benchmark_util.get_keras_examples_metadata(
        'bidirectional_lstm', batch_size)
    extras.update(metadata)
    self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras)
Example #7
  def benchmark_cnn_cifar10_bs_512(self):
    """Measure performance with batch_size=512."""
    batch_size = 512
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.x_train,
        y=self.y_train,
        batch_size=batch_size,
        epochs=self.epochs,
        optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.0001, decay=1e-6),
        loss='categorical_crossentropy',
        metrics=['accuracy'])

    metadata = benchmark_util.get_keras_examples_metadata('cnn', batch_size)
    extras.update(metadata)
    self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras)
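
The `decay=1e-6` argument here is the legacy per-step learning-rate decay of the TF2 `tf.keras.optimizers` classes, which scales the learning rate by 1 / (1 + decay * iterations). If that argument is unavailable in your TF/Keras version (it was dropped from the newer optimizer API), a roughly equivalent setup, shown here only as a hedged sketch, uses an `InverseTimeDecay` schedule:

import tensorflow as tf

# Approximately equivalent to RMSprop(learning_rate=0.0001, decay=1e-6):
# lr(step) = 0.0001 / (1 + 1e-6 * step)
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
    initial_learning_rate=0.0001, decay_steps=1, decay_rate=1e-6)
optimizer = tf.keras.optimizers.RMSprop(learning_rate=lr_schedule)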
Example #8
  def benchmark_text_classification_bs_256(self):
    """Measure performance with batch_size=256."""
    batch_size = 256
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.imdb_x,
        y=self.imdb_y,
        batch_size=batch_size,
        optimizer='adam',
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])

    metadata = benchmark_util.get_keras_examples_metadata(
        'transformer', batch_size)
    extras.update(metadata)
    self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras)
Example #9
    def benchmark_custom_training_mnist_bs_512_gpu_2(self):
        """Measure performance with batch_size=512, run_iters=10, gpu=2 and

        distribution_strategy='mirrored'.
        """
        batch_size = 512
        run_iters = 10
        train_dataset = self.train_dataset.shuffle(
            buffer_size=1024).batch(batch_size)

        distribution_strategy = "mirrored"

        strategy = distribution_util.get_distribution_strategy(
            distribution_strategy=distribution_strategy, num_gpus=2)

        if distribution_strategy != "off":
            train_dataset = strategy.experimental_distribute_dataset(
                train_dataset)

        strategy_scope = distribution_util.get_strategy_scope(strategy)

        with strategy_scope:
            # Instantiate a loss function.
            loss_fn = tf.keras.losses.CategoricalCrossentropy(
                reduction=tf.keras.losses.Reduction.NONE)
            # Instantiate an optimizer to train the model.
            optimizer = tf.keras.optimizers.Adam()
            model = self._build_model()

        metrics, wall_time = self.measure_performance(
            model,
            train_dataset,
            loss_fn,
            optimizer,
            batch_size,
            run_iters,
            self.epochs,
            strategy,
        )
        extras = benchmark_util.get_keras_examples_metadata(
            "conv", batch_size, ".keras.ctl_graph")
        self.report_benchmark(iters=run_iters,
                              wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
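
In this distributed variant the dataset is sharded with `experimental_distribute_dataset` and the model and optimizer are created under the strategy scope, but the per-step logic is again hidden inside `self.measure_performance`. As a hedged sketch, a step run under `MirroredStrategy` would typically pair `Reduction.NONE` with `tf.nn.compute_average_loss`, so each replica scales its loss by the global batch size before gradients are all-reduced; `distributed_train_step` below is hypothetical, not part of the benchmark.

import tensorflow as tf


@tf.function
def distributed_train_step(strategy, model, optimizer, loss_fn, x, y,
                           global_batch_size):
    def step_fn(x, y):
        with tf.GradientTape() as tape:
            logits = model(x, training=True)
            per_example_loss = loss_fn(y, logits)
            # Scale by the global batch size so gradient averaging across
            # replicas stays correct.
            loss = tf.nn.compute_average_loss(
                per_example_loss, global_batch_size=global_batch_size)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        return loss

    per_replica_loss = strategy.run(step_fn, args=(x, y))
    return strategy.reduce(
        tf.distribute.ReduceOp.SUM, per_replica_loss, axis=None)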
Example #10
    def benchmark_hrnn_mnist_bs_1024(self):
        """Measure performance with batch_size=1024."""
        batch_size = 1024
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.x_train,
            y=self.y_train,
            batch_size=batch_size,
            optimizer='rmsprop',
            loss='categorical_crossentropy',
            metrics=['accuracy'])

        metadata = benchmark_util.get_keras_examples_metadata(
            'hierarchical_rnn', batch_size)
        extras.update(metadata)
        self.report_benchmark(wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
Example #11
    def benchmark_irnn_mnist_bs_256(self):
        """Measure performance with batch_size=256."""
        batch_size = 256
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.x_train,
            y=self.y_train,
            batch_size=batch_size,
            optimizer=tf.keras.optimizers.RMSprop(
                learning_rate=self.learning_rate),
            loss='categorical_crossentropy',
            metrics=['accuracy'])

        metadata = benchmark_util.get_keras_examples_metadata(
            'irnn', batch_size)
        extras.update(metadata)
        self.report_benchmark(wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
Example #12
    def benchmark_text_classification_bs_128(self):
        """Measure performance with batch_size=128."""
        batch_size = 128
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.imdb_x,
            y=self.imdb_y,
            batch_size=batch_size,
            optimizer="adam",
            loss="sparse_categorical_crossentropy",
            metrics=["accuracy"],
        )

        metadata = benchmark_util.get_keras_examples_metadata(
            "transformer", batch_size)
        extras.update(metadata)
        self.report_benchmark(wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
Example #13
    def benchmark_conv_mnist_bs_256(self):
        """Measure performance with batch_size=256."""
        batch_size = 256
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.x_train,
            y=self.y_train,
            batch_size=batch_size,
            epochs=self.epochs,
            optimizer='adam',
            loss='categorical_crossentropy',
            metrics=['accuracy'])

        metadata = benchmark_util.get_keras_examples_metadata(
            'conv', batch_size)
        extras.update(metadata)
        self.report_benchmark(wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
Example #14
    def benchmark_hrnn_mnist_bs_512(self):
        """Measure performance with batch_size=512."""
        batch_size = 512
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.x_train,
            y=self.y_train,
            batch_size=batch_size,
            optimizer="rmsprop",
            loss="categorical_crossentropy",
            metrics=["accuracy"],
        )

        metadata = benchmark_util.get_keras_examples_metadata(
            "hierarchical_rnn", batch_size)
        extras.update(metadata)
        self.report_benchmark(wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
Example #15
    def benchmark_antirectifier_bs_256(self):
        """Measure performance with batch_size=256."""
        batch_size = 256
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.x_train,
            y=self.y_train,
            batch_size=batch_size,
            optimizer="rmsprop",
            loss=tf.keras.losses.SparseCategoricalCrossentropy(
                from_logits=True),
            metrics=["sparse_categorical_accuracy"])

        metadata = benchmark_util.get_keras_examples_metadata(
            "antirectifier", batch_size)
        extras.update(metadata)
        self.report_benchmark(wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
Example #16
    def benchmark_bidirect_lstm_imdb_bs_256(self):
        """Measure performance with batch_size=256."""
        batch_size = 256
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.imdb_x,
            y=self.imdb_y,
            batch_size=batch_size,
            optimizer="adam",
            loss="binary_crossentropy",
            metrics=["accuracy"],
        )

        metadata = benchmark_util.get_keras_examples_metadata(
            "bidirectional_lstm", batch_size
        )
        extras.update(metadata)
        self.report_benchmark(
            wall_time=wall_time, metrics=metrics, extras=extras
        )
Example #17
    def benchmark_conv_mnist_bs_128(self):
        """Measure performance with batch_size=128."""
        batch_size = 128
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.x_train,
            y=self.y_train,
            batch_size=batch_size,
            epochs=self.epochs,
            optimizer="adam",
            loss="categorical_crossentropy",
            metrics=["accuracy"],
        )

        metadata = benchmark_util.get_keras_examples_metadata(
            "conv", batch_size)
        extras.update(metadata)
        self.report_benchmark(wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
Example #18
  def benchmark_text_classification_bs_512_gpu_2(self):
    """Measure performance with batch_size=512, gpu=1 and

    distribution_strategy='mirrored'
    """
    batch_size = 512
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.imdb_x,
        y=self.imdb_y,
        batch_size=batch_size,
        num_gpus=2,
        distribution_strategy='mirrored',
        optimizer='adam',
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])

    metadata = benchmark_util.get_keras_examples_metadata(
        'transformer', batch_size)
    extras.update(metadata)
    self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras)
Example #19
    def benchmark_irnn_mnist_bs_1024_gpu_2(self):
        """Measure performance with batch_size=1024, gpu=2 and

        distribution_strategy='mirrored'
        """
        batch_size = 1024
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.x_train,
            y=self.y_train,
            batch_size=batch_size,
            num_gpus=2,
            distribution_strategy='mirrored',
            optimizer=tf.keras.optimizers.RMSprop(
                learning_rate=self.learning_rate),
            loss='categorical_crossentropy',
            metrics=['accuracy'])

        metadata = benchmark_util.get_keras_examples_metadata(
            'irnn', batch_size)
        extras.update(metadata)
        self.report_benchmark(wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
Example #20
    def benchmark_mlp_reuters_bs_512_gpu_2(self):
        """Measure performance with batch_size=512, gpu=2 and

        distribution_strategy='mirrored'
        """
        batch_size = 512
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.x_train,
            y=self.y_train,
            batch_size=batch_size,
            num_gpus=2,
            distribution_strategy='mirrored',
            epochs=self.epochs,
            optimizer='adam',
            loss='categorical_crossentropy',
            metrics=['accuracy'])

        metadata = benchmark_util.get_keras_examples_metadata(
            'mlp', batch_size)
        extras.update(metadata)
        self.report_benchmark(wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)