    def benchmark_optimizer(self, optimizer, num_iters):
        """Optimizer benchmark with Bidirectional LSTM model on IMDB data.

        Args:
          optimizer: The optimizer instance to be benchmarked.
          num_iters: The number of iterations to run for performance measurement.
        """
        model, train_x, train_y = bidirect_imdb_lstm_config()
        metrics, wall_time, extras = benchmark_util.measure_performance(
            model,
            x=train_x,
            y=train_y,
            batch_size=512,
            optimizer=optimizer,
            loss="binary_crossentropy",
            metrics=["accuracy"])
        name = benchmark_util.get_benchmark_name(self._get_name())
        metadata = {
            "implementation": name[0],
            "model_name": "optimizers",
            "parameters": "lstm.512",
        }
        extras.update(metadata)
        self.report_benchmark(iters=num_iters,
                              wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
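
The helper bidirect_imdb_lstm_config() is defined elsewhere in the benchmark module and returns (model, train_x, train_y). A minimal sketch of what such a helper could plausibly return, assuming a 20000-word vocabulary, 200-token padded reviews, and a 64-unit Bidirectional LSTM (all illustrative choices, not the actual configuration):

import tensorflow as tf

def bidirect_imdb_lstm_config():
    """Hypothetical helper: returns (model, train_x, train_y) for the benchmark."""
    max_features, max_len = 20000, 200  # assumed values
    (x_train, y_train), _ = tf.keras.datasets.imdb.load_data(num_words=max_features)
    x_train = tf.keras.preprocessing.sequence.pad_sequences(x_train, maxlen=max_len)
    model = tf.keras.Sequential([
        tf.keras.layers.Embedding(max_features, 128),
        tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)),
        tf.keras.layers.Dense(1, activation='sigmoid'),
    ])
    return model, x_train, y_train
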
    def benchmark_cnn_cifar10_bs_1024_gpu_2(self):
        """Measure performance with batch_size=1024, gpu=2, and
        distribution_strategy=`mirrored`.
        """
        batch_size = 1024
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.x_train,
            y=self.y_train,
            batch_size=batch_size,
            num_gpus=2,
            distribution_strategy='mirrored',
            epochs=self.epochs,
            optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.0001,
                                                  decay=1e-6),
            loss='categorical_crossentropy',
            metrics=['accuracy'])

        metadata = benchmark_util.get_keras_examples_metadata(
            'cnn', batch_size)
        extras.update(metadata)
        self.report_benchmark(wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
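
Here self._build_model is passed as a callable, so the benchmark utility presumably rebuilds the model for each measured run. A plausible stand-in for what it constructs, loosely following the classic Keras CIFAR-10 CNN example (layer sizes are assumptions, not the benchmark's actual architecture):

import tensorflow as tf

def build_cifar10_cnn():
    """Illustrative small CNN for 32x32x3 CIFAR-10 images, 10 classes."""
    return tf.keras.Sequential([
        tf.keras.layers.Conv2D(32, (3, 3), padding='same', activation='relu',
                               input_shape=(32, 32, 3)),
        tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
        tf.keras.layers.Dropout(0.25),
        tf.keras.layers.Conv2D(64, (3, 3), padding='same', activation='relu'),
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
        tf.keras.layers.Dropout(0.25),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(512, activation='relu'),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(10, activation='softmax'),
    ])
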
Example #3
  def benchmark_irnn_mnist_bs_1024(self):
    """Measure performance with batch_size=1024."""
    batch_size = 1024
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.x_train,
        y=self.y_train,
        batch_size=batch_size,
        optimizer=tf.keras.optimizers.RMSprop(learning_rate=self.learning_rate),
        loss='categorical_crossentropy',
        metrics=['accuracy'])

    self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras)
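
The IRNN benchmark's self._build_model is not shown. A hedged sketch in the spirit of the classic pixel-by-pixel MNIST IRNN (a ReLU SimpleRNN with identity recurrent initialization; the 100-unit width is an assumption):

import tensorflow as tf

def build_irnn_model():
    """Illustrative IRNN: ReLU SimpleRNN over 784 pixel steps, 10-way softmax."""
    return tf.keras.Sequential([
        tf.keras.layers.SimpleRNN(
            100,
            activation='relu',
            kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.001),
            recurrent_initializer=tf.keras.initializers.Identity(gain=1.0),
            input_shape=(784, 1)),
        tf.keras.layers.Dense(10, activation='softmax'),
    ])
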
Example #4
  def benchmark_mnist_convnet(self, batch_size, run_iters):
    """Benchmark for Convnet model on synthetic mnist data."""
    convnet_x = np.random.random((5000, 28, 28, 1))
    convnet_y = np.random.random((5000, 10))
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._mnist_convnet,
        x=convnet_x,
        y=convnet_y,
        batch_size=batch_size,
        run_iters=run_iters,
        optimizer=_OPTIMIZER,
        loss=_LOSS)
    self.report_benchmark(
        iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras)
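
The synthetic inputs are shaped (5000, 28, 28, 1) and (5000, 10), so self._mnist_convnet presumably builds a small image classifier with a 10-way output (_OPTIMIZER and _LOSS are module-level constants defined elsewhere in that file). One plausible sketch, with assumed filter counts:

import tensorflow as tf

def build_mnist_convnet():
    """Illustrative convnet matching 28x28x1 inputs and 10 output units."""
    return tf.keras.Sequential([
        tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
        tf.keras.layers.Dropout(0.25),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(10, activation='softmax'),
    ])
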
Example #5
  def benchmark_imdb_lstm(self, batch_size, run_iters):
    """Benchmark for LSTM model on synthetic imdb review dataset."""
    lstm_x = np.random.randint(0, 1999, size=(2500, 100))
    lstm_y = np.random.random((2500, 1))
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._imdb_lstm,
        x=lstm_x,
        y=lstm_y,
        batch_size=batch_size,
        run_iters=run_iters,
        optimizer=_OPTIMIZER,
        loss=_LOSS)
    self.report_benchmark(
        iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras)
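
lstm_x holds integer token ids below 2000 in length-100 sequences and lstm_y is a single column, so self._imdb_lstm is presumably an embedding plus LSTM classifier with one output unit. A sketch under those assumptions:

import tensorflow as tf

def build_imdb_lstm():
    """Illustrative LSTM over sequences of ids < 2000, single sigmoid output."""
    return tf.keras.Sequential([
        tf.keras.layers.Embedding(2000, 128),
        tf.keras.layers.LSTM(128, dropout=0.2, recurrent_dropout=0.2),
        tf.keras.layers.Dense(1, activation='sigmoid'),
    ])
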
Example #6
  def benchmark_mlp_reuters_bs_512(self):
    """Measure performance with batch_size=512."""
    batch_size = 512
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.x_train,
        y=self.y_train,
        batch_size=batch_size,
        epochs=self.epochs,
        optimizer='adam',
        loss='categorical_crossentropy',
        metrics=['accuracy'])

    self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras)
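
A hedged sketch of the kind of Reuters topic-classification MLP that self._build_model could produce here, assuming 1000-dimensional bag-of-words vectors and the dataset's 46 topics (the vectorization size is an assumption):

import tensorflow as tf

def build_reuters_mlp(max_words=1000):
    """Illustrative MLP for bag-of-words Reuters vectors (46 topic classes)."""
    return tf.keras.Sequential([
        tf.keras.layers.Dense(512, activation='relu', input_shape=(max_words,)),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(46, activation='softmax'),
    ])
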
Example #7
  def benchmark_mnist_mlp(self, batch_size, run_iters):
    """Benchmark for MLP model on synthetic mnist data."""
    mlp_x = np.random.random((5000, 784))
    mlp_y = np.random.random((5000, 10))
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._mnist_mlp,
        x=mlp_x,
        y=mlp_y,
        batch_size=batch_size,
        run_iters=run_iters,
        optimizer=_OPTIMIZER,
        loss=_LOSS)
    self.report_benchmark(
        iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras)
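
mlp_x is (5000, 784) and mlp_y is (5000, 10), so self._mnist_mlp is a plain fully connected stack; a sketch with assumed layer widths:

import tensorflow as tf

def build_mnist_mlp():
    """Illustrative MLP: 784-dim inputs, two hidden layers, 10 outputs."""
    return tf.keras.Sequential([
        tf.keras.layers.Dense(256, activation='relu', input_shape=(784,)),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(256, activation='relu'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(10, activation='softmax'),
    ])
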
Example #8
    def benchmark_bidirect_lstm_imdb_bs_512(self):
        """Measure performance with batch_size=512."""
        batch_size = 512
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.imdb_x,
            y=self.imdb_y,
            batch_size=batch_size,
            optimizer='adam',
            loss='binary_crossentropy',
            metrics=['accuracy'])

        self.report_benchmark(wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
    def benchmark_hrnn_mnist_bs_256(self):
        """Measure performance with batch_size=256."""
        batch_size = 256
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.x_train,
            y=self.y_train,
            batch_size=batch_size,
            optimizer='rmsprop',
            loss='categorical_crossentropy',
            metrics=['accuracy'])

        self.report_benchmark(wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
    def benchmark_text_classification_bs_512(self):
        """Measure performance with batch_size=512."""
        batch_size = 512
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.imdb_x,
            y=self.imdb_y,
            batch_size=batch_size,
            optimizer='adam',
            loss='sparse_categorical_crossentropy',
            metrics=['accuracy'])

        self.report_benchmark(wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
  def benchmark_text_classification_bs_256(self):
    """Measure performance with batch_size=256."""
    batch_size = 256
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.imdb_x,
        y=self.imdb_y,
        batch_size=batch_size,
        optimizer='adam',
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])

    metadata = benchmark_util.get_keras_examples_metadata(
        'transformer', batch_size)
    extras.update(metadata)
    self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras)
Example #12
  def benchmark_antirectifier_bs_512(self):
    """Measure performance with batch_size=512."""
    batch_size = 512
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.x_train,
        y=self.y_train,
        batch_size=batch_size,
        optimizer="rmsprop",
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=["sparse_categorical_accuracy"])

    metadata = benchmark_util.get_keras_examples_metadata(
        "antirectifier", batch_size)
    extras.update(metadata)
    self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras)
    def benchmark_antirectifier_bs_256(self):
        """Measure performance with batch_size=256."""
        batch_size = 256
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.x_train,
            y=self.y_train,
            batch_size=batch_size,
            optimizer="rmsprop",
            loss=tf.keras.losses.SparseCategoricalCrossentropy(
                from_logits=True),
            metrics=["sparse_categorical_accuracy"])

        self.report_benchmark(wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
Example #14
  def benchmark_conv_mnist_bs_256(self):
    """Measure performance with batch_size=256 and run_iters=3."""
    batch_size = 256
    run_iters = 3
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.x_train,
        y=self.y_train,
        batch_size=batch_size,
        run_iters=run_iters,
        epochs=self.epochs,
        optimizer='adam',
        loss='categorical_crossentropy',
        metrics=['accuracy'])

    self.report_benchmark(
        iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras)
Example #15
    def benchmark_cnn_cifar10_bs_512(self):
        """Measure performance with batch_size=512."""
        batch_size = 512
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.x_train,
            y=self.y_train,
            batch_size=batch_size,
            epochs=self.epochs,
            optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.0001,
                                                  decay=1e-6),
            loss='categorical_crossentropy',
            metrics=['accuracy'])

        self.report_benchmark(wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
    def benchmark_hrnn_mnist_bs_1024(self):
        """Measure performance with batch_size=1024."""
        batch_size = 1024
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.x_train,
            y=self.y_train,
            batch_size=batch_size,
            optimizer='rmsprop',
            loss='categorical_crossentropy',
            metrics=['accuracy'])

        metadata = benchmark_util.get_keras_examples_metadata(
            'hierarchical_rnn', batch_size)
        extras.update(metadata)
        self.report_benchmark(wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
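
The hierarchical RNN ('hrnn') benchmark encodes each MNIST row with one LSTM and the resulting sequence of row vectors with another. A sketch of such a model (128 units per level is an assumption):

import tensorflow as tf

def build_hrnn_model(row_hidden=128, col_hidden=128):
    """Illustrative hierarchical RNN: row-wise LSTM, then column-wise LSTM."""
    inputs = tf.keras.Input(shape=(28, 28, 1))
    # Encode every row (a length-28 sequence of single pixels) independently.
    encoded_rows = tf.keras.layers.TimeDistributed(
        tf.keras.layers.LSTM(row_hidden))(inputs)
    # Encode the resulting sequence of 28 row vectors.
    encoded_image = tf.keras.layers.LSTM(col_hidden)(encoded_rows)
    outputs = tf.keras.layers.Dense(10, activation='softmax')(encoded_image)
    return tf.keras.Model(inputs, outputs)
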
    def benchmark_bidirect_lstm_imdb_bs_256(self):
        """Measure performance with batch_size=256."""
        batch_size = 256
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.imdb_x,
            y=self.imdb_y,
            batch_size=batch_size,
            optimizer='adam',
            loss='binary_crossentropy',
            metrics=['accuracy'])

        metadata = benchmark_util.get_keras_examples_metadata(
            'bidirectional_lstm', batch_size)
        extras.update(metadata)
        self.report_benchmark(wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
Example #18
    def benchmark_mlp_reuters_bs_256(self):
        """Measure performance with batch_size=256."""
        batch_size = 256
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.x_train,
            y=self.y_train,
            batch_size=batch_size,
            epochs=self.epochs,
            optimizer='adam',
            loss='categorical_crossentropy',
            metrics=['accuracy'])

        metadata = benchmark_util.get_keras_examples_metadata(
            'mlp', batch_size)
        extras.update(metadata)
        self.report_benchmark(wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
Example #19
    def benchmark_irnn_mnist_bs_512(self):
        """Measure performance with batch_size=512."""
        batch_size = 512
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.x_train,
            y=self.y_train,
            batch_size=batch_size,
            optimizer=tf.keras.optimizers.RMSprop(
                learning_rate=self.learning_rate),
            loss='categorical_crossentropy',
            metrics=['accuracy'])

        metadata = benchmark_util.get_keras_examples_metadata(
            'irnn', batch_size)
        extras.update(metadata)
        self.report_benchmark(wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
    def benchmark_pixel_cnn_bs_512(self):
        """Measure performance with batch_size=512 and run_iters=4."""
        batch_size = 512
        run_iters = 4
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.x_train,
            y=self.y_train,
            batch_size=batch_size,
            run_iters=run_iters,
            optimizer="rmsprop",
            loss=tf.keras.losses.SparseCategoricalCrossentropy(
                from_logits=True),
            metrics=["sparse_categorical_accuracy"])

        self.report_benchmark(iters=run_iters,
                              wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
Example #21
    def benchmark_bidirect_lstm_imdb_bs_512_gpu_2(self):
        """Measure performance with batch_size=512, gpu=2, and
        distribution_strategy=`mirrored`.
        """
        batch_size = 512
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.imdb_x,
            y=self.imdb_y,
            batch_size=batch_size,
            num_gpus=2,
            distribution_strategy='mirrored',
            optimizer='adam',
            loss='binary_crossentropy',
            metrics=['accuracy'])

        self.report_benchmark(wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
    def benchmark_hrnn_mnist_bs_1024_gpu_2(self):
        """Measure performance with batch_size=1024, gpu=2, and
        distribution_strategy='mirrored'.
        """
        batch_size = 1024
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.x_train,
            y=self.y_train,
            batch_size=batch_size,
            num_gpus=2,
            distribution_strategy='mirrored',
            optimizer='rmsprop',
            loss='categorical_crossentropy',
            metrics=['accuracy'])

        self.report_benchmark(wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
Example #23
    def benchmark_bidirect_lstm_imdb(self, batch_size, run_iters):
        """Benchmark for Bidirectional LSTM on IMDB."""
        # Load dataset.
        (x_train,
         y_train), _ = tf.keras.datasets.imdb.load_data(num_words=_MAX_FEATURE)
        x_train = tf.keras.preprocessing.sequence.pad_sequences(
            x_train, maxlen=_MAX_LEN)
        results = benchmark_util.measure_performance(
            self._lstm_imdb_model,
            x=x_train,
            y=y_train,
            batch_size=batch_size,
            run_iters=run_iters,
            optimizer='adam',
            loss='binary_crossentropy',
            metrics=['accuracy'])

        self.report_benchmark(iters=run_iters,
                              wall_time=results['wall_time'],
                              extras=results)
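
In this variant measure_performance is given self._lstm_imdb_model and its result is used as a dict. _MAX_FEATURE and _MAX_LEN are module constants defined elsewhere; assuming a vocabulary-sized embedding and two stacked bidirectional LSTMs (the 20000-word vocabulary and 64-unit width are stand-ins, not the module's actual values), the model builder might look like:

import tensorflow as tf

MAX_FEATURES = 20000  # stand-in for the module's _MAX_FEATURE constant

def build_imdb_bidirectional_lstm():
    """Illustrative stacked Bidirectional LSTM for padded IMDB sequences."""
    inputs = tf.keras.Input(shape=(None,), dtype='int32')
    x = tf.keras.layers.Embedding(MAX_FEATURES, 128)(inputs)
    x = tf.keras.layers.Bidirectional(
        tf.keras.layers.LSTM(64, return_sequences=True))(x)
    x = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64))(x)
    outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)
    return tf.keras.Model(inputs, outputs)
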
  def benchmark_text_classification_bs_512_gpu_2(self):
    """Measure performance with batch_size=512, gpu=2, and
    distribution_strategy='mirrored'.
    """
    batch_size = 512
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.imdb_x,
        y=self.imdb_y,
        batch_size=batch_size,
        num_gpus=2,
        distribution_strategy='mirrored',
        optimizer='adam',
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])

    metadata = benchmark_util.get_keras_examples_metadata(
        'transformer', batch_size)
    extras.update(metadata)
    self.report_benchmark(wall_time=wall_time, metrics=metrics, extras=extras)
    def benchmark_antirectifier_bs_512_gpu_2(self):
        """Measure performance with batch_size=512, gpu=2, and
        distribution_strategy=`mirrored`.
        """
        batch_size = 512
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.x_train,
            y=self.y_train,
            batch_size=batch_size,
            num_gpus=2,
            distribution_strategy="mirrored",
            optimizer="rmsprop",
            loss=tf.keras.losses.SparseCategoricalCrossentropy(
                from_logits=True),
            metrics=["sparse_categorical_accuracy"])

        self.report_benchmark(wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
Example #26
  def benchmark_conv_mnist_bs_256_gpu_2(self):
    """Measure performance with batch_size=256, run_iters=3, gpu=2, and
    distribution_strategy='mirrored'.
    """
    batch_size = 256
    run_iters = 3
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.x_train,
        y=self.y_train,
        batch_size=batch_size,
        run_iters=run_iters,
        num_gpus=2,
        distribution_strategy='mirrored',
        epochs=self.epochs,
        optimizer='adam',
        loss='categorical_crossentropy',
        metrics=['accuracy'])

    self.report_benchmark(
        iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras)
Example #27
    def benchmark_text_classification_bs_512_gpu_2(self):
        """Measure performance with batch_size=512, run_iters=4, gpu=2, and
        distribution_strategy='mirrored'.
        """
        batch_size = 512
        run_iters = 4
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.imdb_x,
            y=self.imdb_y,
            batch_size=batch_size,
            run_iters=run_iters,
            num_gpus=2,
            distribution_strategy='mirrored',
            optimizer='adam',
            loss='sparse_categorical_crossentropy',
            metrics=['accuracy'])

        self.report_benchmark(iters=run_iters,
                              wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
Example #28
    def benchmark_irnn_mnist_bs_1024_gpu_3(self):
        """Measure performance with batch_size=1024, run_iters=3, gpu=3, and
        distribution_strategy='mirrored'.
        """
        batch_size = 1024
        run_iters = 3
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.x_train,
            y=self.y_train,
            batch_size=batch_size,
            run_iters=run_iters,
            num_gpus=3,
            distribution_strategy='mirrored',
            optimizer=tf.keras.optimizers.RMSprop(
                learning_rate=self.learning_rate),
            loss='categorical_crossentropy',
            metrics=['accuracy'])

        self.report_benchmark(iters=run_iters,
                              wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
Example #29
    def benchmark_mlp_reuters_bs_512_gpu_2(self):
        """Measure performance with batch_size=512, gpu=2, and
        distribution_strategy='mirrored'.
        """
        batch_size = 512
        metrics, wall_time, extras = benchmark_util.measure_performance(
            self._build_model,
            x=self.x_train,
            y=self.y_train,
            batch_size=batch_size,
            num_gpus=2,
            distribution_strategy='mirrored',
            epochs=self.epochs,
            optimizer='adam',
            loss='categorical_crossentropy',
            metrics=['accuracy'])

        metadata = benchmark_util.get_keras_examples_metadata(
            'mlp', batch_size)
        extras.update(metadata)
        self.report_benchmark(wall_time=wall_time,
                              metrics=metrics,
                              extras=extras)
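
Several of the benchmarks above pass num_gpus and distribution_strategy='mirrored'; benchmark_util.measure_performance is responsible for applying the strategy internally. Purely as an illustration of the general pattern (not the actual benchmark_util implementation), building and compiling a model under a MirroredStrategy scope looks like this:

import tensorflow as tf

def fit_under_mirrored_strategy(model_fn, x, y, batch_size, num_gpus=2,
                                optimizer='adam', loss='binary_crossentropy',
                                epochs=2):
    """Illustrative sketch only: replicate a Keras model across num_gpus GPUs."""
    devices = ['/gpu:%d' % i for i in range(num_gpus)]
    strategy = tf.distribute.MirroredStrategy(devices=devices)
    with strategy.scope():
        # Model creation and compilation must happen inside the strategy scope.
        model = model_fn()
        model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
    # fit() splits each global batch across the replicas automatically.
    return model.fit(x, y, batch_size=batch_size, epochs=epochs, verbose=0)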