def benchmark_hrnn_mnist_bs_256(self):
    """Measure performance with batch_size=256."""
    batch_size = 256
    # Run the hierarchical-RNN MNIST model through the shared harness.
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.x_train,
        y=self.y_train,
        batch_size=batch_size,
        optimizer='rmsprop',
        loss='categorical_crossentropy',
        metrics=['accuracy'])
    # Fold the standard keras-examples metadata into the reported extras.
    extras.update(benchmark_util.get_keras_examples_metadata(
        'hierarchical_rnn', batch_size))
    self.report_benchmark(
        wall_time=wall_time, metrics=metrics, extras=extras)
def benchmark_text_classification_bs_256(self):
    """Measure performance with batch_size=256."""
    batch_size = 256
    # Keyword arguments forwarded to the shared measurement harness.
    run_kwargs = {
        'x': self.imdb_x,
        'y': self.imdb_y,
        'batch_size': batch_size,
        'optimizer': 'adam',
        'loss': 'sparse_categorical_crossentropy',
        'metrics': ['accuracy'],
    }
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model, **run_kwargs)
    extras.update(benchmark_util.get_keras_examples_metadata(
        'transformer', batch_size))
    self.report_benchmark(
        wall_time=wall_time, metrics=metrics, extras=extras)
def benchmark_bidirect_lstm_imdb_bs_512(self):
    """Measure performance with batch_size=512."""
    batch_size = 512
    # Benchmark the bidirectional-LSTM IMDB model on a single device.
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.imdb_x,
        y=self.imdb_y,
        batch_size=batch_size,
        optimizer='adam',
        loss='binary_crossentropy',
        metrics=['accuracy'])
    metadata = benchmark_util.get_keras_examples_metadata(
        'bidirectional_lstm', batch_size)
    extras.update(metadata)
    self.report_benchmark(
        wall_time=wall_time, metrics=metrics, extras=extras)
def benchmark_text_classification_bs_128(self):
    """Measure performance with batch_size=128."""
    batch_size = 128
    # Measure the transformer text-classification example via the harness.
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.imdb_x,
        y=self.imdb_y,
        batch_size=batch_size,
        optimizer="adam",
        loss="sparse_categorical_crossentropy",
        metrics=["accuracy"],
    )
    # Merge in the standard example metadata before reporting.
    extras.update(
        benchmark_util.get_keras_examples_metadata("transformer", batch_size))
    self.report_benchmark(
        wall_time=wall_time, metrics=metrics, extras=extras)
def benchmark_antirectifier_bs_512(self):
    """Measure performance with batch_size=512."""
    batch_size = 512
    # The antirectifier example emits logits, hence from_logits=True here.
    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.x_train,
        y=self.y_train,
        batch_size=batch_size,
        optimizer="rmsprop",
        loss=loss_fn,
        metrics=["sparse_categorical_accuracy"])
    extras.update(benchmark_util.get_keras_examples_metadata(
        "antirectifier", batch_size))
    self.report_benchmark(
        wall_time=wall_time, metrics=metrics, extras=extras)
def benchmark_conv_mnist_bs_128(self):
    """Measure performance with batch_size=128."""
    batch_size = 128
    # Unlike most siblings, this benchmark also pins the epoch count.
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.x_train,
        y=self.y_train,
        batch_size=batch_size,
        epochs=self.epochs,
        optimizer='adam',
        loss='categorical_crossentropy',
        metrics=['accuracy'])
    metadata = benchmark_util.get_keras_examples_metadata('conv', batch_size)
    extras.update(metadata)
    self.report_benchmark(
        wall_time=wall_time, metrics=metrics, extras=extras)
def benchmark_hrnn_mnist_bs_512(self):
    """Measure performance with batch_size=512."""
    batch_size = 512
    # Shared harness call for the hierarchical-RNN example at bs=512.
    run_kwargs = dict(
        x=self.x_train,
        y=self.y_train,
        batch_size=batch_size,
        optimizer="rmsprop",
        loss="categorical_crossentropy",
        metrics=["accuracy"],
    )
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model, **run_kwargs)
    extras.update(benchmark_util.get_keras_examples_metadata(
        "hierarchical_rnn", batch_size))
    self.report_benchmark(
        wall_time=wall_time, metrics=metrics, extras=extras)
def benchmark_irnn_mnist_bs_512(self):
    """Measure performance with batch_size=512."""
    batch_size = 512
    # IRNN needs its specific learning rate, so build the optimizer object
    # rather than passing a string identifier.
    optimizer = tf.keras.optimizers.RMSprop(
        learning_rate=self.learning_rate)
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.x_train,
        y=self.y_train,
        batch_size=batch_size,
        optimizer=optimizer,
        loss='categorical_crossentropy',
        metrics=['accuracy'])
    extras.update(benchmark_util.get_keras_examples_metadata(
        'irnn', batch_size))
    self.report_benchmark(
        wall_time=wall_time, metrics=metrics, extras=extras)
def benchmark_conv_mnist_bs_256(self):
    """Measure performance with batch_size=256."""
    batch_size = 256
    # Epoch count comes from the fixture so runs stay comparable.
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.x_train,
        y=self.y_train,
        batch_size=batch_size,
        epochs=self.epochs,
        optimizer="adam",
        loss="categorical_crossentropy",
        metrics=["accuracy"],
    )
    extras.update(
        benchmark_util.get_keras_examples_metadata("conv", batch_size))
    self.report_benchmark(
        wall_time=wall_time, metrics=metrics, extras=extras)
def benchmark_text_classification_bs_512_gpu_2(self):
    """Measure performance with batch_size=512, gpu=2 and
    distribution_strategy='mirrored'.
    """
    # Fixed: docstring previously said gpu=1 although num_gpus=2 below.
    batch_size = 512
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.imdb_x,
        y=self.imdb_y,
        batch_size=batch_size,
        num_gpus=2,
        distribution_strategy='mirrored',
        optimizer='adam',
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])
    # Attach the standard keras-examples metadata so this report matches
    # the single-GPU variants of this benchmark.
    metadata = benchmark_util.get_keras_examples_metadata(
        'transformer', batch_size)
    extras.update(metadata)
    self.report_benchmark(
        wall_time=wall_time, metrics=metrics, extras=extras)
def benchmark_cnn_cifar10_bs_256(self):
    """Measure performance with batch_size=256."""
    batch_size = 256
    # The CIFAR-10 CNN example uses a low learning rate with decay.
    optimizer = tf.keras.optimizers.RMSprop(
        learning_rate=0.0001, decay=1e-6)
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.x_train,
        y=self.y_train,
        batch_size=batch_size,
        epochs=self.epochs,
        optimizer=optimizer,
        loss='categorical_crossentropy',
        metrics=['accuracy'])
    extras.update(benchmark_util.get_keras_examples_metadata(
        'cnn', batch_size))
    self.report_benchmark(
        wall_time=wall_time, metrics=metrics, extras=extras)
def benchmark_bidirect_lstm_imdb_bs_256(self):
    """Measure performance with batch_size=256."""
    batch_size = 256
    # Single-device run of the bidirectional-LSTM IMDB example.
    run_kwargs = dict(
        x=self.imdb_x,
        y=self.imdb_y,
        batch_size=batch_size,
        optimizer="adam",
        loss="binary_crossentropy",
        metrics=["accuracy"],
    )
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model, **run_kwargs)
    extras.update(benchmark_util.get_keras_examples_metadata(
        "bidirectional_lstm", batch_size))
    self.report_benchmark(
        wall_time=wall_time, metrics=metrics, extras=extras)
def benchmark_bidirect_lstm_imdb_bs_512_gpu_2(self):
    """Measure performance with batch_size=512, gpu=2 and
    distribution_strategy=`mirrored`.
    """
    batch_size = 512
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.imdb_x,
        y=self.imdb_y,
        batch_size=batch_size,
        num_gpus=2,
        distribution_strategy='mirrored',
        optimizer='adam',
        loss='binary_crossentropy',
        metrics=['accuracy'])
    # Attach the standard keras-examples metadata; the single-GPU
    # variants of this benchmark report it, so the 2-GPU run should too.
    metadata = benchmark_util.get_keras_examples_metadata(
        'bidirectional_lstm', batch_size)
    extras.update(metadata)
    self.report_benchmark(
        wall_time=wall_time, metrics=metrics, extras=extras)
def benchmark_antirectifier_bs_512_gpu_2(self):
    """Measure performance with batch_size=512, gpu=2 and
    distribution_strategy=`mirrored`.
    """
    batch_size = 512
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.x_train,
        y=self.y_train,
        batch_size=batch_size,
        num_gpus=2,
        distribution_strategy="mirrored",
        optimizer="rmsprop",
        # The antirectifier model emits logits, hence from_logits=True.
        loss=tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True),
        metrics=["sparse_categorical_accuracy"])
    # Attach the standard keras-examples metadata; the single-GPU
    # variant of this benchmark reports it, so the 2-GPU run should too.
    metadata = benchmark_util.get_keras_examples_metadata(
        "antirectifier", batch_size)
    extras.update(metadata)
    self.report_benchmark(
        wall_time=wall_time, metrics=metrics, extras=extras)
def benchmark_conv_mnist_bs_512_gpu_2(self):
    """Measure performance with batch_size=512, gpu=2 and
    distribution_strategy='mirrored'.
    """
    batch_size = 512
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.x_train,
        y=self.y_train,
        batch_size=batch_size,
        num_gpus=2,
        distribution_strategy='mirrored',
        epochs=self.epochs,
        optimizer='adam',
        loss='categorical_crossentropy',
        metrics=['accuracy'])
    # Attach the standard keras-examples metadata; the single-GPU
    # variants of this benchmark report it, so the 2-GPU run should too.
    metadata = benchmark_util.get_keras_examples_metadata(
        'conv', batch_size)
    extras.update(metadata)
    self.report_benchmark(
        wall_time=wall_time, metrics=metrics, extras=extras)
def benchmark_cnn_cifar10_bs_1024_gpu_2(self):
    """Measure performance with batch_size=1024, gpu=2 and
    distribution_strategy=`mirrored`.
    """
    batch_size = 1024
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.x_train,
        y=self.y_train,
        batch_size=batch_size,
        num_gpus=2,
        distribution_strategy='mirrored',
        epochs=self.epochs,
        # Same optimizer configuration as the single-GPU CIFAR-10 run.
        optimizer=tf.keras.optimizers.RMSprop(
            learning_rate=0.0001, decay=1e-6),
        loss='categorical_crossentropy',
        metrics=['accuracy'])
    # Attach the standard keras-examples metadata; the single-GPU
    # variant of this benchmark reports it, so the 2-GPU run should too.
    metadata = benchmark_util.get_keras_examples_metadata(
        'cnn', batch_size)
    extras.update(metadata)
    self.report_benchmark(
        wall_time=wall_time, metrics=metrics, extras=extras)
def benchmark_hrnn_mnist_bs_1024_gpu_2(self):
    """Measure performance with batch_size=1024, gpu=2 and
    distribution_strategy='mirrored'
    """
    batch_size = 1024
    # Two-GPU mirrored-strategy run of the hierarchical-RNN example.
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.x_train,
        y=self.y_train,
        batch_size=batch_size,
        num_gpus=2,
        distribution_strategy='mirrored',
        optimizer='rmsprop',
        loss='categorical_crossentropy',
        metrics=['accuracy'])
    extras.update(benchmark_util.get_keras_examples_metadata(
        'hierarchical_rnn', batch_size))
    self.report_benchmark(
        wall_time=wall_time, metrics=metrics, extras=extras)
def benchmark_text_classification_bs_512_gpu_2(self):
    """Measure performance with batch_size=512, gpu=2 and
    distribution_strategy='mirrored'.
    """
    # Fixed: docstring previously said gpu=1 although num_gpus=2 below.
    batch_size = 512
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.imdb_x,
        y=self.imdb_y,
        batch_size=batch_size,
        num_gpus=2,
        distribution_strategy="mirrored",
        optimizer="adam",
        loss="sparse_categorical_crossentropy",
        metrics=["accuracy"],
    )
    # Merge the standard keras-examples metadata into the report extras.
    metadata = benchmark_util.get_keras_examples_metadata(
        "transformer", batch_size)
    extras.update(metadata)
    self.report_benchmark(
        wall_time=wall_time, metrics=metrics, extras=extras)
def benchmark_mlp_reuters_bs_512_gpu_2(self):
    """Measure performance with batch_size=512, gpu=2 and
    distribution_strategy='mirrored'
    """
    batch_size = 512
    # Two-GPU mirrored run of the Reuters MLP example; epoch count comes
    # from the fixture so runs stay comparable.
    metrics, wall_time, extras = benchmark_util.measure_performance(
        self._build_model,
        x=self.x_train,
        y=self.y_train,
        batch_size=batch_size,
        num_gpus=2,
        distribution_strategy='mirrored',
        epochs=self.epochs,
        optimizer='adam',
        loss='categorical_crossentropy',
        metrics=['accuracy'])
    extras.update(benchmark_util.get_keras_examples_metadata(
        'mlp', batch_size))
    self.report_benchmark(
        wall_time=wall_time, metrics=metrics, extras=extras)