Code example #1
def main(argv):
  del argv

  named_strategy = (
      ds_utils.named_strategies.get(FLAGS.strategy) if FLAGS.strategy else None)
  strategy = named_strategy.strategy if named_strategy else None

  with MaybeDistributionScope(strategy):
    feature_extractor = make_feature_extractor(
        FLAGS.input_saved_model_dir,
        FLAGS.retrain,
        FLAGS.regularization_loss_multiplier)
    model = make_classifier(feature_extractor)

    model.compile(loss=tf.keras.losses.categorical_crossentropy,
                  optimizer=tf.keras.optimizers.SGD(),
                  metrics=['accuracy'])

  # Train the classifier (possibly on a different dataset).
  (x_train, y_train), (x_test, y_test) = mnist_util.load_reshaped_data(
      use_fashion_mnist=FLAGS.use_fashion_mnist,
      fake_tiny_data=FLAGS.fast_test_mode)
  print('Training on %s with %d trainable and %d untrainable variables.' %
        ('Fashion MNIST' if FLAGS.use_fashion_mnist else 'MNIST',
         len(model.trainable_variables), len(model.non_trainable_variables)))
  model.fit(x_train, y_train,
            batch_size=128,
            epochs=FLAGS.epochs,
            verbose=1,
            validation_data=(x_test, y_test))

  if FLAGS.output_saved_model_dir and FLAGS.output_saved_model_dir != 'None':
    tf.saved_model.save(model, FLAGS.output_saved_model_dir)
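
The MaybeDistributionScope helper used in code example #1 is not shown above. A minimal sketch, assuming it only has to enter strategy.scope() when a strategy was actually selected and otherwise do nothing:

class MaybeDistributionScope(object):
  """Enters the given tf.distribute strategy's scope, or no scope if None."""

  def __init__(self, strategy):
    self._strategy = strategy
    self._scope = None

  def __enter__(self):
    if self._strategy:
      self._scope = self._strategy.scope()
      self._scope.__enter__()

  def __exit__(self, exc_type, exc_value, traceback):
    if self._strategy:
      self._scope.__exit__(exc_type, exc_value, traceback)
      self._scope = None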
Code example #2
File: export_mnist_cnn.py  Project: flavz27/master_PA
def main(argv):
    del argv

    # Build a complete classifier model using a feature extractor.
    default_hparams = dict(dropout_rate=0.25)
    l2_strength = 0.01  # Not a hparam for inputs -> outputs.
    feature_extractor = make_feature_extractor(l2_strength=l2_strength,
                                               **default_hparams)
    classifier = make_classifier(feature_extractor, l2_strength=l2_strength)

    # Train the complete model.
    (x_train, y_train), (x_test, y_test) = mnist_util.load_reshaped_data(
        fake_tiny_data=FLAGS.fast_test_mode)
    classifier.compile(loss=tf.keras.losses.categorical_crossentropy,
                       optimizer=tf.keras.optimizers.SGD(),
                       metrics=['accuracy'])
    classifier.fit(x_train,
                   y_train,
                   batch_size=128,
                   epochs=FLAGS.epochs,
                   verbose=1,
                   validation_data=(x_test, y_test))

    # Save the feature extractor to a framework-agnostic SavedModel for reuse.
    # Note that the feature_extractor object has not been compiled or fitted,
    # so it does not contain an optimizer and related state.
    exportable = wrap_keras_model_for_export(feature_extractor,
                                             (None,) + mnist_util.INPUT_SHAPE,
                                             set_feature_extractor_hparams,
                                             default_hparams)
    tf.saved_model.save(exportable, FLAGS.export_dir)
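
The set_feature_extractor_hparams callback passed to wrap_keras_model_for_export above is not shown here either. A plausible minimal sketch, assuming the feature extractor's dropout layer is named 'dropout' and dropout_rate is its only adjustable hparam:

def set_feature_extractor_hparams(model, dropout_rate):
    # Assumed layer name; updates the rate the Dropout layer uses at training time.
    model.get_layer('dropout').rate = dropout_rate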
Code example #3
def main(argv):
    del argv

    # Build a complete classifier model using a feature extractor.
    feature_extractor = make_feature_extractor()
    classifier = make_classifier(feature_extractor)

    # Train the complete model
    (x_train, y_train), (x_test, y_test) = mnist_util.load_reshaped_data(
        fake_tiny_data=FLAGS.fast_test_mode)
    classifier.compile(loss=tf.keras.losses.categorical_crossentropy,
                       optimizer=tf.keras.optimizers.SGD(),
                       metrics=['accuracy'])
    classifier.fit(x_train,
                   y_train,
                   batch_size=128,
                   epochs=FLAGS.epochs,
                   verbose=1,
                   validation_data=(x_test, y_test))

    # Save the feature extractor to a framework-agnostic SavedModel for reuse.
    # Note that the feature_extractor object has not been compiled or fitted,
    # so it does not contain an optimizer and related state.
    exportable = wrap_keras_model_for_export(feature_extractor)
    tf.saved_model.save(exportable, FLAGS.export_dir)
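
The make_feature_extractor and make_classifier helpers in code example #3 are defined elsewhere in the project. One plausible shape, assuming the same imports as above and that mnist_util exposes INPUT_SHAPE (used in examples #2 and #8) plus a NUM_CLASSES constant (an assumption here):

def make_feature_extractor():
    """Hypothetical small CNN mapping MNIST images to a 128-d feature vector."""
    net = inp = tf.keras.Input(mnist_util.INPUT_SHAPE)
    net = tf.keras.layers.Conv2D(32, 5, padding='same', activation='relu')(net)
    net = tf.keras.layers.MaxPooling2D()(net)
    net = tf.keras.layers.Flatten()(net)
    net = tf.keras.layers.Dense(128, activation='relu')(net)
    return tf.keras.Model(inputs=inp, outputs=net)


def make_classifier(feature_extractor):
    """Hypothetical softmax head on top of the feature extractor."""
    net = inp = tf.keras.Input(mnist_util.INPUT_SHAPE)
    net = feature_extractor(net)
    net = tf.keras.layers.Dense(mnist_util.NUM_CLASSES, activation='softmax')(net)
    return tf.keras.Model(inputs=inp, outputs=net)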
Code example #4
def main(argv):
  del argv

  if FLAGS.use_mirrored_strategy:
    strategy = tf.distribute.MirroredStrategy()
  else:
    strategy = tf.distribute.get_strategy()

  with strategy.scope():
    feature_extractor = make_feature_extractor(
        FLAGS.input_saved_model_dir,
        FLAGS.retrain,
        FLAGS.regularization_loss_multiplier)
    model = make_classifier(feature_extractor,
                            dropout_rate=0.0)  # TODO(b/134660903): Remove.

    model.compile(loss=tf.keras.losses.categorical_crossentropy,
                  optimizer=tf.keras.optimizers.SGD(),
                  metrics=['accuracy'])

  # Train the classifier (possibly on a different dataset).
  (x_train, y_train), (x_test, y_test) = mnist_util.load_reshaped_data(
      use_fashion_mnist=FLAGS.use_fashion_mnist,
      fake_tiny_data=FLAGS.fast_test_mode)
  print('Training on %s with %d trainable and %d untrainable variables.' %
        ('Fashion MNIST' if FLAGS.use_fashion_mnist else 'MNIST',
         len(model.trainable_variables), len(model.non_trainable_variables)))
  model.fit(x_train, y_train,
            batch_size=128,
            epochs=FLAGS.epochs,
            verbose=1,
            validation_data=(x_test, y_test))

  if FLAGS.output_saved_model_dir:
    tf.saved_model.save(model, FLAGS.output_saved_model_dir)
Code example #5
def main(argv):
    del argv

    # Load a pre-trained feature extractor and wrap it for use in Keras.
    obj = tf.saved_model.load(FLAGS.export_dir)
    scale_regularization_losses(obj, FLAGS.regularization_loss_multiplier)
    arguments = {}
    if FLAGS.dropout_rate is not None:
        arguments['dropout_rate'] = FLAGS.dropout_rate
    feature_extractor = util.CustomLayer(obj,
                                         output_shape=[10],
                                         trainable=FLAGS.retrain,
                                         arguments=arguments)

    # Build a classifier with it.
    model = make_classifier(feature_extractor)

    # Train the classifier (possibly on a different dataset).
    (x_train, y_train), (x_test, y_test) = mnist_util.load_reshaped_data(
        use_fashion_mnist=FLAGS.use_fashion_mnist,
        fake_tiny_data=FLAGS.fast_test_mode)
    model.compile(loss=tf.keras.losses.categorical_crossentropy,
                  optimizer=tf.keras.optimizers.SGD(),
                  metrics=['accuracy'])
    print('Training on %s with %d trainable and %d untrainable variables.' %
          ('Fashion MNIST' if FLAGS.use_fashion_mnist else 'MNIST',
           len(model.trainable_variables), len(model.non_trainable_variables)))
    model.fit(x_train,
              y_train,
              batch_size=128,
              epochs=FLAGS.epochs,
              verbose=1,
              validation_data=(x_test, y_test))
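
Code examples #5 and #6 call a scale_regularization_losses helper that is not shown. A minimal sketch, assuming the restored SavedModel object tracks its regularization losses as a list of zero-argument callables in obj.regularization_losses:

def scale_regularization_losses(obj, multiplier):
    """Rescales the restored object's regularization losses by `multiplier`."""
    if multiplier is None:
        return  # Keep the losses exported with the SavedModel unchanged.
    def _scale_one(loss_fn):  # Separate def so each lambda binds its own loss_fn.
        return lambda: tf.multiply(multiplier, loss_fn())
    obj.regularization_losses = [_scale_one(l) for l in obj.regularization_losses]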
Code example #6
File: use_mnist_cnn.py  Project: perfmjs/tensorflow
def main(argv):
  del argv

  # Load a pre-trained feature extractor and wrap it for use in Keras.
  obj = tf.saved_model.load(FLAGS.export_dir)
  scale_regularization_losses(obj, FLAGS.regularization_loss_multiplier)
  arguments = {}
  if FLAGS.dropout_rate is not None:
    arguments['dropout_rate'] = FLAGS.dropout_rate
  feature_extractor = util.CustomLayer(obj, output_shape=[10],
                                       trainable=FLAGS.retrain,
                                       arguments=arguments)

  # Build a classifier with it.
  model = make_classifier(feature_extractor)

  # Train the classifier (possibly on a different dataset).
  (x_train, y_train), (x_test, y_test) = mnist_util.load_reshaped_data(
      use_fashion_mnist=FLAGS.use_fashion_mnist,
      fake_tiny_data=FLAGS.fast_test_mode)
  model.compile(loss=tf.keras.losses.categorical_crossentropy,
                optimizer=tf.keras.optimizers.SGD(),
                metrics=['accuracy'])
  print('Training on %s with %d trainable and %d untrainable variables.' %
        ('Fashion MNIST' if FLAGS.use_fashion_mnist else 'MNIST',
         len(model.trainable_variables), len(model.non_trainable_variables)))
  model.fit(x_train, y_train,
            batch_size=128,
            epochs=FLAGS.epochs,
            verbose=1,
            validation_data=(x_test, y_test))
Code example #7
def main(argv):
  del argv

  if FLAGS.use_mirrored_strategy:
    strategy = tf.distribute.MirroredStrategy()
  else:
    strategy = tf.distribute.get_strategy()

  with strategy.scope():
    feature_extractor = make_feature_extractor(
        FLAGS.export_dir,
        FLAGS.retrain,
        FLAGS.regularization_loss_multiplier)
    model = make_classifier(feature_extractor)

    model.compile(loss=tf.keras.losses.categorical_crossentropy,
                  optimizer=tf.keras.optimizers.SGD(),
                  metrics=['accuracy'])

  # Train the classifier (possibly on a different dataset).
  (x_train, y_train), (x_test, y_test) = mnist_util.load_reshaped_data(
      use_fashion_mnist=FLAGS.use_fashion_mnist,
      fake_tiny_data=FLAGS.fast_test_mode)
  print('Training on %s with %d trainable and %d untrainable variables.' %
        ('Fashion MNIST' if FLAGS.use_fashion_mnist else 'MNIST',
         len(model.trainable_variables), len(model.non_trainable_variables)))
  model.fit(x_train, y_train,
            batch_size=128,
            epochs=FLAGS.epochs,
            verbose=1,
            validation_data=(x_test, y_test))
Code example #8
def main(argv):
  del argv

  # Build a complete classifier model using a feature extractor.
  default_hparams = dict(dropout_rate=0.25)
  l2_strength = 0.01  # Not a hparam for inputs -> outputs.
  feature_extractor = make_feature_extractor(l2_strength=l2_strength,
                                             **default_hparams)
  classifier = make_classifier(feature_extractor, l2_strength=l2_strength)

  # Train the complete model.
  (x_train, y_train), (x_test, y_test) = mnist_util.load_reshaped_data(
      fake_tiny_data=FLAGS.fast_test_mode)
  classifier.compile(loss=tf.keras.losses.categorical_crossentropy,
                     optimizer=tf.keras.optimizers.SGD(),
                     metrics=['accuracy'])
  classifier.fit(x_train, y_train,
                 batch_size=128,
                 epochs=FLAGS.epochs,
                 verbose=1,
                 validation_data=(x_test, y_test))

  # Save the feature extractor to a framework-agnostic SavedModel for reuse.
  # Note that the feature_extractor object has not been compiled or fitted,
  # so it does not contain an optimizer and related state.
  exportable = wrap_keras_model_for_export(feature_extractor,
                                           (None,) + mnist_util.INPUT_SHAPE,
                                           set_feature_extractor_hparams,
                                           default_hparams)
  tf.saved_model.save(exportable, FLAGS.export_dir)
Code example #9
def main(argv):
    del argv

    # Load a pre-trained feature extractor and wrap it for use in Keras.
    obj = tf.saved_model.load(FLAGS.export_dir)
    feature_extractor = util.CustomLayer(obj,
                                         output_shape=[128],
                                         trainable=FLAGS.retrain)

    # Build a classifier with it.
    model = make_classifier(feature_extractor)

    # Train the classifier (possibly on a different dataset).
    (x_train, y_train), (x_test, y_test) = mnist_util.load_reshaped_data(
        use_fashion_mnist=FLAGS.use_fashion_mnist,
        fake_tiny_data=FLAGS.fast_test_mode)
    model.compile(
        loss=tf.keras.losses.categorical_crossentropy,
        optimizer=tf.keras.optimizers.SGD(),
        metrics=['accuracy'],
        # TODO(arnoegw): Remove after investigating huge allocs.
        run_eagerly=True)
    print('Training on %s with %d trainable and %d untrainable variables.' %
          ('Fashion MNIST' if FLAGS.use_fashion_mnist else 'MNIST',
           len(model.trainable_variables), len(model.non_trainable_variables)))
    model.fit(x_train,
              y_train,
              batch_size=128,
              epochs=FLAGS.epochs,
              steps_per_epoch=3,
              verbose=1,
              validation_data=(x_test, y_test))
Code example #10
    def loadData(self):
        """Loads MNIST via mnist_util and flattens each image to a 784-vector."""
        # Assumes `import numpy as np` and the mnist_util helper are available.
        # module_path = os.path.dirname(__file__)
        # test_image = module_path + "t10k-images-idx3-ubyte"
        (x_train, y_train), (x_test, y_test) = mnist_util.load_reshaped_data(
            use_fashion_mnist=False, fake_tiny_data=False)
        x_train = np.reshape(x_train, (60000, 784))
        x_test = np.reshape(x_test, (10000, 784))

        return (x_train, y_train), (x_test, y_test)
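
A hypothetical use of the loader above (the enclosing class and its instantiation as `loader` are not shown): the 28x28x1 images come back flattened into 784-element vectors, ready for a dense-only model.

(x_train, y_train), (x_test, y_test) = loader.loadData()  # `loader` is hypothetical.
print(x_train.shape, x_test.shape)  # (60000, 784) (10000, 784)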
Code example #11
def main(argv):
    del argv

    # First convert the SavedModel in a pristine environment.
    converter = tf.lite.TFLiteConverter.from_saved_model(FLAGS.saved_model_dir)
    lite_model_content = converter.convert()
    # Here is how you can save it for actual deployment.
    if FLAGS.tflite_output_file:
        with open(FLAGS.tflite_output_file, 'wb') as outfile:
            outfile.write(lite_model_content)
    # For testing, the TFLite model can be executed like this.
    interpreter = tf.lite.Interpreter(model_content=lite_model_content)

    def lite_model(images):
        interpreter.allocate_tensors()
        interpreter.set_tensor(interpreter.get_input_details()[0]['index'],
                               images)
        interpreter.invoke()
        return interpreter.get_tensor(
            interpreter.get_output_details()[0]['index'])

    # Load the SavedModel again for use as a test baseline.
    imported = tf.saved_model.load(FLAGS.saved_model_dir)

    def tf_model(images):
        output_dict = imported.signatures['serving_default'](
            tf.constant(images))
        logits, = output_dict.values()  # Unpack single value.
        return logits

    # Compare model outputs on the test inputs.
    (_, _), (x_test, _) = mnist_util.load_reshaped_data(
        use_fashion_mnist=FLAGS.use_fashion_mnist,
        fake_tiny_data=FLAGS.fast_test_mode)
    for i, x in enumerate(x_test):
        x = x[None, ...]  # Make batch of size 1.
        y_lite = lite_model(x)
        y_tf = tf_model(x)
        # This numpy primitive uses plain `raise` and works outside tf.TestCase.
        # Model outputs are probabilities that sum to 1, so atol makes sense here.
        np.testing.assert_allclose(y_lite,
                                   y_tf,
                                   rtol=0,
                                   atol=1e-5,
                                   err_msg='Mismatch at test example %d' % i)
Code example #12
def main(argv):
    del argv

    # Build a complete classifier model using a feature extractor.
    default_hparams = dict(dropout_rate=0.25)
    l2_strength = 0.01  # Not a hparam for inputs -> outputs.
    feature_extractor = make_feature_extractor(l2_strength=l2_strength,
                                               **default_hparams)
    classifier = make_classifier(feature_extractor, l2_strength=l2_strength)

    # Train the complete model.
    (x_train, y_train), (x_test, y_test) = mnist_util.load_reshaped_data(
        fake_tiny_data=FLAGS.fast_test_mode)
    classifier.compile(loss=tf.keras.losses.categorical_crossentropy,
                       optimizer=tf.keras.optimizers.SGD(),
                       metrics=['accuracy'])
    classifier.fit(x_train,
                   y_train,
                   batch_size=128,
                   epochs=FLAGS.epochs,
                   verbose=1,
                   validation_data=(x_test, y_test))

    # Save the feature extractor to a framework-agnostic SavedModel for reuse.
    # Note that the feature_extractor object has not been compiled or fitted,
    # so it does not contain an optimizer and related state.
    if FLAGS.use_keras_save_api:
        # Use Keras' built-in way of creating reusable SavedModels.
        # This has no support for adjustable hparams at this time (July 2019).
        # (We could also call tf.saved_model.save(feature_extractor, ...),
        # point is we're passing a Keras model, not a plain Checkpoint.)
        tf.keras.models.save_model(feature_extractor, FLAGS.export_dir)
    else:
        # Assemble a reusable SavedModel manually, with adjustable hparams.
        exportable = wrap_keras_model_for_export(
            feature_extractor, (None,) + mnist_util.INPUT_SHAPE,
            set_feature_extractor_hparams, default_hparams)
        tf.saved_model.save(exportable, FLAGS.export_dir)
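
Both branches of code example #12 leave a SavedModel in FLAGS.export_dir. A hypothetical reload, matching whichever branch ran: the Keras-native export comes back as a Keras model, while the manually wrapped one is restored as a plain SavedModel object, as in code examples #5 and #6.

if FLAGS.use_keras_save_api:
    restored = tf.keras.models.load_model(FLAGS.export_dir)  # Keras model round-trip.
else:
    restored = tf.saved_model.load(FLAGS.export_dir)  # Plain restored object.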