Example no. 1
    def test_mobilenet_v2_saved_model_and_softmax_classifier_model(self):
        """Converts the default base model with a softmax classifier head and
        verifies the input/output signatures of every generated model."""
        in_dim = DEFAULT_INPUT_SIZE
        num_classes = 5
        batch = DEFAULT_BATCH_SIZE

        head = heads.SoftmaxClassifierHead(batch, (in_dim, ), num_classes)
        converter = tflite_transfer_converter.TFLiteTransferConverter(
            num_classes, self._default_base_model, head,
            optimizers.SGD(LEARNING_RATE), batch)
        models = converter._convert()

        # Trainable parameters: one kernel and one bias of the softmax layer.
        param_shapes = [(in_dim, num_classes), (num_classes, )]
        self.assertSignatureEqual(models['initialize'], [()], param_shapes)
        self.assertSignatureEqual(models['bottleneck'], [(1, in_dim)],
                                  [(1, in_dim)])
        train_inputs = [(batch, in_dim), (batch, num_classes)] + param_shapes
        self.assertSignatureEqual(models['train_head'], train_inputs,
                                  [()] + param_shapes)
        self.assertSignatureEqual(models['inference'],
                                  [(1, in_dim)] + param_shapes,
                                  [(1, num_classes)])
        self.assertSignatureEqual(models['optimizer'], param_shapes * 2,
                                  param_shapes)
 def test_mobilenet_v2_base_quantized_and_softmax_classifier(self):
    """Trains a quantized MobileNetV2 base with a softmax classifier head
    and checks that the model reaches at least 80% accuracy."""
    model = TransferModel(
        self.dataset_dir,
        bases.MobileNetV2Base(quantize=True),
        heads.SoftmaxClassifierHead(BATCH_SIZE, BOTTLENECK_SHAPE, NUM_CLASSES),
        optimizers.SGD(LEARNING_RATE))
    self.assertModelAchievesAccuracy(model, 0.80)
Example no. 3
    def test_mobilenet_v2_saved_model_and_keras_model(self):
        """Converts the default base model paired with a two-layer Keras head
        and verifies the input/output signatures of every generated model."""
        in_dim = DEFAULT_INPUT_SIZE
        num_classes = 5

        # L2-regularized Dense(32) -> Dense(num_classes) head, built layer
        # by layer instead of from a list.
        keras_head = tf.keras.Sequential()
        keras_head.add(
            layers.Dense(units=32,
                         input_shape=(in_dim, ),
                         activation='relu',
                         kernel_regularizer=l2(0.01),
                         bias_regularizer=l2(0.01)))
        keras_head.add(
            layers.Dense(units=num_classes,
                         kernel_regularizer=l2(0.01),
                         bias_regularizer=l2(0.01)))
        keras_head.compile(loss='categorical_crossentropy', optimizer='sgd')

        converter = tflite_transfer_converter.TFLiteTransferConverter(
            num_classes, self._default_base_model,
            heads.KerasModelHead(keras_head), optimizers.SGD(LEARNING_RATE),
            DEFAULT_BATCH_SIZE)

        models = converter._convert()

        # Kernel and bias for each of the two Dense layers.
        param_shapes = [(in_dim, 32), (32, ), (32, num_classes),
                        (num_classes, )]
        self.assertSignatureEqual(models['initialize'], [()], param_shapes)
        self.assertSignatureEqual(models['bottleneck'], [(1, in_dim)],
                                  [(1, in_dim)])
        self.assertSignatureEqual(models['inference'],
                                  [(1, in_dim)] + param_shapes,
                                  [(1, num_classes)])
        self.assertSignatureEqual(models['optimizer'], param_shapes * 2,
                                  param_shapes)
Example no. 4
    def test_mobilenet_v2_base_and_softmax_classifier_model(self):
        """Converts a MobileNetV2 base (224x224 images) with a softmax head
        and verifies the input/output signatures of every generated model."""
        image_size = 224
        num_classes = 5
        batch = DEFAULT_BATCH_SIZE
        bottleneck = (7, 7, 1280)  # MobileNetV2 feature map at 224x224 input.

        base = bases.MobileNetV2Base(image_size=image_size)
        head = heads.SoftmaxClassifierHead(batch, base.bottleneck_shape(),
                                           num_classes)

        converter = tflite_transfer_converter.TFLiteTransferConverter(
            num_classes, base, head, optimizers.SGD(LEARNING_RATE), batch)
        models = converter._convert()

        # The softmax head operates on the flattened bottleneck.
        flat_bottleneck = 7 * 7 * 1280
        param_shapes = [(flat_bottleneck, num_classes), (num_classes, )]
        self.assertSignatureEqual(models['initialize'], [()], param_shapes)
        self.assertSignatureEqual(models['bottleneck'],
                                  [(1, image_size, image_size, 3)],
                                  [(1, ) + bottleneck])
        self.assertSignatureEqual(models['train_head'],
                                  [(batch, ) + bottleneck,
                                   (batch, num_classes)] + param_shapes,
                                  [()] + param_shapes)
        self.assertSignatureEqual(models['inference'],
                                  [(1, ) + bottleneck] + param_shapes,
                                  [(1, num_classes)])
        self.assertSignatureEqual(models['optimizer'], param_shapes * 2,
                                  param_shapes)
Example no. 5
    # NOTE(review): fragment of a larger function — `model`, `encoder_layer`,
    # `tflite_model` and `tflite_ondevice_model` are defined by the enclosing
    # (not visible) scope; confirm against the full file.
    # on-device model configuration.
    num_classes = 2
    learning_rate = 0.001
    batch_size = 5
    l2_rate = 0.0001
    hidden_units = 128
    # Shape of the encoder layer's output; used to size the head's input.
    input_shape = model.get_layer(encoder_layer).output.shape

    # Base (feature extractor) loaded from a previously exported SavedModel.
    base = bases.SavedModelBase(tflite_model)

    # Trainable head: flatten the encoder output, then two L2-regularized
    # Dense layers ending in a softmax over `num_classes`.
    head = Sequential([
        Flatten(input_shape=input_shape),
        Dense(units=hidden_units,
              activation="relu",
              kernel_regularizer=l2(l2_rate)),
        Dense(units=num_classes,
              activation="softmax",
              kernel_regularizer=l2(l2_rate)),
    ])

    # Optimizer is ignored by the converter!
    # (compile() is only needed so the Keras model has a loss; the actual
    # on-device optimizer is the SGD instance passed to the converter below.)
    head.compile(loss="categorical_crossentropy", optimizer="adam")

    # Combine base + head into the set of transfer-learning TFLite models.
    converter = TFLiteTransferConverter(num_classes,
                                        base,
                                        heads.KerasModelHead(head),
                                        optimizers.SGD(learning_rate),
                                        train_batch_size=batch_size)

    converter.convert_and_save(tflite_ondevice_model)
Example no. 6
This is the model architecture that we will train using Flower. 
"""
# LeNet-style CNN head for 32x32x3 (CIFAR-10) images, assembled layer by
# layer: two conv stages, then dense layers ending in a 10-way softmax.
head = tf.keras.Sequential()
head.add(tf.keras.Input(shape=(32, 32, 3)))
head.add(tf.keras.layers.Conv2D(6, 5, activation="relu"))
head.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
head.add(tf.keras.layers.Conv2D(16, 5, activation="relu"))
head.add(tf.keras.layers.Flatten())
head.add(tf.keras.layers.Dense(units=120, activation="relu"))
head.add(tf.keras.layers.Dense(units=84, activation="relu"))
head.add(tf.keras.layers.Dense(units=10, activation="softmax"))

head.compile(loss="categorical_crossentropy", optimizer="sgd")
"""Convert the model for TFLite.

Using 10 classes in CIFAR10, learning rate = 1e-3 and batch size = 32

This will generate a directory called tflite_model with five tflite models.
Copy them in your Android code under the assets/model directory.
"""

# The base is an identity SavedModel; the trainable work happens in `head`.
base_path = bases.saved_model_base.SavedModelBase("identity_model")
converter = TFLiteTransferConverter(
    10,
    base_path,
    heads.KerasModelHead(head),
    optimizers.SGD(1e-3),
    train_batch_size=32)

converter.convert_and_save("tflite_model")
def main():
    """Command-line entry point.

    Parses the CLI flags, builds the base, head and optimizer parts, and
    writes the converted transfer-learning TFLite models to --out_model_dir.

    Raises:
        RuntimeError: if --optimizer=sgd is used without --sgd_learning_rate.
    """
    args = _build_arg_parser().parse_args()

    base = _make_base(args)
    head = _make_head(args, base)
    optimizer = _make_optimizer(args)

    converter = tflite_transfer_converter.TFLiteTransferConverter(
        args.num_classes, base, head, optimizer, args.train_batch_size)
    converter.convert_and_save(args.out_model_dir)


def _build_arg_parser():
    """Returns the argparse parser describing all converter CLI flags."""
    parser = argparse.ArgumentParser(
        description='Combines two TF models into a transfer learning model')
    parser.add_argument('--train_batch_size',
                        help='Training batch size',
                        type=int,
                        default=20)
    parser.add_argument('--num_classes',
                        help='Number of classes for the output',
                        type=int,
                        default=4)

    # Base model configuration: exactly one of MobileNetV2 or a SavedModel.
    base_group = parser.add_mutually_exclusive_group(required=True)
    base_group.add_argument('--base_mobilenetv2',
                            help='Use MobileNetV2 as the base model',
                            dest='base_mobilenetv2',
                            action='store_true')
    base_group.add_argument(
        '--base_model_dir',
        help='Use a SavedModel under a given path as the base model',
        type=str)
    parser.add_argument('--base_quantize',
                        help='Whether the base model should be quantized',
                        dest='base_quantize',
                        action='store_true')
    parser.set_defaults(base_quantize=False)

    # Head model configuration: exactly one of a SavedModel or softmax head.
    head_group = parser.add_mutually_exclusive_group(required=True)
    head_group.add_argument(
        '--head_model_dir',
        help='Use a SavedModel under a given path as the head model',
        type=str)
    head_group.add_argument('--head_softmax',
                            help='Use SoftmaxClassifier for the head model',
                            dest='head_softmax',
                            action='store_true')
    parser.add_argument(
        '--head_l2_reg',
        help='L2 regularization parameter for SoftmaxClassifier',
        type=float)

    # Optimizer configuration.
    parser.add_argument('--optimizer',
                        required=True,
                        type=str,
                        choices=['sgd', 'adam'],
                        help='Which optimizer should be used')
    parser.add_argument('--sgd_learning_rate',
                        help='Learning rate for SGD',
                        type=float)

    parser.add_argument(
        '--out_model_dir',
        help='Where the generated transfer learning model is saved',
        required=True,
        type=str)
    return parser


def _make_base(args):
    """Instantiates the base (feature-extractor) model from parsed flags."""
    if args.base_mobilenetv2:
        return bases.MobileNetV2Base(quantize=args.base_quantize)
    return bases.SavedModelBase(args.base_model_dir,
                                quantize=args.base_quantize)


def _make_head(args, base):
    """Instantiates the trainable head model from parsed flags.

    Args:
        args: parsed CLI namespace.
        base: the base model, used to size the softmax classifier's input.
    """
    if args.head_model_dir:
        return heads.LogitsSavedModelHead(args.head_model_dir)
    return heads.SoftmaxClassifierHead(args.train_batch_size,
                                       base.bottleneck_shape(),
                                       args.num_classes,
                                       l2_reg=args.head_l2_reg)


def _make_optimizer(args):
    """Instantiates the training optimizer from parsed flags.

    Raises:
        RuntimeError: if SGD is requested without --sgd_learning_rate.
    """
    if args.optimizer == 'sgd':
        if args.sgd_learning_rate is None:
            raise RuntimeError(
                '--sgd_learning_rate is required when SGD is used as an optimizer'
            )
        return optimizers.SGD(args.sgd_learning_rate)
    # argparse's choices=['sgd', 'adam'] guarantees this branch is 'adam'.
    return optimizers.Adam()