Example #1
 def testSparsityValueIsValid(self, schedule_type):
   if schedule_type == 'constant_sparsity':
     # pylint: disable=unnecessary-lambda
     self._validate_sparsity(lambda s: pruning_schedule.ConstantSparsity(s, 0))
   elif schedule_type == 'polynomial_decay':
     self._validate_sparsity(
         lambda s: pruning_schedule.PolynomialDecay(s, 0.8, 0, 10))
     self._validate_sparsity(
         lambda s: pruning_schedule.PolynomialDecay(0.2, s, 0, 10))
Example #2
    def testPruneStopAndRestartOnModel(self, save_restore_fn):
        params = {
            'pruning_schedule':
            pruning_schedule.PolynomialDecay(0.2, 0.6, 0, 4, 3, 1),
            'block_size': (1, 1),
            'block_pooling_type':
            'AVG'
        }
        model = prune.prune_low_magnitude(
            keras_test_utils.build_simple_dense_model(), **params)
        model.compile(loss='categorical_crossentropy',
                      optimizer='sgd',
                      metrics=['accuracy'])
        # Model hasn't been trained yet. Sparsity 0.0
        test_utils.assert_model_sparsity(self, 0.0, model)

        model.fit(np.random.rand(20, 10),
                  np_utils.to_categorical(np.random.randint(5, size=(20, 1)),
                                          5),
                  batch_size=20,
                  callbacks=[pruning_callbacks.UpdatePruningStep()])
        # Training has run only 1 step. Sparsity 0.2 (initial_sparsity)
        test_utils.assert_model_sparsity(self, 0.2, model)

        model = save_restore_fn(model)
        model.fit(np.random.rand(20, 10),
                  np_utils.to_categorical(np.random.randint(5, size=(20, 1)),
                                          5),
                  batch_size=20,
                  epochs=3,
                  callbacks=[pruning_callbacks.UpdatePruningStep()])
        # Training has run all 4 steps. Sparsity 0.6 (final_sparsity)
        test_utils.assert_model_sparsity(self, 0.6, model)

        self._check_strip_pruning_matches_original(model, 0.6)

Example #3

    def testPruneStopAndRestart_PreservesSparsity(self, save_restore_fn):
        # TODO(tfmot): re-enable once SavedModel preserves step again.
        # This existed in TF 2.0 and 2.1 and should be reenabled in
        # TF 2.3. b/151755698
        if save_restore_fn.__name__ == '_save_restore_tf_model':
            return

        begin_step, end_step = 1, 4
        params = self.params
        params['pruning_schedule'] = pruning_schedule.PolynomialDecay(
            0.2, 0.6, begin_step, end_step, 3, 1)

        model = prune.prune_low_magnitude(
            keras_test_utils.build_simple_dense_model(), **params)
        model.compile(loss='categorical_crossentropy',
                      optimizer='sgd',
                      metrics=['accuracy'])
        # Model hasn't been trained yet. Sparsity 0.0
        test_utils.assert_model_sparsity(self, 0.0, model)

        # Train only 1 step. Sparsity 0.2 (initial_sparsity)
        self._train_model(model, epochs=1)
        test_utils.assert_model_sparsity(self, 0.2, model)

        model = save_restore_fn(model)

        # Training has run all 4 steps. Sparsity 0.6 (final_sparsity)
        self._train_model(model, epochs=3)
        test_utils.assert_model_sparsity(self, 0.6, model)

        self._check_strip_pruning_matches_original(model, 0.6)

Example #4

    def testPruneWithPolynomialDecayPastEndStep_PreservesSparsity(
            self, save_restore_fn):
        begin_step, end_step = 0, 2
        params = self.params
        params['pruning_schedule'] = pruning_schedule.PolynomialDecay(
            0.2, 0.6, begin_step, end_step, 3, 1)

        model = prune.prune_low_magnitude(
            keras_test_utils.build_simple_dense_model(), **params)
        model.compile(loss='categorical_crossentropy',
                      optimizer='sgd',
                      metrics=['accuracy'])

        # Model hasn't been trained yet. Sparsity 0.0
        test_utils.assert_model_sparsity(self, 0.0, model)

        # Train 3 steps, past end_step. Sparsity 0.6 (final_sparsity)
        self._train_model(model, epochs=3)
        test_utils.assert_model_sparsity(self, 0.6, model)

        model = save_restore_fn(model)

        # Ensure sparsity is preserved.
        test_utils.assert_model_sparsity(self, 0.6, model)

        # Train one more step to ensure nothing happens that brings sparsity
        # back below 0.6.
        self._train_model(model, epochs=1)
        test_utils.assert_model_sparsity(self, 0.6, model)

        self._check_strip_pruning_matches_original(model, 0.6)

Example #5

    def testPruneStopAndRestart_PreservesSparsity(self, save_restore_fn):
        begin_step, end_step = 0, 4
        params = self.params
        params['pruning_schedule'] = pruning_schedule.PolynomialDecay(
            0.2, 0.6, begin_step, end_step, 3, 1)

        model = prune.prune_low_magnitude(
            keras_test_utils.build_simple_dense_model(), **params)
        model.compile(loss='categorical_crossentropy',
                      optimizer='sgd',
                      metrics=['accuracy'])
        # Model hasn't been trained yet. Sparsity 0.0
        test_utils.assert_model_sparsity(self, 0.0, model)

        # Train only 1 step. Sparsity 0.2 (initial_sparsity)
        self._train_model(model, epochs=1)
        test_utils.assert_model_sparsity(self, 0.2, model)

        model = save_restore_fn(model)

        # Training has run all 4 steps. Sparsity 0.6 (final_sparsity)
        self._train_model(model, epochs=3)
        test_utils.assert_model_sparsity(self, 0.6, model)

        self._check_strip_pruning_matches_original(model, 0.6)
Example #6
 def _construct_pruning_schedule(
     schedule_type, begin_step, end_step, frequency=10):
   # Uses default values for sparsity. We're only testing begin_step, end_step
   # and frequency here.
   if schedule_type == 'constant_sparsity':
     return pruning_schedule.ConstantSparsity(
         0.5, begin_step, end_step, frequency)
   elif schedule_type == 'polynomial_decay':
     return pruning_schedule.PolynomialDecay(
         0.2, 0.8, begin_step, end_step, 3, frequency)
Example #7
    def testSerializeDeserialize(self):
        sparsity = pruning_schedule.PolynomialDecay(0.2, 0.6, 10, 20, 5, 10)

        config = sparsity.get_config()
        sparsity_deserialized = tf.keras.utils.deserialize_keras_object(
            config,
            custom_objects={
                'ConstantSparsity': pruning_schedule.ConstantSparsity,
                'PolynomialDecay': pruning_schedule.PolynomialDecay
            })

        self.assertEqual(sparsity.__dict__, sparsity_deserialized.__dict__)
Example #8
    def testPolynomialDecay_PrunesCorrectly(self):
        sparsity = pruning_schedule.PolynomialDecay(0.2, 0.8, 100, 110, 3, 2)

        step_100 = tf.Variable(100)
        step_102 = tf.Variable(102)
        step_105 = tf.Variable(105)
        step_110 = tf.Variable(110)
        compat.initialize_variables(self)

        # These values were generated using tf.polynomial_decay with the same
        # params in a colab to verify.
        self.assertAllClose(0.2, self.evaluate(sparsity(step_100))[1])
        self.assertAllClose(0.4928, self.evaluate(sparsity(step_102))[1])
        self.assertAllClose(0.725, self.evaluate(sparsity(step_105))[1])
        self.assertAllClose(0.8, self.evaluate(sparsity(step_110))[1])
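For reference, the expected values in this test follow the usual polynomial-decay formula, sparsity(step) = final + (initial - final) * (1 - (step - begin) / (end - begin)) ** exponent. A minimal sketch, assuming that formula (plain Python, independent of the TFMOT implementation), reproduces the numbers above:

def poly_sparsity(step, initial=0.2, final=0.8, begin=100, end=110, exponent=3):
    # Clamp progress to [0, 1], then apply the polynomial decay.
    p = min(max((step - begin) / (end - begin), 0.0), 1.0)
    return final + (initial - final) * (1.0 - p) ** exponent

for step in (100, 102, 105, 110):
    print(step, round(poly_sparsity(step), 4))
# 100 -> 0.2, 102 -> 0.4928, 105 -> 0.725, 110 -> 0.8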
Example #9
def get_pruning_params(mode='prune',
                       initial_sparsity=0.0,
                       final_sparsity=0.8,
                       begin_step=2000,
                       end_step=4000,
                       frequency=200):
  """Gets pruning hyper-parameters."""
  p_params = {}
  if mode == 'prune':
    p_params['pruning_schedule'] = pruning_schedule.PolynomialDecay(
        initial_sparsity=initial_sparsity,
        final_sparsity=final_sparsity,
        begin_step=begin_step,
        end_step=end_step,
        frequency=frequency)
  elif mode == 'constant':
    p_params['pruning_schedule'] = pruning_schedule.ConstantSparsity(
        target_sparsity=final_sparsity, begin_step=begin_step)
  else:
    raise ValueError('Mode: %s, is not valid' % mode)
  return p_params
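A minimal usage sketch for this helper (the model builder and the commented-out training call are placeholders, not part of the original snippet):

# Sketch: apply the schedule returned by get_pruning_params to a Keras model.
# build_model() is a hypothetical stand-in for whatever model is being pruned.
params = get_pruning_params(mode='prune', final_sparsity=0.8,
                            begin_step=2000, end_step=4000, frequency=200)
pruned_model = prune.prune_low_magnitude(build_model(), **params)
pruned_model.compile(loss='categorical_crossentropy', optimizer='sgd',
                     metrics=['accuracy'])
# The UpdatePruningStep callback must be passed to fit(); otherwise the
# schedule never advances past begin_step.
# pruned_model.fit(x, y, callbacks=[pruning_callbacks.UpdatePruningStep()])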
Example #10
if __name__ == "__main__":

    parameters = open("parameters.yml")
    yamlparameters = yaml.load(parameters, Loader=yaml.FullLoader)
    experiment = Experiment(api_key=yamlparameters["comet_api_key"],
                            project_name='qkeras',
                            auto_param_logging=True)

    X_train, X_test, y_train, y_test = get_features(yamlparameters["DataDir"])

    steps_per_epoch = int(len(X_train) / yamlparameters["Training_batch_size"])

    pruning_params = {
        "pruning_schedule":
        pruning_schedule.PolynomialDecay(
            initial_sparsity=0.0,
            final_sparsity=yamlparameters["Sparsity"],
            begin_step=yamlparameters["Pruning_begin_epoch"] * steps_per_epoch,
            end_step=yamlparameters["Pruning_end_epoch"] * steps_per_epoch)
    }

    # pruning_params = {"pruning_schedule": pruning_schedule.ConstantSparsity(
    #     target_sparsity=yamlparameters["Sparsity"],
    #     begin_step=yamlparameters["Pruning_begin_epoch"] * steps_per_epoch,
    #     end_step=yamlparameters["Pruning_end_epoch"] * steps_per_epoch,
    #     frequency=yamlparameters["Pruning_frequency"] * steps_per_epoch)}

    keras_model = models.qdense_model(Input(shape=X_train.shape[1:]),
                                      l1Reg=yamlparameters["Training_regularization"],
                                      bits=yamlparameters["Layer_bits"],
                                      ints=yamlparameters["Layer_ints"])
    keras_model = prune.prune_low_magnitude(keras_model, **pruning_params)
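The snippet ends right after wrapping the model; training would normally follow with the pruning step callback attached. A hypothetical continuation (the loss and the Training_epochs key are placeholders, not taken from the original script):

keras_model.compile(loss='categorical_crossentropy',  # placeholder loss
                    optimizer='adam',
                    metrics=['accuracy'])
keras_model.fit(X_train, y_train,
                batch_size=yamlparameters["Training_batch_size"],
                epochs=yamlparameters["Training_epochs"],  # assumed config key
                validation_data=(X_test, y_test),
                # Required so the PolynomialDecay schedule advances each step.
                callbacks=[pruning_callbacks.UpdatePruningStep()])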
    
Example #11
def _main():
    annotation_path = 'model_data/combined.txt'
    log_dir = 'logs/000/'
    classes_path = 'model_data/voc_classes.txt'
    anchors_path = 'model_data/yolo_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)
    model_path = 'model_data/'
    init_model = model_path + 'pelee3'
    new_pruned_keras_file = model_path + 'pruned_pelee3'
    epochs = 100
    init_epoch = 50
    input_shape = (384, 286)  # multiple of 32, hw
    model = tf.keras.models.load_model(
        init_model)  # make sure you know what you freeze

    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(
        log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss',
        save_weights_only=True,
        save_best_only=True,
        period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.1,
                                  patience=3,
                                  verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=10,
                                   verbose=1)

    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines) * val_split)
    num_train = len(lines) - num_val
    logdir = "d:/training"

    # Train with frozen layers first, to get a stable loss.
    # Adjust the number of epochs to your dataset. This step is enough to obtain a reasonably good model.
    if False:
        model.compile(
            optimizer=SGD(lr=2e-2),
            loss={
                # use custom yolo_loss Lambda layer.
                'yolo_loss': lambda y_true, y_pred: y_pred
            })

        batch_size = 16
        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))
        model.fit_generator(data_generator_wrapper(lines[:num_train],
                                                   batch_size, input_shape,
                                                   anchors, num_classes),
                            steps_per_epoch=max(1, num_train // batch_size),
                            validation_data=data_generator_wrapper(
                                lines[num_train:], batch_size, input_shape,
                                anchors, num_classes),
                            validation_steps=max(1, num_val // batch_size),
                            epochs=50,
                            initial_epoch=0,
                            callbacks=[logging, checkpoint])
        model.save_weights(log_dir + 'trained_weights_stage_1.h5')

    # Unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    if False:
        for i in range(len(model.layers)):
            model.layers[i].trainable = True
        model.compile(optimizer=Adam(lr=1e-4),
                      loss={
                          'yolo_loss': lambda y_true, y_pred: y_pred
                      })  # recompile to apply the change
        print('Unfreeze all of the layers.')

        batch_size = 16  # note that more GPU memory is required after unfreezing the body
        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))

        model.fit_generator(data_generator_wrapper(lines[:num_train],
                                                   batch_size, input_shape,
                                                   anchors, num_classes),
                            steps_per_epoch=max(1, num_train // batch_size),
                            validation_data=data_generator_wrapper(
                                lines[num_train:], batch_size, input_shape,
                                anchors, num_classes),
                            validation_steps=max(1, num_val // batch_size),
                            epochs=epochs,
                            initial_epoch=init_epoch,
                            callbacks=[logging, checkpoint, reduce_lr])
        model.save_weights(log_dir + 'trained_weights_final.h5')

    # Further training if needed.
    if True:
        new_pruning_params = {
            'pruning_schedule':
            pruning_schedule.PolynomialDecay(initial_sparsity=0.30,
                                             final_sparsity=0.60,
                                             begin_step=0,
                                             end_step=4,
                                             frequency=100)
        }

        new_pruned_model = prune.prune_low_magnitude(model,
                                                     **new_pruning_params)
        new_pruned_model.summary()
        yolo_loss = YoloLoss(input_shape, num_classes, anchors, coord_scale,
                             class_scale, object_scale, no_object_scale)
        new_pruned_model.compile(loss=yolo_loss,
                                 optimizer='adam',
                                 metrics=['accuracy'])
        callbacks = [
            sparsity.UpdatePruningStep(),
            sparsity.PruningSummaries(log_dir=logdir, profile_batch=0)
        ]
        batch_size = 16
        new_pruned_model.fit_generator(
            data_generator_wrapper(lines[:num_train], batch_size, input_shape,
                                   anchors, num_classes),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=data_generator_wrapper(lines[num_train:],
                                                   batch_size, input_shape,
                                                   anchors, num_classes),
            validation_steps=max(1, num_val // batch_size),
            epochs=4,
            # Include the pruning callbacks so UpdatePruningStep can advance
            # the schedule; without it the pruned weights are never updated.
            callbacks=[logging, checkpoint, reduce_lr] + callbacks)
        # Assumes held-out test arrays (x_test, y_test) prepared elsewhere.
        score = new_pruned_model.evaluate(x_test, y_test, verbose=0)
        print('Test loss:', score[0])
        print('Test accuracy:', score[1])
        final_model = sparsity.strip_pruning(new_pruned_model)
        final_model.summary()
        print('Saving pruned model to: ', new_pruned_keras_file)
        tf.keras.models.save_model(final_model,
                                   new_pruned_keras_file,
                                   include_optimizer=False)
        tflite_model_file = model_path + 'sparse_pelee3.tflite'
        converter = tf.lite.TFLiteConverter.from_keras_model_file(
            new_pruned_keras_file)
        converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
        tflite_model = converter.convert()
        with open(tflite_model_file, 'wb') as f:
            f.write(tflite_model)
Example #12
x_test = keras.preprocessing.sequence.pad_sequences(x_test, maxlen=maxlen)
print("x_train shape:", x_train.shape)
print("x_test shape:", x_test.shape)

print("Build model...")
model = keras.models.Sequential()
model.add(keras.layers.Embedding(max_features, 128, input_length=maxlen))
model.add(keras.layers.LSTM(128))  # try using a GRU instead, for fun
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(1))
model.add(keras.layers.Activation("sigmoid"))

model = prune.prune_low_magnitude(
    model,
    pruning_schedule.PolynomialDecay(initial_sparsity=0.3,
                                     final_sparsity=0.7,
                                     begin_step=1000,
                                     end_step=3000))

# try using different optimizers and different optimizer configs
model.compile(loss="binary_crossentropy",
              optimizer="adam",
              metrics=["accuracy"])
print_model_sparsity(model)

print("Train...")
model.fit(x_train,
          y_train,
          batch_size=batch_size,
          epochs=3,
          callbacks=[pruning_callbacks.UpdatePruningStep()],
          validation_data=(x_test, y_test))
Example #13
 def testRaisesErrorIfEndStepIsNegative(self):
     with self.assertRaises(ValueError):
         pruning_schedule.PolynomialDecay(0.4, 0.8, 10, -1)
Example #14
    GradientBoostingRegressor(n_estimators=15),
    get_mlp_diabetes(25),
    get_mlp_diabetes(50),
    get_mlp_diabetes(100),
]

CONFIG_POOL_REG = [
    Config(id=i, dataset_name=DATASET_NAME, classifier_model=clf)
    for i, clf in enumerate(MODEL_POOL_REG)
]

prune_poly_50 = {
    'pruning_schedule':
    pruning_schedule.PolynomialDecay(initial_sparsity=0.00,
                                     final_sparsity=0.50,
                                     begin_step=50,
                                     end_step=920,
                                     frequency=50)
}

prune_poly_90 = {
    'pruning_schedule':
    pruning_schedule.PolynomialDecay(initial_sparsity=0.00,
                                     final_sparsity=0.90,
                                     begin_step=50,
                                     end_step=920,
                                     frequency=50)
}

prune_const_90 = {
    'pruning_schedule':
Example #15
opt = Optimizer(config,
                api_key=yamlparameters["comet_api_key"],
                project_name="NNqhmv6",
                auto_metric_logging=True)

X_train, X_test, y_train, y_test = get_features(yamlparameters["DataDir"])

for experiment in opt.get_experiments():
    steps_per_epoch = int(len(X_train) / yamlparameters["Training_batch_size"])

    pruning_params = {
        "pruning_schedule":
        pruning_schedule.PolynomialDecay(
            initial_sparsity=0.0,
            final_sparsity=yamlparameters["Sparsity"],
            begin_step=experiment.get_parameter("pruning_begin_epoch") *
            steps_per_epoch,
            end_step=experiment.get_parameter("pruning_end_epoch") *
            steps_per_epoch)
    }
    keras_model = models.qdense_model(
        Input(shape=X_train.shape[1:]),
        l1Reg=experiment.get_parameter("Regularization"),
        bits=yamlparameters["Layer_bits"],
        ints=yamlparameters["Layer_ints"])
    keras_model = prune.prune_low_magnitude(keras_model, **pruning_params)

    startlearningrate = experiment.get_parameter("learning_rate")

    adam = Adam(lr=startlearningrate,
                beta_1=yamlparameters["Training_learning_beta1"],
Example #16
print("Pad sequences (samples x time)")
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print("x_train shape:", x_train.shape)
print("x_test shape:", x_test.shape)

print("Build model...")
model = keras.models.Sequential()
model.add(keras.layers.Embedding(max_features, 128, input_length=maxlen))
model.add(keras.layers.LSTM(128))  # try using a GRU instead, for fun
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(1))
model.add(keras.layers.Activation("sigmoid"))

model = prune.prune_low_magnitude(model, pruning_schedule.PolynomialDecay(
    initial_sparsity=0.3, final_sparsity=0.7, begin_step=1000, end_step=3000))

# try using different optimizers and different optimizer configs
model.compile(loss="binary_crossentropy",
              optimizer="adam",
              metrics=["accuracy"])
print_model_sparsity(model)

print("Train...")
model.fit(x_train, y_train, batch_size=batch_size, epochs=3,
          callbacks=[pruning_callbacks.UpdatePruningStep()],
          validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test,
                            batch_size=batch_size)
print_model_sparsity(model)
print("Test score:", score)