def test_on_epoch_end(self):
    model1 = Mock()

    model2 = Mock()
    model2.save = Mock()

    callback = AltModelCheckpoint('path/to/model.hdf5', model2)
    callback.model = model1

    callback.on_epoch_end(42)
    self.assertIs(callback.model, model1, 'original model is restored')

    # model2 saved
    model2.save.assert_called_once_with('path/to/model.hdf5',
                                        overwrite=True)
def test_base_cls(self):
    self.assertIsInstance(AltModelCheckpoint('foobar', None),
                          keras.callbacks.ModelCheckpoint)
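For context, AltModelCheckpoint subclasses keras.callbacks.ModelCheckpoint but saves an alternate model, typically the single-GPU template of a multi-GPU wrapper, instead of the model being trained. A minimal usage sketch, assuming two GPUs are available and the package's older top-level import path (newer releases expose it under a submodule):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import multi_gpu_model
from alt_model_checkpoint import AltModelCheckpoint  # import path may differ by version

# base_model is the template whose weights we actually want on disk
base_model = Sequential([Dense(1, input_shape=(4,))])
gpu_model = multi_gpu_model(base_model, gpus=2)
gpu_model.compile(loss='mse', optimizer='adam')

# Checkpoint base_model so the saved file loads without the multi-GPU wrapper
gpu_model.fit(np.zeros((8, 4)), np.zeros((8, 1)),
              callbacks=[AltModelCheckpoint('base_model.h5', base_model)])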
Example #3
# NOTE: the start of this call is cut off in the source; the `val_generator =`
# assignment and the `SequenceGenerator` name are assumed from the usage below.
val_generator = SequenceGenerator(val_source_path,
                                  num_tsteps,
                                  batch_size=batch_size,
                                  N_seq=num_seq_val,
                                  output_mode="prediction")

print("Shapes: ", train_generator.X.shape, val_generator.X.shape)
print("train generator", np.amax(train_generator.X),
      np.amin(train_generator.X))

lr_schedule = lambda epoch: 0.00001 if epoch < 75 else 0.0001  # start with lr of 1e-5, then raise it to 1e-4 after 75 epochs

lr_callback = LearningRateScheduler(lr_schedule)
if save_model:
    weight_callback = AltModelCheckpoint(save_weights_path,
                                         model,
                                         monitor='val_loss',
                                         save_best_only=True)
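    # (monitor, save_best_only, and any other extra kwargs are forwarded
    # unchanged to the underlying keras.callbacks.ModelCheckpoint)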

# Append the L2 norms of the gradients and weights as extra training metrics
parallel_model.metrics_names.append("gradient_norm")
parallel_model.metrics_tensors.append(get_gradient_norm(parallel_model))
parallel_model.metrics_names.append("gradient_dyn_norm")
parallel_model.metrics_tensors.append(
    get_gradient_dynamic_norm(parallel_model))
parallel_model.metrics_names.append("gradient_static_norm")
parallel_model.metrics_tensors.append(get_gradient_static_norm(parallel_model))

parallel_model.metrics_names.append("w_norm")
parallel_model.metrics_tensors.append(get_weight_norm(parallel_model))
parallel_model.metrics_names.append("w_dyn_norm")
parallel_model.metrics_tensors.append(get_weight_dynamic_norm(parallel_model))
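get_gradient_norm and the related helpers are defined elsewhere in this project and are not shown here. A minimal sketch of the gradient variant, assuming the old Keras backend API (model.total_loss, K.gradients) that the metrics_tensors mechanism above also relies on:

import keras.backend as K

def get_gradient_norm(model):
    # Hypothetical reimplementation: global L2 norm of the gradients of the
    # total loss with respect to all trainable weights.
    grads = K.gradients(model.total_loss, model.trainable_weights)
    return K.sqrt(sum(K.sum(K.square(g)) for g in grads))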
def test_kwargs_pass_through(self):
    callback = AltModelCheckpoint('path/to/model.hdf5',
                                  None,
                                  monitor='foobar')
    self.assertEqual(callback.filepath, 'path/to/model.hdf5')
    self.assertEqual(callback.monitor, 'foobar')
Example #5
    def fit(self, _dataset, model_dir_path, tensorboard_dir='./logs', epochs=20, batch_size=32, threshold=0.6, save_to="."):
        self.threshold = threshold
        dataset = dict()
        # for _key in _dataset:
        #     if len(_dataset[_key]) > 1:
        #         dataset[_key] = _dataset[_key]

        # for name, feature in dataset.items():
        #     self.input_shape = feature[0].shape
        #     break

        # self.pretrained_model = self.load_pretrained_model()

        self.model = self.create_network(input_shape=self.input_shape)
        architecture_file_path = self.get_architecture_path(save_to)
        with open(architecture_file_path, 'w+') as f:
            f.write(self.model.to_json())

        names = []
        self.labels = dict()
        for name in dataset.keys():
            names.append(name)
            self.labels[name] = len(self.labels)

        self.config = dict()
        self.config['input_shape'] = self.input_shape
        self.config['labels'] = self.labels
        self.config['threshold'] = self.threshold

        config_file_path = SiameseFaceModel.get_config_path(model_dir_path=save_to)
        np.save(config_file_path, self.config)
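        # NOTE: np.save pickles this dict, so reading it back later requires
        # np.load(path, allow_pickle=True).item() on modern NumPy.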


        # parallel_model = multi_gpu_model(self.model,1)
        # parallel_model = self.model
        weight_file_path = SiameseFaceModel.get_weight_path(model_dir_path)
        # checkpoint = ModelCheckpoint(weight_file_path, monitor='loss', verbose=SiameseFaceModel.VERBOSE, save_best_only=True, mode='auto')
        alt_checkpoint = AltModelCheckpoint(save_to + '/' + 'model{epoch:02d}.h5', self.model)
        reduce_lr = ReduceLROnPlateau(monitor='val_accuracy', factor=0.1, patience=2, verbose=2)
        early_stopping = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=5, verbose=2)
        tensorboard = TensorBoard(log_dir=tensorboard_dir)
        callbacks_list = [alt_checkpoint, tensorboard, reduce_lr, early_stopping]

        # rms = RMSprop(lr=.00001)
        adam = Adam(lr=0.00001)
        # sgd = SGD(lr=0.0001)


        self.model.compile(loss=contrastive_loss, optimizer=adam, metrics=[self.accuracy])
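        # contrastive_loss is assumed to be the standard Hadsell-et-al. form:
        #   y_true * d**2 + (1 - y_true) * max(margin - d, 0)**2
        # where d is the Euclidean distance between the two embeddings.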
        print(self.model.summary())

        df_train, df_valid = utils.get_faces_df(_dataset)

        train_gen = PairImageLoader(df_train, preprocess_input, (224, 224), batchSize=batch_size, flip=True, dataset_path=_dataset)
        valid_gen = PairImageLoader(df_valid, preprocess_input, (224, 224), batchSize=batch_size, flip=False, dataset_path=_dataset)
        # valid_gen = [valid_gen[:0], valid_gen[:,1]]
        print('Data load done, starting train')

        self.model.fit_generator(train_gen, steps_per_epoch=len(train_gen),
                                 epochs=epochs, validation_data=valid_gen, validation_steps=len(valid_gen),
                                 callbacks=callbacks_list, workers=12, use_multiprocessing=True)

        self.model.save(save_to + "/" + "final.h5")
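Because the model is compiled with custom loss and metric functions, reloading final.h5 later requires passing them as custom_objects. A hypothetical sketch, assuming contrastive_loss and an accuracy function are importable under those names:

from keras.models import load_model

model = load_model(save_to + "/" + "final.h5",
                   custom_objects={'contrastive_loss': contrastive_loss,
                                   'accuracy': accuracy})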