def _classify_inter_dataset(self, dataset_origin: str, dataset_target: str,
                            classifier: BaseClassifier, model: CnnModel, prop: PropertyExtractor):
    """Trains a classifier on features from the origin dataset and evaluates it on the
    target dataset (cross-dataset protocol)."""
    origin_path = join(self.features_root_path, dataset_origin, self.target_all,
                       prop.get_property_alias(), model.alias)
    target_path = join(self.features_root_path, dataset_target, self.target_all,
                       prop.get_property_alias(), model.alias)

    X_train = features_utils.concatenate_features(origin_path)
    X_test = features_utils.concatenate_features(target_path)

    y_train = features_utils.concatenate_labels(origin_path)
    y_test = features_utils.concatenate_labels(target_path)

    names_train = features_utils.concatenate_names(origin_path)  # kept for parity with names_test; not used below
    names_test = features_utils.concatenate_names(target_path)

    y_pred, y_proba = self._fit_and_predict(classifier, X_train, y_train, X_test)

    results = self._evaluate_results(y_pred, y_test, names_test)
    print('HTER: %f\nAPCER: %f\nBPCER: %f' % (results[0], results[1], results[2]))

    output_dir = join(self.inter_dataset_output, dataset_origin, dataset_target, self.target_all,
                      prop.get_property_alias(), model.alias, classifier.get_alias())
    self._save_artifacts(classifier, output_dir, y_pred, y_proba, results)
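# Illustrative sketch (not part of the original module): a plausible implementation of the
# `_fit_and_predict` helper called above, inferred from its call sites. It assumes the
# classifier exposes a scikit-learn-style estimator API (fit / predict / predict_proba);
# the original helper may differ.
def _fit_and_predict_sketch(classifier, X_train, y_train, X_test):
    classifier.fit(X_train, y_train)            # fit on the origin-dataset features
    y_pred = classifier.predict(X_test)         # hard predictions on the target features
    y_proba = classifier.predict_proba(X_test)  # class probabilities, later reused as meta-features
    return y_pred, y_proba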
def _test_inter_features_classifier(self) -> None:
    """
    STEP 2.1
    Used to predict on the test set with the already trained first classifier
    """
    for dataset_origin in os.listdir(self.features_root_path):
        for dataset_target in os.listdir(self.features_root_path):
            if dataset_origin == dataset_target:
                print('Origin and target are the same. Skipping.')
                continue

            # [ResNet, VGG]
            for model in self.models:
                base_path_target = join(self.features_root_path, dataset_target, self.target_all)

                for prop in self.properties:
                    for classifier in self.classifiers:
                        print('origin %s target %s model %s prop %s classifier %s'
                              % (dataset_origin, dataset_target, model.alias,
                                 prop.get_property_alias(), classifier.get_alias()))

                        # Classifier fitted in STEP 1.1 on the origin dataset
                        classifier_path = join(self.meta_dataset_output, self.INTER_NAME, "train", "features",
                                               dataset_origin, prop.get_property_alias(), model.alias,
                                               classifier.get_alias(), 'model.sav')
                        with open(classifier_path, 'rb') as f:
                            model_fitted = pickle.load(f)

                        path_features = join(base_path_target, prop.get_property_alias(), model.alias)

                        features_concatenated = features_utils.concatenate_features(path_features)
                        names_concatenated = features_utils.concatenate_names(path_features)
                        labels_concatenated = features_utils.concatenate_labels(path_features)

                        y_pred, y_pred_proba = self._predict(model_fitted, features_concatenated)

                        results = self._evaluate_results(y_pred, labels_concatenated, names_concatenated)
                        print('HTER: %f\nAPCER: %f\nBPCER: %f' % (results[0], results[1], results[2]))

                        output_dir = join(self.meta_dataset_output, self.INTER_NAME, "test", "features",
                                          dataset_origin, dataset_target, prop.get_property_alias(),
                                          model.alias, classifier.get_alias())

                        self._save_artifacts(classifier, output_dir, y_pred, y_pred_proba, results)
                        # save_txt(join(output_dir, 'names.txt'), names_concatenated)
                        np.save(join(output_dir, 'labels.npy'), labels_concatenated)
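# Illustrative sketch (not part of the original class): what `_save_artifacts` is assumed to do,
# inferred from the call sites -- persist the classifier as 'model.sav' (the file STEP 2.1
# reloads above) together with predictions, probabilities and metrics. All file names other
# than 'model.sav' are assumptions.
def _save_artifacts_sketch(classifier, output_dir, y_pred, y_proba, results):
    os.makedirs(output_dir, exist_ok=True)
    with open(join(output_dir, 'model.sav'), 'wb') as f:
        pickle.dump(classifier, f)                                 # reloaded by the test step
    np.save(join(output_dir, 'y_pred.npy'), y_pred)                # hard predictions
    np.save(join(output_dir, 'y_proba.npy'), y_proba)              # class probabilities (meta-features)
    np.save(join(output_dir, 'results.npy'), np.asarray(results))  # (HTER, APCER, BPCER)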
def _train_inter_feature_classifier(self) -> None:
    """
    STEP 1.1
    Used to train the first classifier (feature classifier) and generate the
    probabilities for each property map.
    """
    # [CBSR, RA, NUAA]
    for dataset in os.listdir(self.features_root_path):
        # [ResNet, VGG, MobileNet, etc]
        for model in self.models:
            base_path = join(self.features_root_path, dataset, self.target_all)

            # [Depth, Illum, Saliency]
            for prop in self.properties:
                property_path = join(base_path, prop.get_property_alias(), model.alias)

                features_concatenated = features_utils.concatenate_features(property_path)
                names_concatenated = features_utils.concatenate_names(property_path)
                labels_concatenated = features_utils.concatenate_labels(property_path)

                for classifier in self.classifiers:
                    output_dir = join(self.meta_dataset_output, self.INTER_NAME, "train", "features",
                                      dataset, prop.get_property_alias(), model.alias,
                                      classifier.get_alias())

                    if exists(output_dir):
                        print('Already generated, skipping.')
                        continue

                    # Predictions are generated on the training features themselves
                    y_pred, y_proba = self._fit_and_predict(classifier, features_concatenated,
                                                            labels_concatenated, features_concatenated)

                    results = self._evaluate_results(y_pred, labels_concatenated, names_concatenated)
                    print('HTER: %f\nAPCER: %f\nBPCER: %f' % (results[0], results[1], results[2]))

                    self._save_artifacts(classifier, output_dir, y_pred, y_proba, results)
                    save_txt(join(output_dir, 'names.txt'), names_concatenated)
                    np.save(join(output_dir, 'labels.npy'), labels_concatenated)
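# Illustrative sketch (not the original helper): `_evaluate_results` is assumed to return the
# presentation-attack-detection metrics printed above, in the order (HTER, APCER, BPCER).
# Label 1 is assumed to mean "attack" and 0 "bona fide" (the original convention may be the
# reverse), and the `names` argument -- likely used for per-video aggregation in the original --
# is ignored here for simplicity.
def _evaluate_results_sketch(y_pred, y_true, names=None):
    y_pred = np.asarray(y_pred)
    y_true = np.asarray(y_true)
    attack = y_true == 1
    bona_fide = y_true == 0
    # APCER: proportion of attack presentations misclassified as bona fide
    apcer = float(np.mean(y_pred[attack] == 0)) if attack.any() else 0.0
    # BPCER: proportion of bona fide presentations misclassified as attacks
    bpcer = float(np.mean(y_pred[bona_fide] == 1)) if bona_fide.any() else 0.0
    hter = (apcer + bpcer) / 2.0  # half total error rate
    return hter, apcer, bpcer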