def _load_features_and_targets(self, base_path):
        """Load the train/test feature and target arrays plus test sample names.

        Reads ``.npy`` feature/target files and a text file of sample names
        from *base_path*, resolving filenames from the module-level templates
        (``NAME_FEATURES``, ``NAME_TARGETS``, ``NAME_SAMPLES``) filled with
        ``self.train_alias`` / ``self.test_alias``.

        Returns:
            tuple: (X_train, y_train, X_test, y_test, names_test)
        """
        def resolve(template, alias):
            # Build the on-disk path for one artifact of the given split.
            return os.path.join(base_path, template % alias)

        X_train = np.load(resolve(NAME_FEATURES, self.train_alias))
        X_test = np.load(resolve(NAME_FEATURES, self.test_alias))

        y_train = np.load(resolve(NAME_TARGETS, self.train_alias))
        y_test = np.load(resolve(NAME_TARGETS, self.test_alias))

        names_test = load_txt(resolve(NAME_SAMPLES, self.test_alias))
        return X_train, y_train, X_test, y_test, names_test
# Exemplo n.º 2
# 0
    def _test_inter_probas_classifier(self):
        """Evaluate the stacked meta-classifiers in the inter-dataset protocol.

        For every ordered pair of distinct datasets (origin -> target), every
        classifier and every CNN model, this method:

        1. loads the per-modality test probabilities (original, depth,
           illumination, saliency) produced earlier for the pair,
        2. stacks them into one flat feature vector per sample,
        3. restores (un-pickles) the meta-classifier fitted on the origin
           dataset's training probabilities,
        4. predicts on the target data and prints HTER / APCER / BPCER.

        Side effects: reads probability/label/name artifacts and the pickled
        model from ``self.meta_dataset_output``; prints metrics to stdout.
        """
        features_path = os.path.join(self.meta_dataset_output, self.INTER_NAME, "test", "features")
        # [CBSR, RA, NUAA]
        for dataset_origin in os.listdir(features_path):
            for dataset_target in os.listdir(features_path):

                if dataset_target == dataset_origin:
                    print('Origin and target are the same, skipping.')
                    continue

                # Invariant for the two inner loops: root directory of this
                # origin/target pair (hoisted out of the model loop).
                base_path = join(self.meta_dataset_output, self.INTER_NAME, "test", "features", dataset_origin,
                                 dataset_target)

                for classifier in self.classifiers:

                    # [ResNet, VGG...]
                    for model in self.models:
                        probas_original = self.__load_test_probas(dataset_origin, dataset_target, "original", model,
                                                                  classifier)
                        probas_depth = self.__load_test_probas(dataset_origin, dataset_target, "depth", model,
                                                               classifier)
                        probas_illumination = self.__load_test_probas(dataset_origin, dataset_target, "illumination",
                                                                      model, classifier)
                        probas_saliency = self.__load_test_probas(dataset_origin, dataset_target, "saliency", model,
                                                                  classifier)

                        # One (n_samples, n_classes, 4) tensor: one slot per modality.
                        stacked_probas = np.stack((probas_depth, probas_illumination, probas_saliency, probas_original),
                                                  axis=2)

                        labels = self.__load_test_labels(dataset_origin, dataset_target, "original", model, classifier)
                        names = load_txt(
                            join(base_path, "original", model.alias, classifier.get_alias(), 'names.txt'))

                        # Meta-classifier trained on the ORIGIN dataset's probabilities.
                        classifier_path = join(self.meta_dataset_output, self.INTER_NAME, "train", "probas",
                                               dataset_origin, model.alias, classifier.get_alias(), 'model.sav')

                        # NOTE(review): pickle.load on a file from disk — safe only
                        # if the artifact is produced by this pipeline itself.
                        with open(classifier_path, 'rb') as f:
                            model_fitted = pickle.load(f)

                        # Flatten per-sample modality probabilities into one row.
                        stacked_probas = np.reshape(stacked_probas, (stacked_probas.shape[0], -1))

                        y_pred, y_pred_proba = self._predict(model_fitted, stacked_probas)

                        results = self._evaluate_results(y_pred, labels, names)
                        print('HTER: %f\nAPCER: %f\nBPCER: %f' % (results[0], results[1], results[2]))

    def _test_intra_probas_classifier(self):
        """Evaluate the stacked meta-classifiers in the intra-dataset protocol.

        For every dataset, classifier and CNN model, this method loads the
        per-modality test probabilities (original, depth, illumination,
        saliency), stacks them into one flat feature vector per sample,
        restores the pickled meta-classifier trained on the same dataset,
        predicts, prints HTER / APCER / BPCER / ACC, and persists the
        prediction artifacts (names, labels, classifier outputs) under the
        ``test/probas`` tree.

        Side effects: reads artifacts from and writes results to
        ``self.meta_dataset_output``; prints metrics to stdout.
        """
        features_path = os.path.join(self.meta_dataset_output, self.INTRA_NAME,
                                     "test", "features")
        # [CBSR, RA, NUAA]
        for dataset_origin in os.listdir(features_path):

            # Invariant for the inner loops: root directory of this dataset
            # (hoisted out of the model loop — depends only on dataset_origin).
            base_path = join(self.meta_dataset_output, self.INTRA_NAME,
                             "test", "features", dataset_origin)

            for classifier in self.classifiers:

                # [ResNet, VGG...]
                for model in self.models:
                    probas_original = self.__load_test_probas_intra(
                        dataset_origin, "original", model, classifier)
                    probas_depth = self.__load_test_probas_intra(
                        dataset_origin, "depth", model, classifier)
                    probas_illumination = self.__load_test_probas_intra(
                        dataset_origin, "illumination", model, classifier)
                    probas_saliency = self.__load_test_probas_intra(
                        dataset_origin, "saliency", model, classifier)

                    # One (n_samples, n_classes, 4) tensor: one slot per modality.
                    stacked_probas = np.stack(
                        (probas_depth, probas_illumination, probas_saliency,
                         probas_original),
                        axis=2)

                    labels = self.__load_test_labels_intra(
                        dataset_origin, "original", model, classifier)
                    names = load_txt(
                        join(base_path, "original", model.alias,
                             classifier.get_alias(), 'names.txt'))

                    # Meta-classifier trained on this dataset's probabilities.
                    classifier_path = join(self.meta_dataset_output,
                                           self.INTRA_NAME, "train", "probas",
                                           dataset_origin, model.alias,
                                           classifier.get_alias(), 'model.sav')

                    # NOTE(review): pickle.load on a file from disk — safe only
                    # if the artifact is produced by this pipeline itself.
                    with open(classifier_path, 'rb') as f:
                        model_fitted = pickle.load(f)

                    # Flatten per-sample modality probabilities into one row.
                    stacked_probas = np.reshape(stacked_probas,
                                                (stacked_probas.shape[0], -1))

                    y_pred, y_pred_proba = self._predict(
                        model_fitted, stacked_probas)

                    print(
                        '\n\nDataset %s classifier %s model %s ' %
                        (dataset_origin, classifier.get_alias(), model.alias))
                    results = self._evaluate_results(y_pred, labels, names)
                    print('HTER: %f\nAPCER: %f\nBPCER: %f\nACC: %f' %
                          (results[0], results[1], results[2], results[3]))

                    output_dir = join(self.meta_dataset_output,
                                      self.INTRA_NAME, "test", "probas",
                                      dataset_origin, model.alias,
                                      classifier.get_alias())

                    # Persist predictions/metrics plus the evaluated sample
                    # names and ground-truth labels alongside them.
                    self._save_artifacts(classifier, output_dir, labels,
                                         y_pred, y_pred_proba, results)
                    save_txt(join(output_dir, 'names.txt'), names)
                    np.save(join(output_dir, 'labels.npy'), labels)

                print('stacked done!')