def test_get_nearest_neighbors(self):
        """Train an indexer on the dataset embeddings and verify that a
        k=10 query returns well-shaped results with the query vector as
        its own closest neighbor."""
        model = InferenceModel(trunk=self.model)

        vectors = [self.dataset[i][0] for i in range(len(self.dataset))]
        model.train_indexer(vectors, self.emb_dim)

        # train_indexer must leave the underlying index flagged as trained.
        self.assertTrue(model.indexer.index.is_trained)

        indices, distances = model.get_nearest_neighbors([vectors[0]], k=10)

        # The query vector is its own nearest neighbor (index 0).
        self.assertEqual(indices[0][0], 0)
        # One query row in, one row of results out.
        self.assertEqual(len(indices), 1)
        self.assertEqual(len(distances), 1)
        # Each result row holds exactly k entries.
        self.assertEqual(len(indices[0]), 10)
        self.assertEqual(len(distances[0]), 10)

        # At least one slot must be filled (missing neighbors show as -1).
        self.assertTrue((indices != -1).any())
    def test_get_nearest_neighbors(self):
        test_filename = "test_inference.index"
        for indexer_input in [self.train_vectors, self.dataset]:
            for load_from_file in [False, True]:
                inference_model = InferenceModel(trunk=self.model)
                if load_from_file:
                    inference_model.load_index(test_filename)
                else:
                    inference_model.train_indexer(indexer_input)
                    inference_model.save_index(test_filename)

                self.helper_assertions(inference_model)

        os.remove(test_filename)
Example #3
0
    parser.add_argument('--patches_to_compare_c2',
                        type=int,
                        default=5,
                        help='number of patches to compare with cam2')
    return parser.parse_args(args)


if __name__ == '__main__':
    args = parse_args()

    trunk, embedder = load_trunk_embedder(args.trunk_model,
                                          args.embedder_model)

    match_finder = MatchFinder(distance=CosineSimilarity(), threshold=args.thr)
    inference_model = InferenceModel(trunk,
                                     embedder,
                                     match_finder=match_finder,
                                     batch_size=64)

    labels = pd.read_csv(args.det_csv)
    test_data, _ = get_data_loader('test', labels, args.det_patches)
    indices_cameras = c_f.get_labels_to_indices(test_data.camera)

    id_frames_cams = {}
    for cam in ['c010', 'c011', 'c012', 'c013', 'c014', 'c015']:
        id_frames_cams[cam] = get_id_frames_cam(test_data, indices_cameras,
                                                cam)

    id_frames_all = deepcopy(id_frames_cams['c010'])

    for cam in tqdm(['c011', 'c012', 'c013', 'c014', 'c015']):
        re_id_cam = compare_cams(test_data, deepcopy(id_frames_all),
Example #4
0
 def test_untrained_indexer(self):
     """Querying before any index has been trained must raise RuntimeError."""
     model = InferenceModel(trunk=self.model)
     query = self.dataset[0][0]
     with self.assertRaises(RuntimeError):
         model.get_nearest_neighbors(query, k=10)
Example #5
0
def main():
    """Classify test face images against a face database via k-nearest-neighbor
    search on learned embeddings, then render and save a confusion matrix.

    NOTE(review): relies on module-level globals not visible in this chunk
    (model_path, database_path, test_set_path, transform_pipeline,
    nb_neighbors, distance_threshold) — confirm they are defined above.
    """

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    # Loads a full pickled model object; assumes its class is importable here.
    model = torch.load(model_path).to(device)
    match_finder = MatchFinder(CosineSimilarity(), threshold=0.4)
    inference_model = InferenceModel(model, match_finder=match_finder)

    faces_db = datasets.ImageFolder(database_path,
                                    transform=transform_pipeline)
    faces_test = datasets.ImageFolder(test_set_path,
                                      transform=transform_pipeline)
    # ImageFolder.classes: sorted list of class (person) folder names.
    name_dict = faces_db.classes

    inference_model.train_indexer(faces_db)
    labels_to_indices = c_f.get_labels_to_indices(faces_db.targets)

    # One extra row/column for the 'unknown' (no confident match) outcome.
    confusion_matrix = np.zeros((len(name_dict) + 1, len(name_dict) + 1))

    for img, label in faces_test:
        # Add a batch dimension in place and move to the compute device.
        img = img.unsqueeze_(0).to(device)

        indices, distances = inference_model.get_nearest_neighbors(
            img, k=nb_neighbors)
        # Single query image -> take the first (only) result row.
        distances = distances[0]
        indices = indices[0]
        # Keep only neighbors closer than the rejection threshold.
        filter_indices = indices[distances < distance_threshold]

        class_name = faces_test.classes[label]
        if class_name == 'unknown':
            # 'unknown' maps to the extra last slot of the matrix.
            true_label = len(name_dict)
        else:
            true_label = faces_db.class_to_idx[class_name]
        if len(filter_indices):
            # Majority vote over the classes of the retained neighbors.
            class_neighbors = indices_to_class(filter_indices,
                                               labels_to_indices)
            prediction_index = max(class_neighbors, key=class_neighbors.count)
            confusion_matrix[true_label, prediction_index] += 1
        else:
            # No neighbor passed the threshold: predict 'unknown'.
            confusion_matrix[true_label, len(name_dict)] += 1

    plt.figure(figsize=(15, 10))

    # Total accuracy
    # Row-normalize so each true class sums to 1 (rates, not raw counts);
    # rows with no samples divide by zero and are zeroed out below.
    confusion_matrix_n = confusion_matrix.astype(
        'float') / confusion_matrix.sum(axis=1)[:, np.newaxis]
    confusion_matrix_n[np.isnan(confusion_matrix_n)] = 0

    # Mean diagonal over known classes only (excludes the 'unknown' slot).
    positive_accuracy = confusion_matrix_n.diagonal()[:-1].sum() / (
        len(faces_test.class_to_idx) - 1)
    print(f"Positive accuracy {positive_accuracy}%")

    # Rate at which true unknowns were correctly rejected.
    negative_accuracy = confusion_matrix_n.diagonal()[-1]
    print(f"Negative accuracy {negative_accuracy}%")

    class_names = [name_dict[i]
                   for i in range(0, len(name_dict))] + ['unknown']
    df_cm = pd.DataFrame(confusion_matrix_n,
                         index=class_names,
                         columns=class_names)
    heatmap = sns.heatmap(df_cm,
                          annot=True,
                          annot_kws={"size": 10},
                          fmt='.2f',
                          cbar_kws={
                              'format': '%.0f%%',
                              'ticks': [0, 100]
                          })

    heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(),
                                 rotation=0,
                                 ha='right',
                                 fontsize=15)
    heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(),
                                 rotation=45,
                                 ha='right',
                                 fontsize=15)
    plt.ylabel('True label', fontsize=15)
    plt.xlabel('Predicted label', fontsize=15)
    plt.title(f"Confusion matrix  Triplet-Loss, Positive - Negative accuracy \
        ({round(positive_accuracy, 2)}%, {round(negative_accuracy, 2)}%) ",
              fontsize=20)
    plt.savefig("confusion_matrix.png")
Example #6
0
def get_inference_model(trunk, embedder):
    """Build an InferenceModel over *trunk* and *embedder* that matches
    embeddings by similarity above a 0.7 threshold."""
    finder = MatchFinder(mode="sim", threshold=0.7)
    return InferenceModel(trunk, embedder=embedder, match_finder=finder)
 def test_add_to_indexer(self):
     """Populate a manually attached flat L2 index and run the shared checks."""
     model = InferenceModel(trunk=self.model)
     # Bypass train_indexer: a flat L2 index needs no training phase.
     model.indexer.index = faiss.IndexFlatL2(512)
     model.add_to_indexer(self.dataset)
     self.helper_assertions(model)