Example no. 1
0
def calculate_accuracy_for_matching(match_res_file, topk_acc):
    """Evaluate a feature-matching result file: top-k accuracy plus mAP.

    Args:
        match_res_file: TSV file where each row is
            [query_fea_idx, json(query_bbox), json(pred_labels)];
            pred_labels is a ranked list of [label, score, ...] entries
            (best match first).
        topk_acc: iterable of k values; accuracies are computed for every
            rank up to max(topk_acc).

    Side effects:
        Writes <match_res_file>.pred and <match_res_file>.gt and runs
        deteval, producing <match_res_file>.eval.map.

    Returns:
        List of cumulative top-k accuracies for k = 1..max(topk_acc).
        Returns all zeros when the result file contains no rows.
    """
    max_k = max(topk_acc)
    correct_counts = [0] * max_k
    num_total = 0
    all_gt = []
    all_pred = []
    for parts in tsv_io.tsv_reader(match_res_file):
        num_total += 1
        query_fea_idx = parts[0]
        query_bbox = json.loads(parts[1])
        pred_labels = json.loads(parts[2])
        # collect ground truth and the top-1 prediction for mAP evaluation;
        # score appears to be a permille-style integer, hence /1000.0
        all_gt.append([query_fea_idx, qd_common.json_dump([query_bbox])])
        all_pred.append([query_fea_idx, qd_common.json_dump(
                [{"class": pred_labels[0][0], "conf": pred_labels[0][1]/1000.0, "rect": query_bbox["rect"]}])])
        # record the first rank (if any) where the prediction is correct
        gt_label = query_bbox["class"]
        for i in range(min(max_k, len(pred_labels))):
            cur_pred = pred_labels[i][0]
            if cur_pred == gt_label:
                correct_counts[i] += 1
                break

    map_report = match_res_file + ".eval.map"
    pred_file = match_res_file + ".pred"
    gt_file = match_res_file + ".gt"
    tsv_io.tsv_writer(all_pred, pred_file)
    tsv_io.tsv_writer(all_gt, gt_file)
    deteval(truth=gt_file, dets=pred_file, report_file=map_report)

    # turn per-rank hit counts into cumulative top-k counts
    for i in range(1, len(correct_counts)):
        correct_counts[i] += correct_counts[i-1]

    # guard: an empty result file previously raised ZeroDivisionError
    if num_total == 0:
        return [0.0] * max_k
    return [c / float(num_total) for c in correct_counts]
Example no. 2
0
 def load_yaml_list(self, yaml_lst_file):
     """Return the yaml file paths listed in column 0 of *yaml_lst_file*.

     Each path is checked for existence as it is read (AssertionError on
     the first missing file).
     """
     collected = []
     for row in tsv_reader(yaml_lst_file):
         yaml_path = row[0]
         # fail fast on stale entries in the list file
         assert os.path.isfile(yaml_path)
         collected.append(yaml_path)
     return collected
Example no. 3
0
 def gen_rows():
     """Yield (key, label_json, prediction_json) rows parsed from *infile*.

     Column 2 holds ';'-separated 'label:score' pairs; each becomes a
     [label, int(score), ""] entry serialized with json_dump.
     """
     for row in tsv_reader(infile):
         assert len(row) == 3
         raw_pairs = (item.split(':') for item in row[2].split(';'))
         preds = [[label, int(score), ""] for label, score in raw_pairs]
         yield row[0], row[1], json_dump(preds)
Example no. 4
0
 def label_counts(self):
     """Return per-label bbox counts, computed lazily and cached.

     Counts come from column 5 (a label index) of self._bbox_idx_file;
     not available when the dataset is in test mode.
     """
     assert not self._for_test
     if self._label_counts is None:
         counts = np.zeros(len(self.label_to_idx))
         for row in tsv_reader(self._bbox_idx_file):
             counts[int(row[5])] += 1
         self._label_counts = counts
     return self._label_counts
Example no. 5
0
def _delf_feature_match(args):
    """Match each query DELF feature against every index feature.

    Args:
        args: tuple of (query_fea_rows, all_query_fea, all_index_fea,
            outfile, max_k), packed into one tuple so the function can be
            dispatched via a multiprocessing-style map.

    Side effects:
        Streams one TSV row per query index to *outfile*:
        (query_idx, json(query_bbox), json(pred_labels)), where pred_labels
        is the top-max_k list of [predicted_class, inlier_score,
        matched_fea_idx] sorted by score descending.
    """
    query_fea_rows, all_query_fea, all_index_fea, outfile, max_k = args

    # resume from last checkpoint
    # NOTE(review): rows from `outfile` overwrite rows from `outfile + ".tmp"`
    # because it is scanned second; only rows whose two JSON columns parse
    # cleanly are reused.
    last_cache = {}
    checkpoints = [outfile + ".tmp", outfile]
    for cache_file in checkpoints:
        if op.isfile(cache_file):
            for parts in tsv_io.tsv_reader(cache_file):
                if len(parts) == 3:
                    try:
                        # validate both JSON columns before trusting the row
                        json.loads(parts[1])
                        json.loads(parts[2])
                    except Exception:
                        continue
                    last_cache[int(parts[0])] = parts

    def gen_rows():
        # Generator consumed by tsv_writer below; yields cached rows verbatim
        # and computes fresh matches for everything else.
        for query_idx in query_fea_rows:
            print(query_idx)
            if query_idx in last_cache:
                yield last_cache[query_idx]
            else:
                query_fea = all_query_fea[query_idx]
                # score every index feature by its geometric-inlier count
                scores = []
                for i in range(len(all_index_fea)):
                    index_fea = all_index_fea[i]
                    inliers, locations_1_to_use, locations_2_to_use = matcher.get_inliers(
                            query_fea['location_np_list'],
                            query_fea['descriptor_np_list'],
                            index_fea['location_np_list'],
                            index_fea['descriptor_np_list'])
                    if inliers is not None:
                        # presumably a boolean mask; sum() counts the inliers
                        score = sum(inliers)
                    else:
                        score = 0
                    scores.append((i, score))
                scores = sorted(scores, key=lambda t: t[1], reverse=True)
                # use top1 matching image
                # (loop keeps up to max_k best matches as predicted labels)
                pred_labels = []
                for i, (matched_fea_idx, score) in enumerate(scores):
                    if i >= max_k:
                        break
                    cur_pred = get_bbox_from_fea(all_index_fea[matched_fea_idx])["class"]
                    pred_labels.append([cur_pred, score, matched_fea_idx])

                query_bbox = get_bbox_from_fea(query_fea)
                yield str(query_idx), qd_common.json_dump(query_bbox), qd_common.json_dump(pred_labels)

    tsv_io.tsv_writer(gen_rows(), outfile)