Example No. 1
def test_repr():
    """ Check that a Matching is represented by a normal dictionary. """

    matching = Matching()
    assert repr(matching) == "{}"

    matching = Matching(dictionary)
    assert repr(matching) == str(dictionary)
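
The tests in this file rely on module-level fixtures that are not shown. A minimal sketch of what they might look like (hypothetical names and values, assuming the matching package exposes Player and Matching at the top level):

from matching import Matching, Player

suitors = [Player("A"), Player("B"), Player("C")]
reviewers = [Player("X"), Player("Y"), Player("Z")]
dictionary = dict(zip(suitors, reviewers))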
Example No. 2
def test_keys():
    """ Check a Matching can have its `keys` accessed. """

    matching = Matching()
    assert list(matching.keys()) == []

    matching = Matching(dictionary)
    assert list(matching.keys()) == suitors
Example No. 3
def test_values():
    """ Check a Matching can have its `values` accessed. """

    matching = Matching()
    assert list(matching.values()) == []

    matching = Matching(dictionary)
    assert list(matching.values()) == reviewers
Example No. 4
def test_init():
    """Make an instance of the Matching class and check their attributes are
    correct."""

    matching = Matching()
    assert matching == {}

    matching = Matching(dictionary)
    assert matching == dictionary
Example No. 5
    def left_stitch(self, a, b, flag=None):
        if flag == 1:
            m = Matching('./lib/lib1.jpg', './lib/lib2.jpg')
            m.ransac(100)
            H_inv = m.H

        H = self.matcher.match(a, b, 'left')
        H_inv = np.linalg.inv(H)

        # find top left point for offset calculation.
        tl = np.dot(H_inv, np.array([0, 0, 1]))
        tl = tl / tl[-1]
        H_inv[0][-1] += abs(tl[0])
        H_inv[1][-1] += abs(tl[1])

        # find down right for size calculation.
        w, h = a.shape[1], a.shape[0]
        dr = np.dot(H_inv, np.array([w, h, 1]))
        dsize = (int(dr[0]) + abs(int(tl[0])), int(dr[1]) + abs(int(tl[1])))

        # warp a into b's view and put them together.
        merge = cv2.warpPerspective(a, H_inv, dsize)
        fig = plt.figure()
        ax1 = fig.add_subplot(1, 2, 1)
        plt.imshow(merge)
        ax2 = fig.add_subplot(1, 2, 2)
        merge[abs(int(tl[1])):b.shape[0] + abs(int(tl[1])),
              abs(int(tl[0])):b.shape[1] + abs(int(tl[0])), :] = b
        plt.imshow(merge)
        plt.show()
        offset = (abs(int(tl[0])), abs(int(tl[1])))
        return merge, offset
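
The offset handling in left_stitch can be checked in isolation. A minimal sketch with hypothetical values (not from the original repository), showing that adding abs(tl) to the last column of H_inv moves the warped top-left corner to non-negative coordinates:

import numpy as np

H_inv = np.array([[1.0, 0.0, -40.0],
                  [0.0, 1.0, -25.0],
                  [0.0, 0.0, 1.0]])

# Where does the top-left corner (0, 0) land after warping?
tl = H_inv @ np.array([0.0, 0.0, 1.0])
tl = tl / tl[-1]                      # -> (-40, -25)

# Shift the homography so the warped image starts at (0, 0).
H_shift = H_inv.copy()
H_shift[0, -1] += abs(tl[0])
H_shift[1, -1] += abs(tl[1])

shifted = H_shift @ np.array([0.0, 0.0, 1.0])
print(shifted[:2] / shifted[-1])      # -> [0. 0.]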
Example No. 6
    def __init__(self, batch_size, encoder):
        super(Model, self).__init__()
        self.encoder = encoder
        n_classes = 3   # entailment, contradiction, neutral
        n_inputs = 4 * encoder.get_dim()
        words_dim = 0

        # classifier: Multi-Layer Perceptron with 1 hidden layer of 512 hidden units
        # dnn_hidden_units = [512]
        dnn_hidden_units = 512
        classifier = nn.Sequential(*[
            nn.Linear(n_inputs, dnn_hidden_units),
            nn.ReLU(),
            nn.Linear(dnn_hidden_units, n_classes),
        ])

        self.matching = Matching()
        self.classifier = classifier
        self.softmax = torch.nn.Softmax(dim=words_dim)

        self.net = nn.Sequential(*[
            # Matching(),
            self.classifier,
            self.softmax,
        ])
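
A minimal sketch (hypothetical encoder dimension) of how the classifier head above maps the 4 * encoder-dim feature vector to three class scores. Note that the quoted code applies softmax over dim=0 (words_dim); for per-example class probabilities the usual choice is dim=1:

import torch
import torch.nn as nn

enc_dim = 2048                          # hypothetical encoder output size
n_inputs, n_classes = 4 * enc_dim, 3

classifier = nn.Sequential(
    nn.Linear(n_inputs, 512),
    nn.ReLU(),
    nn.Linear(512, n_classes),
)

features = torch.randn(8, n_inputs)     # a batch of 8 combined sentence features
logits = classifier(features)           # shape: (8, 3)
probs = torch.softmax(logits, dim=1)    # probabilities over the 3 classes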
Example No. 7
    def solve(self, optimal="suitor"):
        """ Solve the instance of SM using either the suitor- or
        reviewer-oriented Gale-Shapley algorithm. Return the matching. """

        self.matching = Matching(
            stable_marriage(self.suitors, self.reviewers, optimal))
        return self.matching
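
A hypothetical driver for the solve() shown above, assuming the matching package's game-level StableMarriage API (names should be verified against the installed version):

from matching.games import StableMarriage

suitor_prefs = {"A": ["X", "Y"], "B": ["Y", "X"]}
reviewer_prefs = {"X": ["B", "A"], "Y": ["A", "B"]}

game = StableMarriage.create_from_dictionaries(suitor_prefs, reviewer_prefs)
matching = game.solve(optimal="suitor")
print(matching)  # e.g. {A: X, B: Y}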
Example No. 8
    def solve(self, optimal="student"):
        """Solve the instance of SA using either the student- or
        supervisor-optimal algorithm."""

        self.matching = Matching(
            student_allocation(self.students, self.projects, self.supervisors,
                               optimal))
        return self.matching
Example No. 9
def test_setitem_key_error():
    """Check that a ValueError is raised if trying to add a new item to a
    Matching."""

    matching = Matching(dictionary)

    with pytest.raises(ValueError):
        matching["foo"] = "bar"
Example No. 10
    def solve(self, optimal="resident"):
        """ Solve the instance of HR using either the resident- or
        hospital-oriented algorithm. Return the matching. """

        self._matching = Matching(
            hospital_resident(self.residents, self.hospitals, optimal)
        )
        return self.matching
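
Similarly, a hypothetical driver for the hospital-resident solve() above, assuming the matching package's HospitalResident API (names should be verified against the installed version):

from matching.games import HospitalResident

resident_prefs = {"A": ["X"], "B": ["X", "Y"], "C": ["Y", "X"]}
hospital_prefs = {"X": ["B", "A", "C"], "Y": ["C", "B"]}
capacities = {"X": 2, "Y": 1}

game = HospitalResident.create_from_dictionaries(
    resident_prefs, hospital_prefs, capacities
)
print(game.solve(optimal="resident"))  # e.g. {X: [A, B], Y: [C]}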
Example No. 11
def test_is_stable():
    men_ranks, women_ranks = init_preferences()

    matches = Matching(men_ranks, women_ranks)
    assert (matches.is_stable() is False)

    matches.match_pair('abe', 'ada')
    matches.match_pair('ben', 'bea')
    matches.match_pair('che', 'cee')
    assert (matches.is_stable() is True)

    matches = Matching(men_ranks, women_ranks)
    matches.match_pair('abe', 'bea')
    matches.match_pair('ben', 'ada')
    matches.match_pair('che', 'cee')
    assert (matches.is_stable() is False)

    print("is_stable: OK!")
Example No. 12
def test_setitem_none():
    """ Check can set item in Matching to be None. """

    matching = Matching(dictionary)
    suitor = suitors[0]

    matching[suitor] = None
    assert matching[suitor] is None
    assert suitor.matching is None
Example No. 13
def matching():
    if request.method == 'POST':
        content = request.get_json(force=True)
        payload = dict()
        payload["user_ids"] = content.get('user_ids')
        payload["desc"] = content.get('descriptions')
        matcher = Matching(payload)
        matches = matcher.get_matches()
        return jsonify(matches)
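
A hypothetical client call for the endpoint above (route and host are assumed, since the decorator is not shown); the JSON body carries the user_ids and descriptions keys the handler reads:

import requests

payload = {
    "user_ids": [1, 2, 3],
    "descriptions": ["likes hiking", "enjoys chess", "plays guitar"],
}
resp = requests.post("http://localhost:5000/matching", json=payload)
print(resp.json())  # the matches returned by Matching(payload).get_matches()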
Example No. 14
def test_setitem_val_error():
    """Check that a ValueError is raised if trying to set an item with some
    illegal new matching."""

    matching = Matching(dictionary)
    suitor = suitors[0]
    new_match = [1, 2, 3]

    with pytest.raises(ValueError):
        matching[suitor] = new_match
Example No. 15
def test_setitem_multiple():
    """ Check can set item in Matching to be a group of Player instances. """

    matching = Matching(dictionary)
    suitor = suitors[0]
    new_match = reviewers[:-1]

    matching[suitor] = new_match
    assert set(matching[suitor]) == set(new_match)
    for rev in new_match:
        assert rev.matching == suitor
Example No. 16
 def initObj(self):
     # Set the sizes used for scaling the background images
     self.gameNet = GameNet(self)
     self.gameNet.connectToServer()
     self.menu = Menu((self.width, self.height), self)
     self.rules = Rules(self.width, self.height)
     self.setLevel = SetLevel(self.width, self.height)
     self.matching = Matching((self.width, self.height), self)
     self.game = Game(self)
     # The finish screen is the same size as the game screen
     self.finish = Finish((self.game.width, self.game.height))
Example No. 17
def test_setitem_single():
    """Check that a key in Matching can have its value changed to another
    Player instance."""

    matching = Matching(dictionary)
    suitor, reviewer = suitors[0], reviewers[-1]

    matching[suitor] = reviewer
    assert matching[suitor] == reviewer
    assert suitor.matching == reviewer
    assert reviewer.matching == suitor
Example No. 18
 def process_data(self, address, begin_time, end_time):
     ap_m = self.conn.select_data(address, begin_time, end_time)
     # Base-map processing
     if end_time == global_begin_time:
         # TODO: compare with the previous base map and update ap_mac
         # (including the invalidated values it contains).
         pass
     # Historical-data matching
     else:
         # Match against the historical data.
         match = Matching("signal type 0 or 1")
         # Time range over which to match the historical data.
         match.history_division("start time", "end time")
         # Matching results.
         aps, result = match.history_matching(begin_time, end_time)
         # TODO: merge the two parts of the data: ap_m and result.
     return ap_m
Example No. 19
    def repair(self,
               P,
               Q,
               inter,
               ins=None,
               args=None,
               entryfnc=None,
               ignoreio=False,
               ignoreret=False):

        self.starttime = time.time()

        self.vignore = set()
        if ignoreio:
            self.vignore |= set([VAR_IN, VAR_OUT])

        # (1) Check struct match
        M = Matching(verbose=self.verbose)
        self.sm = M.match_struct(P, Q)
        if self.sm is None:
            raise StructMismatch('')

        # (2) Obtain trace of P
        self.trace = self.gettrace(P, inter, ins, args, entryfnc)

        # (3) Repair each fnc separately
        self.inter = inter()
        results = {}
        for fnc1 in P.getfncs():
            fnc2 = Q.getfnc(fnc1.name)
            results[fnc1.name] = (self.repair_fnc(fnc1, fnc2) +
                                  (self.sm[fnc1.name], ))

        self.debug('total time: %.3f', round(time.time() - self.starttime, 3))

        return results
Example No. 20
 def __init__(self, n, criteria):
     """ Initialize the condition with the value of n """
     self.n = n
     self.playedFilter = ComparisonFilter(PLAYED, criteria)
     self.eventCondition = Matching(EVENT, criteria)
Example No. 21
def score(path_predictions, path_groundtruth, path_output, iou_threshold=.5):
    """
    Compute metrics on a number of prediction files, given a folder of prediction files
    and a ground truth.  Primary metric is mean average precision (mAP).

    Args:
        path_predictions: a folder path of prediction files.  
          Prediction files should have filename format 'XYZ.tif.txt',
          where 'XYZ.tif' is the xView TIFF file being predicted on.  
          Prediction files should be in space-delimited csv format, with each
          line like (xmin ymin xmax ymax class_prediction score_prediction)

        path_groundtruth: a file path to a single ground truth geojson

        path_output: a folder path for output scoring files

        iou_threshold: a float between 0 and 1 indicating the percentage
          iou required to count a prediction as a true positive

    Outputs:
      Writes two files to the 'path_output' parameter folder: 'score.txt' and 'metrics.txt'
      'score.txt' contains a single floating point value output: mAP
      'metrics.txt' contains the remaining metrics in per-line format (metric/class_num: score_float)

    Raises:
      ValueError: if there are files in the prediction folder that are not in the ground truth geojson.
        EG a prediction file is titled '15.tif.txt', but the file '15.tif' is not in the ground truth.

  """
    assert (iou_threshold < 1 and iou_threshold > 0)

    ttime = time.time()
    boxes_dict = {}
    pchips = []
    stclasses = []
    num_preds = 0

    for file in tqdm(os.listdir(path_predictions)):
        fname = file.split(".txt")[0]
        pchips.append(fname)

        with open(path_predictions + file, 'r') as f:
            arr = np.array(list(csv.reader(f, delimiter=" ")))
            arr = arr[:, :6].astype(np.float64)
            threshold = 0
            arr = arr[arr[:, 5] > threshold]
            stclasses += list(arr[:, 4])
            num_preds += arr.shape[0]
            if np.any(arr[:, :4] < 0):
                raise ValueError('Bounding boxes cannot be negative.')
            boxes_dict[fname] = arr[:, :6]

    pchips = sorted(pchips)
    stclasses = np.unique(stclasses).astype(np.int64)

    gt_coords, gt_chips, gt_classes = get_labels(path_groundtruth)

    gt_unique = np.unique(gt_classes.astype(np.int64))
    max_gt_cls = 100

    if set(pchips).issubset(set(gt_unique)):
        raise ValueError(
            'The prediction files {%s} are not in the ground truth.' %
            str(set(pchips) - (set(gt_unique))))

    print("Number of Predictions: %d" % num_preds)
    print("Number of GT: %d" % np.sum(gt_classes.shape))

    per_file_class_data = {}
    for i in gt_unique:
        per_file_class_data[i] = [[], []]

    num_gt_per_cls = np.zeros((max_gt_cls))

    for file_ind in range(len(pchips)):
        print(pchips[file_ind])
        det_box = boxes_dict[pchips[file_ind]][:, :4]
        det_scores = boxes_dict[pchips[file_ind]][:, 5]
        det_cls = boxes_dict[pchips[file_ind]][:, 4]

        gt_box = gt_coords[(gt_chips == pchips[file_ind]).flatten()]
        gt_cls = gt_classes[(gt_chips == pchips[file_ind])]

        for i in gt_unique:
            gt_box_i_cls = gt_box[gt_cls == i].flatten().tolist()
            det_box_i_cls = det_box[det_cls == i].flatten().tolist()

            gt_rects = convert_to_rectangle_list(gt_box_i_cls)
            rects = convert_to_rectangle_list(det_box_i_cls)

            matching = Matching(gt_rects, rects)
            rects_matched, gt_matched = matching.greedy_match(iou_threshold)

            #we aggregate confidence scores, rectangles, and num_gt across classes
            per_file_class_data[i][0] += det_scores[det_cls == i].tolist()
            per_file_class_data[i][1] += rects_matched
            num_gt_per_cls[i] += len(gt_matched)

    average_precision_per_class = np.ones(max_gt_cls) * float('nan')
    per_class_p = np.ones(max_gt_cls) * float('nan')
    per_class_r = np.ones(max_gt_cls) * float('nan')

    for i in gt_unique:
        scores = np.array(per_file_class_data[i][0])
        rects_matched = np.array(per_file_class_data[i][1])

        if num_gt_per_cls[i] != 0:
            sorted_indices = np.argsort(scores)[::-1]
            tp_sum = np.cumsum(rects_matched[sorted_indices])
            fp_sum = np.cumsum(np.logical_not(rects_matched[sorted_indices]))
            precision = tp_sum / (tp_sum + fp_sum + np.spacing(1))
            recall = tp_sum / num_gt_per_cls[i]
            per_class_p[i] = np.sum(rects_matched) / len(rects_matched)
            per_class_r[i] = np.sum(rects_matched) / num_gt_per_cls[i]
            ap = ap_from_pr(precision, recall)
        else:
            ap = float('nan')
        average_precision_per_class[i] = ap

    #metric splits
    metric_keys = [
        'map', 'map/small', 'map/medium', 'map/large', 'map/common', 'map/rare'
    ]

    splits = {
        'map/small': [
            17, 18, 19, 20, 21, 23, 24, 26, 27, 28, 32, 41, 60, 62, 63, 64, 65,
            66, 91
        ],
        'map/medium': [
            11, 12, 15, 25, 29, 33, 34, 35, 36, 37, 38, 42, 44, 47, 50, 53, 56,
            59, 61, 71, 72, 73, 76, 84, 86, 93, 94
        ],
        'map/large': [13, 40, 45, 49, 51, 52, 54, 55, 57, 74, 77, 79, 83, 89],
        'map/common': [
            13, 17, 18, 19, 20, 21, 23, 24, 25, 26, 27, 28, 34, 35, 41, 47, 60,
            63, 64, 71, 72, 73, 76, 77, 79, 83, 86, 89, 91
        ],
        'map/rare': [
            11, 12, 15, 29, 32, 33, 36, 37, 38, 40, 42, 44, 45, 49, 50, 51, 52,
            53, 54, 55, 56, 57, 59, 61, 62, 65, 66, 74, 84, 93, 94
        ]
    }

    vals = {}
    vals['map'] = np.nanmean(average_precision_per_class)
    vals['map_score'] = np.nanmean(per_class_p)
    vals['mar_score'] = np.nanmean(per_class_r)

    for i in splits.keys():
        vals[i] = np.nanmean(average_precision_per_class[splits[i]])

    for i in gt_unique:
        vals[int(i)] = average_precision_per_class[int(i)]

    vals['f1'] = 2 / ((1 / (np.spacing(1) + vals['map_score'])) +
                      (1 / (np.spacing(1) + vals['mar_score'])))

    print("mAP: %f | mAP score: %f | mAR: %f | F1: %f" %
          (vals['map'], vals['map_score'], vals['mar_score'], vals['f1']))

    with open(path_output + '/score.txt', 'w') as f:
        f.write(str("%.8f" % vals['map']))

    with open(path_output + '/metrics.txt', 'w') as f:
        for key in vals.keys():
            f.write("%s %f\n" % (str(key), vals[key]))

    print("Final time: %s" % str(time.time() - ttime))
Example No. 22
    for gen in genParticleHandle.product():
        if (abs(gen.pdgId()) == 443 and gen.numberOfDaughters() == 2
                and (abs(gen.daughter(0).pdgId()) == 13
                     or abs(gen.daughter(0).pdgId()) == 1)):
            jpsi_idx = jpsi_idx + 1
            genJpsis.append(gen)
            print "genJpsi pt : %f , eta %f, phi %f" % (gen.pt(), gen.eta(),
                                                        gen.phi())
    if jpsi_idx == 0: continue
    V1 = V1Handle.product()
    V2 = V2Handle.product()
    V3 = V3Handle.product()
    V40 = V40Handle.product()
    V41 = V41Handle.product()
    V42 = V42Handle.product()
    mV1 = Matching(genJpsis, V1)
    mV2 = Matching(genJpsis, V2)
    mV3 = Matching(genJpsis, V3)
    mV40 = Matching(genJpsis, V40)
    mV41 = Matching(genJpsis, V41)
    mV42 = Matching(genJpsis, V42)

    #print "genJpsi : %d // Size of J/psi v1  : %d  // j/psi v2 : %d  // j/psi v3 :  %d  // j/psi v4 :  %d"%(jpsi_idx, len(V1), len(V2),len(V3), len(V4))
    if (len(mV1) > 0): profile1.Fill(1, 1)
    else: profile1.Fill(1, 0)
    if (len(mV2) > 0): profile1.Fill(2, 1)
    else: profile1.Fill(2, 0)
    if (len(mV3) > 0): profile1.Fill(3, 1)
    else: profile1.Fill(3, 0)
    if (len(mV40) > 0): profile1.Fill(4, 1)
    else: profile1.Fill(4, 0)
Example No. 23
 def solve(self):
     self._matching = Matching(
         mentor_mentee(self.mentees, self.mentors, "mentor"))
     return self.matching
Example No. 24
def score(path_predictions, path_groundtruth, path_output, iou_threshold=.4):

    assert (iou_threshold < 1 and iou_threshold > 0)

    ttime = time.time()
    boxes_dict = {}
    pchips = []
    stclasses = []
    num_preds = 0

    for file in tqdm(os.listdir(path_predictions)):
        fname = file.split(".txt")[0]
        pchips.append(fname)

        with open(path_predictions + file, 'r') as f:
            arr = np.array(list(csv.reader(f, delimiter=" ")))
            if arr.shape[0] == 0:
                #If the file is empty, we fill it in with an array of zeros
                boxes_dict[fname] = np.array([[0, 0, 0, 0, 0, 0]])
                num_preds += 1
            else:
                arr = arr[:, :6].astype(np.float64)
                threshold = iou_threshold
                arr = arr[arr[:, 5] > threshold]
                stclasses += list(arr[:, 4])
                num_preds += arr.shape[0]

                if np.any(arr[:, :4] < 0):
                    raise ValueError('Bounding boxes cannot be negative.')

                if np.any(arr[:, 5] < 0) or np.any(arr[:, 5] > 1):
                    raise ValueError(
                        'Confidence scores should be between 0 and 1.')

                boxes_dict[fname] = arr[:, :6]

    pchips = sorted(pchips)
    stclasses = np.unique(stclasses).astype(np.int64)

    gt_coords, gt_chips, gt_classes = get_labels(path_groundtruth)
    gt_coords = gt_coords[gt_chips == '5.tif']
    gt_classes = gt_classes[gt_chips == '5.tif'].astype(np.int64)
    gt_chips = gt_chips[gt_chips == '5.tif']

    gt_unique = np.unique(gt_classes.astype(np.int64))
    print(gt_unique)
    max_gt_cls = 100

    if set(pchips).issubset(set(gt_unique)):
        raise ValueError(
            'The prediction files {%s} are not in the ground truth.' %
            str(set(pchips) - (set(gt_unique))))

    print("Number of Predictions: %d" % num_preds)
    print("Number of GT: %d" % np.sum(gt_classes.shape))

    per_file_class_data = {}
    for i in gt_unique:
        per_file_class_data[i] = [[], []]

    num_gt_per_cls = np.zeros((max_gt_cls))

    for file_ind in range(len(pchips)):
        print(pchips[file_ind])
        det_box = boxes_dict[pchips[file_ind]][:, :4]
        det_scores = boxes_dict[pchips[file_ind]][:, 5]
        det_cls = boxes_dict[pchips[file_ind]][:, 4]

        gt_box = gt_coords[(gt_chips == pchips[file_ind]).flatten()]

        gt_cls = gt_classes[(gt_chips == pchips[file_ind])]

        for i in gt_unique:
            s = det_scores[det_cls == i]
            ssort = np.argsort(s)[::-1]
            per_file_class_data[i][0] += s[ssort].tolist()
            gt_box_i_cls = gt_box[gt_cls == i].flatten().tolist()
            det_box_i_cls = det_box[det_cls == i]
            det_box_i_cls = det_box_i_cls[ssort].flatten().tolist()

            gt_rects = convert_to_rectangle_list(gt_box_i_cls)
            rects = convert_to_rectangle_list(det_box_i_cls)

            matching = Matching(gt_rects, rects)

            rects_matched, gt_matched = matching.greedy_match(iou_threshold)

            #we aggregate confidence scores, rectangles, and num_gt across classes
            #per_file_class_data[i][0] += det_scores[det_cls == i].tolist()
            per_file_class_data[i][1] += rects_matched
            num_gt_per_cls[i] += len(gt_matched)

    average_precision_per_class = np.ones(max_gt_cls) * float('nan')
    per_class_p = np.ones(max_gt_cls) * float('nan')
    per_class_r = np.ones(max_gt_cls) * float('nan')

    for i in gt_unique:
        scores = np.array(per_file_class_data[i][0])
        rects_matched = np.array(per_file_class_data[i][1])

        if num_gt_per_cls[i] != 0:
            sorted_indices = np.argsort(scores)[::-1]
            tp_sum = np.cumsum(rects_matched[sorted_indices])
            fp_sum = np.cumsum(np.logical_not(rects_matched[sorted_indices]))
            precision = tp_sum / (tp_sum + fp_sum + np.spacing(1))
            recall = tp_sum / num_gt_per_cls[i]
            per_class_p[i] = np.sum(rects_matched) / len(rects_matched)
            per_class_r[i] = np.sum(rects_matched) / num_gt_per_cls[i]
            ap = ap_from_pr(precision, recall)
        else:
            ap = float('nan')
        average_precision_per_class[i] = ap

    #metric splits
    metric_keys = [
        'map', 'map/small', 'map/medium', 'map/large', 'map/common', 'map/rare'
    ]

    splits = {
        'map/small': [
            17, 18, 19, 20, 21, 23, 24, 26, 27, 28, 32, 41, 60, 62, 63, 64, 65,
            66, 91
        ],
        'map/medium': [
            11, 12, 15, 25, 29, 33, 34, 35, 36, 37, 38, 42, 44, 47, 50, 53, 56,
            59, 61, 71, 72, 73, 76, 84, 86, 93, 94
        ],
        'map/large': [13, 40, 45, 49, 51, 52, 54, 55, 57, 74, 77, 79, 83, 89],
        'map/common': [
            13, 17, 18, 19, 20, 21, 23, 24, 25, 26, 27, 28, 34, 35, 41, 47, 60,
            63, 64, 71, 72, 73, 76, 77, 79, 83, 86, 89, 91
        ],
        'map/rare': [
            11, 12, 15, 29, 32, 33, 36, 37, 38, 40, 42, 44, 45, 49, 50, 51, 52,
            53, 54, 55, 56, 57, 59, 61, 62, 65, 66, 74, 84, 93, 94
        ]
    }

    vals = {}
    vals['map'] = np.nanmean(average_precision_per_class)
    vals['map_score'] = np.nanmean(per_class_p)
    vals['mar_score'] = np.nanmean(per_class_r)

    for i in splits.keys():
        vals[i] = np.nanmean(average_precision_per_class[splits[i]])

    for i in gt_unique:
        vals[int(i)] = average_precision_per_class[int(i)]

    vals['f1'] = 2 / ((1 / (np.spacing(1) + vals['map_score'])) +
                      (1 / (np.spacing(1) + vals['mar_score'])))

    print("mAP: %f | mAP score: %f | mAR: %f | F1: %f" %
          (vals['map'], vals['map_score'], vals['mar_score'], vals['f1']))

    with open(path_output + '/score.txt', 'w') as f:
        f.write(str("%.4f" % vals['map']))

    result = []
    with open(path_output + '/metrics.txt', 'w') as f:
        for key in vals.keys():
            f.write("%s %.4f\n" % (str(key), vals[key]))
            result.append(
                str(key) + " " + str(round(float(vals[key]), 4)) + "\n")
    result = sorted(result)
    print("Final time: %s" % str(time.time() - ttime))
    return result
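
Both score() variants build per-class precision and recall from cumulative TP/FP counts over score-sorted detections. A small worked example with hypothetical match flags:

import numpy as np

# 1 = detection matched a ground-truth box, 0 = false positive,
# already sorted by descending confidence.
rects_matched = np.array([1, 1, 0, 1, 0])
num_gt = 4

tp_sum = np.cumsum(rects_matched)                  # [1 2 2 3 3]
fp_sum = np.cumsum(np.logical_not(rects_matched))  # [0 0 1 1 2]
precision = tp_sum / (tp_sum + fp_sum + np.spacing(1))
recall = tp_sum / num_gt

print(precision.round(2))  # approx. [1.   1.   0.67 0.75 0.6 ]
print(recall)              # [0.25 0.5  0.5  0.75 0.75]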
Example No. 25
def test_getitem():
    """ Check that you can access items in a Matching correctly. """

    matching = Matching(dictionary)
    for key, val in matching.items():
        assert matching[key] == val
Example No. 26
    def solve(self):
        """Solve the instance of SR using Irving's algorithm. Return the
        matching."""

        self.matching = Matching(stable_roommates(self.players))
        return self.matching
Example No. 27
    def __init__(self, men, women):
        self.matching = Matching(men, women)

        pass
def compute_average_precision_recall(groundtruth_coordinates, coordinates,
                                     iou_threshold):
    """Computes the average precision (AP) and average recall (AR).

  Args:
      groundtruth_info_dict: the groundtruth_info_dict holds all the groundtruth
        information for an evaluation dataset. The format of this groundtruth_info_dict is
        as follows:
        {'image_id_0':
         [xmin_0,ymin_0,xmax_0,ymax_0,...,xmin_N0,ymin_N0,xmax_N0,ymax_N0],
         ...,
         'image_id_M':
         [xmin_0,ymin_0,xmax_0,ymax_0,...,xmin_NM,ymin_NM,xmax_NM,ymax_NM]},
        where
          image_id_* is an image_id that has the groundtruth rectangles labeled.
          xmin_*,ymin_*,xmax_*,ymax_* is the top-left and bottom-right corners
            of one groundtruth rectangle.

      test_info_dict: the test_info_dict holds all the test information for an
        evaluation dataset.
         The format of this test_info_dict is the same
        as the above groundtruth_info_dict.

      iou_threshold_range: the IOU threshold range to compute the average
        precision (AP) and average recall (AR). For example:
        iou_threshold_range = [0.50:0.05:0.95]
  Returns:
      average_precision, average_recall, as well as the precision_recall_dict,
      where precision_recall_dict holds the full precision/recall information
      for each of the iou_threshold in the iou_threshold_range.
  Raises:
      ValueError: if the input groundtruth_info_dict and test_info_dict show
      inconsistent information.
  """

    # Start to build up the Matching instances for each of the image_id_*, which
    # is to hold the IOU computation between the rectangle pairs for the same
    # image_id_*.
    matchings = {}
    if (len(groundtruth_coordinates) % 4 != 0) or (len(coordinates) % 4 != 0):
        raise ValueError(
            'groundtruth_info_dict and test_info_dict should hold '
            'only 4 * N numbers.')

    groundtruth_rects = convert_to_rectangle_list(groundtruth_coordinates)
    rects = convert_to_rectangle_list(coordinates)
    matching = Matching(groundtruth_rects, rects)

    image_statistics_list = []
    groundtruth_rects_matched, rects_matched = (
        matching.matching_by_greedy_assignment(iou_threshold))

    image_statistics = compute_statistics_given_rectangle_matches(
        groundtruth_rects_matched, rects_matched)
    image_statistics_list.append(image_statistics)

    # Compute the precision and recall under this iou_threshold.
    precision_recall = compute_precision_recall_given_image_statistics_list(
        iou_threshold, image_statistics_list)

    # Compute the average_precision and average_recall.
    #average_precision, average_recall = (
    #    compute_average_precision_recall_given_precision_recall_dict(
    #        precision_recall_dict))

    return precision_recall
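
A minimal illustration of the flat 4*N coordinate layout that compute_average_precision_recall expects (values hypothetical); convert_to_rectangle_list turns each group of four numbers into one rectangle:

groundtruth_coordinates = [10, 20, 110, 220,   # rectangle 1: xmin, ymin, xmax, ymax
                           35, 40, 80, 95]     # rectangle 2
assert len(groundtruth_coordinates) % 4 == 0
n_rects = len(groundtruth_coordinates) // 4    # -> 2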
def score(path_predictions, path_groundtruth, path_output, iou_threshold=.5):
    """
    Compute metrics on a number of prediction files, given a folder of prediction files
    and a ground truth.  Primary metric is mean average precision (mAP).

    Args:
        path_predictions: a folder path of prediction files.  
          Prediction files should have filename format 'XYZ.tif.txt',
          where 'XYZ.tif' is the xView TIFF file being predicted on.  
          Prediction files should be in space-delimited csv format, with each
          line like (xmin ymin xmax ymax class_prediction score_prediction)

        path_groundtruth: a file path to a single ground truth geojson

        path_output: a folder path for output scoring files

        iou_threshold: a float between 0 and 1 indicating the percentage
          iou required to count a prediction as a true positive

    Outputs:
      Writes two files to the 'path_output' parameter folder: 'score.txt' and 'metrics.txt'
      'score.txt' contains a single floating point value output: mAP
      'metrics.txt' contains the remaining metrics in per-line format (metric/class_num: score_float)

    Raises:
      ValueError: if there are files in the prediction folder that are not in the ground truth geojson.
        EG a prediction file is titled '15.tif.txt', but the file '15.tif' is not in the ground truth.

  """
    assert (iou_threshold < 1 and iou_threshold > 0)

    ttime = time.time()
    boxes_dict = {}
    pchips = []
    stclasses = []
    num_preds = 0

    # pchips: prediction txt
    for file in tqdm(os.listdir(path_predictions)):
        fname = file.split(".txt")[0]
        pchips.append(fname)
        # debug
        with open(path_predictions + file, 'r') as f:

            #arr = np.array(list(csv.reader(f,delimiter=" ")))

            # maybe not needed
            predict_list = list(csv.reader(f, delimiter=" "))
            new_list = remove_invalid_predictions(predict_list)
            arr = np.array(new_list)
            if arr.shape[0] == 0:
                #If the file is empty, we fill it in with an array of zeros
                boxes_dict[fname] = np.array([[0, 0, 0, 0, 0, 0]])
                num_preds += 1
            else:
                arr = arr[:, :6].astype(np.float64)
                # TODO: may adjust the threshold of scores that to be counted as valid predictions
                # default = 0
                # There should be a nms mode
                threshold = 0.4
                arr = arr[arr[:, 5] > threshold]
                stclasses += list(arr[:, 4])
                num_preds += arr.shape[0]

                if np.any(arr[:, :4] < 0):
                    raise ValueError('Bounding boxes cannot be negative.')

                if np.any(arr[:, 5] < 0) or np.any(arr[:, 5] > 1):
                    raise ValueError(
                        'Confidence scores should be between 0 and 1.')

                boxes_dict[fname] = arr[:, :6]

    pchips = sorted(pchips)
    stclasses = np.unique(stclasses).astype(np.int64)

    # debug
    #gt_coords, gt_chips, gt_classes = get_labels(path_groundtruth)
    gt_coords, gt_chips, gt_classes, _ = get_labels_w_uid_nondamaged(
        path_groundtruth)

    # TODO: add removing bboxes over clouds manually or / test images should not contain any black chips

    gt_unique = np.unique(gt_classes.astype(np.int64))
    #debug
    print('gt_unique: ', gt_unique)
    max_gt_cls = 100  # max number of classes
    # debug
    # need to remove class 0 from evaluation
    ignored_classes = [0]
    gt_unique_ig = np.array(
        [i for i in gt_unique if int(i) not in ignored_classes],
        dtype=np.int64)

    #added
    # get statistics of ground truth
    num_gt_class = dict()
    for i in gt_unique:
        num_gt_class[i] = gt_classes[gt_classes == i].shape[0]

    if set(pchips).issubset(set(gt_unique_ig)):
        raise ValueError(
            'The prediction files {%s} are not in the ground truth.' %
            str(set(pchips) - (set(gt_unique))))

    #print("Number of Predictions: %d" % num_preds)
    #print("Number of GT: %d" % np.sum(gt_classes.shape) )

    per_file_class_data = {}
    for i in gt_unique_ig:
        per_file_class_data[i] = [[], []]

    num_gt_per_cls = np.zeros((max_gt_cls))

    for file_ind in range(len(pchips)):
        print(pchips[file_ind])
        det_box = boxes_dict[pchips[file_ind]][:, :4]
        det_scores = boxes_dict[pchips[file_ind]][:, 5]
        det_cls = boxes_dict[pchips[file_ind]][:, 4]

        gt_box = gt_coords[(gt_chips == pchips[file_ind]).flatten()]
        gt_cls = gt_classes[(gt_chips == pchips[file_ind])]

        for i in gt_unique:
            s = det_scores[det_cls == i]
            ssort = np.argsort(s)[::-1]
            per_file_class_data[i][0] += s[ssort].tolist()

            gt_box_i_cls = gt_box[gt_cls == i].flatten().tolist()
            det_box_i_cls = det_box[det_cls == i]
            det_box_i_cls = det_box_i_cls[ssort].flatten().tolist()

            gt_rects = convert_to_rectangle_list(gt_box_i_cls)
            rects = convert_to_rectangle_list(det_box_i_cls)

            matching = Matching(gt_rects, rects)
            rects_matched, gt_matched = matching.greedy_match(iou_threshold)
            # debug
            print('len(gt_matched): ', len(gt_matched))
            print('len(rects_matched): ', len(rects_matched))
            #print('rects_matched: ', rects_matched)

            #we aggregate confidence scores, rectangles, and num_gt across classes
            #per_file_class_data[i][0] += det_scores[det_cls == i].tolist()
            per_file_class_data[i][1] += rects_matched
            num_gt_per_cls[i] += len(gt_matched)

    average_precision_per_class = np.ones(max_gt_cls) * float('nan')
    per_class_p = np.ones(max_gt_cls) * float('nan')
    per_class_r = np.ones(max_gt_cls) * float('nan')

    # debug
    # need to remove class 0 from evaluation
    ignored_classes = [0]
    gt_unique_ig = np.array(
        [i for i in gt_unique if int(i) not in ignored_classes],
        dtype=np.int64)

    for i in gt_unique_ig:
        scores = np.array(per_file_class_data[i][0])
        rects_matched = np.array(per_file_class_data[i][1])

        if num_gt_per_cls[i] != 0:
            sorted_indices = np.argsort(scores)[::-1]
            tp_sum = np.cumsum(rects_matched[sorted_indices])
            fp_sum = np.cumsum(np.logical_not(rects_matched[sorted_indices]))
            # calculated using confidence scores of the bboxes that have confidence score > 0.5 (or some other threshold)
            precision = tp_sum / (tp_sum + fp_sum + np.spacing(1))
            recall = tp_sum / num_gt_per_cls[i]
            # debug
            # per_class_precision: @IOU >= 0.5, # of correctly identified bboxes / all predicted boxes
            per_class_p[i] = np.sum(rects_matched) / len(rects_matched)
            per_class_r[i] = np.sum(rects_matched) / num_gt_per_cls[i]
            ap = ap_from_pr(precision, recall)

            # added
            print('for class: ', i)
            print('TP: ', tp_sum[-1])
            print('FP: ', fp_sum[-1])

        else:
            ap = float('nan')
        average_precision_per_class[i] = ap

    # debug
    #metric splits
    #metric_keys = ['map','map/small','map/medium','map/large',
    #'map/common','map/rare']

    metric_keys = ['map']
    '''
  splits = {
  'map/small': [17, 18, 19, 20, 21, 23, 24, 26, 27, 28, 32, 41, 60,
                   62, 63, 64, 65, 66, 91],
  'map/medium': [11, 12, 15, 25, 29, 33, 34, 35, 36, 37, 38, 42, 44,
                  47, 50, 53, 56, 59, 61, 71, 72, 73, 76, 84, 86, 93, 94],
  'map/large': [13, 40, 45, 49, 51, 52, 54, 55, 57, 74, 77, 79, 83, 89],

  'map/common': [13,17,18,19,20,21,23,24,25,26,27,28,34,35,41,
                  47,60,63,64,71,72,73,76,77,79,83,86,89,91],
  'map/rare': [11,12,15,29,32,33,36,37,38,40,42,44,45,49,50,
                  51,52,53,54,55,56,57,59,61,62,65,66,74,84,93,94]
  }
  '''
    vals = {}
    vals['map'] = np.nanmean(average_precision_per_class)
    vals['map_score'] = np.nanmean(per_class_p)
    vals['mar_score'] = np.nanmean(per_class_r)
    '''
  for i in splits.keys():
    vals[i] = np.nanmean(average_precision_per_class[splits[i]])
  '''

    for i in gt_unique:
        vals[int(i)] = average_precision_per_class[int(i)]

    vals['f1'] = 2 / ((1 / (np.spacing(1) + vals['map_score'])) +
                      (1 / (np.spacing(1) + vals['mar_score'])))

    #print("mAP: %f | mAP score: %f | mAR: %f | F1: %f" %
    print("mAP: %f | mean precision: %f | mean recall: %f | F1: %f" %
          (vals['map'], vals['map_score'], vals['mar_score'], vals['f1']))

    with open(path_output + '/score.txt', 'w') as f:
        f.write(str("%.8f" % vals['map']))

    with open(path_output + '/metrics.txt', 'w') as f:
        for key in vals.keys():
            f.write("%s %f\n" % (str(key), vals[key]))

    # added
    print('counting score threshold larger than %s as valid prediction' %
          str(threshold))
    for k, v in num_gt_class.items():
        print('ground truth class: ', k)
        print('the count of GT labels: ', v)

    print("Number of Predictions: %d" % num_preds)
    print("Number of GT: %d" % np.sum(gt_classes.shape))

    print("Final time: %s" % str(time.time() - ttime))