Example #1
import numpy as np

def eval_revop(p):
    # revisited evaluation; `cfg` (with its ground-truth 'gnd' list) and
    # `compute_map` are assumed to be defined at module level
    gnd = cfg['gnd']

    # evaluate ranks
    ks = [1, 5, 10]

    # Medium protocol: search with easy & hard as positives (junk ignored)
    gnd_t = []
    for i in range(len(gnd)):
        g = {}
        g['ok'] = np.concatenate([gnd[i]['easy'], gnd[i]['hard']])
        g['junk'] = np.concatenate([gnd[i]['junk']])
        gnd_t.append(g)
    mapM, _, _, _ = compute_map(p, gnd_t, ks)

    # Hard protocol: search with only hard as positives (easy treated as junk)
    gnd_t = []
    for i in range(len(gnd)):
        g = {}
        g['ok'] = np.concatenate([gnd[i]['hard']])
        g['junk'] = np.concatenate([gnd[i]['junk'], gnd[i]['easy']])
        gnd_t.append(g)
    mapH, _, _, _ = compute_map(p, gnd_t, ks)
    # note: returns (Hard, Medium) in that order
    return np.around(mapH * 100, decimals=2), np.around(mapM * 100, decimals=2)
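The two gnd_t constructions above implement the Medium and Hard protocols of the revisited evaluation: positives go in 'ok', and anything to be ignored goes in 'junk'. A minimal self-contained sketch with made-up ground-truth indices:

import numpy as np

# Hypothetical ground truth for one query (indices invented for illustration).
gnd = [{'easy': np.array([3, 7]), 'hard': np.array([12]), 'junk': np.array([5])}]

# Medium: easy + hard are positives, junk is ignored.
medium = {'ok': np.concatenate([gnd[0]['easy'], gnd[0]['hard']]),
          'junk': gnd[0]['junk']}

# Hard: only hard is a positive; easy joins junk and is ignored.
hard = {'ok': gnd[0]['hard'],
        'junk': np.concatenate([gnd[0]['junk'], gnd[0]['easy']])}

print(medium['ok'])  # [ 3  7 12]
print(hard['ok'])    # [12]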
Example #2
import numpy as np

def eval_revop(p, silent=False):
    # revisited evaluation; `cfg`, `test_dataset`, and `compute_map` are
    # assumed to be defined at module level
    gnd = cfg['gnd']

    # evaluate ranks
    ks = [1, 5, 10]

    # Easy protocol: search with only easy as positives (hard treated as junk)
    gnd_t = []
    for i in range(len(gnd)):
        g = {}
        g['ok'] = np.concatenate([gnd[i]['easy']])
        g['junk'] = np.concatenate([gnd[i]['junk'], gnd[i]['hard']])
        gnd_t.append(g)
    mapE, apsE, mprE, prsE = compute_map(p, gnd_t, ks)

    # Medium protocol: search with easy & hard as positives (junk ignored)
    gnd_t = []
    for i in range(len(gnd)):
        g = {}
        g['ok'] = np.concatenate([gnd[i]['easy'], gnd[i]['hard']])
        g['junk'] = np.concatenate([gnd[i]['junk']])
        gnd_t.append(g)
    mapM, apsM, mprM, prsM = compute_map(p, gnd_t, ks)

    # Hard protocol: search with only hard as positives (easy treated as junk)
    gnd_t = []
    for i in range(len(gnd)):
        g = {}
        g['ok'] = np.concatenate([gnd[i]['hard']])
        g['junk'] = np.concatenate([gnd[i]['junk'], gnd[i]['easy']])
        gnd_t.append(g)
    mapH, apsH, mprH, prsH = compute_map(p, gnd_t, ks)
    if not silent:
        print('>> {}: mAP E: {}, M: {}, H: {}'.format(
            test_dataset, np.around(mapE * 100, decimals=2),
            np.around(mapM * 100, decimals=2), np.around(mapH * 100,
                                                         decimals=2)))
        print('>> {}: mP@k{} E: {}, M: {}, H: {}'.format(
            test_dataset, np.array(ks), np.around(mprE * 100, decimals=2),
            np.around(mprM * 100, decimals=2), np.around(mprH * 100,
                                                         decimals=2)))
    return np.around(mapH * 100, decimals=2)
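compute_map itself is never shown in these examples. As a rough illustration of the per-query quantity it averages, here is a self-contained average-precision function over a ranked list that skips junk entries; this is a plain interpolation-free AP, an assumption rather than the toolkit's exact formula:

import numpy as np

def ap_single_query(ranked_ids, ok, junk):
    # Junk images are removed from the ranking: they neither help nor hurt.
    ranked = np.array([r for r in ranked_ids if r not in set(junk)])
    hits = np.flatnonzero(np.isin(ranked, ok)) + 1  # 1-based ranks of positives
    if hits.size == 0:
        return 0.0
    # Precision at each positive, averaged over the positives found.
    return float(np.mean(np.arange(1, hits.size + 1) / hits))

# Made-up ranking for one query: junk image 5 is skipped, all three
# positives land at ranks 1-3, so AP is perfect.
print(ap_single_query([5, 3, 12, 7], ok=[3, 7, 12], junk=[5]))  # 1.0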
Example #3
import numpy as np

def simgle_img_map(model, output, labels):
    actual_num_labels = 0
    num_labels = 0  # initialize so the max() below is safe when no labels match
    all_detections = [[[
        np.array([]) for _ in range(model.params['num_classes'])
    ]]]

    # In our model, the detector returns the int 0 when there are no results
    if not isinstance(output, int):
        # Get predicted boxes, confidence scores and labels
        pred_boxes = output[:, 1:6].cpu().numpy()
        scores = output[:, 5].cpu().numpy()
        pred_labels = output[:, 7].cpu().numpy()
        # Order by confidence (note: np.argsort sorts in ascending order)
        sort_i = np.argsort(scores)
        pred_labels = pred_labels[sort_i]
        pred_boxes = pred_boxes[sort_i]
        for c in range(model.params['num_classes']):
            all_detections[0][-1][c] = pred_boxes[pred_labels == c]
    all_annotations = []
    for label_ in labels:
        all_annotations.append(
            [np.array([]) for _ in range(model.params['num_classes'])])

        if any(label_[:, -1] > 0):
            annotation_labels = label_[label_[:, -1] > 0, 0]
            _annotation_boxes = label_[label_[:, -1] > 0, 1:]
            num_labels = len(np.unique(annotation_labels))
            # Reformat to x1, y1, x2, y2 and rescale to image dim
            annotation_boxes = np.empty_like(_annotation_boxes)
            annotation_boxes[:, 0] = _annotation_boxes[:, 0] -\
                _annotation_boxes[:, 2] / 2
            annotation_boxes[:, 1] = _annotation_boxes[:, 1] -\
                _annotation_boxes[:, 3] / 2
            annotation_boxes[:, 2] = _annotation_boxes[:, 0] +\
                _annotation_boxes[:, 2] / 2
            annotation_boxes[:, 3] = _annotation_boxes[:, 1] +\
                _annotation_boxes[:, 3] / 2
            annotation_boxes *= model.params['height']

            for label in range(model.params['num_classes']):
                all_annotations[-1][label] =\
                    annotation_boxes[annotation_labels == label, :]
    # With train=True, compute_map returns (mAP, average_precisions);
    # otherwise it would also include map_frame
    actual_num_labels = np.max([actual_num_labels, num_labels])
    print(f"actual_num_labels : {actual_num_labels}")
    mAP, ap = compute_map(all_detections,
                          all_annotations,
                          conf_index=0,
                          map_frame=None,
                          train=True,
                          actual_num_labels=actual_num_labels,
                          params=model.params)
    return mAP, ap
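The inline arithmetic above converts YOLO-style (cx, cy, w, h) boxes to (x1, y1, x2, y2) corners and rescales them by the image height. Factored into a helper (the function name and scale argument are illustrative, not part of the original code):

import numpy as np

def cxcywh_to_xyxy(boxes, scale=1.0):
    # boxes: (N, 4) array of (cx, cy, w, h); returns (x1, y1, x2, y2)
    out = np.empty_like(boxes)
    out[:, 0] = boxes[:, 0] - boxes[:, 2] / 2  # x1 = cx - w/2
    out[:, 1] = boxes[:, 1] - boxes[:, 3] / 2  # y1 = cy - h/2
    out[:, 2] = boxes[:, 0] + boxes[:, 2] / 2  # x2 = cx + w/2
    out[:, 3] = boxes[:, 1] + boxes[:, 3] / 2  # y2 = cy + h/2
    return out * scale

print(cxcywh_to_xyxy(np.array([[0.5, 0.5, 0.2, 0.4]]), scale=416))
# [[166.4 124.8 249.6 291.2]]

Like the original, this multiplies x and y by the same factor, which implicitly assumes square input images.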
Example #4
import numpy as np

# `sim` is assumed to be a (num_database x num_queries) similarity matrix;
# negating it makes argsort yield descending-similarity rankings per query
ranks = np.argsort(-sim, axis=0)

# revisited evaluation
gnd = cfg['gnd']

# evaluate ranks
ks = [1, 5, 10]

# Easy protocol: search with only easy as positives (hard treated as junk)
gnd_t = []
for i in range(len(gnd)):
    g = {}
    g['ok'] = np.concatenate([gnd[i]['easy']])
    g['junk'] = np.concatenate([gnd[i]['junk'], gnd[i]['hard']])
    gnd_t.append(g)
mapE, apsE, mprE, prsE = compute_map(ranks, gnd_t, ks)

# Medium protocol: search with easy & hard as positives (junk ignored)
gnd_t = []
for i in range(len(gnd)):
    g = {}
    g['ok'] = np.concatenate([gnd[i]['easy'], gnd[i]['hard']])
    g['junk'] = np.concatenate([gnd[i]['junk']])
    gnd_t.append(g)
mapM, apsM, mprM, prsM = compute_map(ranks, gnd_t, ks)

# Hard protocol: search with only hard as positives (easy treated as junk)
gnd_t = []
for i in range(len(gnd)):
    g = {}
    g['ok'] = np.concatenate([gnd[i]['hard']])
    g['junk'] = np.concatenate([gnd[i]['junk'], gnd[i]['easy']])
    gnd_t.append(g)
mapH, apsH, mprH, prsH = compute_map(ranks, gnd_t, ks)
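As with the function-based variants above, this script-style snippet expects cfg and compute_map to already be in scope. A hedged sketch of how sim might be produced upstream from L2-normalized descriptors (all names and sizes below are invented for illustration):

import numpy as np

rng = np.random.default_rng(0)
vecs = rng.normal(size=(128, 50))   # 50 database descriptors, one per column
qvecs = rng.normal(size=(128, 3))   # 3 query descriptors
vecs /= np.linalg.norm(vecs, axis=0)
qvecs /= np.linalg.norm(qvecs, axis=0)

# Cosine similarity, then descending ranks per query, as in the snippet.
sim = vecs.T @ qvecs                # (num_database, num_queries)
ranks = np.argsort(-sim, axis=0)
print(ranks.shape)                  # (50, 3)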