# Example 1
    def get_img_props(self, detection_proposals, annotations):
        """Build training proposals for one image by matching detected
        human-object pairs against its ground-truth HOI annotations.

        A detected pair is confirmed for a ground-truth connection when both
        its human box and its object box overlap the corresponding
        ground-truth boxes with IoU >= 0.5. Pairs with no confirmed
        connection are discarded.

        Returns a list of [human_bbox, object_bbox, confirmed_hoi_classes]
        entries, one per detected pair that matched at least one annotation.
        """
        image_id = detection_proposals[0]  # not used below; kept for parity
        detected_pairs = detection_proposals[1]

        proposals = []

        for detected in detected_pairs:
            obj_name = detected.obj.object_name.replace(' ', '_')
            confirmed = []

            # Compare against every ground truth for the same object class.
            for gt in annotations.hoi_list:
                if gt.obj != obj_name:
                    continue
                for connection in gt.connections:
                    # Connections are 1-based (human_idx, object_idx) pairs.
                    human_slot = connection[0] - 1
                    object_slot = connection[1] - 1

                    iou_human = tools.compute_iou(detected.human.bbox,
                                                  gt.human_boxes[human_slot])
                    iou_object = tools.compute_iou(detected.obj.bbox,
                                                   gt.object_boxes[object_slot])
                    # Both boxes must overlap well enough.
                    if min(iou_human, iou_object) >= 0.5:
                        confirmed.append(gt.hoi_id.astype(np.int32))

            # Keep only pairs backed by at least one ground-truth connection.
            if confirmed:
                proposals.append([detected.human.bbox, detected.obj.bbox,
                                  confirmed])

        return proposals
# Example 2
def mark_true_false_positives(pred_boxes, gt_boxes, threshold_iou):
    """Flag every predicted box as true positive (TP) or false positive.

    Per image, predictions are visited in descending confidence order. A
    prediction is a TP when it overlaps a not-yet-claimed ground-truth box of
    the same class with IoU >= threshold_iou; each ground truth can be
    claimed at most once. Boxes are mutated in place via set_tp() and the
    per-image lists are re-sorted by confidence; the list is also returned.
    """
    for img_idx in range(len(pred_boxes)):
        preds = pred_boxes[img_idx]
        gts = gt_boxes[img_idx]
        claimed = []  # ground-truth indices already matched in this image
        preds.sort(key=operator.attrgetter('confidence'), reverse=True)
        for pred in preds:
            pred.set_tp(False)
            n_gt = len(gts)
            overlaps = np.zeros(n_gt)
            gt_classes = np.zeros(n_gt, dtype=np.int32)
            for g in range(n_gt):
                overlaps[g] = tools.compute_iou(pred.get_coords(),
                                                gts[g].get_coords())
                gt_classes[g] = gts[g].classid
            # Visit ground truths from best to worst overlap.
            order = np.argsort(-1 * overlaps)
            for g in order:
                if overlaps[g] < threshold_iou:
                    break  # remaining overlaps are even smaller
                if pred.classid == gt_classes[g] and g not in claimed:
                    pred.set_tp(True)
                    claimed.append(g)
                    break
    return pred_boxes
# Example 3
def assign_gt_to_anchors(classes, bboxes, path_to_data):
    """
    Assign each anchor its best-overlapping ground-truth object and write the
    per-image mask / delta / coord / class files under path_to_data.
    """
    for img in range(len(classes)):
        img_boxes = bboxes[img]
        # One row of anchor IoUs per ground-truth object.
        overlap_rows = [t.compute_iou(np.transpose(p.ANCHORS),
                                      np.transpose(gt_box))
                        for gt_box in img_boxes]

        # Best ground-truth object for each anchor, and best anchor for each
        # ground-truth object.
        best_obj_per_anchor = np.argmax(overlap_rows, axis=0)
        best_anchor_per_obj = np.argmax(overlap_rows, axis=1)

        # Mark the anchors that are responsible for some ground truth.
        mask = np.zeros([p.NR_ANCHORS_PER_IMAGE])
        mask[best_anchor_per_obj] = 1

        # Regression targets: assigned box coords and their deltas.
        coords = [img_boxes[obj] for obj in best_obj_per_anchor[:]]
        deltas = compute_deltas(coords)

        # One-hot class label per anchor, taken from its assigned object.
        img_classes = classes[img]
        assigned_class = np.array([img_classes[obj]
                                   for obj in best_obj_per_anchor[:]])
        label = np.zeros((p.NR_ANCHORS_PER_IMAGE, p.NR_CLASSES))
        label[np.arange(p.NR_ANCHORS_PER_IMAGE), assigned_class] = 1

        create_files(img, path_to_data + 'mask/', mask)
        create_files(img, path_to_data + 'delta/', deltas)
        create_files(img, path_to_data + 'coord/', coords)
        create_files(img, path_to_data + 'class/', label)
# Example 4
def non_maximum_suppression(boxes, threshold_nms):
    """Greedy NMS: discard any box that overlaps a higher-confidence box of
    the same class with IoU above threshold_nms.

    boxes is mutated (sorted ascending by confidence; suppressed boxes get
    confidence = -inf). Returns the list of surviving boxes.
    """
    total = len(boxes)
    boxes.sort(key=operator.attrgetter('confidence'))
    for low in range(total):
        for high in range(low + 1, total):
            # classid may be a float; treat ids within 0.5 as the same class.
            if np.abs(boxes[low].classid - boxes[high].classid) >= 0.5:
                continue
            overlap = tools.compute_iou(boxes[low].get_coords(),
                                        boxes[high].get_coords())
            if overlap > threshold_nms:
                assert boxes[low].confidence <= boxes[high].confidence, 'Suppressing boxes in reverse order in NMS'
                boxes[low].confidence = -np.inf
                break
    return [b for b in boxes if b.confidence != -np.inf]
# Example 5
def compute_mAP(predictions, labels, classnames, opts):
    """Compute the mean average precision (mAP) over all classes.

    Args:
        predictions: list (one entry per image) of predicted bounding boxes.
        labels: list (one entry per image) of ground-truth boxes.
        classnames: list of class names; its length fixes the class count.
        opts: options object; reads opts.threshold_iou, opts.mean_ap_opts
            and opts.outdir.

    Returns:
        The mAP (mean of the per-class average precisions) as a float.

    Fixes over the previous version: removed dead locals (`ini`/`lapse` and
    three per-class accumulator lists that were built but never read),
    merged the duplicated PredictionMatch if/else, and summed AP idiomatically.
    """
    logging.debug('Computing mean average precision...')
    initime = time.time()

    nclasses = len(classnames)
    nimages = len(predictions)

    # One list of PredictionMatch per class, accumulated across all images.
    predictions_matches = [[] for _ in range(nclasses)]
    # Total ground-truth object count per class (the recall denominator).
    nobj_allclasses = np.zeros(shape=(nclasses), dtype=np.int32)

    # Match predictions against ground truth, image by image, class by class.
    for i in range(nimages):
        predboxes_img = predictions[i]
        gtlist_img = labels[i]
        for cl in range(nclasses):
            gtlist_img_class = [box for box in gtlist_img if box.classid == cl]
            predboxes_img_class = [
                box for box in predboxes_img if box.classid == cl
            ]
            nobj_allclasses[cl] += len(gtlist_img_class)
            gt_used = []
            # Highest-confidence predictions get first pick of ground truths.
            predboxes_img_class.sort(key=operator.attrgetter('confidence'),
                                     reverse=True)
            for k in range(len(predboxes_img_class)):
                matches_gt = False
                iou_list = []
                iou_idx = []
                # IoU with every still-unclaimed ground-truth box.
                for l in range(len(gtlist_img_class)):
                    if l not in gt_used:
                        iou = tools.compute_iou(
                            predboxes_img_class[k].get_coords(),
                            gtlist_img_class[l].get_coords())
                        if iou >= opts.threshold_iou:
                            iou_list.append(iou)
                            iou_idx.append(l)
                if iou_list:
                    # Claim the best-overlapping unclaimed ground truth.
                    iou_array = np.array(iou_list)
                    ord_idx = np.argsort(-1 * iou_array)
                    iou_idx = np.array(iou_idx)[ord_idx]
                    for l in range(len(iou_idx)):
                        if iou_idx[l] not in gt_used:
                            gt_used.append(iou_idx[l])
                            matches_gt = True
                            break
                predictions_matches[cl].append(
                    PredictionMatch(predboxes_img_class[k].confidence,
                                    matches_gt))

    # Per-class average precision from the precision-recall curve.
    AP_allclasses = []
    for cl in range(nclasses):
        thresholds, recall, precision = precision_recall_curve(
            predictions_matches[cl], nobj_allclasses[cl])

        if len(recall) > 0:
            # Make precision monotonically non-increasing before interpolating.
            precision_rect = rectify_precision(precision)

            recall_interp, precision_interp = interpolate_pr_curve(
                precision_rect, recall, opts.mean_ap_opts)

            # AP is the mean of the interpolated precision values.
            AP = 1 / len(recall_interp) * np.sum(precision_interp)

            plot_pr_curve(recall, precision_rect, recall_interp,
                          precision_interp, thresholds, classnames[cl], AP,
                          opts.outdir, opts.mean_ap_opts)
        else:
            # No recall points (e.g. no predictions for this class): AP is 0.
            AP = 0

        AP_allclasses.append(AP)

    # Mean average precision over all classes:
    mAP = sum(AP_allclasses) / nclasses
    logging.info('Mean average precision: ' + str(mAP))

    fintime = time.time()
    logging.debug('mAP computed in %.2f s' % (fintime - initime))

    return mAP