Example #1
def validation_epoch_end(self, outputs: List[Tuple[List[MetadataDict], List[MetadataDict]]]) -> None:
    predictions, targets = self._restructure_validation_outputs(outputs)
    mAP, ap_per_class = mean_average_precision_for_boxes(
        targets,
        predictions,
        iou_threshold=self.conf.evaluation.iou_threshold,
        verbose=False
    )
    # TODO: Add DDP support
    self.log('mAP', mAP, prog_bar=True)
    self.log_ap_per_class(ap_per_class)
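The _restructure_validation_outputs helper is not shown in this example. A minimal, hypothetical sketch of what it might do, assuming each MetadataDict carries 'image_id', 'label', 'score' and corner keys (all names here are assumptions, not the author's actual implementation):

def _restructure_validation_outputs(self, outputs):
    # Flatten the per-batch (predictions, targets) pairs into the flat row
    # format mean_average_precision_for_boxes expects: targets as
    # (image_id, label, x_min, x_max, y_min, y_max) and predictions with an
    # extra confidence column after the label.
    predictions, targets = [], []
    for batch_preds, batch_targets in outputs:
        for p in batch_preds:
            predictions.append([p['image_id'], p['label'], p['score'],
                                p['x_min'], p['x_max'], p['y_min'], p['y_max']])
        for t in batch_targets:
            targets.append([t['image_id'], t['label'],
                            t['x_min'], t['x_max'], t['y_min'], t['y_max']])
    return predictions, targets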
Example #2
import operator

from map_boxes import mean_average_precision_for_boxes


def zfturbo_compute_mAP(normalized_gt_df,
                        normalized_pred_df,
                        id_to_label,
                        gt_class_id="class_id",
                        pred_class_id="label",
                        verbose=False):
    """
    :gt: is ground truth dataframe. The co-ordinates are normalized based on image height and
    width using normalize_bb()
    :pred: is a prediction dataframe. The co-ordinates are normalized based on image height and
    width using normalize_bb()
    :id_to_label: maps the ids in gt and pred to string labels
    :return: tuple, where first value is mAP and second values is dict with AP for each class.
    """
    # create copies
    normalized_gt_df_copy = normalized_gt_df.copy(deep=True)
    normalized_pred_df_copy = normalized_pred_df.copy(deep=True)

    # map numeric ids to string labels
    # https://stackoverflow.com/questions/22100130/pandas-replace-multiple-values-one-column
    normalized_gt_df_copy[gt_class_id] = normalized_gt_df_copy[
        gt_class_id].astype(int).map(id_to_label)
    normalized_pred_df_copy[pred_class_id] = normalized_pred_df_copy[
        pred_class_id].astype(int).map(id_to_label)

    # convert dataframe to numpy array format as required by package
    normalized_gt_df_np = normalized_gt_df_copy[[
        "image_id", gt_class_id, "x_min", "x_max", "y_min", "y_max"
    ]].values

    normalized_pred_df_np = normalized_pred_df_copy[[
        "image_id", pred_class_id, "confidence_score", "x_min", "x_max",
        "y_min", "y_max"
    ]].values

    # compute mAP
    mean_ap, average_precisions = mean_average_precision_for_boxes(
        normalized_gt_df_np,
        normalized_pred_df_np,
        iou_threshold=0.4,
        exclude_not_in_annotations=True,
        verbose=verbose)

    return mean_ap, sorted(average_precisions.items(),
                           key=operator.itemgetter(1),
                           reverse=True)
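The normalize_bb() mentioned in the docstring is not part of this example. A minimal sketch of what such a normalizer could look like, assuming the dataframe carries per-row 'width' and 'height' columns (both names are assumptions):

def normalize_bb(df):
    # scale pixel coordinates into [0, 1] by the image dimensions,
    # matching the normalized format used throughout these examples
    df = df.copy(deep=True)
    df['x_min'] = df['x_min'] / df['width']
    df['x_max'] = df['x_max'] / df['width']
    df['y_min'] = df['y_min'] / df['height']
    df['y_max'] = df['y_max'] / df['height']
    return df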
Example #3
import numpy as np
import pandas as pd
from map_boxes import mean_average_precision_for_boxes


def validate_v1(annotations, predictions, verbose=True):
    thr_value = 0.4
    num_classes = 15
    if isinstance(annotations, str):
        ann = pd.read_csv(annotations)
    else:
        ann = annotations
    if isinstance(predictions, str):
        pr = pd.read_csv(predictions)
    else:
        pr = predictions

    # Fix different IDs
    unique_preds = pr['image_id'].unique()
    if verbose:
        print('Prediction Ids: {}'.format(len(unique_preds)))
    unique_ann = ann['id'].unique()
    if verbose:
        print('Annotation Ids: {}'.format(len(unique_ann)))
    ann = ann[ann['id'].isin(unique_preds)]
    unique_ann = ann['id'].unique()
    if verbose:
        print('Reduced annotation Ids: {}'.format(len(unique_ann)))

    ann = prepare_annotations(ann)
    pr = prepare_predictions(pr)
    mean_ap, average_precisions = mean_average_precision_for_boxes(
        ann, pr, iou_threshold=thr_value, verbose=verbose)
    ap = np.zeros(num_classes, dtype=np.float32)  # 'ap' avoids shadowing the builtin map
    for i in range(num_classes):
        try:
            if verbose:
                print('Class: {:2d} Entries: {:5d} AP: {:.6f}'.format(
                    i, int(average_precisions[str(i)][1]),
                    average_precisions[str(i)][0]))
            ap[i] = average_precisions[str(i)][0]
        except KeyError:
            if verbose:
                print('No class found: {}'.format(i))
            ap[i] = 0
    map_no_last_class = ap[:-1].mean()
    if verbose:
        print('mAP value: {:.6f}'.format(mean_ap))
        print('mAP value no last class: {:.6f}'.format(map_no_last_class))
    return map_no_last_class
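prepare_annotations and prepare_predictions are not shown. Hypothetical sketches, assuming both frames keep class ids in a 'class_id' column and predictions keep confidences in 'score' (only 'id' and 'image_id' appear in the snippet above; everything else is a guess):

def prepare_annotations(ann):
    # rows become (image_id, label, x_min, x_max, y_min, y_max); labels must be
    # strings because validate_v1 looks AP values up via str(i)
    ann = ann.copy()
    ann['class_id'] = ann['class_id'].astype(int).astype(str)
    return ann[['id', 'class_id', 'x_min', 'x_max', 'y_min', 'y_max']].values

def prepare_predictions(pr):
    # prediction rows carry an extra confidence column after the label
    pr = pr.copy()
    pr['class_id'] = pr['class_id'].astype(int).astype(str)
    return pr[['image_id', 'class_id', 'score',
               'x_min', 'x_max', 'y_min', 'y_max']].values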
Example #4
import pandas as pd
from map_boxes import mean_average_precision_for_boxes


def get_mean_average_precision(
        annotation_path,
        predictions_path,
        iou_threshold=0.4,
        meta_path='/home/semyon/data/VinBigData/train_meta.csv',
        verbose=False):
    '''
    :param annotation_path: path to a .csv (or an already-loaded DataFrame) with columns
        ['image_id', 'class_name', 'x_min', 'x_max', 'y_min', 'y_max']
    :param predictions_path: path to a .csv (or DataFrame) with columns
        ['image_id', 'class_name', 'rad_id', 'x_min', 'x_max', 'y_min', 'y_max'],
        where 'rad_id' contains the confidence score
    '''
    if isinstance(annotation_path, pd.DataFrame) and isinstance(
            predictions_path, pd.DataFrame):
        ann_df = annotation_path.copy()
        pred_df = predictions_path.copy()
    else:
        ann_df = pd.read_csv(annotation_path)
        pred_df = pd.read_csv(predictions_path)

    meta_df = pd.read_csv(meta_path).set_index('image_id')

    # inplace norm coordinates
    norm_coordinates(ann_df, meta_df)
    norm_coordinates(pred_df, meta_df)

    # annotations
    new_cols = ['ImageID', 'LabelName', 'XMin', 'XMax', 'YMin', 'YMax']
    old_cols = ['image_id', 'class_name', 'x_min', 'x_max', 'y_min', 'y_max']
    for new_col_name, old_col_name in zip(new_cols, old_cols):
        ann_df[new_col_name] = ann_df[old_col_name]
    # predictions
    new_cols = ['ImageID', 'LabelName', 'Conf', 'XMin', 'XMax', 'YMin', 'YMax']
    old_cols = [
        'image_id', 'class_name', 'rad_id', 'x_min', 'x_max', 'y_min', 'y_max'
    ]
    for new_col_name, old_col_name in zip(new_cols, old_cols):
        pred_df[new_col_name] = pred_df[old_col_name]

    ann = ann_df[['ImageID', 'LabelName', 'XMin', 'XMax', 'YMin',
                  'YMax']].values
    pred = pred_df[[
        'ImageID', 'LabelName', 'Conf', 'XMin', 'XMax', 'YMin', 'YMax'
    ]].values
    mean_ap, average_precisions = mean_average_precision_for_boxes(
        ann, pred, iou_threshold=iou_threshold, verbose=verbose)
    return mean_ap, average_precisions
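norm_coordinates is applied in place above but not shown. A minimal sketch, assuming the meta frame (indexed by image_id) stores the original dimensions in 'height' and 'width' columns (the actual column names in train_meta.csv may differ):

def norm_coordinates(df, meta_df):
    # divide each box by its own image's dimensions, modifying df in place
    heights = meta_df.loc[df['image_id'], 'height'].values
    widths = meta_df.loc[df['image_id'], 'width'].values
    df['x_min'] /= widths
    df['x_max'] /= widths
    df['y_min'] /= heights
    df['y_max'] /= heights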
Example #5
def evaluate(*pred_df, outdir=evaldir, filtered=False, ensemble=False):
    ann = pd.read_csv(outdir / 'annotations_df.csv')
    if filtered:
        det = pd.read_csv(outdir / 'pred_df_filtered.csv')
    elif ensemble:
        preds = list(pred_df)
        ensemble_models(preds[0], preds[1], path=evaldir, evaluation=True)
        det = pd.read_csv(outdir / 'pred_df_ensembled.csv')
    else:
        det = pd.read_csv(outdir / 'pred_df.csv')

    ann = ann[['image_id', 'class_name', 'x_min', 'x_max', 'y_min',
               'y_max']].values
    det = det[[
        'image_id', 'class_name', 'score', 'x_min', 'x_max', 'y_min', 'y_max'
    ]].values
    mean_ap, average_precisions = mean_average_precision_for_boxes(
        ann, det, iou_threshold=0.4)
    return mean_ap, average_precisions
Example #6
File: train.py Project: shifop/yolo
                1. Add the top-left offsets to map boxes back to original-image coordinates
                2. Run non-maximum suppression a second time
                3. Merge adjacent boxes
                """

                # save in CSV format; the columns are re-selected by name into
                # map_boxes order (ImageID,LabelName,XMin,XMax,YMin,YMax) when read back
                with open('./model/%s/dev_%d_label.csv'%(save_f, global_steps),'w',encoding='utf-8') as f:
                    f.write('ImageID,LabelName,XMin,YMin,XMax,YMax\n')
                    for key in raw_label:
                        line = raw_label[key]
                        x, y = n2s[key][:2]  # image dimensions used to normalize the coordinates
                        for box in line:
                            f.write('%s,%s,%f,%f,%f,%f\n' % (key, i2n[int(box[-1])], box[0]/y, box[1]/x, box[2]/y, box[3]/x))


                # ImageID,LabelName,Conf,XMin,XMax,YMin,YMax
                with open('./model/%s/dev_%d.csv'%(save_f, global_steps),'w',encoding='utf-8') as f:
                    f.write('ImageID,LabelName,Conf,XMin,YMin,XMax,YMax\n')
                    for key in result:
                        line = result[key]
                        x, y = n2s[key][:2]
                        for box in line:
                            f.write('%s,%s,%f,%f,%f,%f,%f\n' % (key, i2n[int(box[-1])], box[-2], box[0]/y, box[1]/x, box[2]/y, box[3]/x))

                ann = pd.read_csv('./model/%s/dev_%d_label.csv'%(save_f, global_steps))
                det = pd.read_csv('./model/%s/dev_%d.csv'%(save_f, global_steps))
                ann = ann[['ImageID', 'LabelName', 'XMin', 'XMax', 'YMin', 'YMax']].values
                det = det[['ImageID', 'LabelName', 'Conf', 'XMin', 'XMax', 'YMin', 'YMax']].values
                mean_ap, average_precisions = mean_average_precision_for_boxes(ann, det, iou_threshold=0.5)
                print('')
Example #7
            save_prefix += '_' + str(params[el])
        save_prefix += '.csv'

        save_path1 = "{}/{}".format(params['out_folder'], save_prefix)
        if os.path.isfile(save_path1):
            print('File already exists: {}. Skip'.format(save_path1))
            continue

        ensemble_preds.to_csv(save_path1, index=False)
        ensemble_preds = ensemble_preds[[
            'img_id', 'label', 'score', 'x1', 'x2', 'y1', 'y2'
        ]].values
        ann_path = 'E:/Projects_M2/2019_06_Google_Open_Images/input/COCO2017/annotations/instances_val2017.csv'
        ann = pd.read_csv(ann_path, dtype={'img_id': str, 'label': str})
        ann_numpy = ann[['img_id', 'label', 'x1', 'x2', 'y1', 'y2']].values
        mean_ap, average_precisions = mean_average_precision_for_boxes(
            ann_numpy, ensemble_preds, iou_threshold=0.5, verbose=False)
        coco_preds, detections = convert_csv_predictions_to_coco(
            "{}/{}".format(params['out_folder'], save_prefix))
        print("Ensemble [{}] Weights: {} Params: {} mAP: {:.6f}".format(
            len(weights), weights, params, mean_ap))
        print("Coco preds: {}".format(coco_preds))

        out = open("{}/{}.txt".format(params['out_folder'], save_prefix[:-4]),
                   'w')
        out.write('{}\n'.format(pred_list))
        out.write('{}\n'.format(weights))
        out.write('{}\n'.format(params))
        out.write('{}\n'.format(mean_ap))
        out.write('{}\n'.format(coco_preds))
        out.close()
Example #8
    def annotation_validation(self, dataloader: TrainingAbnormalDataSet, _model: BaseModel) -> dict:
        from src.modeling.models.retinaNetFPN.retinaNetFPN import RetinaNetFPN

        model = RetinaNetFPN()
        model.load_state_dict(_model.state_dict())
        model.to(config.validation_device)
        model.eval()

        self.log.info("Beginning Validation")

        dataloader.display_metrics(dataloader.get_metrics())

        data = iter(DataLoader(dataloader, batch_size=config.batch_size, num_workers=4, collate_fn=self.collater))
        total = (len(dataloader) // config.batch_size) + 1

        # idx 0 == correct, idx 1 == incorrect
        stats = {
            'healthy': [0, 0],
            'abnormal': [0, 0]
        }

        labels = ['healthy', 'abnormal']

        det = []
        ann = []

        image_id = 0

        for _ in tqdm(range(total), desc="Validating the model"):
            batch = next(data)

            for ky, val in batch.items():
                # If we can, try to load up the batched data into the device (try to only send what is needed)
                if isinstance(batch[ky], torch.Tensor):
                    batch[ky] = batch[ky].to(config.validation_device)

            predictions = model(batch)

            for idx, pred in enumerate(predictions):
                annotation = batch['annotations'][idx]

                # map_boxes expects (XMin, XMax, YMin, YMax); assuming the model
                # emits boxes as (x1, y1, x2, y2), the coordinates are reordered here
                for p_idx in range(len(pred['boxes'])):
                    box = pred['boxes'][p_idx]
                    det.append([f'{image_id}', pred['labels'][p_idx].item(),
                                pred['scores'][p_idx].item(),
                                box[0].item() / 256.0, box[2].item() / 256.0,
                                box[1].item() / 256.0, box[3].item() / 256.0])

                for a_idx in range(len(annotation['boxes'])):
                    box = annotation['boxes'][a_idx]
                    ann.append([f'{image_id}',
                                torch.argmax(annotation['labels'][a_idx], 0).item(),
                                box[0].item() / 256.0, box[2].item() / 256.0,
                                box[1].item() / 256.0, box[3].item() / 256.0])

                image_id += 1

        for idx in range(len(ann)):
            ann[idx][1] = 'healthy' if ann[idx][1] == 0 else 'abnormal'
        for idx in range(len(det)):
            det[idx][1] = 'healthy' if det[idx][1] == 0 else 'abnormal'

        mean_ap, average_precisions = mean_average_precision_for_boxes(ann, det)
        self.log.info(f'mAP: {mean_ap:.6f}')

        # table = []
        # for stat in stats:
        #     table.append([stat, stats[stat][0], stats[stat][1]])
        #
        # self.log.info(f'\n-- Validation Report --\n{tabulate(table, headers=["Type", "Correct", "Incorrect"])}')

        return stats
Example #9
# coding:utf-8

import pandas as pd

from map_boxes import mean_average_precision_for_boxes

# load ground truth and detections first (the file paths are illustrative placeholders)
ann = pd.read_csv('annotations.csv')
det = pd.read_csv('detections.csv')

ann = ann[['ImageID', 'LabelName', 'XMin', 'XMax', 'YMin', 'YMax']].values
det = det[['ImageID', 'LabelName', 'Conf', 'XMin', 'XMax', 'YMin',
           'YMax']].values
mean_ap, average_precisions = mean_average_precision_for_boxes(ann, det)
print(mean_ap, average_precisions)
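average_precisions maps each label to an (AP, number of ground-truth boxes) pair (see Example #3 above), so ranking classes by AP takes only a few lines:

# print classes from best to worst AP
for label, (ap, n_boxes) in sorted(average_precisions.items(),
                                   key=lambda kv: kv[1][0], reverse=True):
    print('{}: AP={:.4f} ({} boxes)'.format(label, ap, int(n_boxes)))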
Example #10
def validate(cfg, Meta_data, split):
    notation_file = utils.load_obj(split)

    anns = []
    dets = []
    anns_medium = []
    dets_medium = []
    anns_small = []
    dets_small = []

    for i, image in enumerate(notation_file):
        print('Processing image {}/{}...'.format(i + 1, len(notation_file)))
        image_height = image['height']
        image_width = image['width']

        # processing annotations
        ground_truth = np.zeros(len(CATEGORIES))
        
        objs = image["annotations"]

        #print('processing annotations...')
        for obj in objs:
            class_id = obj['category_id']
            ann_image_id = image['file_name']
            ann_label_name = CATEGORIES[class_id]
            ann_bbox = obj['bbox'] # x0, y0, x1, y1
            ann_bbox[0] /= image_width
            ann_bbox[1] /= image_height
            ann_bbox[2] /= image_width
            ann_bbox[3] /= image_height    

            ann = [ann_image_id, ann_label_name, ann_bbox[0], ann_bbox[2], ann_bbox[1], ann_bbox[3]]
            #print(ann)
            anns.append(ann)

            if (ann_bbox[2] - ann_bbox[0]) >= 0.02:
                anns_medium.append(ann)
            else:
                anns_small.append(ann)
        
        # processing predictions
        file_name = image['file_name']
        img = cv2.imread(file_name)

        t0 = time.perf_counter()  # time.clock() was removed in Python 3.8
        outputs = predictor(img)
        t1 = time.perf_counter() - t0
        print('Execution time is {}'.format(t1))
        
        scores = outputs["instances"].scores.to('cpu').numpy()
        pred_classes = outputs["instances"].pred_classes.to('cpu').numpy()
        bbox = outputs["instances"].pred_boxes.tensor.to('cpu').numpy()

        N = scores.shape[0]

        #print("processing predictions...")
        for j in range(N):  # 'j' avoids shadowing the image index 'i' above
            det_image_id = image['file_name']
            class_id = pred_classes[j]
            det_label_name = CATEGORIES[class_id]
            det_bbox = bbox[j]  # x0, y0, x1, y1
            det_bbox[0] /= image_width
            det_bbox[1] /= image_height
            det_bbox[2] /= image_width
            det_bbox[3] /= image_height

            det = [det_image_id, det_label_name, scores[j], det_bbox[0], det_bbox[2], det_bbox[1], det_bbox[3]]
            #print(det)
            dets.append(det) 

            if (det_bbox[2] - det_bbox[0]) >= 0.02:
                dets_medium.append(det)
            else:
                dets_small.append(det)


    # ann = ann[['ImageID', 'LabelName', 'XMin', 'XMax', 'YMin', 'YMax']].values
    # det = det[['ImageID', 'LabelName', 'Conf', 'XMin', 'XMax', 'YMin', 'YMax']].values
    print("overall_metrics:")
    mean_ap, average_precisions = mean_average_precision_for_boxes(anns, dets)
    print("small_object_metrics:")
    mean_average_precision_for_boxes(anns_small, dets_small, obj_cate='small')
    print("medium_object_metrics:")
    mean_average_precision_for_boxes(anns_medium, dets_medium, obj_cate='medium')
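Note that obj_cate is not a keyword of the stock map_boxes release, so this example presumably runs against a locally modified copy. With the unmodified package, the same size breakdown can be produced by evaluating each bucket on its own:

# equivalent size-bucketed evaluation using only the stock API
for name, (a, d) in [('overall', (anns, dets)),
                     ('small', (anns_small, dets_small)),
                     ('medium', (anns_medium, dets_medium))]:
    print('{}_metrics:'.format(name))
    mean_average_precision_for_boxes(a, d)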
Example #11
"""
Author: Roman Solovyev, IPPM RAS
URL: https://github.com/ZFTurbo
"""

from map_boxes import mean_average_precision_for_boxes
import pandas as pd

if __name__ == '__main__':
    # Version 1
    annotations_file = 'example/annotations.csv'
    detections_file = 'example/detections.csv'
    mean_ap, average_precisions = mean_average_precision_for_boxes(annotations_file, detections_file)

    # Version 2
    ann = pd.read_csv('example/annotations.csv')
    det = pd.read_csv('example/detections.csv')
    ann = ann[['ImageID', 'LabelName', 'XMin', 'XMax', 'YMin', 'YMax']].values
    det = det[['ImageID', 'LabelName', 'Conf', 'XMin', 'XMax', 'YMin', 'YMax']].values
    mean_ap, average_precisions = mean_average_precision_for_boxes(ann, det)