def __init__(self, gts, results, num_coors=4):
    """
    Args:
        gts (str or COCO): DOTA-COCO compatible format; every img_info
            entry must contain a `crop` field.
        results (str or dict): sub-image detection results, stored in a
            file, laid out as
            results: {
                image_id: dets,  # image_id must be a valid id in anns
                image_id: dets,
                ...
            }
            dets: {
                category: [[coords, score], [...], ...],  # np.array
                category: [[coords, score], [...], ...],
                ...
            },
            coordinates may be given either as two points (top-left and
            bottom-right) or as four points.
        num_coors (int): 4 for top-left/bottom-right boxes,
            8 for four-point boxes.
    """
    assert num_coors in (4, 8), "unsupported box representation"
    self.coco = gts
    self.results = results
    if cvtools.is_str(gts):
        self.coco = cvtools.COCO(gts)
    if cvtools.is_str(results):
        self.results = cvtools.load_pkl(results)
    self.num_coors = num_coors
    self.img_to_results = {}  # holds the results returned by merge()
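# A minimal sketch (not from the source) of the nested `results` structure
# documented above, assuming this __init__ belongs to the MergeCropDetResults
# class used further below; the file name, image id and category name are
# hypothetical placeholders.
import numpy as np

example_results = {
    1: {  # image_id: must be a valid id in the COCO anns
        'plane': np.array([
            [10., 20., 110., 220., 0.98],  # x1, y1, x2, y2, score (num_coors=4)
            [30., 40., 130., 240., 0.75],
        ]),
    },
}
# merger = MergeCropDetResults('dota_crop_coco.json', example_results, num_coors=4)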
def __init__(self, anns_file, num_coors=4):
    assert num_coors in (4, 8), "unsupported box representation"
    self.coco = anns_file
    if cvtools.is_str(anns_file):
        self.coco = cvtools.COCO(anns_file)
    self.results = defaultdict()  # nested dict, built dynamically
    self.num_coors = num_coors
def obj_from_dict(info, parent=None, default_args=None):
    """Initialize an object from dict.

    The dict must contain the key "type", which indicates the object type.
    It can be either a string or a type, such as "list" or ``list``.
    Remaining fields are treated as the arguments for constructing the
    object.

    Args:
        info (dict): Object types and arguments.
        parent (:class:`module`): Module which may contain the expected
            object classes.
        default_args (dict, optional): Default arguments for initializing
            the object.

    Returns:
        any type: Object built from the dict.
    """
    assert isinstance(info, dict) and 'type' in info
    assert isinstance(default_args, dict) or default_args is None
    args = info.copy()
    obj_type = args.pop('type')
    if cvtools.is_str(obj_type):
        if parent is not None:
            obj_type = getattr(parent, obj_type)
        else:
            obj_type = sys.modules[obj_type]
    elif not isinstance(obj_type, type):
        raise TypeError(
            'type must be a str or valid type, but got {}'.format(
                type(obj_type)))
    if default_args is not None:
        for name, value in default_args.items():
            args.setdefault(name, value)
    return obj_type(**args)
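# A minimal sketch (not from the source) of how an object is built from a
# config dict as described above; `builtins` serves as the parent module
# purely for illustration.
import builtins

cfg = dict(type='dict', a=1, b=2)
obj = obj_from_dict(cfg, parent=builtins)
assert obj == {'a': 1, 'b': 2}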
def get_classes(dataset):
    """Get class names of a dataset."""
    alias2name = {}
    for name, aliases in dataset_aliases.items():
        for alias in aliases:
            alias2name[alias] = name

    if cvtools.is_str(dataset):
        if dataset in alias2name:
            labels = eval(alias2name[dataset] + '_classes()')
        else:
            raise ValueError('Unrecognized dataset: {}'.format(dataset))
    else:
        raise TypeError(
            'dataset must be a str, but got {}'.format(type(dataset)))
    return labels
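# A hedged usage sketch (not from the source): 'voc' is assumed to be one of
# the aliases registered in `dataset_aliases`; substitute an alias that this
# codebase actually defines.
class_names = get_classes('voc')  # e.g. ('aeroplane', 'bicycle', ...)
print(len(class_names))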
def print_map_summary(mean_ap, results, dataset=None):
    """Print mAP and results of each class.

    Args:
        mean_ap (float): calculated from `eval_map`
        results (list): calculated from `eval_map`
        dataset (None or str or list): dataset name or dataset classes.
    """
    num_scales = len(results[0]['ap']) if isinstance(
        results[0]['ap'], np.ndarray) else 1
    num_classes = len(results)

    recalls = np.zeros((num_scales, num_classes), dtype=np.float32)
    precisions = np.zeros((num_scales, num_classes), dtype=np.float32)
    aps = np.zeros((num_scales, num_classes), dtype=np.float32)
    num_gts = np.zeros((num_scales, num_classes), dtype=int)
    for i, cls_result in enumerate(results):
        if cls_result['recall'].size > 0:
            recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1]
            precisions[:, i] = np.array(
                cls_result['precision'], ndmin=2)[:, -1]
        aps[:, i] = cls_result['ap']
        num_gts[:, i] = cls_result['num_gts']

    if dataset is None:
        label_names = [str(i) for i in range(1, num_classes + 1)]
    elif cvtools.is_str(dataset):
        label_names = get_classes(dataset)
    else:
        label_names = dataset

    if not isinstance(mean_ap, list):
        mean_ap = [mean_ap]
    header = ['class', 'gts', 'dets', 'recall', 'precision', 'ap']
    for i in range(num_scales):
        table_data = [header]
        for j in range(num_classes):
            row_data = [
                label_names[j], num_gts[i, j], results[j]['num_dets'],
                '{:.3f}'.format(recalls[i, j]),
                '{:.3f}'.format(precisions[i, j]),
                '{:.3f}'.format(aps[i, j])
            ]
            table_data.append(row_data)
        table_data.append(
            ['mAP', '', '', '', '', '{:.3f}'.format(mean_ap[i])])
        table = AsciiTable(table_data)
        table.inner_footing_row_border = True
        print(table.table)
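# A minimal sketch (not from the source): a hand-built `results` list holding
# the keys this function reads ('recall', 'precision', 'ap', 'num_gts',
# 'num_dets'); in real use both arguments come from `eval_map`.
import numpy as np

fake_results = [
    dict(recall=np.array([0.2, 0.6, 0.8]),
         precision=np.array([1.0, 0.9, 0.7]),
         ap=0.75, num_gts=10, num_dets=12),
    dict(recall=np.array([0.3, 0.5]),
         precision=np.array([0.9, 0.8]),
         ap=0.60, num_gts=5, num_dets=6),
]
print_map_summary(mean_ap=0.675, results=fake_results)  # 0.675 = (0.75 + 0.60) / 2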
def __init__(self,
             model,
             batch_processor,
             optimizer=None,
             work_dir=None,
             log_level=logging.INFO,
             logger=None):
    assert callable(batch_processor)
    self.model = model
    if optimizer is not None:
        self.optimizer = self.init_optimizer(optimizer)
    else:
        self.optimizer = None
    self.batch_processor = batch_processor

    # create work_dir
    if cvtools.is_str(work_dir):
        self.work_dir = osp.abspath(work_dir)
        cvtools.mkdir_or_exist(self.work_dir)
    elif work_dir is None:
        self.work_dir = None
    else:
        raise TypeError('"work_dir" must be a str or None')

    # get model name from the model class
    if hasattr(self.model, 'module'):
        self._model_name = self.model.module.__class__.__name__
    else:
        self._model_name = self.model.__class__.__name__

    self._rank, self._world_size = get_dist_info()
    self.timestamp = get_time_str()
    if logger is None:
        self.logger = self.init_logger(work_dir, log_level)
    else:
        self.logger = logger
    self.log_buffer = LogBuffer()
    self.val_buffer = dict()

    self.mode = None
    self._hooks = []
    self._epoch = 0
    self._iter = 0
    self._inner_iter = 0
    self._max_epochs = 0
    self._max_iters = 0
def __init__(self, ann_file, crop_ann_file, results=None, num_coors=4):
    assert num_coors in (4, 8), "unsupported box representation"
    self.num_coors = num_coors
    self.coco = cvtools.COCO(ann_file)
    self.anns = self.coco.anns
    self.calc_ious = (bbox_overlaps
                      if self.num_coors == 4 else poly_overlaps)
    if self.num_coors == 4:
        self.nms = cvtools.py_cpu_nms
    else:
        self.nms = poly_nms.poly_gpu_nms
    self.results = results
    if cvtools.is_str(crop_ann_file):
        if results is None:
            gt = cvtools.COCO2Dets(crop_ann_file)
            self.results = gt.convert()
    else:
        self.results = crop_ann_file
    dets = MergeCropDetResults(crop_ann_file, self.results, self.num_coors)
    self.merge_dets = dets.merge(self.nms)
def fast_eval_recall(results,
                     coco,
                     max_dets,
                     iou_thrs=np.arange(0.5, 0.96, 0.05)):
    if cvtools.is_str(results):
        assert results.endswith('.pkl')
        results = cvtools.load(results)
    elif not isinstance(results, list):
        raise TypeError(
            'results must be a list of numpy arrays or a filename, not {}'.
            format(type(results)))

    gt_bboxes = []
    img_ids = coco.getImgIds()
    for i in range(len(img_ids)):
        ann_ids = coco.getAnnIds(imgIds=img_ids[i])
        ann_info = coco.loadAnns(ann_ids)
        if len(ann_info) == 0:
            gt_bboxes.append(np.zeros((0, 4)))
            continue
        bboxes = []
        for ann in ann_info:
            if ann.get('ignore', False) or ann['iscrowd']:
                continue
            x1, y1, w, h = ann['bbox']
            bboxes.append([x1, y1, x1 + w - 1, y1 + h - 1])
        bboxes = np.array(bboxes, dtype=np.float32)
        if bboxes.shape[0] == 0:
            bboxes = np.zeros((0, 4))
        gt_bboxes.append(bboxes)

    recalls = eval_recalls(
        gt_bboxes, results, max_dets, iou_thrs, print_summary=False)
    ar = recalls.mean(axis=1)
    return ar