def main():
    # Distributed (ChainerMN) evaluation entry point for instance
    # segmentation models on SBD or COCO; only rank 0 drives the
    # iterator and runs the final evaluation.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', choices=('sbd', 'coco'))
    parser.add_argument('--model', choices=sorted(models.keys()))
    parser.add_argument('--pretrained-model')
    parser.add_argument('--batchsize', type=int)
    args = parser.parse_args()

    comm = chainermn.create_communicator('pure_nccl')
    device = comm.intra_rank

    dataset, label_names, eval_, model, batchsize = setup(
        args.dataset, args.model, args.pretrained_model, args.batchsize)

    chainer.cuda.get_device_from_id(device).use()
    model.to_gpu()

    if not comm.rank == 0:
        # Non-root ranks only serve predictions; apply_to_iterator
        # coordinates work distribution through the communicator.
        apply_to_iterator(model.predict, None, comm=comm)
        return

    iterator = iterators.MultithreadIterator(
        dataset, batchsize * comm.size, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, iterator, hook=ProgressHook(len(dataset)), comm=comm)
    # delete unused iterators explicitly
    del in_values

    eval_(out_values, rest_values)
def evaluate(self):
    """Evaluate instance segmentation with the COCO metric.

    Non-root MPI ranks only feed predictions through
    ``apply_to_iterator`` and return an empty observation; rank 0
    collects results and reports them under the target's scope.
    """
    target = self._targets['main']
    if self.comm is not None and self.comm.rank != 0:
        apply_to_iterator(target.predict, None, comm=self.comm)
        return {}
    iterator = self._iterators['main']

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        # Fall back to a shallow copy so the original iterator's
        # position is left untouched.
        it = copy.copy(iterator)

    in_values, out_values, rest_values = apply_to_iterator(
        target.predict, it, comm=self.comm)
    # delete unused iterators explicitly
    del in_values

    pred_masks, pred_labels, pred_scores = out_values

    if len(rest_values) == 2:
        gt_masks, gt_labels = rest_values
        gt_areas = None
        gt_crowdeds = None
    elif len(rest_values) == 4:
        gt_masks, gt_labels, gt_areas, gt_crowdeds =\
            rest_values
    else:
        raise ValueError('the dataset should return '
                         'sets of (img, mask, label) or sets of '
                         '(img, mask, label, area, crowded).')

    result = eval_instance_segmentation_coco(
        pred_masks, pred_labels, pred_scores,
        gt_masks, gt_labels, gt_areas, gt_crowdeds)

    # Summary metrics (mAP/mAR variants) are reported directly.
    report = {}
    for key in result.keys():
        if key.startswith('map') or key.startswith('mar'):
            report[key] = result[key]

    if self.label_names is not None:
        # Per-class AP/AR entries; missing classes become NaN.
        for key in result.keys():
            if key.startswith('ap') or key.startswith('ar'):
                for l, label_name in enumerate(self.label_names):
                    report_key = '{}/{:s}'.format(key, label_name)
                    try:
                        report[report_key] = result[key][l]
                    except IndexError:
                        report[report_key] = np.nan

    observation = {}
    with reporter.report_scope(observation):
        reporter.report(report, target)
    return observation
def main():
    # Distributed (ChainerMN) evaluation of semantic segmentation
    # models; rank 0 prints the score table, other ranks only predict.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset',
                        choices=('cityscapes', 'ade20k', 'camvid', 'voc'))
    parser.add_argument('--model', choices=(
        'pspnet_resnet101', 'segnet', 'deeplab_v3plus_xception65'))
    parser.add_argument('--pretrained-model')
    parser.add_argument('--input-size', type=int, default=None)
    args = parser.parse_args()

    comm = chainermn.create_communicator('pure_nccl')
    device = comm.intra_rank

    # None means "use the model's default input size".
    if args.input_size is None:
        input_size = None
    else:
        input_size = (args.input_size, args.input_size)

    dataset, label_names, model = get_dataset_and_model(
        args.dataset, args.model, args.pretrained_model, input_size)

    chainer.cuda.get_device_from_id(device).use()
    model.to_gpu()

    if not comm.rank == 0:
        # Worker ranks serve predictions only.
        apply_to_iterator(model.predict, None, comm=comm)
        return

    it = iterators.MultithreadIterator(
        dataset, comm.size, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, it, hook=ProgressHook(len(dataset)), comm=comm)
    # Delete an iterator of images to save memory usage.
    del in_values
    pred_labels, = out_values
    gt_labels, = rest_values

    result = eval_semantic_segmentation(pred_labels, gt_labels)

    for iu, label_name in zip(result['iou'], label_names):
        print('{:>23} : {:.4f}'.format(label_name, iu))
    print('=' * 34)
    print('{:>23} : {:.4f}'.format('mean IoU', result['miou']))
    print('{:>23} : {:.4f}'.format(
        'Class average accuracy', result['mean_class_accuracy']))
    print('{:>23} : {:.4f}'.format(
        'Global average accuracy', result['pixel_accuracy']))
def test(net, test_data, metric, calc_weight_count=False, extended_log=False):
    """Evaluate ``net`` over ``test_data`` and log the metric result.

    Args:
        net: Model (Chain) to evaluate.
        test_data (dict): Holds the ``"iterator"`` and ``"ds_len"``
            (dataset length) entries used for evaluation.
        metric: EvalMetric-style object updated with labels/preds.
        calc_weight_count (bool): If True, log the trainable-parameter
            count of ``net``.
        extended_log (bool): If True, log more precise accuracy values.
    """
    import numpy as np  # local import: module scope may not provide np

    tic = time.time()

    predictor = Predictor(model=net, transform=None)

    if calc_weight_count:
        weight_count = net.count_params()
        logging.info("Model: {} trainable parameters".format(weight_count))

    _, out_values, rest_values = apply_to_iterator(
        func=predictor,
        iterator=test_data["iterator"],
        hook=ProgressHook(test_data["ds_len"]))
    assert (len(rest_values) == 1)
    assert (len(out_values) == 1)

    # BUG FIX: the per-sample update loop was dead code guarded by
    # `if False:`; only this batched update was ever executed, so the
    # dead branch has been removed.
    metric.update(
        labels=np.array(list(rest_values[0])),
        preds=np.array(list(out_values[0])))

    accuracy_msg = report_accuracy(metric=metric, extended_log=extended_log)
    logging.info("Test: {}".format(accuracy_msg))
    logging.info("Time cost: {:.4f} sec".format(time.time() - tic))
def evaluate(self):
    """Run prediction over the main iterator and report projected
    3D bounding-box metrics for the main target."""
    target = self._targets['main']
    iterator = self._iterators['main']

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    in_values, out_values, rest_values = apply_to_iterator(
        target.predict, it)
    del in_values  # free the unused input-image iterator early

    points, labels, scores = out_values
    gt_points, gt_labels = rest_values

    report = eval_projected_3d_bbox_single(
        points, scores, gt_points,
        self.vertex, self.intrinsics, diam=self.diam)

    observation = {}
    with reporter.report_scope(observation):
        reporter.report(report, target)
    return observation
def main():
    # Single-GPU evaluation entry point for semantic segmentation.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--dataset', choices=('cityscapes', 'ade20k', 'camvid', 'voc'))
    parser.add_argument('--model', choices=sorted(models.keys()))
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--pretrained-model')
    parser.add_argument('--batchsize', type=int)
    parser.add_argument('--input-size', type=int, default=None)
    args = parser.parse_args()

    dataset, eval_, model, batchsize = setup(
        args.dataset, args.model, args.pretrained_model,
        args.batchsize, args.input_size)

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    iterator = iterators.SerialIterator(
        dataset, batchsize, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, iterator, hook=ProgressHook(len(dataset)))
    # Delete an iterator of images to save memory usage.
    del in_values

    eval_(out_values, rest_values)
def evaluate(self):
    """Evaluate sigmoid segmentation (two-input predict) and report
    per-channel scores for the main target."""
    target = self._targets['main']
    iterator = self._iterators['main']

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    in_values, out_values, rest_values = apply_to_iterator(
        target.predict,
        it,
        n_input=2,
    )
    del in_values  # inputs are not needed after prediction

    pred_labels, = out_values
    gt_labels, = rest_values

    report = eval_sigmoid_segmentation(
        pred_labels, gt_labels, channel_names=self.channel_names)

    observation = {}
    with reporter.report_scope(observation):
        reporter.report(report, target)
    return observation
def evaluate(self):
    """Report projected-3D-bbox evaluation results for the main model."""
    iterator = self._iterators['main']
    target = self._targets['main']

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    prediction = apply_to_iterator(target.predict, it)
    in_values, out_values, rest_values = prediction
    # Drop the input stream right away; only outputs matter here.
    del in_values

    points, labels, scores = out_values
    gt_points, gt_labels = rest_values

    result = eval_projected_3d_bbox_single(
        points, scores, gt_points, self.vertex, self.intrinsics,
        diam=self.diam)

    observation = {}
    with reporter.report_scope(observation):
        reporter.report(result, target)
    return observation
def main():
    """Evaluate a detection model configured through a config file."""
    args = parse_args()
    cfg.merge_from_file(args.config)
    # BUG FIX: `cfg.freeze` referenced the bound method without calling
    # it, so the config was never actually frozen.
    cfg.freeze()

    model = setup_model(cfg)
    load_pretrained_model(cfg, args.config, model, args.pretrained_model)
    dataset = setup_dataset(cfg, 'eval')

    iterator = iterators.MultithreadIterator(
        dataset, args.batchsize, repeat=False, shuffle=False)

    model.use_preset('evaluate')
    if args.gpu >= 0:
        model.to_gpu(args.gpu)

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, iterator, hook=ProgressHook(len(dataset)))
    # delete unused iterators explicitly
    del in_values

    # Dispatch to the metric matching the configured eval dataset.
    if cfg.dataset.eval == 'COCO':
        eval_coco(out_values, rest_values)
    elif cfg.dataset.eval == 'VOC':
        eval_voc(out_values, rest_values)
    else:
        raise ValueError()
def test(net, test_data, metric, use_gpus, calc_weight_count=False,
         extended_log=False):
    """Evaluate ``net`` on ``test_data`` with a streaming metric
    update and log the result."""
    tic = time.time()

    predictor = test_data["predictor_class"](base_model=net)
    if use_gpus:
        predictor.to_gpu()

    if calc_weight_count:
        weight_count = net.count_params()
        logging.info("Model: {} trainable parameters".format(weight_count))

    in_values, out_values, rest_values = apply_to_iterator(
        predictor.predict, test_data["iterator"],
        hook=ProgressHook(test_data["ds_len"]))
    del in_values

    pred_labels, = out_values
    gt_labels, = rest_values

    # Walk the lazy prediction/label streams in lockstep.
    for label, pred in zip(iter(gt_labels), iter(pred_labels)):
        metric.update(label, pred)

    accuracy_msg = report_accuracy(metric=metric, extended_log=extended_log)
    logging.info("Test: {}".format(accuracy_msg))
    logging.info("Time cost: {:.4f} sec".format(time.time() - tic))
def evaluate(self):
    """Evaluate multi-label classification and report mAP plus
    optional per-class AP values."""
    target = self._targets['main']
    iterator = self._iterators['main']

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    in_values, out_values, rest_values = apply_to_iterator(
        self.predict_func, it)
    del in_values  # drop the unused image iterator

    pred_labels, pred_scores = out_values
    gt_labels, = rest_values

    result = eval_multi_label_classification(
        pred_labels, pred_scores, gt_labels)

    report = {'map': result['map']}
    if self.label_names is not None:
        for idx, name in enumerate(self.label_names):
            try:
                value = result['ap'][idx]
            except IndexError:
                value = np.nan  # class missing from the result
            report['ap/{:s}'.format(name)] = value

    observation = {}
    with reporter.report_scope(observation):
        reporter.report(report, target)
    return observation
def evaluate(self):
    """Run multi-label classification evaluation and report mAP and,
    when label names are available, per-class APs."""
    iterator = self._iterators['main']
    target = self._targets['main']

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    in_values, out_values, rest_values = apply_to_iterator(
        self.predict_func, it)
    # The input values are never used; free them immediately.
    del in_values

    pred_labels, pred_scores = out_values
    gt_labels, = rest_values

    eval_result = eval_multi_label_classification(
        pred_labels, pred_scores, gt_labels)

    report = {'map': eval_result['map']}

    if self.label_names is not None:
        per_class_ap = eval_result['ap']
        for l, label_name in enumerate(self.label_names):
            key = 'ap/{:s}'.format(label_name)
            try:
                report[key] = per_class_ap[l]
            except IndexError:
                report[key] = np.nan

    observation = {}
    with reporter.report_scope(observation):
        reporter.report(report, target)
    return observation
def evaluate(self):
    """Compute the mean squared error between predicted and
    ground-truth images and report it as 'loss'."""
    target = self._targets['main']
    iterator = self._iterators['main']

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    in_values, out_values, rest_values = apply_to_iterator(
        target.predict, it)
    del in_values  # the input images are not needed any further

    pred_imgs, = out_values
    gt_imgs, = rest_values

    def _mse(pred, gt):
        # Flatten first so dot() gives the sum of squared errors.
        d = (pred - gt).ravel()
        return d.dot(d) / d.size

    mse_list = [_mse(p, g) for p, g in zip(pred_imgs, gt_imgs)]

    report = {'loss': np.mean(mse_list)}

    observation = {}
    with reporter.report_scope(observation):
        reporter.report(report, target)
    return observation
def eva_coco(dataset, func, limit=1000, preset='evaluate'):
    """Evaluate ``func`` on up to ``limit`` samples of a COCO dataset.

    Args:
        dataset: COCO-style dataset with a mutable ``ids`` attribute.
        func: Prediction callable passed to ``apply_to_iterator``.
        limit (int): Number of samples to evaluate; a falsy value
            means the full dataset.
        preset: Kept for backward compatibility; currently unused
            by this function.

    Returns:
        list: Metric values in the order of ``keys`` below.
    """
    total = limit if limit else len(dataset)
    orig_ids = dataset.ids.copy()
    # Temporarily truncate the dataset; restore in ``finally`` so an
    # exception during evaluation cannot leave it truncated (the
    # original code restored ids only on the success path).
    dataset.ids = dataset.ids[:total]
    try:
        iterator = iterators.SerialIterator(
            dataset, 1, repeat=False, shuffle=False)
        in_values, out_values, rest_values = apply_to_iterator(
            func, iterator, hook=ProgressHook(len(dataset)))

        pred_bboxes, pred_labels, pred_scores = out_values
        gt_bboxes, gt_labels, gt_areas, gt_crowdeds = rest_values

        result = eval_detection_coco(
            pred_bboxes, pred_labels, pred_scores,
            gt_bboxes, gt_labels, gt_areas, gt_crowdeds)

        keys = [
            'map/iou=0.50:0.95/area=all/max_dets=100',
            'map/iou=0.50/area=all/max_dets=100',
            'map/iou=0.75/area=all/max_dets=100',
            'map/iou=0.50:0.95/area=small/max_dets=100',
            'map/iou=0.50:0.95/area=medium/max_dets=100',
            'map/iou=0.50:0.95/area=large/max_dets=100',
            'mar/iou=0.50:0.95/area=all/max_dets=1',
            'mar/iou=0.50:0.95/area=all/max_dets=10',
            'mar/iou=0.50:0.95/area=all/max_dets=100',
            'mar/iou=0.50:0.95/area=small/max_dets=100',
            'mar/iou=0.50:0.95/area=medium/max_dets=100',
            'mar/iou=0.50:0.95/area=large/max_dets=100',
        ]

        print('')
        results = []
        for key in keys:
            print('{:s}: {:f}'.format(key, result[key]))
            results.append(result[key])
        return results
    finally:
        dataset.ids = orig_ids
def test(net, val_iterator, val_dataset_len, num_gpus,
         calc_weight_count=False, extended_log=False):
    """Evaluate a CIFAR classifier over the validation set and log
    the resulting error rate."""
    tic = time.time()

    predictor = CIFARPredictor(base_model=net)
    if num_gpus > 0:
        predictor.to_gpu()

    if calc_weight_count:
        weight_count = net.count_params()
        logging.info('Model: {} trainable parameters'.format(weight_count))

    in_values, out_values, rest_values = apply_to_iterator(
        predictor.predict, val_iterator, hook=ProgressHook(val_dataset_len))
    del in_values

    pred_probs, = out_values
    gt_labels, = rest_values

    y = np.array(list(pred_probs))
    t = np.array(list(gt_labels))

    err_val = 1.0 - F.accuracy(y=y, t=t).data

    if extended_log:
        logging.info('Test: err={err:.4f} ({err})'.format(err=err_val))
    else:
        logging.info('Test: err={err:.4f}'.format(err=err_val))
    logging.info('Time cost: {:.4f} sec'.format(time.time() - tic))
def main():
    # Single-GPU evaluation entry point for detection models on
    # VOC or COCO.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', choices=('voc', 'coco'))
    parser.add_argument('--model', choices=sorted(models.keys()))
    parser.add_argument('--pretrained-model')
    parser.add_argument('--batchsize', type=int)
    parser.add_argument('--gpu', type=int, default=-1)
    args = parser.parse_args()

    dataset, eval_, model, batchsize = setup(
        args.dataset, args.model, args.pretrained_model, args.batchsize)

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    model.use_preset('evaluate')

    iterator = iterators.MultithreadIterator(
        dataset, batchsize, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, iterator, hook=ProgressHook(len(dataset)))
    # delete unused iterators explicitly
    del in_values

    eval_(out_values, rest_values)
def _check_apply_to_iterator(self, comm=None):
    # Shared assertion helper: runs apply_to_iterator with this test
    # case's configured func/iterator and checks the three returned
    # value groups against the expected fixtures.
    values = apply_to_iterator(
        self.func, self.iterator,
        n_input=self.n_input, hook=self.hook, comm=comm)

    if comm is not None and not comm.rank == 0:
        # Non-root ranks are expected to receive None.
        self.assertEqual(values, None)
        return

    in_values, out_values, rest_values = values

    self.assertEqual(len(in_values), self.n_input)
    for in_vals, in_vals_expect in \
            zip_longest(in_values, self.in_values_expect):
        for in_val, in_val_expect in zip_longest(in_vals, in_vals_expect):
            np.testing.assert_equal(in_val, in_val_expect)

    self.assertEqual(len(out_values), self.n_output)
    for out_vals in out_values:
        # Each output stream must cover the whole dataset.
        self.assertEqual(len(list(out_vals)), len(self.dataset))

    self.assertEqual(len(rest_values), self.n_rest)
    for rest_vals, rest_vals_expect in \
            zip_longest(rest_values, self.rest_values_expect):
        for rest_val, rest_val_expect in \
                zip_longest(rest_vals, rest_vals_expect):
            # Arrays need np.testing; other types compare directly.
            if isinstance(rest_val_expect, np.ndarray):
                np.testing.assert_equal(rest_val, rest_val_expect)
            else:
                self.assertEqual(rest_val, rest_val_expect)
def test_progress_hook_with_infinite_iterator(self):
    # ProgressHook without n_total must work on a repeating iterator.
    iterator = SerialIterator(self.dataset, 2)

    values = apply_to_iterator(self.func, iterator, hook=ProgressHook())
    in_values, out_values, rest_values = values

    for _ in range(10):
        next(in_values[0])
def evaluate(self):
    # VOC-style instance segmentation evaluation with optional
    # ChainerMN support.
    target = self._targets['main']
    if self.comm is not None and self.comm.rank != 0:
        # Worker ranks only serve predictions and report nothing.
        apply_to_iterator(target.predict, None, comm=self.comm)
        return {}
    iterator = self._iterators['main']

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        # Copy so the original iterator's position is preserved.
        it = copy.copy(iterator)

    in_values, out_values, rest_values = apply_to_iterator(
        target.predict, it, comm=self.comm)
    # delete unused iterators explicitly
    del in_values

    pred_masks, pred_labels, pred_scores = out_values
    gt_masks, gt_labels = rest_values

    result = eval_instance_segmentation_voc(
        pred_masks, pred_labels, pred_scores, gt_masks, gt_labels,
        iou_thresh=self.iou_thresh, use_07_metric=self.use_07_metric)

    report = {'map': result['map']}
    if self.label_names is not None:
        for l, label_name in enumerate(self.label_names):
            try:
                report['ap/{:s}'.format(label_name)] = result['ap'][l]
            except IndexError:
                # Classes absent from the result get NaN.
                report['ap/{:s}'.format(label_name)] = np.nan

    observation = {}
    with reporter.report_scope(observation):
        reporter.report(report, target)
    return observation
def evaluate(self):
    # Semantic segmentation evaluation with optional ChainerMN support;
    # reports mIoU, accuracies, and optional per-class scores.
    target = self._targets['main']
    if self.comm is not None and self.comm.rank != 0:
        # Worker ranks only serve predictions.
        apply_to_iterator(target.predict, None, comm=self.comm)
        return {}
    iterator = self._iterators['main']

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    in_values, out_values, rest_values = apply_to_iterator(
        target.predict, it, comm=self.comm)
    # delete unused iterators explicitly
    del in_values

    pred_labels, = out_values
    gt_labels, = rest_values

    result = eval_semantic_segmentation(pred_labels, gt_labels)

    report = {
        'miou': result['miou'],
        'pixel_accuracy': result['pixel_accuracy'],
        'mean_class_accuracy': result['mean_class_accuracy']
    }

    if self.label_names is not None:
        for l, label_name in enumerate(self.label_names):
            try:
                report['iou/{:s}'.format(label_name)] = result['iou'][l]
                report['class_accuracy/{:s}'.format(label_name)] =\
                    result['class_accuracy'][l]
            except IndexError:
                # Classes beyond the result's range get NaN for both.
                report['iou/{:s}'.format(label_name)] = np.nan
                report['class_accuracy/{:s}'.format(label_name)] = np.nan

    observation = {}
    with reporter.report_scope(observation):
        reporter.report(report, target)
    return observation
def main():
    """Distributed semantic-segmentation evaluation using ChainerMN
    dataset scattering; rank 0 aggregates and prints the scores."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset',
                        choices=('cityscapes', 'ade20k', 'camvid'))
    parser.add_argument('--model', choices=('pspnet_resnet101', 'segnet'))
    parser.add_argument('--pretrained-model')
    parser.add_argument('--input-size', type=int, default=None)
    args = parser.parse_args()

    comm = chainermn.create_communicator()
    device = comm.intra_rank

    # BUG FIX: when --input-size was omitted, the old code passed
    # (None, None) as the input size; pass None instead, consistent
    # with the other evaluation script that calls get_dataset_and_model.
    if args.input_size is None:
        input_size = None
    else:
        input_size = (args.input_size, args.input_size)

    dataset, label_names, model = get_dataset_and_model(
        args.dataset, args.model, args.pretrained_model, input_size)
    assert len(dataset) % comm.size == 0, \
        "The size of the dataset should be a multiple "\
        "of the number of GPUs"

    chainer.cuda.get_device_from_id(device).use()
    model.to_gpu()

    # Rank 0 builds the index array; scatter_dataset splits it evenly.
    if comm.rank == 0:
        indices = np.arange(len(dataset))
    else:
        indices = None
    indices = chainermn.scatter_dataset(indices, comm)
    dataset = dataset.slice[indices]

    it = iterators.SerialIterator(dataset, 1, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, it, hook=ProgressHook(len(dataset)))
    # Delete an iterator of images to save memory usage.
    del in_values
    pred_labels, = out_values
    gt_labels, = rest_values

    # Each rank computes a partial confusion matrix; sum across ranks.
    confusion = calc_semantic_segmentation_confusion(pred_labels, gt_labels)
    confusion = comm.allreduce(confusion)

    if comm.rank == 0:
        iou = calc_semantic_segmentation_iou(confusion)
        pixel_accuracy = np.diag(confusion).sum() / confusion.sum()
        class_accuracy = np.diag(confusion) / np.sum(confusion, axis=1)

        for iu, label_name in zip(iou, label_names):
            print('{:>23} : {:.4f}'.format(label_name, iu))
        print('=' * 34)
        print('{:>23} : {:.4f}'.format('mean IoU', np.nanmean(iou)))
        print('{:>23} : {:.4f}'.format(
            'Class average accuracy', np.nanmean(class_accuracy)))
        print('{:>23} : {:.4f}'.format(
            'Global average accuracy', pixel_accuracy))
def test_progress_hook(self):
    # A finite iterator plus ProgressHook(n_total=...) should allow
    # the entire dataset to be consumed without error.
    iterator = SerialIterator(self.dataset, 2, repeat=False)

    in_values, out_values, rest_values = apply_to_iterator(
        self.func, iterator,
        hook=ProgressHook(n_total=len(self.dataset)))

    # consume all data
    for _ in in_values[0]:
        pass
def main():
    # Evaluate FCIS instance segmentation on the SBD validation split
    # and print per-class AP plus mAP (VOC metric, 07-style AP).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model', choices=('fcis_psroi_align_resnet101', ),
        default='fcis_psroi_align_resnet101')
    parser.add_argument('--pretrained-model')
    parser.add_argument('--iou-thresh', type=float, default=0.5)
    parser.add_argument('--gpu', type=int, default=-1)
    args = parser.parse_args()

    if args.model == 'fcis_psroi_align_resnet101':
        if args.pretrained_model:
            model = FCISPSROIAlignResNet101(
                n_fg_class=len(sbd_instance_segmentation_label_names),
                pretrained_model=args.pretrained_model)
        else:
            # Fall back to the published SBD weights.
            model = FCISPSROIAlignResNet101(pretrained_model='sbd')

    model.use_preset('evaluate')

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    dataset = SBDInstanceSegmentationDataset(split='val')
    iterator = iterators.SerialIterator(
        dataset, 1, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, iterator, hook=ProgressHook(len(dataset)))
    # delete unused iterators explicitly
    del in_values

    pred_masks, pred_labels, pred_scores = out_values
    gt_masks, gt_labels = rest_values

    result = eval_instance_segmentation_voc(
        pred_masks, pred_labels, pred_scores, gt_masks, gt_labels,
        args.iou_thresh, use_07_metric=True)

    print('')
    print('mAP: {:f}'.format(result['map']))
    for l, name in enumerate(sbd_instance_segmentation_label_names):
        # NOTE(review): a class with AP exactly 0.0 also prints '-';
        # presumably the falsy check is meant to catch NaN/absent —
        # confirm intended behavior.
        if result['ap'][l]:
            print('{:s}: {:f}'.format(name, result['ap'][l]))
        else:
            print('{:s}: -'.format(name))
def test(net, val_iterator, val_dataset_len, num_gpus,
         input_image_size=224, resize_inv_factor=0.875,
         calc_weight_count=False, extended_log=False):
    # Evaluate an ImageNet classifier and log top-1/top-5 error rates.
    # resize_inv_factor sets the scale size relative to the crop size
    # (e.g. 224 / 0.875 -> 256).
    assert (resize_inv_factor > 0.0)
    resize_value = int(math.ceil(float(input_image_size) / resize_inv_factor))

    tic = time.time()

    predictor = ImagenetPredictor(
        base_model=net,
        scale_size=resize_value,
        crop_size=input_image_size)

    if num_gpus > 0:
        predictor.to_gpu()

    if calc_weight_count:
        weight_count = net.count_params()
        logging.info('Model: {} trainable parameters'.format(weight_count))

    in_values, out_values, rest_values = apply_to_iterator(
        predictor.predict, val_iterator, hook=ProgressHook(val_dataset_len))
    # Free the input-image iterator; only outputs and labels are used.
    del in_values

    pred_probs, = out_values
    gt_labels, = rest_values

    y = np.array(list(pred_probs))
    t = np.array(list(gt_labels))

    top1_acc = F.accuracy(
        y=y, t=t).data
    top5_acc = top_k_accuracy(
        y=y, t=t, k=5).data
    err_top1_val = 1.0 - top1_acc
    err_top5_val = 1.0 - top5_acc

    if extended_log:
        logging.info(
            'Test: err-top1={top1:.4f} ({top1})\terr-top5={top5:.4f} ({top5})'.format(
                top1=err_top1_val, top5=err_top5_val))
    else:
        logging.info('Test: err-top1={top1:.4f}\terr-top5={top5:.4f}'.format(
            top1=err_top1_val, top5=err_top5_val))
    logging.info('Time cost: {:.4f} sec'.format(
        time.time() - tic))
def main():
    # Evaluate Faster R-CNN FPN (ResNet-50/101) on COCO minival and
    # print mmAP per area range.
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--model', choices=('resnet50', 'resnet101'))
    parser.add_argument(
        '--mean', choices=('chainercv', 'detectron'), default='chainercv')
    parser.add_argument('--batchsize', type=int, default=1)
    # --pretrained-model and --snapshot are alternative weight sources.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--pretrained-model')
    group.add_argument('--snapshot')
    args = parser.parse_args()

    if args.model == 'resnet50':
        model = FasterRCNNFPNResNet50(
            n_fg_class=len(coco_bbox_label_names), mean=args.mean)
    elif args.model == 'resnet101':
        model = FasterRCNNFPNResNet101(
            n_fg_class=len(coco_bbox_label_names), mean=args.mean)

    if args.pretrained_model:
        chainer.serializers.load_npz(args.pretrained_model, model)
    elif args.snapshot:
        # Load from a trainer snapshot; the path points at the model
        # inside the updater hierarchy.
        chainer.serializers.load_npz(
            args.snapshot, model, path='updater/model:main/model/')

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    model.use_preset('evaluate')

    dataset = COCOBboxDataset(
        split='minival',
        use_crowded=True,
        return_area=True,
        return_crowded=True)
    iterator = iterators.MultithreadIterator(
        dataset, args.batchsize, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, iterator, hook=ProgressHook(len(dataset)))
    # delete unused iterators explicitly
    del in_values

    pred_bboxes, pred_labels, pred_scores = out_values
    gt_bboxes, gt_labels, gt_area, gt_crowded = rest_values

    result = eval_detection_coco(
        pred_bboxes, pred_labels, pred_scores,
        gt_bboxes, gt_labels, gt_area, gt_crowded)

    print()
    for area in ('all', 'large', 'medium', 'small'):
        print('mmAP ({}):'.format(area),
              result['map/iou=0.50:0.95/area={}/max_dets=100'.format(area)])
def main():
    """Distributed (ChainerMN) evaluation of a configured detection
    model; rank 0 drives the iterator and runs the metric."""
    args = parse_args()
    cfg.merge_from_file(args.config)
    # BUG FIX: `cfg.freeze` was referenced but never called, so the
    # config stayed mutable.
    cfg.freeze()

    comm = chainermn.create_communicator('pure_nccl')
    device = comm.intra_rank

    model = setup_model(cfg)
    load_pretrained_model(cfg, args.config, model, args.pretrained_model)
    dataset = setup_dataset(cfg, 'eval')
    model.use_preset('evaluate')

    chainer.cuda.get_device_from_id(device).use()
    model.to_gpu()

    if not comm.rank == 0:
        # Worker ranks only serve predictions.
        apply_to_iterator(model.predict, None, comm=comm)
        return

    iterator = iterators.MultithreadIterator(
        dataset, args.batchsize * comm.size, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, iterator, hook=ProgressHook(len(dataset)), comm=comm)
    # delete unused iterators explicitly
    del in_values

    if cfg.dataset.eval == 'COCO':
        eval_coco(out_values, rest_values)
    elif cfg.dataset.eval == 'VOC':
        eval_voc(out_values, rest_values)
    else:
        raise ValueError()
def main():
    # Evaluate Light-Head R-CNN on COCO minival and print the standard
    # COCO mAP/mAR summary values.
    parser = argparse.ArgumentParser()
    parser.add_argument('--pretrained-model', default='coco')
    parser.add_argument('--gpu', type=int, default=-1)
    args = parser.parse_args()

    model = LightHeadRCNNResNet101(
        n_fg_class=len(coco_bbox_label_names),
        pretrained_model=args.pretrained_model)
    model.use_preset('evaluate')

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    dataset = COCOBboxDataset(
        split='minival', use_crowded=True,
        return_crowded=True, return_area=True)
    iterator = iterators.SerialIterator(
        dataset, 1, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, iterator, hook=ProgressHook(len(dataset)))
    # delete unused iterators explicitly
    del in_values

    pred_bboxes, pred_labels, pred_scores = out_values
    gt_bboxes, gt_labels, gt_areas, gt_crowdeds = rest_values

    result = eval_detection_coco(
        pred_bboxes, pred_labels, pred_scores,
        gt_bboxes, gt_labels, gt_areas, gt_crowdeds)

    # Standard COCO summary keys, printed in the conventional order.
    keys = [
        'map/iou=0.50:0.95/area=all/max_dets=100',
        'map/iou=0.50/area=all/max_dets=100',
        'map/iou=0.75/area=all/max_dets=100',
        'map/iou=0.50:0.95/area=small/max_dets=100',
        'map/iou=0.50:0.95/area=medium/max_dets=100',
        'map/iou=0.50:0.95/area=large/max_dets=100',
        'mar/iou=0.50:0.95/area=all/max_dets=1',
        'mar/iou=0.50:0.95/area=all/max_dets=10',
        'mar/iou=0.50:0.95/area=all/max_dets=100',
        'mar/iou=0.50:0.95/area=small/max_dets=100',
        'mar/iou=0.50:0.95/area=medium/max_dets=100',
        'mar/iou=0.50:0.95/area=large/max_dets=100',
    ]

    print('')
    for key in keys:
        print('{:s}: {:f}'.format(key, result[key]))
def main():
    # Distributed (ChainerMN) evaluation for semantic segmentation;
    # only rank 0 runs the iterator and the final evaluation callback.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset',
                        choices=('cityscapes', 'ade20k', 'camvid', 'voc'))
    parser.add_argument('--model', choices=sorted(models.keys()))
    parser.add_argument('--pretrained-model')
    parser.add_argument('--batchsize', type=int)
    parser.add_argument('--input-size', type=int, default=None)
    args = parser.parse_args()

    comm = chainermn.create_communicator('pure_nccl')
    device = comm.intra_rank

    dataset, eval_, model, batchsize = setup(
        args.dataset, args.model, args.pretrained_model,
        args.batchsize, args.input_size)

    chainer.cuda.get_device_from_id(device).use()
    model.to_gpu()

    if not comm.rank == 0:
        # Worker ranks only serve predictions.
        apply_to_iterator(model.predict, None, comm=comm)
        return

    it = iterators.MultithreadIterator(
        dataset, batchsize * comm.size, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, it, hook=ProgressHook(len(dataset)), comm=comm)
    # Delete an iterator of images to save memory usage.
    del in_values

    eval_(out_values, rest_values)
def main():
    # Evaluate an ImageNet-style classifier on a directory-parsing
    # validation set and print the top-1 error.
    parser = argparse.ArgumentParser(
        description='Learning convnet from ILSVRC2012 dataset')
    parser.add_argument('val', help='Path to root of the validation dataset')
    parser.add_argument(
        '--model', choices=('vgg16', 'resnet50', 'resnet101', 'resnet152'))
    parser.add_argument('--pretrained_model', default='imagenet')
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--batchsize', type=int, default=32)
    parser.add_argument('--crop', choices=('center', '10'), default='center')
    parser.add_argument('--resnet_mode', default='he')
    args = parser.parse_args()

    dataset = DirectoryParsingLabelDataset(args.val)
    label_names = directory_parsing_label_names(args.val)
    n_class = len(label_names)

    iterator = iterators.MultiprocessIterator(
        dataset, args.batchsize, repeat=False, shuffle=False,
        n_processes=6, shared_mem=300000000)

    if args.model == 'vgg16':
        extractor = VGG16(n_class, args.pretrained_model)
    elif args.model == 'resnet50':
        extractor = ResNet50(
            n_class, args.pretrained_model, mode=args.resnet_mode)
    elif args.model == 'resnet101':
        extractor = ResNet101(
            n_class, args.pretrained_model, mode=args.resnet_mode)
    elif args.model == 'resnet152':
        extractor = ResNet152(
            n_class, args.pretrained_model, mode=args.resnet_mode)
    model = FeaturePredictor(
        extractor, crop_size=224, scale_size=256, crop=args.crop)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()

    print('Model has been prepared. Evaluation starts.')

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, iterator, hook=ProgressHook(len(dataset)))
    # Free the input iterator; only predictions and labels remain.
    del in_values

    pred_probs, = out_values
    gt_labels, = rest_values

    accuracy = F.accuracy(
        np.array(list(pred_probs)), np.array(list(gt_labels))).data

    print()
    print('Top 1 Error {}'.format(1. - accuracy))
def test(net, test_data, metric, calc_weight_count=False, extended_log=False):
    """Main test routine.

    Parameters:
    ----------
    net : Chain
        Model.
    test_data : dict
        Data loader (holds "iterator" and "ds_len").
    metric : EvalMetric
        Metric object instance.
    calc_weight_count : bool, default False
        Whether to calculate count of weights.
    extended_log : bool, default False
        Whether to log more precise accuracy values.
    """
    tic = time.time()

    predictor = Predictor(model=net, transform=None)

    if calc_weight_count:
        weight_count = net.count_params()
        logging.info("Model: {} trainable parameters".format(weight_count))

    in_values, out_values, rest_values = apply_to_iterator(
        func=predictor,
        iterator=test_data["iterator"],
        hook=ProgressHook(test_data["ds_len"]))
    assert (len(rest_values) == 1)
    assert (len(out_values) == 1)
    assert (len(in_values) == 1)

    # BUG FIX: the batched-update alternative was dead code guarded by
    # `if True:`'s else branch; only this streaming per-sample loop
    # ever ran, so the dead branch (and its local numpy import) is gone.
    labels = iter(rest_values[0])
    preds = iter(out_values[0])
    inputs = iter(in_values[0])
    for label, pred, inputi in zip(labels, preds, inputs):
        metric.update(label, pred)
        # Drop references promptly so the lazy iterators do not pin
        # more data than necessary.
        del label
        del pred
        del inputi

    accuracy_msg = report_accuracy(metric=metric, extended_log=extended_log)
    logging.info("Test: {}".format(accuracy_msg))
    logging.info("Time cost: {:.4f} sec".format(time.time() - tic))
def evaluate(self):
    """Evaluate instance segmentation with the COCO metric and report
    mAP summaries plus optional per-class APs."""
    iterator = self._iterators['main']
    target = self._targets['main']

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)
    if self._show_progress:
        it = tqdm.tqdm(it, total=len(it.dataset))

    in_values, out_values, rest_values = apply_to_iterator(
        target.predict, it)
    del in_values

    pred_bboxes, pred_masks, pred_labels, pred_scores = out_values

    if len(rest_values) == 5:
        gt_bboxes, gt_labels, gt_masks, gt_crowdeds, gt_areas = rest_values
    elif len(rest_values) == 3:
        gt_bboxes, gt_labels, gt_masks = rest_values
        gt_crowdeds = None
        gt_areas = None
    else:
        # BUG FIX: other lengths previously fell through and raised an
        # opaque NameError on gt_masks below; fail clearly instead.
        raise ValueError(
            'the dataset should return sets of (bbox, label, mask) or '
            'sets of (bbox, label, mask, crowded, area).')

    # evaluate
    # NOTE(review): gt_crowdeds is passed before gt_areas here —
    # verify this matches the signature of utils.eval_instseg_coco.
    result = utils.eval_instseg_coco(
        pred_masks, pred_labels, pred_scores,
        gt_masks, gt_labels, gt_crowdeds, gt_areas)

    report = {
        'map': result['map/iou=0.50:0.95/area=all/maxDets=100'],
        '[email protected]': result['map/iou=0.50/area=all/maxDets=100'],
        '[email protected]': result['map/iou=0.75/area=all/maxDets=100'],
    }

    if self.label_names is not None:
        for l, label_name in enumerate(self.label_names):
            try:
                report['ap/{:s}'.format(label_name)] = \
                    result['ap/iou=0.50:0.95/area=all/maxDets=100'][l]
            except IndexError:
                report['ap/{:s}'.format(label_name)] = np.nan

    observation = dict()
    with reporter.report_scope(observation):
        reporter.report(report, target)
    return observation
def evaluate(self):
    """Evaluate visible and occluded segmentation jointly.

    Reports the mean of the two mIoU values plus each one
    individually ('miou/vis' and 'miou/occ').
    """
    target = self._targets['main']
    iterator = self._iterators['main']

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    in_values, out_values, rest_values = apply_to_iterator(
        target.predict, it)
    del in_values  # inputs are unused past this point

    pred_labels, pred_labels_occ = out_values
    gt_labels, gt_labels_occ = rest_values

    result_vis = eval_semantic_segmentation(pred_labels, gt_labels)
    result_occ = eval_occlusion_segmentation(
        pred_labels_occ, gt_labels_occ)

    report = {
        'miou': (result_vis['miou'] + result_occ['miou']) / 2.,
        'miou/vis': result_vis['miou'],
        'miou/occ': result_occ['miou'],
    }
    # NOTE: per-class IoU reporting was disabled (commented out) in
    # the original and is intentionally left out here.

    observation = {}
    with reporter.report_scope(observation):
        reporter.report(report, target)
    return observation
def test_apply_to_iterator_with_infinite_iterator(self):
    """apply_to_iterator must keep producing values on a repeating iterator."""
    def predict(*in_values):
        batch_size = len(in_values[0])
        return [np.random.uniform(size=(48, 64))
                for _ in range(batch_size)]

    def random_image():
        H, W = np.random.randint(8, 16, size=2)
        return np.random.randint(0, 256, size=(3, H, W))

    dataset = [random_image() for _ in range(5)]

    # Default SerialIterator repeats forever (repeat=True).
    iterator = SerialIterator(dataset, 2)

    in_values, out_values, rest_values = apply_to_iterator(
        predict, iterator)

    # Draw more samples than the dataset holds; this must not raise.
    for _ in range(10):
        next(in_values[0])
    for _ in range(10):
        next(out_values[0])
def evaluate(self):
    """Evaluate the target detector with PASCAL VOC detection metrics.

    Runs ``target.predict`` over the ``main`` iterator and reports mAP
    (and per-class AP when ``self.label_names`` is set) under the
    ``main`` target.

    Returns:
        dict: Observation holding ``map`` and optional ``ap/<label>``
        entries.

    Raises:
        ValueError: If the dataset does not yield 2 or 3 ground-truth
            values per example.
    """
    iterator = self._iterators['main']
    target = self._targets['main']

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        # Shallow-copy so the caller's iterator state is left untouched.
        it = copy.copy(iterator)

    in_values, out_values, rest_values = apply_to_iterator(
        target.predict, it)
    # delete unused iterators explicitly
    del in_values

    pred_bboxes, pred_labels, pred_scores = out_values

    if len(rest_values) == 3:
        gt_bboxes, gt_labels, gt_difficults = rest_values
    elif len(rest_values) == 2:
        gt_bboxes, gt_labels = rest_values
        gt_difficults = None
    else:
        # BUGFIX: previously an unexpected dataset shape fell through and
        # left the gt_* names unbound, crashing later with NameError.
        # Fail fast with a clear message instead (matches the other
        # evaluators in this codebase).
        raise ValueError('the dataset should return '
                         'sets of (img, bbox, label) or sets of '
                         '(img, bbox, label, difficult).')

    result = eval_detection_voc(
        pred_bboxes, pred_labels, pred_scores,
        gt_bboxes, gt_labels, gt_difficults,
        use_07_metric=self.use_07_metric)

    report = {'map': result['map']}

    if self.label_names is not None:
        for l, label_name in enumerate(self.label_names):
            try:
                report['ap/{:s}'.format(label_name)] = result['ap'][l]
            except IndexError:
                # Class absent from the evaluation results.
                report['ap/{:s}'.format(label_name)] = np.nan

    observation = {}
    with reporter.report_scope(observation):
        reporter.report(report, target)
    return observation
def evaluate(self):
    """Compute semantic-segmentation scores for the target model.

    Reports mIoU, pixel accuracy and mean class accuracy; when
    ``self.label_names`` is set, per-class IoU and class accuracy are
    reported as well (``np.nan`` for classes missing from the result).
    """
    target = self._targets['main']
    base_iter = self._iterators['main']

    if hasattr(base_iter, 'reset'):
        base_iter.reset()
        it = base_iter
    else:
        it = copy.copy(base_iter)

    in_values, out_values, rest_values = apply_to_iterator(
        target.predict, it)
    # The input images are never needed for scoring; free them early.
    del in_values

    pred_labels, = out_values
    gt_labels, = rest_values

    result = eval_semantic_segmentation(pred_labels, gt_labels)

    report = {
        'miou': result['miou'],
        'pixel_accuracy': result['pixel_accuracy'],
        'mean_class_accuracy': result['mean_class_accuracy'],
    }

    if self.label_names is not None:
        for idx, name in enumerate(self.label_names):
            iou_key = 'iou/{:s}'.format(name)
            acc_key = 'class_accuracy/{:s}'.format(name)
            try:
                report[iou_key] = result['iou'][idx]
                report[acc_key] = result['class_accuracy'][idx]
            except IndexError:
                report[iou_key] = np.nan
                report[acc_key] = np.nan

    observation = {}
    with reporter.report_scope(observation):
        reporter.report(report, target)
    return observation
def main():
    """Evaluate a ResNet50 multi-label classifier on VOC2007 test."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--pretrained-model')
    args = parser.parse_args()

    model = ResNet50(
        pretrained_model=args.pretrained_model,
        n_class=len(voc_bbox_label_names),
        arch='he')
    # Take the raw fc6 output rather than the final prediction layer.
    model.pick = 'fc6'
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    dataset = VOCBboxDataset(split='test', year='2007', use_difficult=False)
    dataset = TransformDataset(
        dataset, ('img', 'bbox'), bbox_to_multi_label)

    iterator = iterators.SerialIterator(
        dataset, 8, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        PredictFunc(model, thresh=0), iterator,
        hook=ProgressHook(len(dataset)))
    # The raw inputs are never used again; free them early.
    del in_values

    pred_labels, pred_scores = out_values
    gt_labels, = rest_values

    result = eval_multi_label_classification(
        pred_labels, pred_scores, gt_labels)

    print()
    print('mAP: {:f}'.format(result['map']))
    for class_id, class_name in enumerate(voc_bbox_label_names):
        ap = result['ap'][class_id]
        if ap:
            print('{:s}: {:f}'.format(class_name, ap))
        else:
            print('{:s}: -'.format(class_name))
parser.add_argument('--gpu', type=int, default=-1) parser.add_argument('--pretrained-model', type=str) parser.add_argument('object') args = parser.parse_args() model = SSPYOLOv2() chainer.serializers.load_npz(args.pretrained_model, model) if args.gpu >= 0: chainer.cuda.get_device_from_id(args.gpu).use() model.to_gpu() test = LinemodDataset('.', obj_name=args.object, split='test') it = chainer.iterators.SerialIterator( test, batch_size=1, repeat=False, shuffle=False) in_values, out_values, rest_values = apply_to_iterator( model.predict, it, hook=ProgressHook(len(test))) del in_values points, labels, scores = out_values gt_points, gt_labels = rest_values intrinsics = get_linemod_intrinsics() mesh = MeshPly('LINEMOD/{}/{}.ply'.format(args.object, args.object)) vertex = np.c_[ np.array(mesh.vertices), np.ones((len(mesh.vertices), 1))] result = eval_projected_3d_bbox_single( points, scores, gt_points, vertex, intrinsics, diam=linemod_object_diameters[args.object]) print('Acc using 5 px 2D projection = {0:.2f}'.format(result['proj_acc'])) print('Acc using 10% threshold: 3D transformation = {0:.2f}'.format(
def test_apply_to_iterator(self):
    """End-to-end check of apply_to_iterator across parameterized modes.

    The class flags (multi_in_values, multi_out_values, with_rest_values,
    with_hook — presumably set by a parameterized test decorator; TODO
    confirm) select how many input/output/rest streams are exercised.
    """
    # Number of input streams fed to the function under test.
    if self.multi_in_values:
        n_input = 2
    else:
        n_input = 1

    # Build the expected input values: n_input lists of 5 random images
    # with varying spatial sizes.
    in_values_expect = []
    for _ in range(n_input):
        in_value = []
        for _ in range(5):
            H, W = np.random.randint(8, 16, size=2)
            in_value.append(np.random.randint(0, 256, size=(3, H, W)))
        in_values_expect.append(in_value)
    in_values_expect = tuple(in_values_expect)

    # The function under apply_to_iterator: returns either a 3-tuple of
    # per-sample lists or a single per-sample list.
    if self.multi_out_values:
        def func(*in_values):
            n_sample = len(in_values[0])
            return (
                [np.random.uniform(size=(10, 4)) for _ in range(n_sample)],
                [np.random.uniform(size=10) for _ in range(n_sample)],
                [np.random.uniform(size=10) for _ in range(n_sample)])
        n_output = 3
    else:
        def func(*in_values):
            n_sample = len(in_values[0])
            return [np.random.uniform(size=(48, 64))
                    for _ in range(n_sample)]
        n_output = 1

    # Optionally append extra per-sample values (strings, ints, arrays)
    # that apply_to_iterator should pass through untouched.
    if self.with_rest_values:
        strs = ['a', 'bc', 'def', 'ghij', 'klmno']
        nums = [0, 1, 2, 3, 4]
        arrays = [np.random.uniform(size=10) for _ in range(5)]
        rest_values_expect = (strs, nums, arrays)
        n_rest = 3
        dataset = chainer.datasets.TupleDataset(
            *(in_values_expect + rest_values_expect))
    else:
        rest_values_expect = ()
        n_rest = 0
        dataset = list(zip(*in_values_expect))

    iterator = SerialIterator(dataset, 2, repeat=False, shuffle=False)

    # Optional hook: invoked per batch; checks stream counts and that
    # every stream in a batch has the same number of samples.
    if self.with_hook:
        def hook(in_values, out_values, rest_values):
            n_sample = len(in_values[0])
            self.assertEqual(len(in_values), n_input)
            for in_vals in in_values:
                self.assertEqual(len(in_vals), n_sample)
            self.assertEqual(len(out_values), n_output)
            for out_vals in out_values:
                self.assertEqual(len(out_vals), n_sample)
            self.assertEqual(len(rest_values), n_rest)
            for rest_vals in rest_values:
                self.assertEqual(len(rest_vals), n_sample)
    else:
        hook = None

    in_values, out_values, rest_values = apply_to_iterator(
        func, iterator, n_input=n_input, hook=hook)

    # Inputs must round-trip exactly (zip_longest catches length drift).
    self.assertEqual(len(in_values), n_input)
    for in_vals, in_vals_expect in \
            zip_longest(in_values, in_values_expect):
        for in_val, in_val_expect in zip_longest(in_vals, in_vals_expect):
            np.testing.assert_equal(in_val, in_val_expect)

    # Outputs are random, so only their count and length are checked.
    self.assertEqual(len(out_values), n_output)
    for out_vals in out_values:
        self.assertEqual(len(list(out_vals)), len(dataset))

    # Rest values must also round-trip exactly; arrays need
    # np.testing.assert_equal, everything else plain equality.
    self.assertEqual(len(rest_values), n_rest)
    for rest_vals, rest_vals_expect in \
            zip_longest(rest_values, rest_values_expect):
        for rest_val, rest_val_expect in \
                zip_longest(rest_vals, rest_vals_expect):
            if isinstance(rest_val_expect, np.ndarray):
                np.testing.assert_equal(rest_val, rest_val_expect)
            else:
                self.assertEqual(rest_val, rest_val_expect)
def main():
    """Evaluate a VOC-pretrained detector (Faster R-CNN or SSD) on VOC2007 test."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model', choices=('faster_rcnn', 'ssd300', 'ssd512'),
        default='ssd300')
    parser.add_argument('--pretrained_model')
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--batchsize', type=int, default=32)
    args = parser.parse_args()

    # Model class and default pretrained weights for each choice.
    model_specs = {
        'faster_rcnn': (FasterRCNNVGG16, 'voc07'),
        'ssd300': (SSD300, 'voc0712'),
        'ssd512': (SSD512, 'voc0712'),
    }
    model_cls, default_weights = model_specs[args.model]
    if args.pretrained_model:
        model = model_cls(
            n_fg_class=20, pretrained_model=args.pretrained_model)
    else:
        model = model_cls(pretrained_model=default_weights)

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()
    model.use_preset('evaluate')

    dataset = VOCBboxDataset(
        year='2007', split='test',
        use_difficult=True, return_difficult=True)
    iterator = iterators.SerialIterator(
        dataset, args.batchsize, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, iterator, hook=ProgressHook(len(dataset)))
    # The raw inputs are never used again; free them early.
    del in_values

    pred_bboxes, pred_labels, pred_scores = out_values
    gt_bboxes, gt_labels, gt_difficults = rest_values

    result = eval_detection_voc(
        pred_bboxes, pred_labels, pred_scores,
        gt_bboxes, gt_labels, gt_difficults,
        use_07_metric=True)

    print()
    print('mAP: {:f}'.format(result['map']))
    for class_id, class_name in enumerate(voc_bbox_label_names):
        ap = result['ap'][class_id]
        if ap:
            print('{:s}: {:f}'.format(class_name, ap))
        else:
            print('{:s}: -'.format(class_name))