def test_calculate_metric(iter_nums):
    """Build the configured 3D segmentation network and evaluate it.

    For each checkpoint iteration in iter_nums, loads the checkpoint (if
    args.checkpoint_dir is set), runs test_all_cases over the test set,
    prints per-class dice/jc/hd/asd scores, and optionally tars the saved
    hard predictions.

    Args:
        iter_nums: iterable of checkpoint iteration numbers to evaluate.

    Returns:
        Per-class average metrics of the last evaluated iteration, or None
        if iter_nums is empty or only visualization/robustness eval ran.

    Raises:
        ValueError: if args.net names an unknown architecture.
    """
    # Instantiate the requested network architecture.
    if args.net == 'vnet':
        net = VNet(n_channels=1, num_classes=args.num_classes,
                   normalization='batchnorm', has_dropout=False)
    elif args.net == 'unet':
        net = UNet3D(in_channels=1, num_classes=args.num_classes)
    elif args.net == 'segtran':
        get_default(args, 'num_modes', default_settings, -1,
                    [args.net, 'num_modes', args.in_fpn_layers])
        if args.segtran_type == '25d':
            set_segtran25d_config(args)
            net = Segtran25d(config25d)
        else:
            set_segtran3d_config(args)
            net = Segtran3d(config3d)
    else:
        # Fail fast with a clear message instead of an unbound-name
        # NameError at net.cuda() below.
        raise ValueError("Unknown network type '%s'" % args.net)

    net.cuda()
    net.eval()
    preproc_fn = None

    # Without a checkpoint dir, only visualization / robustness evaluation
    # of the freshly initialized model is meaningful.
    if not args.checkpoint_dir:
        if args.vis_mode is not None:
            visualize_model(net, args.vis_mode)
            return
        if args.eval_robustness:
            eval_robustness(net, testloader, args.aug_degree)
            return

    # Keep the return value defined even when iter_nums is empty
    # (matches the 2D test driver's behavior).
    allcls_avg_metric = None
    for iter_num in iter_nums:
        if args.checkpoint_dir:
            checkpoint_path = os.path.join(args.checkpoint_dir,
                                           'iter_' + str(iter_num) + '.pth')
            load_model(net, args, checkpoint_path)
            if args.vis_mode is not None:
                visualize_model(net, args.vis_mode)
                continue
            if args.eval_robustness:
                eval_robustness(net, testloader, args.aug_degree)
                continue

        save_result = not args.test_interp
        if save_result:
            test_save_paths = []
            test_save_dirs = []
            test_save_dir = "%s-%s-%s-%d" % (args.net, args.job_name, timestamp, iter_num)
            test_save_path = "../prediction/%s" % (test_save_dir)
            # exist_ok avoids the race-prone exists()/makedirs() pair.
            os.makedirs(test_save_path, exist_ok=True)
            test_save_dirs.append(test_save_dir)
            test_save_paths.append(test_save_path)
        else:
            test_save_paths = [None]
            test_save_dirs = [None]

        # No need to use dataloader to pass data,
        # as one 3D image is split into many patches to do segmentation.
        allcls_avg_metric = test_all_cases(
            net, db_test,
            task_name=args.task_name,
            net_type=args.net,
            num_classes=args.num_classes,
            batch_size=args.batch_size,
            orig_patch_size=args.orig_patch_size,
            input_patch_size=args.input_patch_size,
            stride_xy=args.orig_patch_size[0] // 2,
            stride_z=args.orig_patch_size[2] // 2,
            save_result=save_result,
            test_save_path=test_save_paths[0],
            preproc_fn=preproc_fn,
            test_interp=args.test_interp,
            has_mask=has_mask)

        print("%d scores:" % iter_num)
        for cls in range(1, args.num_classes):
            dice, jc, hd, asd = allcls_avg_metric[cls - 1]
            print('%d: dice: %.3f, jc: %.3f, hd: %.3f, asd: %.3f' % (cls, dice, jc, hd, asd))

        if save_result:
            # Currently only save hard predictions.
            for pred_type, test_save_dir, test_save_path in zip(
                    ('hard', ), test_save_dirs, test_save_paths):
                # subprocess.DEVNULL replaces the previous per-iteration
                # open(os.devnull) handle, which was never closed (fd leak).
                do_tar = subprocess.run(
                    ["tar", "cvf", "%s.tar" % test_save_dir, test_save_dir],
                    cwd="../prediction",
                    stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
                # print(do_tar)
                print("{} tarball:\n{}.tar".format(
                    pred_type, os.path.abspath(test_save_path)))

    return allcls_avg_metric
'xyz_permute': None, 'orig_input_size': None, # each dim of the orig_patch_size should always be multiply of 8. 'orig_patch_size': 112, 'input_scale': (1, 1, 1), 'D_pool_K': 2, 'has_mask': { 'train': True }, 'weight': { 'train': 1 } }, } get_default(args, 'orig_input_size', default_settings, None, [args.task_name, 'orig_input_size']) get_default(args, 'orig_patch_size', default_settings, None, [args.task_name, 'orig_patch_size']) get_default(args, 'test_ds_name', default_settings, None, [args.task_name, 'test_ds_name']) get_default(args, 'input_scale', default_settings, None, [args.task_name, 'input_scale']) get_default(args, 'D_pool_K', default_settings, -1, [args.task_name, 'D_pool_K']) get_default(args, 'xyz_permute', default_settings, None, [args.task_name, 'xyz_permute']) get_default(args, 'chosen_modality', default_settings, -1, [args.task_name, 'chosen_modality']) get_default(args, 'num_classes', default_settings, -1, [args.task_name, 'num_classes']) args.binarize = (args.num_classes == 2)
'xyz_permute': None, 'orig_input_size': None, # each dim of the orig_patch_size should always be multiply of 8. 'orig_patch_size': 112, 'input_scale': (1, 1, 1), 'D_pool_K': 2, 'has_mask': { 'train': True }, 'weight': { 'train': 1 } }, } get_default(args, 'orig_input_size', default_settings, None, [args.task_name, 'orig_input_size']) get_default(args, 'orig_patch_size', default_settings, None, [args.task_name, 'orig_patch_size']) get_default(args, 'input_scale', default_settings, None, [args.task_name, 'input_scale']) get_default(args, 'D_pool_K', default_settings, -1, [args.task_name, 'D_pool_K']) get_default(args, 'xyz_permute', default_settings, None, [args.task_name, 'xyz_permute']) get_default(args, 'chosen_modality', default_settings, -1, [args.task_name, 'chosen_modality']) get_default(args, 'num_classes', default_settings, None, [args.task_name, 'num_classes']) args.binarize = (args.num_classes == 2) if type(args.orig_patch_size) == str:
def test_calculate_metric(iter_nums):
    """Build the configured 2D segmentation network and evaluate it.

    For each checkpoint iteration in iter_nums, loads the checkpoint (if
    args.checkpoint_dir is set), runs test_all_cases over the test loader,
    prints per-class dice scores and their average, and optionally tars the
    saved soft/hard predictions.

    Args:
        iter_nums: iterable of checkpoint iteration numbers to evaluate.

    Returns:
        Per-class average metrics of the last evaluated iteration, or None
        if iter_nums is empty or only visualization/robustness eval ran.
    """
    # Instantiate the requested network architecture.
    if args.net == 'unet':
        # timm-efficientnet performs slightly worse.
        if not args.vis_mode:
            backbone_type = re.sub("^eff", "efficientnet", args.backbone_type)
            net = smp.Unet(backbone_type, classes=args.num_classes,
                           encoder_weights='imagenet')
        else:
            net = VanillaUNet(n_channels=3, n_classes=args.num_classes)
    elif args.net == 'unet-scratch':
        net = UNet(num_classes=args.num_classes)
    elif args.net == 'nestedunet':
        net = NestedUNet(num_classes=args.num_classes)
    elif args.net == 'unet3plus':
        net = UNet_3Plus(n_classes=args.num_classes)
    elif args.net == 'pranet':
        net = PraNet(num_classes=args.num_classes - 1)
    elif args.net.startswith('deeplab'):
        use_smp_deeplab = args.net.endswith('smp')
        if use_smp_deeplab:
            backbone_type = re.sub("^eff", "efficientnet", args.backbone_type)
            net = smp.DeepLabV3Plus(backbone_type, classes=args.num_classes,
                                    encoder_weights='imagenet')
        else:
            model_name = args.net + "_" + args.backbone_type
            model_map = {
                'deeplabv3_resnet50':       deeplab.deeplabv3_resnet50,
                'deeplabv3plus_resnet50':   deeplab.deeplabv3plus_resnet50,
                'deeplabv3_resnet101':      deeplab.deeplabv3_resnet101,
                'deeplabv3plus_resnet101':  deeplab.deeplabv3plus_resnet101,
                'deeplabv3_mobilenet':      deeplab.deeplabv3_mobilenet,
                'deeplabv3plus_mobilenet':  deeplab.deeplabv3plus_mobilenet
            }
            net = model_map[model_name](num_classes=args.num_classes, output_stride=8)
    elif args.net == 'segtran':
        get_default(args, 'num_modes', default_settings, -1,
                    [args.net, 'num_modes', args.in_fpn_layers])
        set_segtran2d_config(args)
        print(args)
        net = Segtran2d(config)
    else:
        # Deliberate debugging trap for an unrecognized net type.
        breakpoint()

    net.cuda()
    net.eval()

    # Currently colormap is used only for OCT task.
    colormap = get_seg_colormap(args.num_classes, return_torch=True).cuda()

    # prepred: pre-prediction. postpred: post-prediction.
    task2mask_prepred = {
        'refuge': refuge_map_mask,
        'polyp':  polyp_map_mask,
        'oct':    partial(index_to_onehot, num_classes=args.num_classes)
    }
    task2mask_postpred = {
        'refuge': refuge_inv_map_mask,
        'polyp':  polyp_inv_map_mask,
        'oct':    partial(onehot_inv_map, colormap=colormap)
    }
    mask_prepred_mapping_func = task2mask_prepred[args.task_name]
    mask_postpred_mapping_funcs = [task2mask_postpred[args.task_name]]
    if args.do_remove_frag:
        # def instead of a lambda assignment (PEP 8 E731); same behavior.
        def remove_frag(segmap):
            return remove_fragmentary_segs(segmap, 255)
        mask_postpred_mapping_funcs.append(remove_frag)

    # Without a checkpoint dir, only visualization / robustness evaluation
    # of the freshly initialized model is meaningful.
    if not args.checkpoint_dir:
        if args.vis_mode is not None:
            visualize_model(net, args.vis_mode, db_test)
            return
        if args.eval_robustness:
            eval_robustness(net, testloader, args.aug_degree)
            return

    allcls_avg_metric = None
    for iter_num in iter_nums:
        if args.checkpoint_dir:
            checkpoint_path = os.path.join(args.checkpoint_dir,
                                           'iter_' + str(iter_num) + '.pth')
            load_model(net, args, checkpoint_path)
            if args.vis_mode is not None:
                # NOTE(review): unlike the pre-loop call, db_test is not
                # passed here -- confirm visualize_model accepts both forms.
                visualize_model(net, args.vis_mode)
                continue
            if args.eval_robustness:
                eval_robustness(net, testloader, args.aug_degree)
                continue

        save_result = not args.test_interp
        if save_result:
            test_save_paths = []
            test_save_dirs = []
            test_save_dir_tmpl = "%s-%s-%s-%d" % (args.net, args.job_name, timestamp, iter_num)
            # One dir for soft predictions, one for thresholded (hard) ones.
            for suffix in ("-soft", "-%.1f" % args.mask_thres):
                test_save_dir = test_save_dir_tmpl + suffix
                test_save_path = "../prediction/%s" % (test_save_dir)
                # exist_ok avoids the race-prone exists()/makedirs() pair.
                os.makedirs(test_save_path, exist_ok=True)
                test_save_dirs.append(test_save_dir)
                test_save_paths.append(test_save_path)
        else:
            test_save_paths = None
            test_save_dirs = None

        allcls_avg_metric, allcls_metric_count = \
            test_all_cases(net, testloader,
                           task_name=args.task_name,
                           num_classes=args.num_classes,
                           mask_thres=args.mask_thres,
                           model_type=args.net,
                           orig_input_size=args.orig_input_size,
                           patch_size=args.patch_size,
                           stride=(args.orig_input_size[0] // 2, args.orig_input_size[1] // 2),
                           test_save_paths=test_save_paths,
                           out_origsize=args.out_origsize,
                           mask_prepred_mapping_func=mask_prepred_mapping_func,
                           mask_postpred_mapping_funcs=mask_postpred_mapping_funcs,
                           reload_mask=args.reload_mask,
                           test_interp=args.test_interp,
                           verbose=args.verbose_output)

        print("Iter-%d scores on %d images:" % (iter_num, allcls_metric_count[0]))
        dice_sum = 0
        for cls in range(1, args.num_classes):
            dice = allcls_avg_metric[cls - 1]
            print('class %d: dice = %.3f' % (cls, dice))
            dice_sum += dice
        # Background (class 0) is excluded from the average.
        avg_dice = dice_sum / (args.num_classes - 1)
        print("Average dice: %.3f" % avg_dice)

        if args.net == 'segtran':
            # Attention statistics accumulated as module-level counters.
            max_attn, avg_attn, clamp_count, call_count = \
                [segtran_shared.__dict__[v] for v in
                 ('max_attn', 'avg_attn', 'clamp_count', 'call_count')]
            print("max_attn={:.2f}, avg_attn={:.2f}, clamp_count={}, call_count={}".format(
                max_attn, avg_attn, clamp_count, call_count))

        if save_result:
            for pred_type, test_save_dir, test_save_path in zip(
                    ('soft', 'hard'), test_save_dirs, test_save_paths):
                # subprocess.DEVNULL replaces the previous per-iteration
                # open(os.devnull) handle, which was never closed (fd leak).
                do_tar = subprocess.run(
                    ["tar", "cvf", "%s.tar" % test_save_dir, test_save_dir],
                    cwd="../prediction",
                    stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
                # print(do_tar)
                print("{} tarball:\n{}.tar".format(pred_type, os.path.abspath(test_save_path)))

    return allcls_avg_metric
'ds_class': 'SegWhole', 'ds_names': 'duke', # Actual images are at various sizes. As the dataset is SegWhole, orig_input_size is ignored. # But output_upscale is computed as the ratio between orig_input_size and patch_size. # If you want to avoid output upscaling, set orig_input_size to the same as patch_size. # The actual resolution of duke is (296, 500~542). # Set to (288, 512) will crop the central areas. # The actual resolution of pcv is (633, 720). Removing 9 pixels doesn't matter. 'orig_input_size': { 'duke': (288, 512), 'seed': (1024, 512), 'pcv': (624, 720) } , 'patch_size': { 'duke': (288, 512), 'seed': (512, 256), 'pcv': (312, 360) }, 'has_mask': { 'duke': True, 'seed': False, 'pcv': False }, 'weight': { 'duke': 1, 'seed': 1, 'pcv': 1 } }, } get_default(args, 'orig_input_size', default_settings, None, [args.task_name, 'orig_input_size']) get_default(args, 'patch_size', default_settings, None, [args.task_name, 'patch_size']) if type(args.patch_size) == str: args.patch_size = [ int(length) for length in args.patch_size.split(",") ] if len(args.patch_size) == 1: args.patch_size = (args.patch_size[0], args.patch_size[0]) if type(args.patch_size) == int: args.patch_size = (args.patch_size, args.patch_size) if type(args.orig_input_size) == str: args.orig_input_size = [ int(length) for length in args.orig_input_size.split(",") ] if type(args.orig_input_size) == int: args.orig_input_size = (args.orig_input_size, args.orig_input_size) if args.orig_input_size[0] > 0: args.output_upscale = args.orig_input_size[0] / args.patch_size[0] else: args.output_upscale = 1
'pcv': (312, 360) }, 'has_mask': { 'duke': True, 'seed': False, 'pcv': False }, 'weight': { 'duke': 1, 'seed': 1, 'pcv': 1 } }, } get_default(args, 'orig_input_size', default_settings, None, [args.task_name, 'orig_input_size']) get_default(args, 'patch_size', default_settings, None, [args.task_name, 'patch_size']) if type(args.patch_size) == str: args.patch_size = [int(length) for length in args.patch_size.split(",")] if len(args.patch_size) == 1: args.patch_size = (args.patch_size[0], args.patch_size[0]) if type(args.patch_size) == int: args.patch_size = (args.patch_size, args.patch_size) if type(args.orig_input_size) == str: args.orig_input_size = [ int(length) for length in args.orig_input_size.split(",") ] if type(args.orig_input_size) == int: args.orig_input_size = (args.orig_input_size, args.orig_input_size) if args.orig_input_size[0] > 0: