Code Example #1
def test_calculate_metric(epoch_num,
                          patch_size=(128, 128, 64),
                          stride_xy=64,
                          stride_z=32,
                          device='cuda'):
    net = VNet(n_channels=1,
               n_classes=num_classes,
               normalization='batchnorm',
               has_dropout=False).to(device)
    save_mode_path = os.path.join(snapshot_path,
                                  'iter_' + str(epoch_num) + '.pth')
    print(save_mode_path)
    net.load_state_dict(torch.load(save_mode_path, map_location=device))
    print("init weight from {}".format(save_mode_path))
    net.eval()

    metrics = test_all_case(net,
                            image_list,
                            num_classes=num_classes,
                            name_classes=name_classes,
                            patch_size=patch_size,
                            stride_xy=stride_xy,
                            stride_z=stride_z,
                            save_result=True,
                            test_save_path=test_save_path,
                            device=device)

    return metrics
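The function above relies on several module-level names (num_classes, name_classes, snapshot_path, image_list, test_save_path) that the surrounding test script defines. A minimal sketch of that assumed setup and of one call, with purely illustrative values:

# Illustrative module-level setup assumed by test_calculate_metric; the real
# script builds these from its configuration and data-list files.
num_classes = 2
name_classes = ['background', 'foreground']
snapshot_path = '../model/vnet_run1'        # directory containing iter_*.pth checkpoints
test_save_path = '../prediction/vnet_run1'
image_list = []                             # paths of the test volumes, populated elsewhere

metrics = test_calculate_metric(6000)       # 6000 is an example checkpoint iteration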
Code Example #2
def test_calculate_metric(epoch_num):
    net = VNet(n_channels=1, n_classes=num_classes, normalization='batchnorm', has_dropout=False).cuda()
    save_mode_path = os.path.join(snapshot_path, 'iter_' + str(epoch_num) + '.pth')
    net.load_state_dict(torch.load(save_mode_path))
    print("init weight from {}".format(save_mode_path))
    net.eval()

    avg_metric = test_all_case(net, image_list, num_classes=num_classes,
                               patch_size=(112, 112, 80), stride_xy=18, stride_z=4,
                               save_result=True, test_save_path=test_save_path)

    return avg_metric
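patch_size, stride_xy and stride_z control the sliding-window grid that test_all_case moves over each volume. As a rough, self-contained illustration of what those numbers imply (the window-counting rule is a common evaluator convention, not necessarily test_all_case's exact internals):

import math

# Hedged sketch: number of sliding-window positions along one axis for a given
# image extent, patch extent and stride.
def num_positions(dim_len, patch_len, stride):
    if dim_len <= patch_len:
        return 1
    return math.ceil((dim_len - patch_len) / stride) + 1

# For an illustrative 160x160x96 volume with patch (112, 112, 80), stride_xy=18, stride_z=4:
nx = num_positions(160, 112, 18)  # 4
ny = num_positions(160, 112, 18)  # 4
nz = num_positions(96, 80, 4)     # 5
print(nx * ny * nz, 'patches per volume')  # 80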
Code Example #3
def test_calculate_metric(args):
    net = VNet(n_channels=1,
               n_classes=args.num_classes,
               normalization='batchnorm',
               has_dropout=False).cuda()
    save_mode_path = os.path.join(args.snapshot_path,
                                  'iter_' + str(args.start_epoch) + '.pth')
    net.load_state_dict(torch.load(save_mode_path))
    print("init weight from {}".format(save_mode_path))
    net.eval()

    avg_metric = test_all_case(net,
                               args.testloader,
                               num_classes=args.num_classes,
                               patch_size=(128, 64, 128),
                               stride_xy=18,
                               stride_z=4,
                               save_result=True,
                               test_save_path=args.test_save_path)

    return avg_metric
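This variant reads everything from an args namespace. A hedged sketch of the CLI wiring it implies; the option names mirror the fields the function accesses, while the default values and the build_test_loader helper are placeholders, not the original script's:

import argparse

# Hypothetical argument parsing covering the fields read by the function above.
parser = argparse.ArgumentParser()
parser.add_argument('--num_classes', type=int, default=2)
parser.add_argument('--snapshot_path', type=str, default='../model/vnet_run1')
parser.add_argument('--start_epoch', type=int, default=6000)       # checkpoint iteration to load
parser.add_argument('--test_save_path', type=str, default='../prediction/vnet_run1')
args = parser.parse_args()
args.testloader = build_test_loader()  # placeholder for however the test volumes are assembled

avg_metric = test_calculate_metric(args)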
Code Example #4
def test_calculate_metric(iter_nums):
    if args.net == 'vnet':
        net = VNet(n_channels=1,
                   num_classes=args.num_classes,
                   normalization='batchnorm',
                   has_dropout=False)
    elif args.net == 'unet':
        net = UNet3D(in_channels=1, num_classes=args.num_classes)
    elif args.net == 'segtran':
        get_default(args, 'num_modes', default_settings, -1,
                    [args.net, 'num_modes', args.in_fpn_layers])
        if args.segtran_type == '25d':
            set_segtran25d_config(args)
            net = Segtran25d(config25d)
        else:
            set_segtran3d_config(args)
            net = Segtran3d(config3d)

    net.cuda()
    net.eval()
    preproc_fn = None

    # No checkpoint directory given: visualize or probe robustness on the network as constructed.
    if not args.checkpoint_dir:
        if args.vis_mode is not None:
            visualize_model(net, args.vis_mode)
            return

        if args.eval_robustness:
            eval_robustness(net, testloader, args.aug_degree)
            return

    # Evaluate each requested checkpoint iteration in turn.
    for iter_num in iter_nums:
        if args.checkpoint_dir:
            checkpoint_path = os.path.join(args.checkpoint_dir,
                                           'iter_' + str(iter_num) + '.pth')
            load_model(net, args, checkpoint_path)

            if args.vis_mode is not None:
                visualize_model(net, args.vis_mode)
                continue

            if args.eval_robustness:
                eval_robustness(net, testloader, args.aug_degree)
                continue

        # Predictions are only written to disk when args.test_interp is not set.
        save_result = not args.test_interp

        if save_result:
            test_save_paths = []
            test_save_dirs = []
            test_save_dir = "%s-%s-%s-%d" % (args.net, args.job_name,
                                             timestamp, iter_num)
            test_save_path = "../prediction/%s" % (test_save_dir)
            if not os.path.exists(test_save_path):
                os.makedirs(test_save_path)
            test_save_dirs.append(test_save_dir)
            test_save_paths.append(test_save_path)
        else:
            test_save_paths = [None]
            test_save_dirs = [None]

        # No need to use dataloader to pass data,
        # as one 3D image is split into many patches to do segmentation.
        allcls_avg_metric = test_all_cases(
            net,
            db_test,
            task_name=args.task_name,
            net_type=args.net,
            num_classes=args.num_classes,
            batch_size=args.batch_size,
            orig_patch_size=args.orig_patch_size,
            input_patch_size=args.input_patch_size,
            stride_xy=args.orig_patch_size[0] // 2,
            stride_z=args.orig_patch_size[2] // 2,
            save_result=save_result,
            test_save_path=test_save_paths[0],
            preproc_fn=preproc_fn,
            test_interp=args.test_interp,
            has_mask=has_mask)

        print("%d scores:" % iter_num)
        for cls in range(1, args.num_classes):
            dice, jc, hd, asd = allcls_avg_metric[cls - 1]
            print('%d: dice: %.3f, jc: %.3f, hd: %.3f, asd: %.3f' %
                  (cls, dice, jc, hd, asd))

        if save_result:
            FNULL = open(os.devnull, 'w')
            # Currently only save hard predictions.
            for pred_type, test_save_dir, test_save_path in zip(
                ('hard', ), test_save_dirs, test_save_paths):
                # Pack the prediction folder into a tarball; tar's console output is discarded.
                do_tar = subprocess.run(
                    ["tar", "cvf",
                     "%s.tar" % test_save_dir, test_save_dir],
                    cwd="../prediction",
                    stdout=FNULL,
                    stderr=subprocess.STDOUT)
                # print(do_tar)
                print("{} tarball:\n{}.tar".format(
                    pred_type, os.path.abspath(test_save_path)))

    return allcls_avg_metric
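Because this version loops over a list of checkpoint iterations and optionally tars up the saved predictions, a typical call evaluates several checkpoints in one run. A small sketch, assuming args, db_test, testloader and timestamp have already been set up by the surrounding script (the iteration numbers are examples):

iter_nums = [8000, 9000, 10000]            # example checkpoint iterations
allcls_avg_metric = test_calculate_metric(iter_nums)
# allcls_avg_metric holds one (dice, jc, hd, asd) tuple per foreground class,
# taken from the last checkpoint evaluated in the loop.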
Code Example #5
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            iter_num = iter_num + 1
            writer.add_scalar('lr', lr_, iter_num)
            writer.add_scalar('loss/loss_seg', loss_seg, iter_num)
            writer.add_scalar('loss/loss_seg_dice', loss_seg_dice, iter_num)
            writer.add_scalar('loss/loss', loss, iter_num)
            logging.info(
                'epoch %d, iteration %d : loss : %f, loss_seg : %f, loss_seg_dice : %f'
                % (epoch_num, iter_num, loss.item(), loss_seg.item(),
                   loss_seg_dice.item()))

        # validation
        net.eval()
        with torch.no_grad():
            loss_seg_dice_valid = 0
            for i_batch, sampled_batch in enumerate(validloader):
                volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
                volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
                outputs = net(volume_batch)
                outputs_soft = F.softmax(outputs, dim=1)
                loss_seg_dice = 0
                for i in range(num_classes):
                    loss_mid = losses.dice_loss(outputs_soft[:, i, :, :, :],
                                                label_batch == i)
                    loss_seg_dice += loss_mid
                loss_seg_dice_valid += loss_seg_dice.cpu().numpy()
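The excerpt stops inside the validation pass; the original continuation is not shown. As a hedged sketch of how the accumulated dice loss is commonly averaged and logged before returning to training (an assumption about the usual pattern, not the original code):

            # Sketch only: average over validation batches, log next to the training curves.
            loss_seg_dice_valid /= len(validloader)
            writer.add_scalar('loss/loss_seg_dice_valid', loss_seg_dice_valid, iter_num)
            logging.info('epoch %d, validation dice loss : %f' %
                         (epoch_num, loss_seg_dice_valid))
        net.train()  # back to training mode for the next epoch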