Code Example #1: inference with a trained ResUNet34 and per-image segmentation metrics
def main():
    opt = Options(isTrain=False)
    opt.parse()
    opt.save_options()

    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
        str(x) for x in opt.test['gpus'])

    img_dir = opt.test['img_dir']
    label_dir = opt.test['label_dir']
    save_dir = opt.test['save_dir']
    model_path = opt.test['model_path']
    save_flag = opt.test['save_flag']

    # data transforms
    test_transform = get_transforms(opt.transform['test'])

    model = ResUNet34(pretrained=opt.model['pretrained'])
    model = torch.nn.DataParallel(model)
    model = model.cuda()
    cudnn.benchmark = True

    # ----- load trained model ----- #
    print("=> loading trained model")
    checkpoint = torch.load(model_path)
    model.load_state_dict(checkpoint['state_dict'])
    print("=> loaded model at epoch {}".format(checkpoint['epoch']))
    model = model.module

    # switch to evaluate mode
    model.eval()
    counter = 0
    print("=> Test begins:")

    img_names = os.listdir(img_dir)

    if save_flag:
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)
        strs = img_dir.split('/')
        prob_maps_folder = '{:s}/{:s}_prob_maps'.format(save_dir, strs[-1])
        seg_folder = '{:s}/{:s}_segmentation'.format(save_dir, strs[-1])
        if not os.path.exists(prob_maps_folder):
            os.mkdir(prob_maps_folder)
        if not os.path.exists(seg_folder):
            os.mkdir(seg_folder)

    metric_names = ['acc', 'p_F1', 'p_recall', 'p_precision', 'dice', 'aji']
    test_results = dict()
    all_result = utils.AverageMeter(len(metric_names))

    for img_name in img_names:
        # load test image
        print('=> Processing image {:s}'.format(img_name))
        img_path = '{:s}/{:s}'.format(img_dir, img_name)
        img = Image.open(img_path)
        ori_h = img.size[1]
        ori_w = img.size[0]
        name = os.path.splitext(img_name)[0]
        label_path = '{:s}/{:s}_label.png'.format(label_dir, name)
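        # NOTE: scipy.misc.imread/imsave were removed in SciPy 1.2; with a
        # modern SciPy, imageio.imread/imageio.imsave are near drop-in substitutes.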
        gt = misc.imread(label_path)

        input = test_transform((img, ))[0].unsqueeze(0)

        print('\tComputing output probability maps...')
        prob_maps = get_probmaps(input, model, opt)
        pred = np.argmax(prob_maps, axis=0)  # prediction

        pred_labeled = measure.label(pred)
        pred_labeled = morph.remove_small_objects(pred_labeled,
                                                  opt.post['min_area'])
        pred_labeled = ndi_morph.binary_fill_holes(pred_labeled > 0)
        pred_labeled = measure.label(pred_labeled)

        print('\tComputing metrics...')
        metrics = compute_metrics(pred_labeled, gt, metric_names)

        # save result for each image
        test_results[name] = [
            metrics['acc'], metrics['p_F1'], metrics['p_recall'],
            metrics['p_precision'], metrics['dice'], metrics['aji']
        ]

        # update the average result
        all_result.update([
            metrics['acc'], metrics['p_F1'], metrics['p_recall'],
            metrics['p_precision'], metrics['dice'], metrics['aji']
        ])

        # save image
        if save_flag:
            print('\tSaving image results...')
            misc.imsave('{:s}/{:s}_pred.png'.format(prob_maps_folder, name),
                        pred.astype(np.uint8) * 255)
            misc.imsave('{:s}/{:s}_prob.png'.format(prob_maps_folder, name),
                        prob_maps[1, :, :])
            final_pred = Image.fromarray(pred_labeled.astype(np.uint16))
            final_pred.save('{:s}/{:s}_seg.tiff'.format(seg_folder, name))

            # save colored objects
            pred_colored_instance = np.zeros((ori_h, ori_w, 3))
            for k in range(1, pred_labeled.max() + 1):
                pred_colored_instance[pred_labeled == k, :] = np.array(
                    utils.get_random_color())
            filename = '{:s}/{:s}_seg_colored.png'.format(seg_folder, name)
            misc.imsave(filename, pred_colored_instance)

        counter += 1
        if counter % 10 == 0:
            print('\tProcessed {:d} images'.format(counter))

    print('=> Processed all {:d} images'.format(counter))
    print('Average Acc: {r[0]:.4f}\nF1: {r[1]:.4f}\nRecall: {r[2]:.4f}\n'
          'Precision: {r[3]:.4f}\nDice: {r[4]:.4f}\nAJI: {r[5]:.4f}\n'.format(
              r=all_result.avg))

    header = metric_names
    utils.save_results(header, all_result.avg, test_results,
                       '{:s}/test_results.txt'.format(save_dir))
Code Example #2: weakly supervised training on Voronoi and cluster labels, with per-epoch validation
def main(opt):
    global best_score, logger, logger_results
    best_score = 0
    opt.save_options()

    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
        str(x) for x in opt.train['gpus'])

    # set up logger
    logger, logger_results = setup_logging(opt)
    opt.print_options(logger)

    if opt.train['random_seed'] >= 0:
        # logger.info("=> Using random seed {:d}".format(opt.train['random_seed']))
        torch.manual_seed(opt.train['random_seed'])
        torch.cuda.manual_seed(opt.train['random_seed'])
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        np.random.seed(opt.train['random_seed'])
        random.seed(opt.train['random_seed'])
    else:
        torch.backends.cudnn.benchmark = True

    # ----- create model ----- #
    model = ResUNet34(pretrained=opt.model['pretrained'],
                      with_uncertainty=opt.with_uncertainty)
    # model = nn.DataParallel(model)
    model = model.cuda()

    # ----- define optimizer ----- #
    optimizer = torch.optim.Adam(model.parameters(),
                                 opt.train['lr'],
                                 betas=(0.9, 0.99),
                                 weight_decay=opt.train['weight_decay'])

    # ----- define criterion ----- #
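    # ignore_index=2: pixels labeled 2 in the weak Voronoi/cluster label maps
    # are presumably "unlabeled" and are therefore excluded from the loss.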
    criterion = torch.nn.NLLLoss(ignore_index=2).cuda()

    # ----- load data ----- #
    data_transforms = {
        'train': get_transforms(opt.transform['train']),
        'val': get_transforms(opt.transform['val'])
    }

    img_dir = '{:s}/train'.format(opt.train['img_dir'])
    target_vor_dir = '{:s}/train'.format(opt.train['label_vor_dir'])
    target_cluster_dir = '{:s}/train'.format(opt.train['label_cluster_dir'])
    dir_list = [img_dir, target_vor_dir, target_cluster_dir]
    post_fix = ['label_vor.png', 'label_cluster.png']
    num_channels = [3, 3, 3]
    train_set = DataFolder(dir_list, post_fix, num_channels,
                           data_transforms['train'])
    train_loader = DataLoader(train_set,
                              batch_size=opt.train['batch_size'],
                              shuffle=True,
                              num_workers=opt.train['workers'])

    # ----- optionally load from a checkpoint for validation or resuming training ----- #
    if opt.train['checkpoint']:
        if os.path.isfile(opt.train['checkpoint']):
            logger.info("=> loading checkpoint '{}'".format(
                opt.train['checkpoint']))
            checkpoint = torch.load(opt.train['checkpoint'])
            opt.train['start_epoch'] = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            logger.info("=> loaded checkpoint '{}' (epoch {})".format(
                opt.train['checkpoint'], checkpoint['epoch']))
        else:
            logger.info("=> no checkpoint found at '{}'".format(
                opt.train['checkpoint']))

    # ----- training and validation ----- #
    num_epochs = opt.train['num_epochs']

    for epoch in range(opt.train['start_epoch'], num_epochs):
        # train for one epoch (i.e., len(train_loader) iterations)
        logger.info('Epoch: [{:d}/{:d}]'.format(epoch + 1, num_epochs))
        train_loss, train_loss_vor, train_loss_cluster = train(
            opt, train_loader, model, optimizer, criterion)

        # evaluate on val set
        with torch.no_grad():
            val_acc, val_aji = validate(opt, model, data_transforms['val'])

        # check whether this is the best score so far (measured by AJI)
        is_best = val_aji > best_score
        best_score = max(val_aji, best_score)

        cp_flag = (epoch + 1) % opt.train['checkpoint_freq'] == 0
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }, epoch, opt.train['save_dir'], is_best, cp_flag)

        # save the training results to txt files
        logger_results.info(
            '{:d}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}'.format(
                epoch + 1, train_loss, train_loss_vor, train_loss_cluster,
                val_acc, val_aji))

    for i in list(logger.handlers):
        logger.removeHandler(i)
        i.flush()
        i.close()
    for i in list(logger_results.handlers):
        logger_results.removeHandler(i)
        i.flush()
        i.close()
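The `train` function called each epoch is not included in this listing. Below is a minimal sketch of one training step that is consistent with the three losses it returns, assuming the model emits per-pixel log-probabilities (matching the `NLLLoss` defined above) and that the weak label maps have already been converted to class-index tensors:

# Hypothetical single step of the weakly supervised training loop;
# pixels labeled 2 are skipped via NLLLoss(ignore_index=2).
def train_step(model, optimizer, criterion, batch):
    img, target_vor, target_cluster = batch
    log_prob = model(img.cuda())          # assumed (N, 2, H, W) log-probs
    loss_vor = criterion(log_prob, target_vor.cuda())
    loss_cluster = criterion(log_prob, target_cluster.cuda())
    loss = loss_vor + loss_cluster
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item(), loss_vor.item(), loss_cluster.item()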
Code Example #3: multi-class (tumor / lymphocyte / stroma) inference with optional test-time augmentation
def main():
    params = Params()
    img_dir = params.test['img_dir']
    label_dir = params.test['label_dir']
    save_dir = params.test['save_dir']
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    model_path = params.test['model_path']
    save_flag = params.test['save_flag']
    tta = params.test['tta']

    params.save_params('{:s}/test_params.txt'.format(params.test['save_dir']),
                       test=True)

    # compute metrics only when ground-truth labels are provided
    eval_flag = bool(label_dir)
    if eval_flag:
        test_results = dict()
        # clas_acc, recall, precision, F1, dice, iou, hausdorff
        tumor_result = utils.AverageMeter(7)
        lym_result = utils.AverageMeter(7)
        stroma_result = utils.AverageMeter(7)
        all_result = utils.AverageMeter(7)
        conf_matrix = np.zeros((3, 3))

    # data transforms
    test_transform = get_transforms(params.transform['test'])

    model_name = params.model['name']
    if model_name == 'ResUNet34':
        model = ResUNet34(params.model['out_c'],
                          fixed_feature=params.model['fix_params'])
    elif model_name == 'UNet':
        model = UNet(3, params.model['out_c'])
    else:
        raise NotImplementedError()
    model = torch.nn.DataParallel(model)
    model = model.cuda()
    cudnn.benchmark = True

    # ----- load trained model ----- #
    print("=> loading trained model")
    best_checkpoint = torch.load(model_path)
    model.load_state_dict(best_checkpoint['state_dict'])
    print("=> loaded model at epoch {}".format(best_checkpoint['epoch']))
    model = model.module

    # switch to evaluate mode
    model.eval()
    counter = 0
    print("=> Test begins:")

    img_names = os.listdir(img_dir)

    if save_flag:
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)
        strs = img_dir.split('/')
        prob_maps_folder = '{:s}/{:s}_prob_maps'.format(save_dir, strs[-1])
        seg_folder = '{:s}/{:s}_segmentation'.format(save_dir, strs[-1])
        if not os.path.exists(prob_maps_folder):
            os.mkdir(prob_maps_folder)
        if not os.path.exists(seg_folder):
            os.mkdir(seg_folder)

    # img_names = ['193-adca-5']
    # total_time = 0.0
    for img_name in img_names:
        # load test image
        print('=> Processing image {:s}'.format(img_name))
        img_path = '{:s}/{:s}'.format(img_dir, img_name)
        img = Image.open(img_path)
        ori_h = img.size[1]
        ori_w = img.size[0]
        name = os.path.splitext(img_name)[0]
        if eval_flag:
            label_path = '{:s}/{:s}_label.png'.format(label_dir, name)
            gt = misc.imread(label_path)

        input = test_transform((img, ))[0].unsqueeze(0)

        print('\tComputing output probability maps...')
        prob_maps = get_probmaps(input, model, params)
        if tta:
            img_hf = img.transpose(Image.FLIP_LEFT_RIGHT)  # horizontal flip
            img_vf = img.transpose(Image.FLIP_TOP_BOTTOM)  # vertical flip
            img_hvf = img_hf.transpose(
                Image.FLIP_TOP_BOTTOM)  # horizontal and vertical flips

            input_hf = test_transform(
                (img_hf, ))[0].unsqueeze(0)  # horizontal flip input
            input_vf = test_transform(
                (img_vf, ))[0].unsqueeze(0)  # vertical flip input
            input_hvf = test_transform((img_hvf, ))[0].unsqueeze(
                0)  # horizontal and vertical flip input

            prob_maps_hf = get_probmaps(input_hf, model, params)
            prob_maps_vf = get_probmaps(input_vf, model, params)
            prob_maps_hvf = get_probmaps(input_hvf, model, params)

            # undo the flips
            prob_maps_hf = np.flip(prob_maps_hf, 2)
            prob_maps_vf = np.flip(prob_maps_vf, 1)
            prob_maps_hvf = np.flip(np.flip(prob_maps_hvf, 1), 2)

            # rotation 90 and flips
            img_r90 = img.rotate(90, expand=True)
            img_r90_hf = img_r90.transpose(
                Image.FLIP_LEFT_RIGHT)  # horizontal flip
            img_r90_vf = img_r90.transpose(
                Image.FLIP_TOP_BOTTOM)  # vertical flip
            img_r90_hvf = img_r90_hf.transpose(
                Image.FLIP_TOP_BOTTOM)  # horizontal and vertical flips

            input_r90 = test_transform((img_r90, ))[0].unsqueeze(0)
            input_r90_hf = test_transform(
                (img_r90_hf, ))[0].unsqueeze(0)  # horizontal flip input
            input_r90_vf = test_transform(
                (img_r90_vf, ))[0].unsqueeze(0)  # vertical flip input
            input_r90_hvf = test_transform((img_r90_hvf, ))[0].unsqueeze(
                0)  # horizontal and vertical flip input

            prob_maps_r90 = get_probmaps(input_r90, model, params)
            prob_maps_r90_hf = get_probmaps(input_r90_hf, model, params)
            prob_maps_r90_vf = get_probmaps(input_r90_vf, model, params)
            prob_maps_r90_hvf = get_probmaps(input_r90_hvf, model, params)

            # undo the flips and the 90-degree rotation
            prob_maps_r90 = np.rot90(prob_maps_r90, k=3, axes=(1, 2))
            prob_maps_r90_hf = np.rot90(np.flip(prob_maps_r90_hf, 2),
                                        k=3,
                                        axes=(1, 2))
            prob_maps_r90_vf = np.rot90(np.flip(prob_maps_r90_vf, 1),
                                        k=3,
                                        axes=(1, 2))
            prob_maps_r90_hvf = np.rot90(np.flip(np.flip(prob_maps_r90_hvf, 1),
                                                 2),
                                         k=3,
                                         axes=(1, 2))

            # utils.show_figures((np.array(img), np.array(img_r90_hvf),
            #                     np.swapaxes(np.swapaxes(prob_maps_r90_hvf, 0, 1), 1, 2)))

            prob_maps = (prob_maps + prob_maps_hf + prob_maps_vf +
                         prob_maps_hvf + prob_maps_r90 + prob_maps_r90_hf +
                         prob_maps_r90_vf + prob_maps_r90_hvf) / 8

        pred = np.argmax(prob_maps, axis=0)  # prediction
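        # class indices, inferred from the code below: 0 = background,
        # 1 = tumor, 2 = lymphocyte, 3 = stroma, 4 = nuclear contour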
        pred_inside = pred.copy()
        pred_inside[pred == 4] = 0  # set contours to background
        pred_nuclei_inside_labeled = measure.label(pred_inside > 0)

        pred_tumor_inside = pred_inside == 1
        pred_lym_inside = pred_inside == 2
        pred_stroma_inside = pred_inside == 3
        pred_3types_inside = pred_tumor_inside + pred_lym_inside * 2 + pred_stroma_inside * 3

        # find the correct class for each segmented nucleus
        N_nuclei = len(np.unique(pred_nuclei_inside_labeled))
        N_class = len(np.unique(pred_3types_inside))
        intersection = np.histogram2d(pred_nuclei_inside_labeled.flatten(),
                                      pred_3types_inside.flatten(),
                                      bins=(N_nuclei, N_class))[0]
        classes = np.argmax(intersection, axis=1)
        tumor_nuclei_indices = np.nonzero(classes == 1)
        lym_nuclei_indices = np.nonzero(classes == 2)
        stroma_nuclei_indices = np.nonzero(classes == 3)

        # resolve nuclei whose pixels received different class labels:
        # keep only each nucleus's majority class (argmax of the histogram)
        pred_tumor_inside = np.isin(pred_nuclei_inside_labeled,
                                    tumor_nuclei_indices)
        pred_lym_inside = np.isin(pred_nuclei_inside_labeled,
                                  lym_nuclei_indices)
        pred_stroma_inside = np.isin(pred_nuclei_inside_labeled,
                                     stroma_nuclei_indices)

        # remove small objects
        pred_tumor_inside = morph.remove_small_objects(pred_tumor_inside,
                                                       params.post['min_area'])
        pred_lym_inside = morph.remove_small_objects(pred_lym_inside,
                                                     params.post['min_area'])
        pred_stroma_inside = morph.remove_small_objects(
            pred_stroma_inside, params.post['min_area'])

        # connected component labeling
        pred_tumor_inside_labeled = measure.label(pred_tumor_inside)
        pred_lym_inside_labeled = measure.label(pred_lym_inside)
        pred_stroma_inside_labeled = measure.label(pred_stroma_inside)
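        # interleave instance ids by type so they can be separated again by
        # residue mod 3: tumor ids == 0 (mod 3), lymphocyte ids == 1 (mod 3),
        # stroma ids == 2 (mod 3), matching the gt decomposition further below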
        pred_all_inside_labeled = pred_tumor_inside_labeled * 3 \
                                  + (pred_lym_inside_labeled * 3 - 2) * (pred_lym_inside_labeled>0) \
                                  + (pred_stroma_inside_labeled * 3 - 1) * (pred_stroma_inside_labeled>0)

        # dilation
        pred_tumor_labeled = morph.dilation(pred_tumor_inside_labeled,
                                            selem=morph.selem.disk(
                                                params.post['radius']))
        pred_lym_labeled = morph.dilation(pred_lym_inside_labeled,
                                          selem=morph.selem.disk(
                                              params.post['radius']))
        pred_stroma_labeled = morph.dilation(pred_stroma_inside_labeled,
                                             selem=morph.selem.disk(
                                                 params.post['radius']))
        pred_all_labeled = morph.dilation(pred_all_inside_labeled,
                                          selem=morph.selem.disk(
                                              params.post['radius']))

        # utils.show_figures([pred, pred2, pred_labeled])

        if eval_flag:
            print('\tComputing metrics...')
            gt_tumor = (gt % 3 == 0) * gt
            gt_lym = (gt % 3 == 1) * gt
            gt_stroma = (gt % 3 == 2) * gt

            tumor_detect_metrics = utils.accuracy_detection_clas(
                pred_tumor_labeled, gt_tumor, clas_flag=False)
            lym_detect_metrics = utils.accuracy_detection_clas(
                pred_lym_labeled, gt_lym, clas_flag=False)
            stroma_detect_metrics = utils.accuracy_detection_clas(
                pred_stroma_labeled, gt_stroma, clas_flag=False)
            all_detect_metrics = utils.accuracy_detection_clas(
                pred_all_labeled, gt, clas_flag=True)

            tumor_seg_metrics = utils.accuracy_object_level(
                pred_tumor_labeled, gt_tumor, hausdorff_flag=False)
            lym_seg_metrics = utils.accuracy_object_level(pred_lym_labeled,
                                                          gt_lym,
                                                          hausdorff_flag=False)
            stroma_seg_metrics = utils.accuracy_object_level(
                pred_stroma_labeled, gt_stroma, hausdorff_flag=False)
            all_seg_metrics = utils.accuracy_object_level(pred_all_labeled,
                                                          gt,
                                                          hausdorff_flag=True)

            tumor_metrics = [*tumor_detect_metrics[:-1], *tumor_seg_metrics]
            lym_metrics = [*lym_detect_metrics[:-1], *lym_seg_metrics]
            stroma_metrics = [*stroma_detect_metrics[:-1], *stroma_seg_metrics]
            all_metrics = [*all_detect_metrics[:-1], *all_seg_metrics]
            conf_matrix += np.array(all_detect_metrics[-1])

            # save result for each image
            test_results[name] = {
                'tumor': tumor_metrics,
                'lym': lym_metrics,
                'stroma': stroma_metrics,
                'all': all_metrics
            }

            # update the average result
            tumor_result.update(tumor_metrics)
            lym_result.update(lym_metrics)
            stroma_result.update(stroma_metrics)
            all_result.update(all_metrics)

        # save image
        if save_flag:
            print('\tSaving image results...')
            misc.imsave('{:s}/{:s}_pred.png'.format(prob_maps_folder, name),
                        pred.astype(np.uint8) * 50)
            misc.imsave(
                '{:s}/{:s}_prob_tumor.png'.format(prob_maps_folder, name),
                prob_maps[1, :, :])
            misc.imsave(
                '{:s}/{:s}_prob_lym.png'.format(prob_maps_folder, name),
                prob_maps[2, :, :])
            misc.imsave(
                '{:s}/{:s}_prob_stroma.png'.format(prob_maps_folder, name),
                prob_maps[3, :, :])
            # np.save('{:s}/{:s}_prob.npy'.format(prob_maps_folder, name), prob_maps)
            # np.save('{:s}/{:s}_seg.npy'.format(seg_folder, name), pred_all_labeled)
            final_pred = Image.fromarray(pred_all_labeled.astype(np.uint16))
            final_pred.save('{:s}/{:s}_seg.tiff'.format(seg_folder, name))

            # save colored objects
            pred_colored = np.zeros((ori_h, ori_w, 3))
            pred_colored_instance = np.zeros((ori_h, ori_w, 3))
            pred_colored[pred_tumor_labeled > 0] = np.array([255, 0, 0])
            pred_colored[pred_lym_labeled > 0] = np.array([0, 255, 0])
            pred_colored[pred_stroma_labeled > 0] = np.array([0, 0, 255])
            filename = '{:s}/{:s}_seg_colored_3types.png'.format(
                seg_folder, name)
            misc.imsave(filename, pred_colored)
            for k in range(1, pred_all_labeled.max() + 1):
                pred_colored_instance[pred_all_labeled == k, :] = np.array(
                    utils.get_random_color())
            filename = '{:s}/{:s}_seg_colored.png'.format(seg_folder, name)
            misc.imsave(filename, pred_colored_instance)

            # img_overlaid = utils.overlay_edges(label_img, pred_labeled2, img)
            # filename = '{:s}/{:s}_comparison.png'.format(seg_folder, name)
            # misc.imsave(filename, img_overlaid)

        counter += 1
        if counter % 10 == 0:
            print('\tProcessed {:d} images'.format(counter))

    # print('Time: {:4f}'.format(total_time/counter))

    print('=> Processed all {:d} images'.format(counter))
    if eval_flag:
        print(
            'Average: clas_acc\trecall\tprecision\tF1\tdice\tiou\thausdorff\n'
            'tumor: {t[0]:.4f}, {t[1]:.4f}, {t[2]:.4f}, {t[3]:.4f}, {t[4]:.4f}, {t[5]:.4f}, {t[6]:.4f}\n'
            'lym: {l[0]:.4f}, {l[1]:.4f}, {l[2]:.4f}, {l[3]:.4f}, {l[4]:.4f}, {l[5]:.4f}, {l[6]:.4f}\n'
            'stroma: {s[0]:.4f}, {s[1]:.4f}, {s[2]:.4f}, {s[3]:.4f}, {s[4]:.4f}, {s[5]:.4f}, {s[6]:.4f}\n'
            'all: {a[0]:.4f}, {a[1]:.4f}, {a[2]:.4f}, {a[3]:.4f}, {a[4]:.4f}, {a[5]:.4f}, {a[6]:.4f}'
            .format(t=tumor_result.avg,
                    l=lym_result.avg,
                    s=stroma_result.avg,
                    a=all_result.avg))

        header = [
            'clas_acc', 'recall', 'precision', 'F1', 'Dice', 'IoU', 'Hausdorff'
        ]
        save_results(header, tumor_result.avg, lym_result.avg,
                     stroma_result.avg, all_result.avg, test_results,
                     conf_matrix, '{:s}/test_result.txt'.format(save_dir))
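The unrolled test-time augmentation above repeats the same transform / forward / undo pattern eight times. A behavior-equivalent compact sketch (reusing this script's `get_probmaps`, `np`, and `Image`):

def tta_probmaps(img, model, params, test_transform):
    # Average probability maps over identity, flips, and a 90-degree
    # rotation combined with flips (8 variants in total).
    def run(im):
        x = test_transform((im, ))[0].unsqueeze(0)
        return get_probmaps(x, model, params)  # (C, H, W)

    def unrot(p):
        # undo the 90-degree rotation on a (C, H, W) map
        return np.rot90(p, k=3, axes=(1, 2))

    hf = img.transpose(Image.FLIP_LEFT_RIGHT)
    vf = img.transpose(Image.FLIP_TOP_BOTTOM)
    hvf = hf.transpose(Image.FLIP_TOP_BOTTOM)
    r90 = img.rotate(90, expand=True)

    acc = run(img)
    acc = acc + np.flip(run(hf), 2)
    acc = acc + np.flip(run(vf), 1)
    acc = acc + np.flip(np.flip(run(hvf), 1), 2)
    acc = acc + unrot(run(r90))
    acc = acc + unrot(np.flip(run(r90.transpose(Image.FLIP_LEFT_RIGHT)), 2))
    acc = acc + unrot(np.flip(run(r90.transpose(Image.FLIP_TOP_BOTTOM)), 1))
    acc = acc + unrot(np.flip(np.flip(
        run(r90.transpose(Image.FLIP_LEFT_RIGHT).transpose(
            Image.FLIP_TOP_BOTTOM)), 1), 2))
    return acc / 8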
Code Example #4: training with an optional dense-CRF loss fine-tuning stage
def main():
    global opt, num_iter, tb_writer, logger, logger_results
    opt = Options(isTrain=True)
    opt.parse()
    opt.save_options()

    tb_writer = SummaryWriter('{:s}/tb_logs'.format(opt.train['save_dir']))

    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
        str(x) for x in opt.train['gpus'])

    # set up logger
    logger, logger_results = setup_logging(opt)

    # ----- create model ----- #
    model = ResUNet34(pretrained=opt.model['pretrained'])
    # if not opt.train['checkpoint']:
    #     logger.info(model)
    model = nn.DataParallel(model)
    model = model.cuda()
    cudnn.benchmark = True

    # ----- define optimizer ----- #
    optimizer = torch.optim.Adam(model.parameters(),
                                 opt.train['lr'],
                                 betas=(0.9, 0.99),
                                 weight_decay=opt.train['weight_decay'])

    # ----- define criterion ----- #
    criterion = torch.nn.NLLLoss(ignore_index=2).cuda()
    if opt.train['crf_weight'] > 0:
        logger.info('=> Using CRF loss...')
        global criterion_crf
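        # the two sigma values are presumably bandwidths of the dense-CRF
        # pairwise Gaussian kernels (e.g. spatial and appearance terms)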
        criterion_crf = CRFLoss(opt.train['sigmas'][0], opt.train['sigmas'][1])

    # ----- load data ----- #
    data_transforms = {
        'train': get_transforms(opt.transform['train']),
        'test': get_transforms(opt.transform['test'])
    }

    img_dir = '{:s}/train'.format(opt.train['img_dir'])
    target_vor_dir = '{:s}/train'.format(opt.train['label_vor_dir'])
    target_cluster_dir = '{:s}/train'.format(opt.train['label_cluster_dir'])
    dir_list = [img_dir, target_vor_dir, target_cluster_dir]
    post_fix = ['label_vor.png', 'label_cluster.png']
    num_channels = [3, 3, 3]
    train_set = DataFolder(dir_list, post_fix, num_channels,
                           data_transforms['train'])
    train_loader = DataLoader(train_set,
                              batch_size=opt.train['batch_size'],
                              shuffle=True,
                              num_workers=opt.train['workers'])

    # ----- optionally load from a checkpoint for validation or resuming training ----- #
    if opt.train['checkpoint']:
        if os.path.isfile(opt.train['checkpoint']):
            logger.info("=> loading checkpoint '{}'".format(
                opt.train['checkpoint']))
            checkpoint = torch.load(opt.train['checkpoint'])
            opt.train['start_epoch'] = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            logger.info("=> loaded checkpoint '{}' (epoch {})".format(
                opt.train['checkpoint'], checkpoint['epoch']))
        else:
            logger.info("=> no checkpoint found at '{}'".format(
                opt.train['checkpoint']))

    # ----- training and validation ----- #
    num_epoch = opt.train['train_epochs'] + opt.train['finetune_epochs']
    num_iter = num_epoch * len(train_loader)
    # print training parameters
    logger.info("=> Initial learning rate: {:g}".format(opt.train['lr']))
    logger.info("=> Batch size: {:d}".format(opt.train['batch_size']))
    logger.info("=> Number of training iterations: {:d}".format(num_iter))
    logger.info("=> Training epochs: {:d}".format(opt.train['train_epochs']))
    logger.info("=> Fine-tune epochs using dense CRF loss: {:d}".format(
        opt.train['finetune_epochs']))
    logger.info("=> CRF loss weight: {:.2g}".format(opt.train['crf_weight']))

    for epoch in range(opt.train['start_epoch'], num_epoch):
        # train for one epoch (i.e., len(train_loader) iterations)
        logger.info('Epoch: [{:d}/{:d}]'.format(epoch + 1, num_epoch))
        finetune_flag = epoch >= opt.train['train_epochs']
        if epoch == opt.train['train_epochs']:
            logger.info("Fine-tune begins, lr = {:.2g}".format(
                opt.train['lr'] * 0.1))
            for param_group in optimizer.param_groups:
                param_group['lr'] = opt.train['lr'] * 0.1

        train_results = train(train_loader, model, optimizer, criterion,
                              finetune_flag)
        train_loss, train_loss_vor, train_loss_cluster, train_loss_crf = train_results

        cp_flag = (epoch + 1) % opt.train['checkpoint_freq'] == 0
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }, epoch, opt.train['save_dir'], cp_flag)

        # save the training results to txt files
        logger_results.info('{:d}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}'.format(
            epoch + 1, train_loss, train_loss_vor, train_loss_cluster,
            train_loss_crf))
        # tensorboard logs
        tb_writer.add_scalars(
            'epoch_losses', {
                'train_loss': train_loss,
                'train_loss_vor': train_loss_vor,
                'train_loss_cluster': train_loss_cluster,
                'train_loss_crf': train_loss_crf
            }, epoch)
    tb_writer.close()
    for i in list(logger.handlers):
        logger.removeHandler(i)
        i.flush()
        i.close()
    for i in list(logger_results.handlers):
        logger_results.removeHandler(i)
        i.flush()
        i.close()
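The handler-closing loop above also appears verbatim at the end of Code Example #2; it could be factored into a small helper:

def close_logger(log):
    # Detach, flush, and close every handler so the log files are fully
    # written before the process exits.
    for handler in list(log.handlers):
        log.removeHandler(handler)
        handler.flush()
        handler.close()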
Code Example #5: selecting nuclei to annotate via Monte Carlo dropout uncertainty
def main(opt, save_dir):
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
        str(x) for x in opt.test['gpus'])

    # img_dir = opt.test['img_dir']
    ratio = opt.ratio
    img_dir = './data/{:s}/images'.format(opt.dataset)
    label_dir = './data/{:s}/labels_point'.format(opt.dataset)
    label_instance_dir = './data/{:s}/labels_instance'.format(opt.dataset)
    # save_dir = './data/{:s}/selected_masks'.format(opt.dataset)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir, exist_ok=True)
    model_path = opt.test['model_path']

    # data transforms
    test_transform = get_transforms(opt.transform['test'])

    model = ResUNet34(pretrained=opt.model['pretrained'],
                      with_uncertainty=opt.with_uncertainty)
    model = model.cuda()
    cudnn.benchmark = True

    # ----- load trained model ----- #
    # print("=> loading trained model")
    checkpoint = torch.load(model_path)
    model.load_state_dict(checkpoint['state_dict'])
    # print("=> loaded model at epoch {}".format(checkpoint['epoch']))

    # switch to evaluate mode
    model.eval()
    apply_dropout(model)

    with open('./data/{:s}/train_val_test.json'.format(opt.dataset),
              'r') as file:
        data_list = json.load(file)
        train_list = data_list['train']

    for img_name in tqdm(train_list):
        # load test image
        # print('=> Processing image {:s}'.format(img_name))
        img_path = '{:s}/{:s}'.format(img_dir, img_name)
        img = Image.open(img_path)
        ori_h = img.size[1]
        ori_w = img.size[0]
        name = os.path.splitext(img_name)[0]
        label_point = misc.imread('{:s}/{:s}_label_point.png'.format(
            label_dir, name))

        input = test_transform((img, ))[0].unsqueeze(0)
        # print('\tComputing uncertainty maps...')
        mean_sigma = np.zeros((2, ori_h, ori_w))
        mean_sigma_normalized = np.zeros((2, ori_h, ori_w))
        mean_prob = np.zeros((2, ori_h, ori_w))
        for _ in range(opt.T):
            output, log_var = get_probmaps(input, model, opt)
            output = output.astype(np.float64)
            log_var = log_var.astype(np.float64)
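            # the uncertainty head is assumed to output a log-variance map,
            # so exp(log_var / 2) below is the predicted (aleatoric) std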
            sigma_map = np.exp(log_var / 2)
            sigma_map_normalized = sigma_map / (np.exp(output) + 1e-8)

            mean_prob += np.exp(output) / np.sum(np.exp(output), axis=0)
            mean_sigma += sigma_map
            mean_sigma_normalized += sigma_map_normalized

        mean_prob /= opt.T
        mean_sigma /= opt.T
        mean_sigma_normalized /= opt.T

        un_data_normalized = mean_sigma_normalized**2

        pred = np.argmax(mean_prob, axis=0)
        un_data_normalized = np.sum(un_data_normalized *
                                    utils.onehot_encoding(pred),
                                    axis=0)

        # keep only large, strongly uncertain regions, then smooth the map
        threshed = un_data_normalized > 1.0
        large_unc_area = morph.opening(threshed, selem=morph.disk(1))
        large_unc_area = morph.remove_small_objects(large_unc_area,
                                                    min_size=64)
        un_data_smoothed = gaussian_filter(un_data_normalized * large_unc_area,
                                           sigma=5)

        # cmap = plt.cm.jet
        # plt.imsave('{:s}/{:s}_uncertainty.png'.format(save_dir, name), cmap(un_data_normalized))

        points = measure.label(label_point)
        uncertainty_list = []
        radius = 10
        for k in range(1, np.max(points) + 1):
            x, y = np.argwhere(points == k)[0]
            r1 = x - radius if x - radius > 0 else 0
            r2 = x + radius if x + radius < ori_h else ori_h
            c1 = y - radius if y - radius > 0 else 0
            c2 = y + radius if y + radius < ori_w else ori_w
            uncertainty = np.mean(un_data_smoothed[r1:r2, c1:c2])
            uncertainty_list.append([k, uncertainty])

        uncertainty_list = np.array(uncertainty_list)
        sorted_list = uncertainty_list[uncertainty_list[:, 1].argsort()[::-1]]
        indices = sorted_list[:int(ratio * np.max(points)), 0]

        # build the partial instance annotation from the selected points
        label_instance = misc.imread('{:s}/{:s}_label.png'.format(
            label_instance_dir, name))
        new_anno = np.zeros_like(label_instance)
        counter = 1
        for idx in indices:
            nuclei_idx = np.unique(label_instance[points == idx])[0]
            if nuclei_idx == 0:
                continue
            new_anno += (label_instance == nuclei_idx) * counter
            counter += 1
            # utils.show_figures((new_anno,))

        misc.imsave('{:s}/{:s}_label_partial_mask.png'.format(save_dir, name),
                    new_anno.astype(np.uint8))
        misc.imsave(
            '{:s}/{:s}_label_partial_mask_binary.png'.format(save_dir, name),
            (new_anno > 0).astype(np.uint8) * 255)

    print('=> Processed all images')
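`apply_dropout` is what turns the `opt.T` repeated forward passes into Monte Carlo dropout sampling: the model stays in eval mode (batch norm keeps its running statistics) while dropout layers are switched back to training mode. A minimal sketch of the usual implementation, given as an assumption since the helper is not shown:

import torch.nn as nn

def apply_dropout(model):
    # Re-enable only the dropout modules so that each forward pass
    # samples a different dropout mask.
    for module in model.modules():
        if isinstance(module, nn.Dropout):
            module.train()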
Code Example #6: supervised training with optional weight maps and a perceptual loss
def main():
    global params, best_iou, num_iter, tb_writer, logger, logger_results
    best_iou = 0
    params = Params()
    params.save_params('{:s}/params.txt'.format(params.paths['save_dir']))
    tb_writer = SummaryWriter('{:s}/tb_logs'.format(params.paths['save_dir']))

    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
        str(x) for x in params.train['gpu'])

    # set up logger
    logger, logger_results = setup_logging(params)

    # ----- create model ----- #
    model_name = params.model['name']
    if model_name == 'ResUNet34':
        model = ResUNet34(params.model['out_c'],
                          fixed_feature=params.model['fix_params'])
    elif model_name == 'UNet':
        model = UNet(3, params.model['out_c'])
    else:
        raise NotImplementedError()

    logger.info('Model: {:s}'.format(model_name))
    # if not params.train['checkpoint']:
    #     logger.info(model)
    model = nn.DataParallel(model)
    model = model.cuda()
    global vgg_model
    logger.info('=> Using VGG16 for perceptual loss...')
    vgg_model = vgg16_feat()
    vgg_model = nn.DataParallel(vgg_model).cuda()
    cudnn.benchmark = True

    # ----- define optimizer ----- #
    optimizer = torch.optim.Adam(model.parameters(),
                                 params.train['lr'],
                                 betas=(0.9, 0.99),
                                 weight_decay=params.train['weight_decay'])

    # ----- get pixel weights and define criterion ----- #
    if not params.train['weight_map']:
        criterion = torch.nn.NLLLoss().cuda()
    else:
        logger.info('=> Using weight maps...')
        criterion = torch.nn.NLLLoss(reduction='none').cuda()

    if params.train['beta'] > 0:
        logger.info('=> Using perceptual loss...')
        global criterion_perceptual
        criterion_perceptual = perceptual_loss()

    data_transforms = {
        'train': get_transforms(params.transform['train']),
        'val': get_transforms(params.transform['val'])
    }

    # ----- load data ----- #
    dsets = {}
    for x in ['train', 'val']:
        img_dir = '{:s}/{:s}'.format(params.paths['img_dir'], x)
        target_dir = '{:s}/{:s}'.format(params.paths['label_dir'], x)
        if params.train['weight_map']:
            weight_map_dir = '{:s}/{:s}'.format(params.paths['weight_map_dir'],
                                                x)
            dir_list = [img_dir, weight_map_dir, target_dir]
            postfix = ['weight.png', 'label_with_contours.png']
            num_channels = [3, 1, 3]
        else:
            dir_list = [img_dir, target_dir]
            postfix = ['label_with_contours.png']
            num_channels = [3, 3]
        dsets[x] = DataFolder(dir_list, postfix, num_channels,
                              data_transforms[x])
    train_loader = DataLoader(dsets['train'],
                              batch_size=params.train['batch_size'],
                              shuffle=True,
                              num_workers=params.train['workers'])
    val_loader = DataLoader(dsets['val'],
                            batch_size=params.train['val_batch_size'],
                            shuffle=False,
                            num_workers=params.train['workers'])

    # ----- optionally load from a checkpoint for validation or resuming training ----- #
    if params.train['checkpoint']:
        if os.path.isfile(params.train['checkpoint']):
            logger.info("=> loading checkpoint '{}'".format(
                params.train['checkpoint']))
            checkpoint = torch.load(params.train['checkpoint'])
            params.train['start_epoch'] = checkpoint['epoch']
            best_iou = checkpoint['best_iou']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            logger.info("=> loaded checkpoint '{}' (epoch {})".format(
                params.train['checkpoint'], checkpoint['epoch']))
        else:
            logger.info("=> no checkpoint found at '{}'".format(
                params.train['checkpoint']))

    # ----- training and validation ----- #
    num_iter = params.train['num_epochs'] * len(train_loader)

    # print training parameters
    logger.info("=> Initial learning rate: {:g}".format(params.train['lr']))
    logger.info("=> Batch size: {:d}".format(params.train['batch_size']))
    # logger.info("=> Number of training iterations: {:d}".format(num_iter))
    logger.info("=> Training epochs: {:d}".format(params.train['num_epochs']))
    logger.info("=> beta: {:.1f}".format(params.train['beta']))

    for epoch in range(params.train['start_epoch'],
                       params.train['num_epochs']):
        # train for one epoch (i.e., len(train_loader) iterations)
        logger.info('Epoch: [{:d}/{:d}]'.format(epoch + 1,
                                                params.train['num_epochs']))
        train_results = train(train_loader, model, optimizer, criterion, epoch)
        train_loss, train_loss_ce, train_loss_var, train_iou_nuclei, train_iou = train_results

        # evaluate on validation set
        with torch.no_grad():
            val_results = validate(val_loader, model, criterion)
            val_loss, val_loss_ce, val_loss_var, val_iou_nuclei, val_iou = val_results

        # check whether this is the best combined IoU so far
        combined_iou = (val_iou_nuclei + val_iou) / 2
        is_best = combined_iou > best_iou
        best_iou = max(combined_iou, best_iou)

        cp_flag = (epoch + 1) % params.train['checkpoint_freq'] == 0

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_iou': best_iou,
                'optimizer': optimizer.state_dict(),
            }, epoch, is_best, params.paths['save_dir'], cp_flag)

        # save the training results to txt files
        logger_results.info(
            '{:d}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}'
            .format(epoch + 1, train_loss, train_loss_ce, train_loss_var,
                    train_iou_nuclei, train_iou, val_loss, val_iou_nuclei,
                    val_iou))
        # tensorboard logs
        tb_writer.add_scalars(
            'epoch_losses', {
                'train_loss': train_loss,
                'train_loss_ce': train_loss_ce,
                'train_loss_var': train_loss_var,
                'val_loss': val_loss
            }, epoch)
        tb_writer.add_scalars(
            'epoch_accuracies', {
                'train_iou_nuclei': train_iou_nuclei,
                'train_iou': train_iou,
                'val_iou_nuclei': val_iou_nuclei,
                'val_iou': val_iou
            }, epoch)
    tb_writer.close()
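`vgg16_feat` and `perceptual_loss` are repo helpers not included in this listing. A common construction, offered purely as a sketch of one plausible implementation: a frozen VGG16 feature extractor plus an MSE penalty between the features of prediction and target.

import torch.nn as nn
from torchvision import models

class vgg16_feat(nn.Module):
    # Frozen VGG16 feature extractor; cutting at layer 23 (relu4_3)
    # is an assumption, not necessarily the repo's choice.
    def __init__(self):
        super(vgg16_feat, self).__init__()
        self.features = models.vgg16(pretrained=True).features[:23]
        for p in self.features.parameters():
            p.requires_grad = False

    def forward(self, x):
        return self.features(x)

class perceptual_loss(nn.Module):
    # MSE between two feature maps produced by vgg16_feat.
    def __init__(self):
        super(perceptual_loss, self).__init__()
        self.mse = nn.MSELoss()

    def forward(self, feat_pred, feat_target):
        return self.mse(feat_pred, feat_target)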