Code example #1
# imports assumed by the snippets below; project-specific helpers
# (VNet2d, Logger, cfg, kMega, MicroCalcificationDataset, ...) come from
# the repository's own modules
import copy
import importlib
import os
import shutil
from time import time

import numpy as np
import torch
from scipy.ndimage import gaussian_filter1d
from torch.utils.data import DataLoader


def TestVNet2dOutputChannels(args):
    assert args.in_channels > 0
    assert args.out_channels > 0

    model = VNet2d(num_in_channels=args.in_channels,
                   num_out_channels=args.out_channels)
    model = torch.nn.DataParallel(model).cuda()
    print('Total params: %.2fM' % (sum(p.numel()
                                       for p in model.parameters()) / kMega))

    in_images = torch.zeros(
        [args.batch_size, args.in_channels, args.dim_y, args.dim_x])
    in_images = in_images.cuda()
    # Variable is deprecated since PyTorch 0.4; run inference on the tensor directly
    with torch.no_grad():
        reconstructions, residues = model(in_images)
    assert reconstructions.size()[0] == args.batch_size
    assert reconstructions.size()[1] == args.out_channels

    print("input shape = ", in_images.shape)
    print("reconstructions shape = ", reconstructions.shape)
    print("residue shape = ", residues.shape)
    print("min value of reconstructions = ", reconstructions.min())
    print("max value of reconstructions = ", reconstructions.max())
def TestMicroCalcificationReconstruction(args):
    prediction_saving_dir = os.path.join(
        args.model_saving_dir,
        'reconstruction_results_dataset_{}_epoch_{}'.format(
            args.dataset_type, args.epoch_idx))
    visualization_saving_dir = os.path.join(prediction_saving_dir,
                                            'qualitative_results')
    visualization_TP_saving_dir = os.path.join(visualization_saving_dir,
                                               'TPs_only')
    visualization_FP_saving_dir = os.path.join(visualization_saving_dir,
                                               'FPs_only')
    visualization_FN_saving_dir = os.path.join(visualization_saving_dir,
                                               'FNs_only')
    visualization_FP_FN_saving_dir = os.path.join(visualization_saving_dir,
                                                  'FPs_FNs_both')

    # remove existing dir which has the same name and create clean dir
    if os.path.exists(prediction_saving_dir):
        shutil.rmtree(prediction_saving_dir)
    os.mkdir(prediction_saving_dir)
    os.mkdir(visualization_saving_dir)
    os.mkdir(visualization_TP_saving_dir)
    os.mkdir(visualization_FP_saving_dir)
    os.mkdir(visualization_FN_saving_dir)
    os.mkdir(visualization_FP_FN_saving_dir)

    # initialize logger
    logger = Logger(prediction_saving_dir, 'quantitative_results.txt')
    logger.write_and_print('Dataset: {}'.format(args.data_root_dir))
    logger.write_and_print('Dataset type: {}'.format(args.dataset_type))

    # define the network
    network = VNet2d(num_in_channels=cfg.net.in_channels,
                     num_out_channels=cfg.net.out_channels)

    # load the specified ckpt
    ckpt_dir = os.path.join(args.model_saving_dir, 'ckpt')
    # epoch_idx is specified -> load the specified ckpt
    if args.epoch_idx >= 0:
        ckpt_path = os.path.join(ckpt_dir,
                                 'net_epoch_{}.pth'.format(args.epoch_idx))
    # epoch_idx is not specified -> load the best ckpt
    else:
        saved_ckpt_list = os.listdir(ckpt_dir)
        best_ckpt_filename = [
            filename for filename in saved_ckpt_list
            if 'net_best_on_validation_set' in filename
        ][0]
        ckpt_path = os.path.join(ckpt_dir, best_ckpt_filename)

    # transfer net into gpu devices
    net = copy.deepcopy(network)
    net = torch.nn.DataParallel(net).cuda()
    net.load_state_dict(torch.load(ckpt_path))
    net = net.eval()

    logger.write_and_print(
        'Load ckpt: {0} for evaluating...'.format(ckpt_path))

    # decide whether uncertainty maps have to be calculated
    calculate_uncertainty = len(args.mc_epoch_indexes) > 0

    # get net list for imitating MC dropout process
    net_list = None
    if calculate_uncertainty:
        net_list = get_net_list(network, ckpt_dir, args.mc_epoch_indexes,
                                logger)

    # create dataset
    dataset = MicroCalcificationDataset(
        data_root_dir=args.data_root_dir,
        mode=args.dataset_type,
        enable_random_sampling=False,
        pos_to_neg_ratio=cfg.dataset.pos_to_neg_ratio,
        image_channels=cfg.dataset.image_channels,
        cropping_size=cfg.dataset.cropping_size,
        dilation_radius=args.dilation_radius,
        load_uncertainty_map=False,
        calculate_micro_calcification_number=cfg.dataset.calculate_micro_calcification_number,
        enable_data_augmentation=False)

    # create data loader
    data_loader = DataLoader(dataset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=cfg.train.num_threads)

    metrics = MetricsReconstruction(args.prob_threshold, args.area_threshold,
                                    args.distance_threshold,
                                    args.slack_for_recall)

    calcification_num = 0
    recall_num = 0
    FP_num = 0

    for batch_idx, (images_tensor, pixel_level_labels_tensor,
                    pixel_level_labels_dilated_tensor, _,
                    image_level_labels_tensor, _,
                    filenames) in enumerate(data_loader):
        logger.write_and_print('Evaluating batch: {}'.format(batch_idx))

        # start time of this batch
        start_time_for_batch = time()

        # transfer the tensor into gpu device
        images_tensor = images_tensor.cuda()

        # network forward
        reconstructed_images_tensor, prediction_residues_tensor = net(
            images_tensor)

        # MC dropout
        uncertainty_maps_np = generate_uncertainty_maps(
            net_list, images_tensor) if calculate_uncertainty else None

        # evaluation
        post_process_preds_np, calcification_num_batch_level, recall_num_batch_level, FP_num_batch_level, \
        result_flag_list = metrics.metric_batch_level(prediction_residues_tensor, pixel_level_labels_tensor)

        calcification_num += calcification_num_batch_level
        recall_num += recall_num_batch_level
        FP_num += FP_num_batch_level

        # print logging information
        logger.write_and_print(
            'The number of the annotated calcifications of this batch = {}'.
            format(calcification_num_batch_level))
        logger.write_and_print(
            'The number of the recalled calcifications of this batch = {}'.
            format(recall_num_batch_level))
        logger.write_and_print(
            'The number of the false positive calcifications of this batch = {}'
            .format(FP_num_batch_level))
        logger.write_and_print(
            'Consuming time: {:.4f}s'.format(time() - start_time_for_batch))
        logger.write_and_print(
            '--------------------------------------------------------------------------------------'
        )

        save_tensor_in_png_and_nii_format(images_tensor,
                                          reconstructed_images_tensor,
                                          prediction_residues_tensor,
                                          post_process_preds_np,
                                          pixel_level_labels_tensor,
                                          pixel_level_labels_dilated_tensor,
                                          uncertainty_maps_np,
                                          filenames,
                                          result_flag_list,
                                          visualization_saving_dir,
                                          save_nii=args.save_nii)

        logger.flush()

    logger.write_and_print(
        'The number of the annotated calcifications of this dataset = {}'.
        format(calcification_num))
    logger.write_and_print(
        'The number of the recalled calcifications of this dataset = {}'.
        format(recall_num))
    logger.write_and_print(
        'The number of the false positive calcifications of this dataset = {}'.
        format(FP_num))

    return
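
# The specified-vs-best checkpoint lookup above reappears verbatim in the
# functions below; a sketch of how it could be factored out. The helper name
# resolve_ckpt_path is ours, not the repository's.
def resolve_ckpt_path(ckpt_dir, epoch_idx):
    # a non-negative epoch_idx selects that epoch's ckpt ...
    if epoch_idx >= 0:
        return os.path.join(ckpt_dir, 'net_epoch_{}.pth'.format(epoch_idx))
    # ... otherwise fall back to the ckpt that was best on the validation set
    candidates = [f for f in os.listdir(ckpt_dir)
                  if 'net_best_on_validation_set' in f]
    assert len(candidates) > 0, 'no best-on-validation ckpt found'
    return os.path.join(ckpt_dir, candidates[0])
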
def ResidueDistributionSTA(args):
    # define the network
    net = VNet2d(num_in_channels=cfg.net.in_channels, num_out_channels=cfg.net.out_channels)

    # load the specified ckpt
    ckpt_dir = os.path.join(args.model_saving_dir, 'ckpt')
    # epoch_idx is specified -> load the specified ckpt
    if args.epoch_idx >= 0:
        ckpt_path = os.path.join(ckpt_dir, 'net_epoch_{}.pth'.format(args.epoch_idx))
    # epoch_idx is not specified -> load the best ckpt
    else:
        saved_ckpt_list = os.listdir(ckpt_dir)
        best_ckpt_filename = [filename for filename in saved_ckpt_list
                              if 'net_best_on_validation_set' in filename][0]
        ckpt_path = os.path.join(ckpt_dir, best_ckpt_filename)

    # transfer net into gpu devices
    net = torch.nn.DataParallel(net).cuda()
    net.load_state_dict(torch.load(ckpt_path))
    net = net.eval()

    # create dataset
    dataset = MicroCalcificationDataset(data_root_dir=args.data_root_dir,
                                        mode=args.dataset_type,
                                        enable_random_sampling=False,
                                        pos_to_neg_ratio=cfg.dataset.pos_to_neg_ratio,
                                        image_channels=cfg.dataset.image_channels,
                                        cropping_size=cfg.dataset.cropping_size,
                                        dilation_radius=args.dilation_radius,
                                        calculate_micro_calcification_number=cfg.dataset.calculate_micro_calcification_number,
                                        enable_data_augmentation=False)

    # create data loader
    data_loader = DataLoader(dataset, batch_size=args.batch_size,
                             shuffle=False, num_workers=cfg.train.num_threads)

    metrics = MetricsReconstruction(args.prob_threshold, args.area_threshold, args.distance_threshold,
                                    args.slack_for_recall)

    residue_in_dataset = np.zeros(args.histogram_bins)
    mask_positive_residue_in_dataset = np.zeros(args.histogram_bins)
    mask_negative_residue_in_dataset = np.zeros(args.histogram_bins)
    recon_positive_residue_in_dataset = np.zeros(args.histogram_bins)
    recon_negative_residue_in_dataset = np.zeros(args.histogram_bins)

    for batch_idx, (images_tensor, pixel_level_labels_tensor, pixel_level_labels_dilated_tensor,
                    image_level_labels_tensor, _, filenames) in enumerate(data_loader):
        # start time of this batch
        start_time_for_batch = time()

        # transfer the tensor into gpu device
        images_tensor = images_tensor.cuda()

        # network forward
        reconstructed_images_tensor, prediction_residues_tensor = net(images_tensor)

        # evaluation
        post_process_preds_np, calcification_num_batch_level, recall_num_batch_level, FP_num_batch_level = \
            metrics.metric_batch_level(prediction_residues_tensor, pixel_level_labels_tensor)

        # flatten the dilated labels and the post-processed predictions
        pixel_level_labels_dilated = pixel_level_labels_dilated_tensor.cpu().view(-1).numpy()
        process_preds = post_process_preds_np.reshape(-1)

        residues = prediction_residues_tensor.cpu().view(-1).detach().numpy()
        residues_hist, _ = np.histogram(residues, bins=args.histogram_bins, range=(0, 1))
        residue_in_dataset += residues_hist

        assert residues.shape == pixel_level_labels_dilated.shape
        assert residues.shape == process_preds.shape

        mask_positive_residue = residues[pixel_level_labels_dilated == 1]
        mask_positive_residue_hist, _ = np.histogram(mask_positive_residue, bins=args.histogram_bins, range=(0, 1))
        mask_positive_residue_in_dataset += mask_positive_residue_hist

        mask_negative_residue = residues[pixel_level_labels_dilated == 0]
        mask_negative_residue_hist, _ = np.histogram(mask_negative_residue, bins=args.histogram_bins, range=(0, 1))
        mask_negative_residue_in_dataset += mask_negative_residue_hist

        process_positive_residue = residues[process_preds == 1]
        process_positive_residue_hist, _ = np.histogram(process_positive_residue, bins=args.histogram_bins,
                                                        range=(0, 1))
        recon_positive_residue_in_dataset += process_positive_residue_hist

        process_negative_residue = residues[process_preds == 0]
        process_negative_residue_hist, _ = np.histogram(process_negative_residue, bins=args.histogram_bins,
                                                        range=(0, 1))
        recon_negative_residue_in_dataset += process_negative_residue_hist

    # clip overcrowded bins so that the saved plots remain readable
    residue_in_dataset[residue_in_dataset > 15000] = 15000
    mask_negative_residue_in_dataset[mask_negative_residue_in_dataset > 15000] = 15000
    recon_negative_residue_in_dataset[recon_negative_residue_in_dataset > 15000] = 15000
    pltsave(residue_in_dataset, args.data_save_dir, 'total residues')
    pltsave(mask_positive_residue_in_dataset, args.data_save_dir, 'mask positive residues')
    pltsave(mask_negative_residue_in_dataset, args.data_save_dir, 'mask negative residues')
    pltsave(recon_positive_residue_in_dataset, args.data_save_dir, 'predict positive residues')
    pltsave(recon_negative_residue_in_dataset, args.data_save_dir, 'predict negative residues')

    print('on dataset {0} with {1} dilation {2} histogram bins'.format(args.dataset_type, args.dilation_radius,
                                                                       args.histogram_bins))
    print('the whole residues distribution is {}'.format(np.around(residue_in_dataset, 3)))
    print('in dilated mask label, the positive residues distribution is {0}\n'
          'the negative residues distribution is {1}.'.format(np.around(mask_positive_residue_in_dataset, 3),
                                                              np.around(mask_negative_residue_in_dataset, 3)))
    print('in predicted label, the positive residues distribution is {0}\n'
          'the negative residues distribution is {1}'.format(np.around(recon_positive_residue_in_dataset, 3),
                                                             np.around(recon_negative_residue_in_dataset, 3)))

    return
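
# The per-batch histograms above may be summed only because every np.histogram
# call uses the same fixed bins and range; a small self-contained check of that
# property (synthetic data, not project data):
def _check_histogram_accumulation():
    rng = np.random.default_rng(0)
    batches = [rng.random(1000) for _ in range(4)]

    # accumulate fixed-range histograms batch by batch, as the loop above does
    accumulated = np.zeros(10)
    for batch in batches:
        hist, _ = np.histogram(batch, bins=10, range=(0, 1))
        accumulated += hist

    # identical to histogramming the concatenated dataset in one call
    full_hist, _ = np.histogram(np.concatenate(batches), bins=10, range=(0, 1))
    assert np.array_equal(accumulated, full_hist)
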
def UncertaintySTA(args):
    prediction_saving_dir = os.path.join(args.model_saving_dir,
                                         'reconstruction_results_dataset_{}_epoch_{}'.format(args.dataset_type,
                                                                                             args.epoch_idx))
    # initialize logger
    if os.path.exists(args.sta_save_dir):
        shutil.rmtree(args.sta_save_dir)
    os.mkdir(args.sta_save_dir)
    logger = Logger(args.sta_save_dir, 'uncertainty_distribution_sta.txt')
    logger.write_and_print('Dataset: {}'.format(args.data_root_dir))
    logger.write_and_print('Dataset type: {}'.format(args.dataset_type))
    # define the network
    network = VNet2d(num_in_channels=cfg.net.in_channels, num_out_channels=cfg.net.out_channels)

    # load the specified ckpt
    ckpt_dir = os.path.join(args.model_saving_dir, 'ckpt')
    # epoch_idx is specified -> load the specified ckpt
    if args.epoch_idx >= 0:
        ckpt_path = os.path.join(ckpt_dir, 'net_epoch_{}.pth'.format(args.epoch_idx))
    # epoch_idx is not specified -> load the best ckpt
    else:
        saved_ckpt_list = os.listdir(ckpt_dir)
        best_ckpt_filename = [filename for filename in saved_ckpt_list
                              if 'net_best_on_validation_set' in filename][0]
        ckpt_path = os.path.join(ckpt_dir, best_ckpt_filename)

    # transfer net into gpu devices
    net = copy.deepcopy(network)
    net = torch.nn.DataParallel(net).cuda()
    net.load_state_dict(torch.load(ckpt_path))
    net = net.eval()

    # decide whether uncertainty maps have to be calculated
    calculate_uncertainty = len(args.mc_epoch_indexes) > 0
    # the statistics below index into the uncertainty maps, so MC epochs are mandatory here
    assert calculate_uncertainty, 'UncertaintySTA requires a non-empty args.mc_epoch_indexes'

    # get net list for imitating MC dropout process
    net_list = get_net_list(network, ckpt_dir, args.mc_epoch_indexes, logger)

    # create dataset
    dataset = MicroCalcificationDataset(data_root_dir=args.data_root_dir,
                                        mode=args.dataset_type,
                                        enable_random_sampling=False,
                                        pos_to_neg_ratio=cfg.dataset.pos_to_neg_ratio,
                                        image_channels=cfg.dataset.image_channels,
                                        cropping_size=cfg.dataset.cropping_size,
                                        dilation_radius=args.dilation_radius,
                                        load_uncertainty_map=False,
                                        calculate_micro_calcification_number=cfg.dataset.calculate_micro_calcification_number,
                                        enable_data_augmentation=False)

    # create data loader
    data_loader = DataLoader(dataset, batch_size=args.batch_size,
                             shuffle=False, num_workers=cfg.train.num_threads)

    metrics = MetricsReconstruction(args.prob_threshold, args.area_threshold, args.distance_threshold,
                                    args.slack_for_recall)

    all_positive_uncertainty_in_dataset = np.zeros(args.bins)
    tp_uncertainty_in_dataset = np.zeros(args.bins)
    fn_uncertainty_in_dataset = np.zeros(args.bins)
    fp_uncertainty_in_dataset = np.zeros(args.bins)
    uncertainty_max = 0

    for batch_idx, (images_tensor, pixel_level_labels_tensor, pixel_level_labels_dilated_tensor, _,
                    image_level_labels_tensor, _, filenames) in enumerate(data_loader):
        # transfer the tensor into gpu device
        images_tensor = images_tensor.cuda()

        # network forward
        reconstructed_images_tensor, prediction_residues_tensor = net(images_tensor)

        # MC dropout
        uncertainty_maps_np = generate_uncertainty_maps(net_list, images_tensor) if calculate_uncertainty else None

        # distribution of uncertainty values inside the TP / FN / FP regions
        post_process_preds_np, calcification_num_batch_level, recall_num_batch_level, FP_num_batch_level, \
        result_flag_list = metrics.metric_batch_level(prediction_residues_tensor, pixel_level_labels_tensor)

        pixel_level_labels_dilated = pixel_level_labels_dilated_tensor.view(-1).numpy()
        preds_positive = post_process_preds_np.reshape(-1)
        uncertainty_maps = uncertainty_maps_np.reshape(-1)
        uncertainty_max = max(uncertainty_max, np.amax(uncertainty_maps))

        all_positive_uncertainty_batch = uncertainty_maps[pixel_level_labels_dilated == 1]
        all_positive_uncertainty_distr_batch, _ = np.histogram(all_positive_uncertainty_batch,
                                                               bins=args.bins, range=(0, args.bin_range))
        all_positive_uncertainty_in_dataset += all_positive_uncertainty_distr_batch

        pixel_level_unlabels_dilated = 1 - pixel_level_labels_dilated
        fp_location = np.multiply(pixel_level_unlabels_dilated, preds_positive)
        tp_location = np.multiply(pixel_level_labels_dilated, preds_positive)
        fn_location = np.zeros_like(preds_positive)
        fn_location[pixel_level_labels_dilated == 1] = 1
        fn_location[preds_positive == 1] = 0

        tp_uncertainty_batch = uncertainty_maps[tp_location == 1]
        tp_uncertainty_distr_batch, _ = np.histogram(tp_uncertainty_batch, bins=args.bins, range=(0, args.bin_range))
        tp_uncertainty_in_dataset += tp_uncertainty_distr_batch

        fn_uncertainty_batch = uncertainty_maps[fn_location == 1]
        fn_uncertainty_distr_batch, _ = np.histogram(fn_uncertainty_batch, bins=args.bins, range=(0, args.bin_range))
        fn_uncertainty_in_dataset += fn_uncertainty_distr_batch

        fp_uncertainty_batch = uncertainty_maps[fp_location == 1]
        fp_uncertainty_distr_batch, _ = np.histogram(fp_uncertainty_batch, bins=args.bins, range=(0, args.bin_range))
        fp_uncertainty_in_dataset += fp_uncertainty_distr_batch

    # debug only
    # print(all_positive_uncertainty_in_dataset[0:5])
    # print(tp_uncertainty_in_dataset[0:5])
    # print(fn_uncertainty_in_dataset[0:5])

    all_positive_uncertainty_in_dataset[all_positive_uncertainty_in_dataset > 1000] = 1000
    tp_uncertainty_in_dataset[tp_uncertainty_in_dataset > 1000] = 1000
    fn_uncertainty_in_dataset[fn_uncertainty_in_dataset > 1000] = 1000
    fp_uncertainty_in_dataset[fp_uncertainty_in_dataset > 1000] = 1000

    pltsave(all_positive_uncertainty_in_dataset, dir=args.sta_save_dir, name='all positive uncertainty')
    pltsave(tp_uncertainty_in_dataset, dir=args.sta_save_dir, name='True Positive uncertainty')
    pltsave(fn_uncertainty_in_dataset, dir=args.sta_save_dir, name='False Negative uncertainty')
    pltsave(fp_uncertainty_in_dataset, dir=args.sta_save_dir, name='False Positive uncertainty')

    fp_uncertainty_in_dataset_filtered = gaussian_filter1d(fp_uncertainty_in_dataset, sigma=3)
    tp_uncertainty_in_dataset_filtered = gaussian_filter1d(tp_uncertainty_in_dataset, sigma=3)
    fn_uncertainty_in_dataset_filtered = gaussian_filter1d(fn_uncertainty_in_dataset, sigma=3)
    fp_and_fn = fp_uncertainty_in_dataset_filtered + fn_uncertainty_in_dataset_filtered

    pltsave(fp_and_fn, dir=args.sta_save_dir, name='FP & FN uncertainty filtered')
    pltsave(tp_uncertainty_in_dataset_filtered, dir=args.sta_save_dir, name='True Positive uncertainty filtered')
    pltsave(fn_uncertainty_in_dataset_filtered, dir=args.sta_save_dir, name='False Negative uncertainty filtered')
    pltsave(fp_uncertainty_in_dataset_filtered, dir=args.sta_save_dir, name='False Positive uncertainty filtered')

    fp_mean, fp_var = find_mean_and_var(fp_uncertainty_in_dataset_filtered, start=args.bins / 10, end=args.bins,
                                        bins=args.bins, bin_range=args.bin_range)
    tp_mean, tp_var = find_mean_and_var(tp_uncertainty_in_dataset_filtered, start=args.bins / 10, end=args.bins,
                                        bins=args.bins, bin_range=args.bin_range)
    fn_mean, fn_var = find_mean_and_var(fn_uncertainty_in_dataset_filtered, start=args.bins / 10, end=args.bins,
                                        bins=args.bins, bin_range=args.bin_range)
    logger.write_and_print('max uncertainty is {0}'.format(uncertainty_max))
    logger.write_and_print('fp uncertainty mean is {0}, variance is {1}'.format(fp_mean, fp_var))
    logger.write_and_print('tp uncertainty mean is {0}, variance is {1}'.format(tp_mean, tp_var))
    logger.write_and_print('fn uncertainty mean is {0}, variance is {1}'.format(fn_mean, fn_var))

    return
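
# find_mean_and_var is a project helper whose body is not shown here; under
# the assumption that it computes the weighted mean and variance of a
# histogram restricted to bins [start, end), a reconstruction might look
# roughly like this:
def find_mean_and_var_sketch(hist, start, end, bins, bin_range):
    start, end = int(start), int(end)
    counts = np.asarray(hist, dtype=np.float64)[start:end]
    # map the selected bins back to value space via their bin centers
    bin_width = bin_range / bins
    centers = (np.arange(start, end) + 0.5) * bin_width
    total = counts.sum()
    if total == 0:
        return 0.0, 0.0
    mean = (counts * centers).sum() / total
    variance = (counts * (centers - mean) ** 2).sum() / total
    return mean, variance
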
Code example #5
def TestUncertaintyMapLabelWeightsGeneration(args):
    positive_patch_results_saving_dir = os.path.join(args.dst_data_root_dir,
                                                     'positive_patches',
                                                     args.dataset_type,
                                                     'uncertainty-maps')
    negative_patch_results_saving_dir = os.path.join(args.dst_data_root_dir,
                                                     'negative_patches',
                                                     args.dataset_type,
                                                     'uncertainty-maps')

    # create dir when it does not exist
    if not os.path.exists(positive_patch_results_saving_dir):
        os.mkdir(positive_patch_results_saving_dir)
    if not os.path.exists(negative_patch_results_saving_dir):
        os.mkdir(negative_patch_results_saving_dir)

    # initialize logger
    logger = Logger(args.src_data_root_dir, 'uncertainty.txt')
    logger.write_and_print('Dataset: {}'.format(args.src_data_root_dir))
    logger.write_and_print('Dataset type: {}'.format(args.dataset_type))

    # define the network
    network = VNet2d(num_in_channels=cfg.net.in_channels,
                     num_out_channels=cfg.net.out_channels)

    ckpt_dir = os.path.join(args.model_saving_dir, 'ckpt')

    # get net list for imitating MC dropout process
    net_list = get_net_list(network, ckpt_dir, args.mc_epoch_indexes, logger)

    # create dataset
    dataset = MicroCalcificationDataset(
        data_root_dir=args.src_data_root_dir,
        mode=args.dataset_type,
        enable_random_sampling=False,
        pos_to_neg_ratio=cfg.dataset.pos_to_neg_ratio,
        image_channels=cfg.dataset.image_channels,
        cropping_size=cfg.dataset.cropping_size,
        dilation_radius=0,
        load_uncertainty_map=False,
        calculate_micro_calcification_number=False,
        enable_data_augmentation=False)

    # create data loader
    data_loader = DataLoader(dataset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=cfg.train.num_threads)

    for batch_idx, (images_tensor, _, _, _, _, _,
                    filenames) in enumerate(data_loader):
        logger.write_and_print('Evaluating batch: {}'.format(batch_idx))

        # start time of this batch
        start_time_for_batch = time()

        # transfer the tensor into gpu device
        images_tensor = images_tensor.cuda()

        # imitating MC dropout
        uncertainty_maps_np = generate_uncertainty_maps(
            net_list, images_tensor)
        save_uncertainty_maps(uncertainty_maps_np, filenames,
                              positive_patch_results_saving_dir,
                              negative_patch_results_saving_dir, logger)

        logger.write_and_print(
            'Finished evaluating, consuming time = {:.4f}s'.format(
                time() - start_time_for_batch))
        logger.write_and_print(
            '--------------------------------------------------------------------------------------'
        )

        logger.flush()

    return
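
# generate_uncertainty_maps is called throughout but never shown; the usual
# MC-style recipe is to forward the batch through every snapshot in net_list
# and take the per-pixel standard deviation. A sketch, assuming each net
# returns a (reconstruction, residue) pair like VNet2d does above:
def generate_uncertainty_maps_sketch(net_list, images_tensor):
    residue_stack = []
    with torch.no_grad():
        for snapshot_net in net_list:
            _, residues = snapshot_net(images_tensor)
            residue_stack.append(residues.cpu().numpy())
    # std-dev over the snapshot axis -> one uncertainty map per image
    return np.stack(residue_stack, axis=0).std(axis=0)
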
def TestMicroCalcificationRadiographLevelDetection(args):
    # start time of this dataset
    start_time_for_dataset = time()

    # remove existing dir which has the same name and create clean dir
    if os.path.exists(args.prediction_saving_dir):
        shutil.rmtree(args.prediction_saving_dir)
    os.mkdir(args.prediction_saving_dir)

    # create dir for saving visualization results
    patch_level_visualization_saving_dir = None
    if args.save_visualization_results:
        visualization_saving_dir = os.path.join(args.prediction_saving_dir,
                                                'qualitative_results')
        radiograph_level_visualization_saving_dir = os.path.join(
            visualization_saving_dir, 'radiograph_level')
        patch_level_visualization_saving_dir = os.path.join(
            visualization_saving_dir, 'patch_level')
        #
        os.mkdir(visualization_saving_dir)
        os.mkdir(radiograph_level_visualization_saving_dir)
        os.mkdir(patch_level_visualization_saving_dir)

    # initialize logger
    logger = Logger(args.prediction_saving_dir, 'quantitative_results.txt')
    logger.write_and_print('Dataset: {}'.format(args.data_root_dir))
    logger.write_and_print('Dataset type: {}'.format(args.dataset_type))
    logger.write_and_print('Reconstruction model saving dir: {}'.format(
        args.reconstruction_model_saving_dir))
    logger.write_and_print('Reconstruction ckpt index: {}'.format(
        args.reconstruction_epoch_idx))
    logger.write_and_print('Classification model saving dir: {}'.format(
        args.classification_model_saving_dir))
    logger.write_and_print('Classification ckpt index: {}'.format(
        args.classification_epoch_idx))

    # define the reconstruction network
    reconstruction_net = VNet2d(num_in_channels=r_cfg.net.in_channels,
                                num_out_channels=r_cfg.net.out_channels)
    #
    mc_reconstruction_net = copy.deepcopy(reconstruction_net)
    #
    # get the reconstruction absolute ckpt path
    reconstruction_ckpt_path = get_ckpt_path(
        args.reconstruction_model_saving_dir, args.reconstruction_epoch_idx)
    #
    # load ckpt and transfer net into gpu devices
    reconstruction_net = torch.nn.DataParallel(reconstruction_net).cuda()
    reconstruction_net.load_state_dict(torch.load(reconstruction_ckpt_path))
    reconstruction_net = reconstruction_net.eval()
    #
    logger.write_and_print(
        'Load ckpt: {0}...'.format(reconstruction_ckpt_path))

    # decide whether uncertainty maps have to be calculated
    calculate_uncertainty = len(args.mc_epoch_indexes) > 0

    # get net list for imitating MC dropout process
    net_list = None
    if calculate_uncertainty:
        net_list = get_net_list(
            mc_reconstruction_net,
            os.path.join(args.reconstruction_model_saving_dir, 'ckpt'),
            args.mc_epoch_indexes, logger)

    # import the network package
    try:
        net_package = importlib.import_module('net.{}'.format(
            args.classification_net_name))
    except ImportError:
        print('failed to import package: {}'.format(
            'net.' + args.classification_net_name))
        raise
    #
    # define the classification network
    classification_net = net_package.ResNet18(
        in_channels=c_cfg.net.in_channels, num_classes=c_cfg.net.num_classes)
    #
    # get the classification absolute ckpt path
    classification_ckpt_path = get_ckpt_path(
        args.classification_model_saving_dir, args.classification_epoch_idx)
    #
    # load ckpt and transfer net into gpu devices
    classification_net = torch.nn.DataParallel(classification_net).cuda()
    classification_net.load_state_dict(torch.load(classification_ckpt_path))
    classification_net = classification_net.eval()
    #
    logger.write_and_print(
        'Load ckpt: {0}...'.format(classification_ckpt_path))

    # create dataset
    dataset = MicroCalcificationRadiographLevelDataset(
        data_root_dir=args.data_root_dir, mode=args.dataset_type)

    # create data loader
    data_loader = DataLoader(dataset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=r_cfg.train.num_threads)

    # set up metrics object
    metrics = MetricsRadiographLevelDetection(args.distance_threshold,
                                              args.score_threshold_stride)

    for radiograph_idx, (images_tensor, pixel_level_labels_tensor, _,
                         filenames) in enumerate(data_loader):
        filename = filenames[0]

        # logging
        logger.write_and_print(
            '--------------------------------------------------------------------------------------'
        )
        logger.write_and_print(
            'Start evaluating radiograph {} out of {}: {}...'.format(
                radiograph_idx + 1, dataset.__len__(), filename))

        # start time of this radiograph
        start_time_for_radiograph = time()

        # transfer the tensor into gpu device
        images_tensor = images_tensor.cuda()

        # transfer the tensor into ndarray format
        pixel_level_label_np = pixel_level_labels_tensor.cpu().numpy().squeeze()

        # generate the raw radiograph-level residue
        _, raw_residue_radiograph_np = generate_radiograph_level_reconstructed_and_residue_result(
            images_tensor, reconstruction_net, pixel_level_label_np,
            args.reconstruction_patch_size, args.patch_stride)

        # post-process the raw radiograph-level residue
        processed_residue_radiograph_np = post_process_residue_radiograph(
            raw_residue_radiograph_np, pixel_level_label_np,
            args.prob_threshold, args.area_threshold)

        # generate coordinates and score list for the post-processed radiograph-level residue
        pred_coord_list, pred_score_list = generate_coordinate_and_score_list(
            images_tensor, classification_net, pixel_level_label_np,
            raw_residue_radiograph_np, processed_residue_radiograph_np,
            filename, patch_level_visualization_saving_dir,
            args.crop_patch_size, args.resampled_patch_size, net_list)

        # generate coordinates list for the mask
        label_coord_list, _ = generate_coordinate_and_score_list(
            images_tensor,
            classification_net,
            pixel_level_label_np,
            raw_residue_radiograph_np,
            processed_residue_radiograph_np,
            filename,
            patch_level_visualization_saving_dir,
            args.crop_patch_size,
            args.resampled_patch_size,
            net_list,
            mode='annotated')

        # evaluate based on the above three lists
        if args.slack_for_recall:
            detection_result_record_radiograph_level = metrics.metric_all_score_thresholds(
                pred_coord_list, pred_score_list, label_coord_list,
                processed_residue_radiograph_np)
        else:
            detection_result_record_radiograph_level = metrics.metric_all_score_thresholds(
                pred_coord_list, pred_score_list, label_coord_list)
        # save radiograph-level visualization results
        if args.save_visualization_results:
            save_radiograph_level_results(
                images_tensor, pixel_level_label_np, raw_residue_radiograph_np,
                processed_residue_radiograph_np, filename,
                radiograph_level_visualization_saving_dir)

        # logging
        # print logging information of this radiograph
        logger.write_and_print(
            'Finish evaluating radiograph: {}, consuming time: {:.4f}s'.format(
                radiograph_idx + 1,
                time() - start_time_for_radiograph))
        detection_result_record_radiograph_level.print(logger)
        logger.write_and_print(
            '--------------------------------------------------------------------------------------'
        )
        logger.flush()

    # print logging information of this dataset
    logger.write_and_print(
        '&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'
    )
    logger.write_and_print(
        'Finished evaluating this dataset, consuming time: {:.4f}s'.format(
            time() - start_time_for_dataset))
    metrics.detection_result_record_dataset_level.print(logger)
    logger.write_and_print(
        '&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'
    )

    return
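
# generate_radiograph_level_reconstructed_and_residue_result stitches
# patch-level outputs back into a whole radiograph; a rough sketch of the
# standard sliding-window pattern it presumably follows (the function name,
# square patches and the overlap-averaging policy are our assumptions):
def sliding_window_residue_sketch(image_tensor, net, patch_size, stride):
    _, _, height, width = image_tensor.shape  # expects a single NCHW radiograph
    residue_sum = np.zeros((height, width), dtype=np.float32)
    visit_count = np.zeros((height, width), dtype=np.float32)
    with torch.no_grad():
        for y in range(0, height - patch_size + 1, stride):
            for x in range(0, width - patch_size + 1, stride):
                patch = image_tensor[:, :, y:y + patch_size, x:x + patch_size]
                _, residue = net(patch)
                residue_sum[y:y + patch_size, x:x + patch_size] += \
                    residue.squeeze().cpu().numpy()
                visit_count[y:y + patch_size, x:x + patch_size] += 1
    # average the predictions wherever windows overlap
    return residue_sum / np.maximum(visit_count, 1)
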
Code example #7
def TestMicroCalcificationDetectionPatchLevel(args):
    visualization_saving_dir = os.path.join(args.prediction_saving_dir, 'qualitative_results')

    # remove existing dir which has the same name and create clean dir
    if os.path.exists(args.prediction_saving_dir):
        shutil.rmtree(args.prediction_saving_dir)
    os.mkdir(args.prediction_saving_dir)
    os.mkdir(visualization_saving_dir)

    # initialize logger
    logger = Logger(args.prediction_saving_dir, 'quantitative_results.txt')
    logger.write_and_print('Dataset: {}'.format(args.data_root_dir))
    logger.write_and_print('Dataset type: {}'.format(args.dataset_type))
    logger.write_and_print('Reconstruction model saving dir: {}'.format(args.reconstruction_model_saving_dir))
    logger.write_and_print('Reconstruction ckpt index: {}'.format(args.reconstruction_epoch_idx))
    logger.write_and_print('Classification model saving dir: {}'.format(args.classification_model_saving_dir))
    logger.write_and_print('Classification ckpt index: {}'.format(args.classification_epoch_idx))

    # define the reconstruction network
    reconstruction_net = VNet2d(num_in_channels=r_cfg.net.in_channels, num_out_channels=r_cfg.net.out_channels)
    #
    # get the reconstruction absolute ckpt path
    reconstruction_ckpt_path = get_ckpt_path(args.reconstruction_model_saving_dir, args.reconstruction_epoch_idx)
    #
    # load ckpt and transfer net into gpu devices
    reconstruction_net = torch.nn.DataParallel(reconstruction_net).cuda()
    reconstruction_net.load_state_dict(torch.load(reconstruction_ckpt_path))
    reconstruction_net = reconstruction_net.eval()
    #
    logger.write_and_print('Load ckpt: {0}...'.format(reconstruction_ckpt_path))

    # define the classification network
    classification_net = ResNet18(in_channels=c_cfg.net.in_channels, num_classes=c_cfg.net.num_classes)
    #
    # get the classification absolute ckpt path
    classification_ckpt_path = get_ckpt_path(args.classification_model_saving_dir, args.classification_epoch_idx)
    #
    # load ckpt and transfer net into gpu devices
    classification_net = torch.nn.DataParallel(classification_net).cuda()
    classification_net.load_state_dict(torch.load(classification_ckpt_path))
    classification_net = classification_net.eval()
    #
    logger.write_and_print('Load ckpt: {0}...'.format(classification_ckpt_path))

    # create dataset and data loader
    dataset = MicroCalcificationDataset(data_root_dir=args.data_root_dir,
                                        mode=args.dataset_type,
                                        enable_random_sampling=False,
                                        pos_to_neg_ratio=r_cfg.dataset.pos_to_neg_ratio,
                                        image_channels=r_cfg.dataset.image_channels,
                                        cropping_size=r_cfg.dataset.cropping_size,
                                        dilation_radius=args.dilation_radius,
                                        load_uncertainty_map=False,
                                        calculate_micro_calcification_number=r_cfg.dataset.calculate_micro_calcification_number,
                                        enable_data_augmentation=False)
    #
    data_loader = DataLoader(dataset, batch_size=args.batch_size,
                             shuffle=False, num_workers=r_cfg.train.num_threads)

    metrics = MetricsReconstruction(args.prob_threshold, args.area_threshold, args.distance_threshold)

    calcification_num = 0
    recall_num = 0
    FP_num = 0

    for batch_idx, (images_tensor, pixel_level_labels_tensor, pixel_level_labels_dilated_tensor, _,
                    image_level_labels_tensor, _, filenames) in enumerate(data_loader):
        # start time of this batch
        start_time_for_batch = time()

        # transfer the tensor into gpu device
        images_tensor = images_tensor.cuda()

        # reconstruction network forward
        reconstructed_images_tensor, prediction_residues_tensor = reconstruction_net(images_tensor)

        # classification network forward
        classification_preds_tensor = classification_net(images_tensor)

        # merge the reconstruction and the classification results
        detection_results_np = micro_calcification_detection_batch_level(prediction_residues_tensor,
                                                                         classification_preds_tensor)

        # evaluation
        post_process_preds_np, calcification_num_batch_level, recall_num_batch_level, FP_num_batch_level = \
            metrics.metric_batch_level(detection_results_np, pixel_level_labels_tensor)

        calcification_num += calcification_num_batch_level
        recall_num += recall_num_batch_level
        FP_num += FP_num_batch_level

        # print logging information
        logger.write_and_print(
            'The number of the annotated calcifications of this batch = {}'.format(calcification_num_batch_level))
        logger.write_and_print(
            'The number of the recalled calcifications of this batch = {}'.format(recall_num_batch_level))
        logger.write_and_print(
            'The number of the false positive calcifications of this batch = {}'.format(FP_num_batch_level))
        logger.write_and_print('batch: {}, consuming time: {:.4f}s'.format(batch_idx, time() - start_time_for_batch))
        logger.write_and_print('--------------------------------------------------------------------------------------')

        save_tensor_in_png_and_nii_format(images_tensor, reconstructed_images_tensor, prediction_residues_tensor,
                                          post_process_preds_np, pixel_level_labels_tensor,
                                          pixel_level_labels_dilated_tensor, filenames, visualization_saving_dir)

        logger.flush()

    logger.write_and_print('The number of the annotated calcifications of this dataset = {}'.format(calcification_num))
    logger.write_and_print('The number of the recalled calcifications of this dataset = {}'.format(recall_num))
    logger.write_and_print('The number of the false positive calcifications of this dataset = {}'.format(FP_num))

    return
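
# micro_calcification_detection_batch_level merges pixel-level residues with
# patch-level classification outputs; a plausible sketch, assuming the
# classifier emits per-patch logits whose argmax is 1 for positive patches,
# is to suppress the residues of patches classified as negative:
def micro_calcification_detection_sketch(residues_tensor, cls_preds_tensor):
    residues_np = residues_tensor.detach().cpu().numpy()
    patch_is_positive = cls_preds_tensor.argmax(dim=1).cpu().numpy()
    for idx, positive in enumerate(patch_is_positive):
        if not positive:
            # the classifier rejected this patch -> discard its residues
            residues_np[idx] = 0
    return residues_np
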
Code example #8
def TestMicroCalcificationRadiographLevelDetection(args):
    # start time of this dataset
    start_time_for_dataset = time()

    # create clean dir
    if not os.path.exists(args.dst_data_root_dir):
        os.mkdir(args.dst_data_root_dir)
        for patch_type in ['positive_patches', 'negative_patches']:
            os.mkdir(os.path.join(args.dst_data_root_dir, patch_type))
            for dataset_type in ['training', 'validation', 'test']:
                os.mkdir(
                    os.path.join(args.dst_data_root_dir, patch_type,
                                 dataset_type))
                for image_type in ['images', 'labels', 'uncertainty-maps']:
                    os.mkdir(
                        os.path.join(args.dst_data_root_dir, patch_type,
                                     dataset_type, image_type))

    # initialize logger
    logger = Logger(args.dst_data_root_dir)
    logger.write_and_print('Dataset: {}'.format(args.src_data_root_dir))
    logger.write_and_print('Reconstruction model saving dir: {}'.format(
        args.reconstruction_model_saving_dir))
    logger.write_and_print('Reconstruction ckpt index: {}'.format(
        args.reconstruction_epoch_idx))

    # get net list for imitating MC dropout process
    net_for_mc = VNet2d(num_in_channels=cfg.net.in_channels,
                        num_out_channels=cfg.net.out_channels)
    uncertainty_model_ckpt_dir = os.path.join(
        args.uncertainty_model_saving_dir, 'ckpt')
    net_list = get_net_list(net_for_mc, uncertainty_model_ckpt_dir,
                            args.mc_epoch_indexes, logger)

    # define the reconstruction network
    reconstruction_net = VNet2d(num_in_channels=cfg.net.in_channels,
                                num_out_channels=cfg.net.out_channels)
    #
    # get the reconstruction absolute ckpt path
    reconstruction_ckpt_path = get_ckpt_path(
        args.reconstruction_model_saving_dir, args.reconstruction_epoch_idx)
    #
    # load ckpt and transfer net into gpu devices
    reconstruction_net = torch.nn.DataParallel(reconstruction_net).cuda()
    reconstruction_net.load_state_dict(torch.load(reconstruction_ckpt_path))
    reconstruction_net = reconstruction_net.eval()
    #
    logger.write_and_print(
        'Load ckpt: {0}...'.format(reconstruction_ckpt_path))

    for dataset_type in ['training', 'validation', 'test']:
        # start time of this set
        start_time_for_dataset_type = time()

        positive_dataset_type_dir = os.path.join(args.dst_data_root_dir,
                                                 'positive_patches',
                                                 dataset_type, 'images')
        negative_dataset_type_dir = os.path.join(args.dst_data_root_dir,
                                                 'negative_patches',
                                                 dataset_type, 'images')

        # create dataset
        dataset = MicroCalcificationRadiographLevelDataset(
            data_root_dir=args.src_data_root_dir, mode=dataset_type)

        # create data loader
        data_loader = DataLoader(dataset,
                                 batch_size=1,
                                 shuffle=False,
                                 num_workers=cfg.train.num_threads)

        positive_patch_num_dataset_type_level = 0
        negative_patch_num_dataset_type_level = 0

        logger.write_and_print(
            '  &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'
        )
        logger.write_and_print(
            '  Start evaluating {} set...'.format(dataset_type))

        for radiograph_idx, (images_tensor, pixel_level_labels_tensor, _,
                             filenames) in enumerate(data_loader):
            filename = filenames[0]

            # logging
            logger.write_and_print(
                '    Start evaluating {} set radiograph {} out of {}: {}...'.
                format(dataset_type, radiograph_idx + 1, dataset.__len__(),
                       filename))

            # start time of this radiograph
            start_time_for_radiograph = time()

            # transfer the tensor into gpu device
            images_tensor = images_tensor.cuda()

            # transfer the tensor into ndarray format
            image_np = images_tensor.cpu().numpy().squeeze()
            pixel_level_label_np = pixel_level_labels_tensor.cpu().numpy().squeeze()

            # generate the raw radiograph-level residue
            _, raw_residue_radiograph_np = generate_radiograph_level_reconstructed_and_residue_result(
                images_tensor, reconstruction_net, pixel_level_label_np,
                args.reconstruction_patch_size,
                args.reconstruction_patch_stride)

            # post-process the raw radiograph-level residue
            processed_residue_radiograph_np = post_process_residue_radiograph(
                raw_residue_radiograph_np, pixel_level_label_np,
                args.prob_threshold, args.area_threshold)

            # generate coordinates list for the post-processed radiograph-level residue
            pred_coord_list = generate_coordinate_list(
                processed_residue_radiograph_np)

            # generate coordinates list for the mask
            label_coord_list = generate_coordinate_list(pixel_level_label_np,
                                                        mode='annotated')

            # merge pred_coord_list and label_coord_list
            coord_list = merge_coord_list(pred_coord_list, label_coord_list)

            positive_patch_num_radiograph_level, negative_patch_num_radiograph_level = \
                save_images_labels_uncertainty_maps(coord_list, images_tensor, image_np, pixel_level_label_np, net_list,
                                                    filename, positive_dataset_type_dir, negative_dataset_type_dir,
                                                    args.reconstruction_patch_size, args.patch_size)

            positive_patch_num_dataset_type_level += positive_patch_num_radiograph_level
            negative_patch_num_dataset_type_level += negative_patch_num_radiograph_level

            # logging
            # print logging information of this radiograph
            logger.write_and_print(
                '    Finish evaluating radiograph: {}, consuming time: {:.4f}s'
                .format(radiograph_idx + 1,
                        time() - start_time_for_radiograph))
            logger.write_and_print(
                '    This radiograph contains {} positive patches and {} negative patches.'
                .format(positive_patch_num_radiograph_level,
                        negative_patch_num_radiograph_level))
            logger.write_and_print(
                '    ------------------------------------------------------------------------------'
            )
            logger.flush()

        # print logging information of this set
        logger.write_and_print(
            '  Finished evaluating {} set, consuming time: {:.4f}s'.format(
                dataset_type,
                time() - start_time_for_dataset_type))
        logger.write_and_print(
            '  This {} set contains {} positive patches and {} negative patches.'
            .format(dataset_type, positive_patch_num_dataset_type_level,
                    negative_patch_num_dataset_type_level))
        logger.write_and_print(
            '  &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'
        )

    logger.write_and_print(
        'Finished evaluating the whole dataset, consuming time: {:.4f}s'.format(
            time() - start_time_for_dataset))

    return
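
# merge_coord_list is not shown in these snippets; a sketch under the
# assumption that it keeps every annotated coordinate and adds only those
# predicted coordinates that are not already covered by an annotation
# (the min_distance default is a placeholder, not the repository's value):
def merge_coord_list_sketch(pred_coord_list, label_coord_list, min_distance=14):
    merged = list(label_coord_list)
    for pred_coord in pred_coord_list:
        distances = [np.linalg.norm(np.asarray(pred_coord) - np.asarray(label_coord))
                     for label_coord in label_coord_list]
        if not distances or min(distances) >= min_distance:
            merged.append(pred_coord)
    return merged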