Example #1
        iterate_for_an_epoch(training=True,
                             epoch_idx=epoch_idx,
                             data_loader=training_data_loader,
                             net=net,
                             loss_func=loss_func,
                             metrics=metrics,
                             visdom_obj=visdom_obj,
                             logger=logger,
                             optimizer=optimizer)

        iterate_for_an_epoch(training=False,
                             epoch_idx=epoch_idx,
                             data_loader=validation_data_loader,
                             net=net,
                             loss_func=loss_func,
                             metrics=metrics,
                             visdom_obj=visdom_obj,
                             logger=logger)
        lr_scheduler.step()

        logger.flush()

        # periodically save this model according to config
        if epoch_idx % cfg.train.save_epochs == 0:
            torch.save(
                net.state_dict(),
                os.path.join(ckpt_dir, 'net_epoch_{}.pth'.format(epoch_idx)))

        # save this model in case it is currently the best model on the validation set
        save_best_ckpt(metrics, net, ckpt_dir, epoch_idx)
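
For reference, a minimal sketch of what the save_best_ckpt helper above could look like; the metric attribute name (determine_saving_metric_on_validation_list) and the best-checkpoint naming scheme are assumptions for illustration, not the repository's actual implementation.

import os
import torch

def save_best_ckpt(metrics, net, ckpt_dir, epoch_idx):
    # assumed: the metrics object keeps one validation score per epoch
    scores = metrics.determine_saving_metric_on_validation_list
    if scores[-1] == max(scores):
        # drop any previously saved best checkpoint (assumed naming scheme,
        # matching the 'net_best_on_validation_set' lookup used in the
        # evaluation examples below)
        for filename in os.listdir(ckpt_dir):
            if 'net_best_on_validation_set' in filename:
                os.remove(os.path.join(ckpt_dir, filename))
        torch.save(
            net.state_dict(),
            os.path.join(
                ckpt_dir,
                'net_best_on_validation_set_epoch_{}.pth'.format(epoch_idx)))
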
Example #2
def TestMicroCalcificationReconstruction(args):
    prediction_saving_dir = os.path.join(
        args.model_saving_dir,
        'reconstruction_results_dataset_{}_epoch_{}'.format(
            args.dataset_type, args.epoch_idx))
    visualization_saving_dir = os.path.join(prediction_saving_dir,
                                            'qualitative_results')
    visualization_TP_saving_dir = os.path.join(visualization_saving_dir,
                                               'TPs_only')
    visualization_FP_saving_dir = os.path.join(visualization_saving_dir,
                                               'FPs_only')
    visualization_FN_saving_dir = os.path.join(visualization_saving_dir,
                                               'FNs_only')
    visualization_FP_FN_saving_dir = os.path.join(visualization_saving_dir,
                                                  'FPs_FNs_both')

    # remove existing dir which has the same name and create clean dir
    if os.path.exists(prediction_saving_dir):
        shutil.rmtree(prediction_saving_dir)
    os.mkdir(prediction_saving_dir)
    os.mkdir(visualization_saving_dir)
    os.mkdir(visualization_TP_saving_dir)
    os.mkdir(visualization_FP_saving_dir)
    os.mkdir(visualization_FN_saving_dir)
    os.mkdir(visualization_FP_FN_saving_dir)

    # initialize logger
    logger = Logger(prediction_saving_dir, 'quantitative_results.txt')
    logger.write_and_print('Dataset: {}'.format(args.data_root_dir))
    logger.write_and_print('Dataset type: {}'.format(args.dataset_type))

    # define the network
    network = VNet2d(num_in_channels=cfg.net.in_channels,
                     num_out_channels=cfg.net.out_channels)

    # load the specified ckpt
    ckpt_dir = os.path.join(args.model_saving_dir, 'ckpt')
    # epoch_idx is specified -> load the specified ckpt
    if args.epoch_idx >= 0:
        ckpt_path = os.path.join(ckpt_dir,
                                 'net_epoch_{}.pth'.format(args.epoch_idx))
    # epoch_idx is not specified -> load the best ckpt
    else:
        saved_ckpt_list = os.listdir(ckpt_dir)
        best_ckpt_filename = [
            best_ckpt_filename for best_ckpt_filename in saved_ckpt_list
            if 'net_best_on_validation_set' in best_ckpt_filename
        ][0]
        ckpt_path = os.path.join(ckpt_dir, best_ckpt_filename)

    # transfer net into gpu devices
    net = copy.deepcopy(network)
    net = torch.nn.DataParallel(net).cuda()
    net.load_state_dict(torch.load(ckpt_path))
    net = net.eval()

    logger.write_and_print(
        'Load ckpt: {0} for evaluating...'.format(ckpt_path))

    # decide whether to calculate uncertainty maps (enabled when MC epoch indexes are given)
    calculate_uncertainty = len(args.mc_epoch_indexes) > 0

    # get net list for imitating MC dropout process
    net_list = None
    if calculate_uncertainty:
        net_list = get_net_list(network, ckpt_dir, args.mc_epoch_indexes,
                                logger)

    # create dataset
    dataset = MicroCalcificationDataset(
        data_root_dir=args.data_root_dir,
        mode=args.dataset_type,
        enable_random_sampling=False,
        pos_to_neg_ratio=cfg.dataset.pos_to_neg_ratio,
        image_channels=cfg.dataset.image_channels,
        cropping_size=cfg.dataset.cropping_size,
        dilation_radius=args.dilation_radius,
        load_uncertainty_map=False,
        calculate_micro_calcification_number=cfg.dataset.calculate_micro_calcification_number,
        enable_data_augmentation=False)

    # create data loader
    data_loader = DataLoader(dataset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=cfg.train.num_threads)

    metrics = MetricsReconstruction(args.prob_threshold, args.area_threshold,
                                    args.distance_threshold,
                                    args.slack_for_recall)

    calcification_num = 0
    recall_num = 0
    FP_num = 0

    for batch_idx, (images_tensor, pixel_level_labels_tensor,
                    pixel_level_labels_dilated_tensor, _,
                    image_level_labels_tensor, _,
                    filenames) in enumerate(data_loader):
        logger.write_and_print('Evaluating batch: {}'.format(batch_idx))

        # start time of this batch
        start_time_for_batch = time()

        # transfer the tensor into gpu device
        images_tensor = images_tensor.cuda()

        # network forward
        reconstructed_images_tensor, prediction_residues_tensor = net(
            images_tensor)

        # MC dropout
        uncertainty_maps_np = generate_uncertainty_maps(
            net_list, images_tensor) if calculate_uncertainty else None

        # evaluation
        post_process_preds_np, calcification_num_batch_level, recall_num_batch_level, FP_num_batch_level, \
        result_flag_list = metrics.metric_batch_level(prediction_residues_tensor, pixel_level_labels_tensor)

        calcification_num += calcification_num_batch_level
        recall_num += recall_num_batch_level
        FP_num += FP_num_batch_level

        # print logging information
        logger.write_and_print(
            'The number of the annotated calcifications of this batch = {}'.
            format(calcification_num_batch_level))
        logger.write_and_print(
            'The number of the recalled calcifications of this batch = {}'.
            format(recall_num_batch_level))
        logger.write_and_print(
            'The number of the false positive calcifications of this batch = {}'
            .format(FP_num_batch_level))
        logger.write_and_print(
            'Consuming time: {:.4f}s'.format(time() - start_time_for_batch))
        logger.write_and_print(
            '--------------------------------------------------------------------------------------'
        )

        save_tensor_in_png_and_nii_format(images_tensor,
                                          reconstructed_images_tensor,
                                          prediction_residues_tensor,
                                          post_process_preds_np,
                                          pixel_level_labels_tensor,
                                          pixel_level_labels_dilated_tensor,
                                          uncertainty_maps_np,
                                          filenames,
                                          result_flag_list,
                                          visualization_saving_dir,
                                          save_nii=args.save_nii)

        logger.flush()

    logger.write_and_print(
        'The number of the annotated calcifications of this dataset = {}'.
        format(calcification_num))
    logger.write_and_print(
        'The number of the recalled calcifications of this dataset = {}'.
        format(recall_num))
    logger.write_and_print(
        'The number of the false positive calcifications of this dataset = {}'.
        format(FP_num))

    return
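
A minimal sketch of the checkpoint-ensemble step behind generate_uncertainty_maps, assuming each net in net_list returns a (reconstruction, residue) pair as above and a single-channel residue; both assumptions are for illustration only.

import numpy as np
import torch

def generate_uncertainty_maps(net_list, images_tensor):
    # run every checkpoint-net on the same batch and use the per-pixel
    # standard deviation of the predicted residues as the uncertainty map
    residue_list = []
    with torch.no_grad():
        for net in net_list:
            _, residues_tensor = net(images_tensor)
            residue_list.append(residues_tensor.cpu().numpy())
    # (num_nets, B, C, H, W) -> std over the ensemble axis,
    # then drop the (assumed singleton) channel dimension
    return np.stack(residue_list, axis=0).std(axis=0).squeeze(axis=1)
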
Example #3
def TestUncertaintyMapLabelWeightsGeneration(args):
    positive_patch_results_saving_dir = os.path.join(args.dst_data_root_dir,
                                                     'positive_patches',
                                                     args.dataset_type,
                                                     'uncertainty-maps')
    negative_patch_results_saving_dir = os.path.join(args.dst_data_root_dir,
                                                     'negative_patches',
                                                     args.dataset_type,
                                                     'uncertainty-maps')

    # create dir when it does not exist
    if not os.path.exists(positive_patch_results_saving_dir):
        os.mkdir(positive_patch_results_saving_dir)
    if not os.path.exists(negative_patch_results_saving_dir):
        os.mkdir(negative_patch_results_saving_dir)

    # initialize logger
    logger = Logger(args.src_data_root_dir, 'uncertainty.txt')
    logger.write_and_print('Dataset: {}'.format(args.src_data_root_dir))
    logger.write_and_print('Dataset type: {}'.format(args.dataset_type))

    # define the network
    network = VNet2d(num_in_channels=cfg.net.in_channels,
                     num_out_channels=cfg.net.out_channels)

    ckpt_dir = os.path.join(args.model_saving_dir, 'ckpt')

    # get net list for imitating MC dropout process
    net_list = get_net_list(network, ckpt_dir, args.mc_epoch_indexes, logger)

    # create dataset
    dataset = MicroCalcificationDataset(
        data_root_dir=args.src_data_root_dir,
        mode=args.dataset_type,
        enable_random_sampling=False,
        pos_to_neg_ratio=cfg.dataset.pos_to_neg_ratio,
        image_channels=cfg.dataset.image_channels,
        cropping_size=cfg.dataset.cropping_size,
        dilation_radius=0,
        load_uncertainty_map=False,
        calculate_micro_calcification_number=False,
        enable_data_augmentation=False)

    # create data loader
    data_loader = DataLoader(dataset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=cfg.train.num_threads)

    for batch_idx, (images_tensor, _, _, _, _, _,
                    filenames) in enumerate(data_loader):
        logger.write_and_print('Evaluating batch: {}'.format(batch_idx))

        # start time of this batch
        start_time_for_batch = time()

        # transfer the tensor into gpu device
        images_tensor = images_tensor.cuda()

        # imitating MC dropout
        uncertainty_maps_np = generate_uncertainty_maps(
            net_list, images_tensor)
        save_uncertainty_maps(uncertainty_maps_np, filenames,
                              positive_patch_results_saving_dir,
                              negative_patch_results_saving_dir, logger)

        logger.write_and_print(
            'Finished evaluating, consuming time = {:.4f}s'.format(
                time() - start_time_for_batch))
        logger.write_and_print(
            '--------------------------------------------------------------------------------------'
        )

        logger.flush()

    return
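
A minimal sketch of the get_net_list helper used above, assuming checkpoints follow the net_epoch_{}.pth naming seen elsewhere in these examples:

import copy
import os
import torch

def get_net_list(network, ckpt_dir, mc_epoch_indexes, logger):
    # load one checkpoint per requested epoch index into an independent
    # copy of the network, each in eval mode on the GPU
    net_list = []
    for epoch_idx in mc_epoch_indexes:
        ckpt_path = os.path.join(ckpt_dir,
                                 'net_epoch_{}.pth'.format(epoch_idx))
        net = torch.nn.DataParallel(copy.deepcopy(network)).cuda()
        net.load_state_dict(torch.load(ckpt_path))
        net_list.append(net.eval())
        logger.write_and_print(
            'Load ckpt: {} for MC dropout...'.format(ckpt_path))
    return net_list
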
def TestNoiseIdentification(args):
    assert args.CL_type in [
        'Cij', 'Qij', 'intersection', 'union', 'prune_by_class',
        'prune_by_noise_rate', 'both'
    ]

    image_dir = os.path.join(args.clean_data_root_dir, args.dataset_type,
                             'images')
    clean_mask_dir = os.path.join(args.clean_data_root_dir, args.dataset_type,
                                  args.label_class_name)
    noisy_mask_dir = os.path.join(args.noisy_data_root_dir, args.dataset_type,
                                  args.label_class_name)
    confident_map_dir = os.path.join(
        args.noisy_data_root_dir, args.dataset_type,
        '{}-confident-maps'.format(args.label_class_name))
    if args.CL_type != 'both':
        confident_map_dir = confident_map_dir.replace(
            'confident-maps',
            'confident-maps-' + args.CL_type.replace('_', '-'))

    assert os.path.exists(clean_mask_dir)
    assert os.path.exists(noisy_mask_dir)
    assert os.path.exists(confident_map_dir)
    assert os.path.exists(args.dst_saving_dir)

    dst_saving_dir = os.path.join(
        args.dst_saving_dir,
        args.noisy_data_root_dir.split('/')[-3] + '-' + args.dataset_type +
        '-' + args.label_class_name)
    if args.CL_type != 'both':
        dst_saving_dir += '-' + args.CL_type.replace('_', '-')
    if os.path.exists(dst_saving_dir):
        shutil.rmtree(dst_saving_dir)
    os.mkdir(dst_saving_dir)

    logger = Logger(dst_saving_dir, 'logger.txt')
    logger.write_and_print('Clean mask dir: {}'.format(clean_mask_dir))
    logger.write_and_print('Noisy mask dir: {}'.format(noisy_mask_dir))
    logger.write_and_print('Confident map dir: {}'.format(confident_map_dir))

    filename_list = os.listdir(clean_mask_dir)

    noise_num_dataset_level = 0
    positive_num_dataset_level = 0
    recall_num_dataset_level = 0

    for filename in filename_list:
        logger.write_and_print(
            '--------------------------------------------------------------------------------------'
        )
        logger.write_and_print('    Evaluating: {}'.format(filename))

        # start time of this batch
        start_time_for_batch = time()

        src_image_path = os.path.join(image_dir, filename)
        src_clean_mask_path = os.path.join(clean_mask_dir, filename)
        src_noisy_mask_path = os.path.join(noisy_mask_dir, filename)
        src_confident_map_path = os.path.join(confident_map_dir, filename)

        clean_mask_np = (cv2.imread(src_clean_mask_path,
                                    cv2.IMREAD_GRAYSCALE).astype(np.float32) /
                         255.0).astype(np.uint8)
        noisy_mask_np = (cv2.imread(src_noisy_mask_path,
                                    cv2.IMREAD_GRAYSCALE).astype(np.float32) /
                         255.0).astype(np.uint8)
        confident_map_np = cv2.imread(src_confident_map_path,
                                      cv2.IMREAD_GRAYSCALE).astype(
                                          np.float32) / 255.0

        noise_np = clean_mask_np ^ noisy_mask_np

        # calculating noise identification metric
        recall_num_image_level = (noise_np * confident_map_np).sum()
        noise_num_image_level = noise_np.sum()
        positive_num_image_level = confident_map_np.sum()
        positive_num_dataset_level += positive_num_image_level
        recall_num_dataset_level += recall_num_image_level
        noise_num_dataset_level += noise_num_image_level

        dst_image_path = os.path.join(dst_saving_dir,
                                      filename.replace('.png', '_image.png'))
        dst_clean_mask_path = os.path.join(
            dst_saving_dir, filename.replace('.png', '_clean_mask.png'))
        dst_noisy_mask_path = os.path.join(
            dst_saving_dir, filename.replace('.png', '_noisy_mask.png'))
        dst_noise_path = os.path.join(dst_saving_dir,
                                      filename.replace('.png', '_noise.png'))
        dst_confident_map_path = os.path.join(
            dst_saving_dir, filename.replace('.png', '_confident_map.png'))

        shutil.copyfile(src_image_path, dst_image_path)
        shutil.copyfile(src_clean_mask_path, dst_clean_mask_path)
        shutil.copyfile(src_noisy_mask_path, dst_noisy_mask_path)
        shutil.copyfile(src_confident_map_path, dst_confident_map_path)
        cv2.imwrite(dst_noise_path, (noise_np * 255).astype(np.uint8))

        logger.write_and_print(
            '    Noise pixel number = {}'.format(noise_num_image_level))
        logger.write_and_print(
            '    Positive pixel number = {}'.format(positive_num_image_level))
        logger.write_and_print(
            '    Recalled pixel number = {}'.format(recall_num_image_level))
        logger.write_and_print(
            '    Finished evaluating, consuming time = {:.4f}s'.format(
                time() - start_time_for_batch))
        logger.write_and_print(
            '--------------------------------------------------------------------------------------'
        )

        logger.flush()

    mean_recall_rate = recall_num_dataset_level / noise_num_dataset_level
    mean_precision_rate = recall_num_dataset_level / positive_num_dataset_level
    f1_score = 2 * mean_recall_rate * mean_precision_rate / (
        mean_recall_rate + mean_precision_rate)

    logger.write_and_print(
        '--------------------------------------------------------------------------------------'
    )
    logger.write_and_print('Mean recall rate = {:.2f}%'.format(
        mean_recall_rate * 100))
    logger.write_and_print('Mean precision rate = {:.2f}%'.format(
        mean_precision_rate * 100))
    logger.write_and_print('F1 score = {:.2f}%'.format(f1_score * 100))
    print('{:.2f}%\t{:.2f}%\t{:.2f}%'.format(mean_recall_rate * 100,
                                             mean_precision_rate * 100,
                                             f1_score * 100))

    return
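
A toy sanity check of the noise-identification metric above (values are arbitrary): the clean and noisy masks disagree at three pixels, and the confident map recalls two of them plus one extra pixel.

import numpy as np

clean = np.array([[1, 1, 0, 0]], dtype=np.uint8)
noisy = np.array([[1, 0, 1, 1]], dtype=np.uint8)
confident = np.array([[1.0, 1.0, 1.0, 0.0]])

noise = clean ^ noisy  # label noise = XOR of the two masks -> [0, 1, 1, 1]
recall = (noise * confident).sum() / noise.sum()  # 2 / 3
precision = (noise * confident).sum() / confident.sum()  # 2 / 3
f1 = 2 * recall * precision / (recall + precision)  # 2 / 3
print('{:.2f}%\t{:.2f}%\t{:.2f}%'.format(recall * 100, precision * 100,
                                         f1 * 100))
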
def TestMicroCalcificationRadiographLevelDetection(args):
    # start time of this dataset
    start_time_for_dataset = time()

    # remove existing dir which has the same name and create clean dir
    if os.path.exists(args.prediction_saving_dir):
        shutil.rmtree(args.prediction_saving_dir)
    os.mkdir(args.prediction_saving_dir)

    # create dir for saving visualization results
    patch_level_visualization_saving_dir = None
    if args.save_visualization_results:
        visualization_saving_dir = os.path.join(args.prediction_saving_dir,
                                                'qualitative_results')
        radiograph_level_visualization_saving_dir = os.path.join(
            visualization_saving_dir, 'radiograph_level')
        patch_level_visualization_saving_dir = os.path.join(
            visualization_saving_dir, 'patch_level')
        #
        os.mkdir(visualization_saving_dir)
        os.mkdir(radiograph_level_visualization_saving_dir)
        os.mkdir(patch_level_visualization_saving_dir)

    # initialize logger
    logger = Logger(args.prediction_saving_dir, 'quantitative_results.txt')
    logger.write_and_print('Dataset: {}'.format(args.data_root_dir))
    logger.write_and_print('Dataset type: {}'.format(args.dataset_type))
    logger.write_and_print('Reconstruction model saving dir: {}'.format(
        args.reconstruction_model_saving_dir))
    logger.write_and_print('Reconstruction ckpt index: {}'.format(
        args.reconstruction_epoch_idx))
    logger.write_and_print('Classification model saving dir: {}'.format(
        args.classification_model_saving_dir))
    logger.write_and_print('Classification ckpt index: {}'.format(
        args.classification_epoch_idx))

    # define the reconstruction network
    reconstruction_net = VNet2d(num_in_channels=r_cfg.net.in_channels,
                                num_out_channels=r_cfg.net.out_channels)
    #
    mc_reconstruction_net = copy.deepcopy(reconstruction_net)
    #
    # get the reconstruction absolute ckpt path
    reconstruction_ckpt_path = get_ckpt_path(
        args.reconstruction_model_saving_dir, args.reconstruction_epoch_idx)
    #
    # load ckpt and transfer net into gpu devices
    reconstruction_net = torch.nn.DataParallel(reconstruction_net).cuda()
    reconstruction_net.load_state_dict(torch.load(reconstruction_ckpt_path))
    reconstruction_net = reconstruction_net.eval()
    #
    logger.write_and_print(
        'Load ckpt: {0}...'.format(reconstruction_ckpt_path))

    # decide whether to calculate uncertainty maps (enabled when MC epoch indexes are given)
    calculate_uncertainty = len(args.mc_epoch_indexes) > 0

    # get net list for imitating MC dropout process
    net_list = None
    if calculate_uncertainty:
        net_list = get_net_list(
            mc_reconstruction_net,
            os.path.join(args.reconstruction_model_saving_dir, 'ckpt'),
            args.mc_epoch_indexes, logger)

    # import the network package
    try:
        net_package = importlib.import_module('net.{}'.format(
            args.classification_net_name))
    except ImportError:
        print('failed to import package: {}'.format(
            'net.' + args.classification_net_name))
        raise
    #
    # define the classification network
    classification_net = net_package.ResNet18(
        in_channels=c_cfg.net.in_channels, num_classes=c_cfg.net.num_classes)
    #
    # get the classification absolute ckpt path
    classification_ckpt_path = get_ckpt_path(
        args.classification_model_saving_dir, args.classification_epoch_idx)
    #
    # load ckpt and transfer net into gpu devices
    classification_net = torch.nn.DataParallel(classification_net).cuda()
    classification_net.load_state_dict(torch.load(classification_ckpt_path))
    classification_net = classification_net.eval()
    #
    logger.write_and_print(
        'Load ckpt: {0}...'.format(classification_ckpt_path))

    # create dataset
    dataset = MicroCalcificationRadiographLevelDataset(
        data_root_dir=args.data_root_dir, mode=args.dataset_type)

    # create data loader
    data_loader = DataLoader(dataset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=r_cfg.train.num_threads)

    # set up metrics object
    metrics = MetricsRadiographLevelDetection(args.distance_threshold,
                                              args.score_threshold_stride)

    for radiograph_idx, (images_tensor, pixel_level_labels_tensor, _,
                         filenames) in enumerate(data_loader):
        filename = filenames[0]

        # logging
        logger.write_and_print(
            '--------------------------------------------------------------------------------------'
        )
        logger.write_and_print(
            'Start evaluating radiograph {} out of {}: {}...'.format(
                radiograph_idx + 1, len(dataset), filename))

        # start time of this radiograph
        start_time_for_radiograph = time()

        # transfer the tensor into gpu device
        images_tensor = images_tensor.cuda()

        # transfer the tensor into ndarray format
        pixel_level_label_np = pixel_level_labels_tensor.cpu().numpy().squeeze()

        # generate the raw radiograph-level residue
        _, raw_residue_radiograph_np = generate_radiograph_level_reconstructed_and_residue_result(
            images_tensor, reconstruction_net, pixel_level_label_np,
            args.reconstruction_patch_size, args.patch_stride)

        # post-process the raw radiograph-level residue
        processed_residue_radiograph_np = post_process_residue_radiograph(
            raw_residue_radiograph_np, pixel_level_label_np,
            args.prob_threshold, args.area_threshold)

        # generate coordinates and score list for the post-processed radiograph-level residue
        pred_coord_list, pred_score_list = generate_coordinate_and_score_list(
            images_tensor, classification_net, pixel_level_label_np,
            raw_residue_radiograph_np, processed_residue_radiograph_np,
            filename, patch_level_visualization_saving_dir,
            args.crop_patch_size, args.resampled_patch_size, net_list)

        # generate coordinates list for the mask
        label_coord_list, _ = generate_coordinate_and_score_list(
            images_tensor,
            classification_net,
            pixel_level_label_np,
            raw_residue_radiograph_np,
            processed_residue_radiograph_np,
            filename,
            patch_level_visualization_saving_dir,
            args.crop_patch_size,
            args.resampled_patch_size,
            net_list,
            mode='annotated')

        # evaluate based on the above three lists
        if args.slack_for_recall:
            detection_result_record_radiograph_level = metrics.metric_all_score_thresholds(
                pred_coord_list, pred_score_list, label_coord_list,
                processed_residue_radiograph_np)
        else:
            detection_result_record_radiograph_level = metrics.metric_all_score_thresholds(
                pred_coord_list, pred_score_list, label_coord_list)
        # save radiograph-level visualization results
        if args.save_visualization_results:
            save_radiograph_level_results(
                images_tensor, pixel_level_label_np, raw_residue_radiograph_np,
                processed_residue_radiograph_np, filename,
                radiograph_level_visualization_saving_dir)

        # logging
        # print logging information of this radiograph
        logger.write_and_print(
            'Finish evaluating radiograph: {}, consuming time: {:.4f}s'.format(
                radiograph_idx + 1,
                time() - start_time_for_radiograph))
        detection_result_record_radiograph_level.print(logger)
        logger.write_and_print(
            '--------------------------------------------------------------------------------------'
        )
        logger.flush()

    # print logging information of this dataset
    logger.write_and_print(
        '&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'
    )
    logger.write_and_print(
        'Finished evaluating this dataset, consuming time: {:.4f}s'.format(
            time() - start_time_for_dataset))
    metrics.detection_result_record_dataset_level.print(logger)
    logger.write_and_print(
        '&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'
    )

    return
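
A minimal sketch of the get_ckpt_path helper used above, assuming it mirrors the checkpoint-selection branch from the reconstruction example earlier (a non-negative epoch index selects that epoch's file, otherwise the best-on-validation checkpoint):

import os

def get_ckpt_path(model_saving_dir, epoch_idx):
    ckpt_dir = os.path.join(model_saving_dir, 'ckpt')
    if epoch_idx >= 0:
        return os.path.join(ckpt_dir, 'net_epoch_{}.pth'.format(epoch_idx))
    best_filenames = [
        filename for filename in os.listdir(ckpt_dir)
        if 'net_best_on_validation_set' in filename
    ]
    return os.path.join(ckpt_dir, best_filenames[0])
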
Example #6
fmt = '%d %s %s %s %s'

print("Starting log")

userInput = input("Do you wish to reset data ? y/N")
if userInput == "y":
    os.system("rm -r journal/")

lowWatermark = l.id_low
highWatermark = l.id_high
assert lowWatermark <= highWatermark
record = fmt % (current_time(), trId, "Begin", "dependency", " ")
n = l.append(record)

l.flush()
assert l.get(n) == record
assert l.get(lowWatermark - 1) is None
assert l.get(highWatermark + 1) is None
assert lowWatermark <= highWatermark

for i in range(lowWatermark, highWatermark):
    assert l.get(i) is not None

userInput = input("Do you wish to launch interactive mode? y/N ")
if userInput == "y":
    print("Type command in the following format:")
    print("Get the high watermark")
    print("high")
    print()
    print("Get the low watermark")
Example #7
def TestMicroCalcificationDetectionPatchLevel(args):
    visualization_saving_dir = os.path.join(args.prediction_saving_dir, 'qualitative_results')

    # remove existing dir which has the same name and create clean dir
    if os.path.exists(args.prediction_saving_dir):
        shutil.rmtree(args.prediction_saving_dir)
    os.mkdir(args.prediction_saving_dir)
    os.mkdir(visualization_saving_dir)

    # initialize logger
    logger = Logger(args.prediction_saving_dir, 'quantitative_results.txt')
    logger.write_and_print('Dataset: {}'.format(args.data_root_dir))
    logger.write_and_print('Dataset type: {}'.format(args.dataset_type))
    logger.write_and_print('Reconstruction model saving dir: {}'.format(args.reconstruction_model_saving_dir))
    logger.write_and_print('Reconstruction ckpt index: {}'.format(args.reconstruction_epoch_idx))
    logger.write_and_print('Classification model saving dir: {}'.format(args.classification_model_saving_dir))
    logger.write_and_print('Classification ckpt index: {}'.format(args.classification_epoch_idx))

    # define the reconstruction network
    reconstruction_net = VNet2d(num_in_channels=r_cfg.net.in_channels, num_out_channels=r_cfg.net.out_channels)
    #
    # get the reconstruction absolute ckpt path
    reconstruction_ckpt_path = get_ckpt_path(args.reconstruction_model_saving_dir, args.reconstruction_epoch_idx)
    #
    # load ckpt and transfer net into gpu devices
    reconstruction_net = torch.nn.DataParallel(reconstruction_net).cuda()
    reconstruction_net.load_state_dict(torch.load(reconstruction_ckpt_path))
    reconstruction_net = reconstruction_net.eval()
    #
    logger.write_and_print('Load ckpt: {0}...'.format(reconstruction_ckpt_path))

    # define the classification network
    classification_net = ResNet18(in_channels=c_cfg.net.in_channels, num_classes=c_cfg.net.num_classes)
    #
    # get the classification absolute ckpt path
    classification_ckpt_path = get_ckpt_path(args.classification_model_saving_dir, args.classification_epoch_idx)
    #
    # load ckpt and transfer net into gpu devices
    classification_net = torch.nn.DataParallel(classification_net).cuda()
    classification_net.load_state_dict(torch.load(classification_ckpt_path))
    classification_net = classification_net.eval()
    #
    logger.write_and_print('Load ckpt: {0}...'.format(classification_ckpt_path))

    # create dataset and data loader
    dataset = MicroCalcificationDataset(data_root_dir=args.data_root_dir,
                                        mode=args.dataset_type,
                                        enable_random_sampling=False,
                                        pos_to_neg_ratio=r_cfg.dataset.pos_to_neg_ratio,
                                        image_channels=r_cfg.dataset.image_channels,
                                        cropping_size=r_cfg.dataset.cropping_size,
                                        dilation_radius=args.dilation_radius,
                                        load_uncertainty_map=False,
                                        calculate_micro_calcification_number=r_cfg.dataset.calculate_micro_calcification_number,
                                        enable_data_augmentation=False)
    #
    data_loader = DataLoader(dataset, batch_size=args.batch_size,
                             shuffle=False, num_workers=r_cfg.train.num_threads)

    metrics = MetricsReconstruction(args.prob_threshold, args.area_threshold, args.distance_threshold)

    calcification_num = 0
    recall_num = 0
    FP_num = 0

    for batch_idx, (images_tensor, pixel_level_labels_tensor, pixel_level_labels_dilated_tensor, _,
                    image_level_labels_tensor, _, filenames) in enumerate(data_loader):
        # start time of this batch
        start_time_for_batch = time()

        # transfer the tensor into gpu device
        images_tensor = images_tensor.cuda()

        # reconstruction network forward
        reconstructed_images_tensor, prediction_residues_tensor = reconstruction_net(images_tensor)

        # classification network forward
        classification_preds_tensor = classification_net(images_tensor)

        # merge the reconstruction and the classification results
        detection_results_np = micro_calcification_detection_batch_level(prediction_residues_tensor,
                                                                         classification_preds_tensor)

        # evaluation
        post_process_preds_np, calcification_num_batch_level, recall_num_batch_level, FP_num_batch_level = \
            metrics.metric_batch_level(detection_results_np, pixel_level_labels_tensor)

        calcification_num += calcification_num_batch_level
        recall_num += recall_num_batch_level
        FP_num += FP_num_batch_level

        # print logging information
        logger.write_and_print(
            'The number of the annotated calcifications of this batch = {}'.format(calcification_num_batch_level))
        logger.write_and_print(
            'The number of the recalled calcifications of this batch = {}'.format(recall_num_batch_level))
        logger.write_and_print(
            'The number of the false positive calcifications of this batch = {}'.format(FP_num_batch_level))
        logger.write_and_print('batch: {}, consuming time: {:.4f}s'.format(batch_idx, time() - start_time_for_batch))
        logger.write_and_print('--------------------------------------------------------------------------------------')

        save_tensor_in_png_and_nii_format(images_tensor, reconstructed_images_tensor, prediction_residues_tensor,
                                          post_process_preds_np, pixel_level_labels_tensor,
                                          pixel_level_labels_dilated_tensor, filenames, visualization_saving_dir)

        logger.flush()

    logger.write_and_print('The number of the annotated calcifications of this dataset = {}'.format(calcification_num))
    logger.write_and_print('The number of the recalled calcifications of this dataset = {}'.format(recall_num))
    logger.write_and_print('The number of the false positive calcifications of this dataset = {}'.format(FP_num))

    return
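
A minimal sketch of one plausible merge rule for micro_calcification_detection_batch_level: gate each patch's residue map by the classifier's positive-class probability. The softmax-over-channel-1 reading of the classifier output and the single-channel residue are assumptions for illustration, not the repository's actual rule.

import torch

def micro_calcification_detection_batch_level(prediction_residues_tensor,
                                              classification_preds_tensor):
    # per-patch probability of containing calcifications: shape (B,)
    probs = torch.softmax(classification_preds_tensor, dim=1)[:, 1]
    # residues: (B, 1, H, W) -> (B, H, W), assumed single channel
    residues_np = prediction_residues_tensor.detach().cpu().numpy().squeeze(axis=1)
    probs_np = probs.detach().cpu().numpy().reshape(-1, 1, 1)
    return residues_np * probs_np  # broadcasts to (B, H, W)
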
Example #8
def TestMicroCalcificationRadiographLevelDetection(args):
    # start time of this dataset
    start_time_for_dataset = time()

    # create clean dir
    if not os.path.exists(args.dst_data_root_dir):
        os.mkdir(args.dst_data_root_dir)
        for patch_type in ['positive_patches', 'negative_patches']:
            os.mkdir(os.path.join(args.dst_data_root_dir, patch_type))
            for dataset_type in ['training', 'validation', 'test']:
                os.mkdir(
                    os.path.join(args.dst_data_root_dir, patch_type,
                                 dataset_type))
                for image_type in ['images', 'labels', 'uncertainty-maps']:
                    os.mkdir(
                        os.path.join(args.dst_data_root_dir, patch_type,
                                     dataset_type, image_type))

    # initialize logger
    logger = Logger(args.dst_data_root_dir)
    logger.write_and_print('Dataset: {}'.format(args.src_data_root_dir))
    logger.write_and_print('Reconstruction model saving dir: {}'.format(
        args.reconstruction_model_saving_dir))
    logger.write_and_print('Reconstruction ckpt index: {}'.format(
        args.reconstruction_epoch_idx))

    # get net list for imitating MC dropout process
    net_for_mc = VNet2d(num_in_channels=cfg.net.in_channels,
                        num_out_channels=cfg.net.out_channels)
    uncertainty_model_ckpt_dir = os.path.join(
        args.uncertainty_model_saving_dir, 'ckpt')
    net_list = get_net_list(net_for_mc, uncertainty_model_ckpt_dir,
                            args.mc_epoch_indexes, logger)

    # define the reconstruction network
    reconstruction_net = VNet2d(num_in_channels=cfg.net.in_channels,
                                num_out_channels=cfg.net.out_channels)
    #
    # get the reconstruction absolute ckpt path
    reconstruction_ckpt_path = get_ckpt_path(
        args.reconstruction_model_saving_dir, args.reconstruction_epoch_idx)
    #
    # load ckpt and transfer net into gpu devices
    reconstruction_net = torch.nn.DataParallel(reconstruction_net).cuda()
    reconstruction_net.load_state_dict(torch.load(reconstruction_ckpt_path))
    reconstruction_net = reconstruction_net.eval()
    #
    logger.write_and_print(
        'Load ckpt: {0}...'.format(reconstruction_ckpt_path))

    for dataset_type in ['training', 'validation', 'test']:
        positive_dataset_type_dir = os.path.join(args.dst_data_root_dir,
                                                 'positive_patches',
                                                 dataset_type, 'images')
        negative_dataset_type_dir = os.path.join(args.dst_data_root_dir,
                                                 'negative_patches',
                                                 dataset_type, 'images')

        # create dataset
        dataset = MicroCalcificationRadiographLevelDataset(
            data_root_dir=args.src_data_root_dir, mode=dataset_type)

        # create data loader
        data_loader = DataLoader(dataset,
                                 batch_size=1,
                                 shuffle=False,
                                 num_workers=cfg.train.num_threads)

        positive_patch_num_dataset_type_level = 0
        negative_patch_num_dataset_type_level = 0

        logger.write_and_print(
            '  &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'
        )
        logger.write_and_print(
            '  Start evaluating {} set...'.format(dataset_type))

        for radiograph_idx, (images_tensor, pixel_level_labels_tensor, _,
                             filenames) in enumerate(data_loader):
            filename = filenames[0]

            # logging
            logger.write_and_print(
                '    Start evaluating {} set radiograph {} out of {}: {}...'.
                format(dataset_type, radiograph_idx + 1, len(dataset),
                       filename))

            # start time of this radiograph
            start_time_for_radiograph = time()

            # transfer the tensor into gpu device
            images_tensor = images_tensor.cuda()

            # transfer the tensor into ndarray format
            image_np = images_tensor.cpu().numpy().squeeze()
            pixel_level_label_np = pixel_level_labels_tensor.cpu().numpy().squeeze()

            # generate the raw radiograph-level residue
            _, raw_residue_radiograph_np = generate_radiograph_level_reconstructed_and_residue_result(
                images_tensor, reconstruction_net, pixel_level_label_np,
                args.reconstruction_patch_size,
                args.reconstruction_patch_stride)

            # post-process the raw radiograph-level residue
            processed_residue_radiograph_np = post_process_residue_radiograph(
                raw_residue_radiograph_np, pixel_level_label_np,
                args.prob_threshold, args.area_threshold)

            # generate coordinates list for the post-processed radiograph-level residue
            pred_coord_list = generate_coordinate_list(
                processed_residue_radiograph_np)

            # generate coordinates list for the mask
            label_coord_list = generate_coordinate_list(pixel_level_label_np,
                                                        mode='annotated')

            # merge pred_coord_list and label_coord_list
            coord_list = merge_coord_list(pred_coord_list, label_coord_list)

            positive_patch_num_radiograph_level, negative_patch_num_radiograph_level = \
                save_images_labels_uncertainty_maps(coord_list, images_tensor, image_np, pixel_level_label_np, net_list,
                                                    filename, positive_dataset_type_dir, negative_dataset_type_dir,
                                                    args.reconstruction_patch_size, args.patch_size)

            positive_patch_num_dataset_type_level += positive_patch_num_radiograph_level
            negative_patch_num_dataset_type_level += negative_patch_num_radiograph_level

            # logging
            # print logging information of this radiograph
            logger.write_and_print(
                '    Finish evaluating radiograph: {}, consuming time: {:.4f}s'
                .format(radiograph_idx + 1,
                        time() - start_time_for_radiograph))
            logger.write_and_print(
                '    This radiograph contains {} positive patches and {} negative patches.'
                .format(positive_patch_num_radiograph_level,
                        negative_patch_num_radiograph_level))
            logger.write_and_print(
                '    ------------------------------------------------------------------------------'
            )
            logger.flush()

        # print logging information of this dataset
        logger.write_and_print(
            '  Finished evaluating {} set, consuming time: {:.4f}s'.format(
                dataset_type,
                time() - start_time_for_dataset))
        logger.write_and_print(
            '  This {} set contains {} positive patches and {} negative patches.'
            .format(dataset_type, positive_patch_num_dataset_type_level,
                    negative_patch_num_dataset_type_level))
        logger.write_and_print(
            '  &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'
        )

    return
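
A minimal sketch of the merge_coord_list step above: keep every predicted coordinate and add each annotated coordinate not already covered by a nearby prediction, so that both false positives and false negatives yield training patches. The distance threshold default is an arbitrary illustrative value.

import numpy as np

def merge_coord_list(pred_coord_list, label_coord_list, distance_threshold=14):
    merged = list(pred_coord_list)
    for label_coord in label_coord_list:
        distances = [
            np.linalg.norm(np.array(label_coord) - np.array(pred_coord))
            for pred_coord in merged
        ]
        # add the annotated coordinate only if no prediction is close enough
        if not distances or min(distances) > distance_threshold:
            merged.append(label_coord)
    return merged
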
def TestMicroCalcificationReconstruction(args):
    prediction_saving_dir = os.path.join(
        args.model_saving_dir,
        'pixel_level_classification_results_dataset_{}_epoch_{}'.format(
            args.dataset_type, args.epoch_idx))
    visualization_saving_dir = os.path.join(prediction_saving_dir,
                                            'qualitative_results')

    # remove existing dir which has the same name and create clean dir
    if os.path.exists(prediction_saving_dir):
        shutil.rmtree(prediction_saving_dir)
    os.mkdir(prediction_saving_dir)
    os.mkdir(visualization_saving_dir)

    # initialize logger
    logger = Logger(prediction_saving_dir, 'quantitative_results.txt')
    logger.write_and_print('Dataset: {}'.format(args.data_root_dir))
    logger.write_and_print('Dataset type: {}'.format(args.dataset_type))

    # define the network
    net = VNet2d(num_in_channels=cfg.net.in_channels,
                 num_out_channels=cfg.net.out_channels)

    # load the specified ckpt
    ckpt_dir = os.path.join(args.model_saving_dir, 'ckpt')
    # epoch_idx is specified -> load the specified ckpt
    if args.epoch_idx >= 0:
        ckpt_path = os.path.join(ckpt_dir,
                                 'net_epoch_{}.pth'.format(args.epoch_idx))
    # epoch_idx is not specified -> load the best ckpt
    else:
        saved_ckpt_list = os.listdir(ckpt_dir)
        best_ckpt_filename = [
            best_ckpt_filename for best_ckpt_filename in saved_ckpt_list
            if 'net_best_on_validation_set' in best_ckpt_filename
        ][0]
        ckpt_path = os.path.join(ckpt_dir, best_ckpt_filename)

    # transfer net into gpu devices
    net = torch.nn.DataParallel(net).cuda()
    net.load_state_dict(torch.load(ckpt_path))
    net = net.eval()

    logger.write_and_print('Load ckpt: {0}...'.format(ckpt_path))

    # create dataset and data loader
    dataset = MicroCalcificationDataset(
        data_root_dir=args.data_root_dir,
        mode=args.dataset_type,
        enable_random_sampling=False,
        pos_to_neg_ratio=cfg.dataset.pos_to_neg_ratio,
        image_channels=cfg.dataset.image_channels,
        cropping_size=cfg.dataset.cropping_size,
        dilation_radius=args.dilation_radius,
        load_uncertainty_map=False,
        calculate_micro_calcification_number=cfg.dataset.calculate_micro_calcification_number,
        enable_data_augmentation=False)
    #
    data_loader = DataLoader(dataset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=cfg.train.num_threads)

    metrics = MetricsPixelLevelClassification(args.prob_threshold,
                                              args.area_threshold,
                                              args.distance_threshold)

    calcification_num = 0
    recall_num = 0
    FP_num = 0

    for batch_idx, (images_tensor, pixel_level_labels_tensor,
                    pixel_level_labels_dilated_tensor, _,
                    image_level_labels_tensor, _,
                    filenames) in enumerate(data_loader):
        # start time of this batch
        start_time_for_batch = time()

        # transfer the tensor into gpu device
        images_tensor = images_tensor.cuda()

        # network forward
        predictions_tensor = net(images_tensor)

        # extract channel 1 (the positive class) from the classification results
        predictions_tensor = extract_classification_preds_channel(
            predictions_tensor, 1)

        # evaluation
        post_process_preds_np, calcification_num_batch_level, recall_num_batch_level, FP_num_batch_level = \
            metrics.metric_batch_level(predictions_tensor, pixel_level_labels_tensor)

        calcification_num += calcification_num_batch_level
        recall_num += recall_num_batch_level
        FP_num += FP_num_batch_level

        # print logging information
        logger.write_and_print(
            'The number of the annotated calcifications of this batch = {}'.
            format(calcification_num_batch_level))
        logger.write_and_print(
            'The number of the recalled calcifications of this batch = {}'.
            format(recall_num_batch_level))
        logger.write_and_print(
            'The number of the false positive calcifications of this batch = {}'
            .format(FP_num_batch_level))
        logger.write_and_print('batch: {}, consuming time: {:.4f}s'.format(
            batch_idx,
            time() - start_time_for_batch))
        logger.write_and_print(
            '--------------------------------------------------------------------------------------'
        )

        save_tensor_in_png_and_nii_format(images_tensor, predictions_tensor,
                                          post_process_preds_np,
                                          pixel_level_labels_tensor,
                                          pixel_level_labels_dilated_tensor,
                                          filenames, visualization_saving_dir)

        logger.flush()

    logger.write_and_print(
        'The number of the annotated calcifications of this dataset = {}'.
        format(calcification_num))
    logger.write_and_print(
        'The number of the recalled calcifications of this dataset = {}'.
        format(recall_num))
    logger.write_and_print(
        'The number of the false positive calcifications of this dataset = {}'.
        format(FP_num))

    return
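
A minimal sketch of the extract_classification_preds_channel step above, assuming the network emits per-class logits and channel 1 is the positive class; the softmax is an assumption, kept so the extracted channel stays a probability map with a singleton channel dimension.

import torch

def extract_classification_preds_channel(predictions_tensor, channel_idx):
    probs = torch.softmax(predictions_tensor, dim=1)
    # keep a singleton channel dimension for the downstream saving code
    return probs[:, channel_idx:channel_idx + 1, :, :]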