Example #1
def evaluate(coronal_model_path, volumes_txt_file, data_dir, device, prediction_path, batch_size, orientation,
             label_names, dir_struct, need_unc=False, mc_samples=0):
    print("**Starting evaluation**")
    with open(volumes_txt_file) as file_handle:
        volumes_to_use = file_handle.read().splitlines()

    model = torch.load(coronal_model_path)
    cuda_available = torch.cuda.is_available()
    if cuda_available:
        torch.cuda.empty_cache()
        model.cuda(device)

    model.eval()

    common_utils.create_if_not(prediction_path)
    print("Evaluating now...")
    file_paths = du.load_file_paths_eval(data_dir, volumes_txt_file, dir_struct)

    with torch.no_grad():
        volume_dict_list = []
        cvs_dict_list = []
        iou_dict_list = []
        for vol_idx, file_path in enumerate(file_paths):
            try:
                if need_unc == "True":
                    _, volume_prediction, mc_pred_list, header = _segment_vol_unc(file_path, model, orientation,
                                                                                  batch_size, mc_samples,
                                                                                  cuda_available, device)
                    iou_dict, cvs_dict = compute_structure_uncertainty(mc_pred_list, label_names,
                                                                       volumes_to_use[vol_idx])
                    cvs_dict_list.append(cvs_dict)
                    iou_dict_list.append(iou_dict)
                else:
                    _, volume_prediction, header = _segment_vol(file_path, model, orientation, batch_size,
                                                                cuda_available,
                                                                device)

                nifti_img = nib.Nifti1Image(volume_prediction, np.eye(4), header=header)
                print("Processed: " + volumes_to_use[vol_idx] + " " + str(vol_idx + 1) + " out of " + str(
                    len(file_paths)))
                nib.save(nifti_img, os.path.join(prediction_path, volumes_to_use[vol_idx] + '.nii'))
                per_volume_dict = compute_volume(volume_prediction, label_names, volumes_to_use[vol_idx])
                volume_dict_list.append(per_volume_dict)
            except FileNotFoundError:
                print("Error in reading the file ...")

        _write_csv_table('volume_estimates.csv', prediction_path, volume_dict_list, label_names)

        if need_unc == "True":
            _write_csv_table('cvs_uncertainty.csv', prediction_path, cvs_dict_list, label_names)
            _write_csv_table('iou_uncertainty.csv', prediction_path, iou_dict_list, label_names)

    print("DONE")
Example #2
def compute_vol_bulk(prediction_dir, dir_struct, label_names, volumes_txt_file):
    print("**Computing volume estimates**")

    with open(volumes_txt_file) as file_handle:
        volumes_to_use = file_handle.read().splitlines()

    file_paths = du.load_file_paths_eval(prediction_dir, volumes_txt_file, dir_struct)

    volume_dict_list = []

    for vol_idx, file_path in enumerate(file_paths):
        volume_prediction, header = du.load_and_preprocess_eval(file_path, "SAG", notlabel=False)
        per_volume_dict = compute_volume(volume_prediction, label_names, volumes_to_use[vol_idx])
        volume_dict_list.append(per_volume_dict)

    _write_csv_table('volume_estimates.csv', prediction_dir, volume_dict_list, label_names)
    print("**DONE**")
Example #3
def evaluate2view(coronal_model_path, axial_model_path, volumes_txt_file, data_dir, device, prediction_path, batch_size,
                  label_names, dir_struct, need_unc=False, mc_samples=0):
    print("**Starting evaluation**")
    with open(volumes_txt_file) as file_handle:
        volumes_to_use = file_handle.read().splitlines()

    model1 = torch.load(coronal_model_path)

    model2 = torch.load(axial_model_path)

    cuda_available = torch.cuda.is_available()
    if cuda_available:
        torch.cuda.empty_cache()
        model1.cuda(device)
        model2.cuda(device)

    model1.eval()
    model2.eval()

    common_utils.create_if_not(prediction_path)
    print("Evaluating now...")

    file_paths = du.load_file_paths_eval(data_dir, volumes_txt_file, dir_struct)

    with torch.no_grad():
        volume_dict_list = []
        cvs_dict_list = []
        iou_dict_list = []
        for vol_idx, file_path in enumerate(file_paths):
            try:
                if need_unc == "True":
                    volume_prediction_cor, _, mc_pred_list_cor, header = _segment_vol_unc(file_path, model1, "COR",
                                                                                          batch_size, mc_samples,
                                                                                          cuda_available, device)
                    volume_prediction_axi, _, mc_pred_list_axi, header = _segment_vol_unc(file_path, model2, "AXI",
                                                                                          batch_size, mc_samples,
                                                                                          cuda_available, device)
                    mc_pred_list = mc_pred_list_cor + mc_pred_list_axi
                    iou_dict, cvs_dict = compute_structure_uncertainty(mc_pred_list, label_names,
                                                                       volumes_to_use[vol_idx])
                    cvs_dict_list.append(cvs_dict)
                    iou_dict_list.append(iou_dict)
                else:
                    volume_prediction_cor, _, header = _segment_vol(file_path, model1, "COR", batch_size,
                                                                    cuda_available,
                                                                    device)
                    volume_prediction_axi, _, header = _segment_vol(file_path, model2, "AXI", batch_size,
                                                                    cuda_available,
                                                                    device)

                _, volume_prediction = torch.max(volume_prediction_axi + volume_prediction_cor, dim=1)
                volume_prediction = (volume_prediction.cpu().numpy()).astype('float32')
                volume_prediction = np.squeeze(volume_prediction)
                nifti_img = nib.Nifti1Image(volume_prediction, np.eye(4), header=header)
                print("Processed: " + volumes_to_use[vol_idx] + " " + str(vol_idx + 1) + " out of " + str(
                    len(file_paths)))
                nib.save(nifti_img, os.path.join(prediction_path, volumes_to_use[vol_idx] + '.nii.gz'))

                per_volume_dict = compute_volume(volume_prediction, label_names, volumes_to_use[vol_idx])
                volume_dict_list.append(per_volume_dict)

            except FileNotFoundError:
                print("Error in reading the file ...")
            except Exception as exp:
                # Log any other unexpected error and continue with the next volume.
                import logging
                logging.getLogger(__name__).exception(exp)

        _write_csv_table('volume_estimates.csv', prediction_path, volume_dict_list, label_names)

        if need_unc == "True":
            _write_csv_table('cvs_uncertainty.csv', prediction_path, cvs_dict_list, label_names)
            _write_csv_table('iou_uncertainty.csv', prediction_path, iou_dict_list, label_names)

    print("DONE")
def evaluate2view(coronal_model_path, axial_model_path, volumes_txt_file, data_dir, device, prediction_path, batch_size,
                  label_names, dir_struct, need_unc=False, mc_samples=0, exit_on_error=False):
    log.info("**Starting evaluation**")
    with open(volumes_txt_file) as file_handle:
        volumes_to_use = file_handle.read().splitlines()

    cuda_available = torch.cuda.is_available()
    if type(device) == int:
        # if CUDA available, follow through, else warn and fallback to CPU
        if cuda_available:
            model1 = torch.load(coronal_model_path)
            model2 = torch.load(axial_model_path)
            
            torch.cuda.empty_cache()
            model1.cuda(device)
            model2.cuda(device)
        else:
            log.warning(
                'CUDA is not available, trying with CPU. ' + \
                'This can take much longer (> 1 hour). Cancel and ' + \
                'investigate if this behavior is not desired.'
            )
            # switch device to 'cpu' so the checkpoints are loaded on the CPU below
            device = 'cpu'

    if type(device) == str or not cuda_available:
        model1 = torch.load(
            coronal_model_path, 
            map_location=torch.device(device)
        )
        model2 = torch.load(
            axial_model_path, 
            map_location=torch.device(device)
        )

    model1.eval()
    model2.eval()

    common_utils.create_if_not(prediction_path)
    log.info("Evaluating now...")

    file_paths = du.load_file_paths_eval(data_dir, volumes_txt_file, dir_struct)

    with torch.no_grad():
        volume_dict_list = []
        cvs_dict_list = []
        iou_dict_list = []
        for vol_idx, file_path in enumerate(file_paths):
            try:
                if need_unc == "True":
                    volume_prediction_cor, _, mc_pred_list_cor, header = _segment_vol_unc(file_path, model1, "COR",
                                                                                          batch_size, mc_samples,
                                                                                          cuda_available, device)
                    volume_prediction_axi, _, mc_pred_list_axi, header = _segment_vol_unc(file_path, model2, "AXI",
                                                                                          batch_size, mc_samples,
                                                                                          cuda_available, device)
                    mc_pred_list = mc_pred_list_cor + mc_pred_list_axi
                    iou_dict, cvs_dict = compute_structure_uncertainty(mc_pred_list, label_names,
                                                                       volumes_to_use[vol_idx])
                    cvs_dict_list.append(cvs_dict)
                    iou_dict_list.append(iou_dict)
                else:
                    volume_prediction_cor, _, header = _segment_vol(file_path, model1, "COR", batch_size,
                                                                    cuda_available,
                                                                    device)
                    volume_prediction_axi, _, header = _segment_vol(file_path, model2, "AXI", batch_size,
                                                                    cuda_available,
                                                                    device)

                _, volume_prediction = torch.max(volume_prediction_axi + volume_prediction_cor, dim=1)
                volume_prediction = (volume_prediction.cpu().numpy()).astype('float32')
                volume_prediction = np.squeeze(volume_prediction)

                #Copy header affine
                Mat = np.array([
                    header['srow_x'], 
                    header['srow_y'], 
                    header['srow_z'],
                    [0,0,0,1]
                ])
                # Apply original image affine to prediction volume
                nifti_img = nib.Nifti1Image(volume_prediction, Mat, header=header)

                log.info("Processed: " + volumes_to_use[vol_idx] + " " + str(vol_idx + 1) + " out of " + str(
                    len(file_paths)))
                nib.save(nifti_img, os.path.join(prediction_path, volumes_to_use[vol_idx] + '.nii.gz'))

                per_volume_dict = compute_volume(volume_prediction, label_names, volumes_to_use[vol_idx])
                volume_dict_list.append(per_volume_dict)

            except FileNotFoundError as exp:
                log.error("Error in reading the file ...")
                log.exception(exp)
                if exit_on_error:
                    raise(exp)                
            except Exception as exp:
                log.exception(exp)
                if exit_on_error:
                    raise(exp)
                # log.info("Other kind o error!")

        _write_csv_table('volume_estimates.csv', prediction_path, volume_dict_list, label_names)

        if need_unc == "True":
            _write_csv_table('cvs_uncertainty.csv', prediction_path, cvs_dict_list, label_names)
            _write_csv_table('iou_uncertainty.csv', prediction_path, iou_dict_list, label_names)

    log.info("DONE")
def evaluate(coronal_model_path, volumes_txt_file, data_dir, device, prediction_path, batch_size, orientation,
             label_names, dir_struct, need_unc=False, mc_samples=0, exit_on_error=False):
    log.info("**Starting evaluation**")
    with open(volumes_txt_file) as file_handle:
        volumes_to_use = file_handle.read().splitlines()

    cuda_available = torch.cuda.is_available()
    # First, are we attempting to run on a GPU?
    if type(device) == int:
        # if CUDA available, follow through, else warn and fallback to CPU
        if cuda_available:
            model = torch.load(coronal_model_path)
            torch.cuda.empty_cache()
            model.cuda(device)
        else:
            log.warning(
                'CUDA is not available, trying with CPU. ' + \
                'This can take much longer (> 1 hour). Cancel and ' + \
                'investigate if this behavior is not desired.'
            )
            # switch device to 'cpu'
            device = 'cpu'
    # If device is 'cpu' or CUDA not available
    if type(device) == str or not cuda_available:
        model = torch.load(
            coronal_model_path, 
            map_location=torch.device(device)
        )

    model.eval()

    common_utils.create_if_not(prediction_path)
    log.info("Evaluating now...")
    file_paths = du.load_file_paths_eval(data_dir, volumes_txt_file, dir_struct)

    with torch.no_grad():
        volume_dict_list = []
        cvs_dict_list = []
        iou_dict_list = []
        for vol_idx, file_path in enumerate(file_paths):
            try:
                if need_unc == "True":
                    _, volume_prediction, mc_pred_list, header = _segment_vol_unc(file_path, model, orientation,
                                                                                  batch_size, mc_samples,
                                                                                  cuda_available, device)
                    iou_dict, cvs_dict = compute_structure_uncertainty(mc_pred_list, label_names,
                                                                       volumes_to_use[vol_idx])
                    cvs_dict_list.append(cvs_dict)
                    iou_dict_list.append(iou_dict)
                else:
                    _, volume_prediction, header = _segment_vol(file_path, model, orientation, batch_size,
                                                                cuda_available,
                                                                device)

                volume_prediction = preprocessor.remap_labels_back(volume_prediction, remap_config='SLANT') #BORIS

                #Copy header affine
                Mat = np.array([
                    header['srow_x'], 
                    header['srow_y'], 
                    header['srow_z'],
                    [0,0,0,1]
                ])
                # Apply original image affine to prediction volume
                nifti_img = nib.Nifti1Image(volume_prediction, Mat, header=header)
                log.info("Processed: " + volumes_to_use[vol_idx] + " " + str(vol_idx + 1) + " out of " + str(
                    len(file_paths)))
                save_file = os.path.join(prediction_path, volumes_to_use[vol_idx])
                if '.nii' not in save_file:
                    save_file += '.nii.gz'
                nib.save(nifti_img, save_file)
                per_volume_dict = compute_volume(volume_prediction, label_names, volumes_to_use[vol_idx])
                volume_dict_list.append(per_volume_dict)
            except FileNotFoundError as exp:
                log.error("Error in reading the file ...")
                log.exception(exp)
                if exit_on_error:
                    raise(exp)
            except Exception as exp:
                log.exception(exp)
                if exit_on_error:
                    raise(exp)

        _write_csv_table('volume_estimates.csv', prediction_path, volume_dict_list, label_names)

        if need_unc == "True":
            _write_csv_table('cvs_uncertainty.csv', prediction_path, cvs_dict_list, label_names)
            _write_csv_table('iou_uncertainty.csv', prediction_path, iou_dict_list, label_names)

    log.info("DONE")