Example #1
def ori_image_prepare(image_path, view, horizontal_flip, parameters):
    """
    Loads an image and creates stride_lists
    """
    patch_size = parameters['patch_size']
    more_patches = parameters['more_patches']
    stride_fixed = parameters['stride_fixed']

    image = loading.load_image(image_path, view, horizontal_flip)
    image = image.astype(float)
    loading.standard_normalize_single_image(image)
    
    img_width, img_length = image.shape
    width_stride_list = stride_list_generator(img_width, patch_size, more_patches, stride_fixed)
    length_stride_list = stride_list_generator(img_length, patch_size, more_patches, stride_fixed)

    return image, width_stride_list, length_stride_list
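
The example above depends on `loading` and `stride_list_generator` from its surrounding repository. A minimal usage sketch might look like the following; the parameter values, path, view name, and flip flag are placeholders, not values taken from the source.

# Hypothetical usage sketch; `loading` and `stride_list_generator` must be
# provided by the surrounding repository, and every literal below is a placeholder.
parameters = {
    'patch_size': 256,    # assumed patch size
    'more_patches': 0,    # assumed: no extra patches requested
    'stride_fixed': -1,   # assumed: let the generator choose the stride
}
image, width_strides, length_strides = ori_image_prepare(
    image_path='sample_data/images/example.png',  # placeholder path
    view='L-CC',                                  # placeholder view name
    horizontal_flip='NO',                         # placeholder flip flag
    parameters=parameters,
)
print(image.shape, len(width_strides), len(length_strides))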
Example #2
import os


def ori_image_prepare(short_file_path, view, horizontal_flip, parameters):
    """
    Load an image given its short file path, normalize it, and build the width/length stride lists.
    """
    orginal_image_path = parameters['orginal_image_path']
    patch_size = parameters['patch_size']
    more_patches = parameters['more_patches']
    stride_fixed = parameters['stride_fixed']
    
    image_extension = '.hdf5' if parameters['use_hdf5'] else '.png'
    image_path = os.path.join(orginal_image_path, short_file_path + image_extension)
    image = loading.load_image(image_path, view, horizontal_flip)
    image = image.astype(float)
    loading.standard_normalize_single_image(image)
    
    img_width, img_length = image.shape
    width_stride_list = stride_list_generator(img_width, patch_size, more_patches, stride_fixed)
    length_stride_list = stride_list_generator(img_length, patch_size, more_patches, stride_fixed)

    return image, width_stride_list, length_stride_list
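
Both variants call a `stride_list_generator` that is not shown here. As a rough, hypothetical sketch (an assumption, not the repository's implementation), such a helper could return the offsets at which fixed-size patches tile one image dimension:

import math

import numpy as np


def stride_list_generator(img_dim, patch_size, more_patches=0, stride_fixed=-1):
    """Hypothetical sketch: offsets at which patches of size `patch_size`
    tile a dimension of length `img_dim` (assumes img_dim >= patch_size)."""
    if stride_fixed > 0:
        # walk the dimension with a fixed stride
        offsets = list(range(0, img_dim - patch_size + 1, stride_fixed))
        if offsets[-1] != img_dim - patch_size:
            offsets.append(img_dim - patch_size)  # let the last patch touch the border
    else:
        # spread enough patches to cover the dimension, plus any extras requested
        n_patches = max(2, math.ceil(img_dim / patch_size) + more_patches)
        offsets = np.linspace(0, img_dim - patch_size, n_patches).round().astype(int).tolist()
    return offsets


print(stride_list_generator(1000, 256))  # [0, 248, 496, 744]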
Example #3
import os

import numpy as np
import pandas as pd
import torch
import tqdm

# `loading`, `VIEWS`, and `visualize_example` are helpers defined elsewhere in the
# surrounding repository and are assumed to be importable in this module.


def run_model(model, exam_list, parameters, turn_on_visualization):
    """
    Run the model over images in sample_data.
    Save the predictions as csv and visualizations as png.
    """
    if (parameters["device_type"] == "gpu") and torch.has_cudnn:
        device = torch.device("cuda:{}".format(parameters["gpu_number"]))
    else:
        device = torch.device("cpu")
    model = model.to(device)
    model.eval()

    # initialize data holders
    pred_dict = {
        "image_index": [],
        "benign_pred": [],
        "malignant_pred": [],
        "benign_label": 'no input',
        "malignant_label": 'no input',
    }
    with torch.no_grad():
        # iterate through each exam
        for datum in tqdm.tqdm(exam_list):
            for view in VIEWS.LIST:
                short_file_path = datum[view][0]
                # load image; it is already flipped, so no need to flip it again
                loaded_image = loading.load_image(
                    image_path=os.path.join(parameters["image_path"], short_file_path + ".png"),
                    view=view,
                    horizontal_flip=False,
                )
                loading.standard_normalize_single_image(loaded_image)
                # load segmentations if available
                benign_seg_path = os.path.join(
                    parameters["segmentation_path"],
                    "{0}_{1}".format(short_file_path, "benign.png"))
                malignant_seg_path = os.path.join(
                    parameters["segmentation_path"],
                    "{0}_{1}".format(short_file_path, "malignant.png"))

                # default to empty segmentations when none exist on disk
                benign_seg = np.zeros([1920, 2944], dtype=int)
                malignant_seg = np.zeros([1920, 2944], dtype=int)
                if os.path.exists(benign_seg_path):
                    benign_seg = loading.load_image(
                        image_path=benign_seg_path,
                        view=view,
                        horizontal_flip=False,
                    )
                if os.path.exists(malignant_seg_path):
                    malignant_seg = loading.load_image(
                        image_path=malignant_seg_path,
                        view=view,
                        horizontal_flip=False,
                    )
                # convert the 2D numpy array into a 4D torch tensor in N,C,H,W format
                loaded_image = np.expand_dims(np.expand_dims(loaded_image, 0), 0).copy()
                tensor_batch = torch.Tensor(loaded_image).to(device)
                # forward propagation
                output = model(tensor_batch)
                pred_numpy = output.data.cpu().numpy()
                benign_pred, malignant_pred = pred_numpy[0, 0], pred_numpy[0, 1]
                # save visualization
                if turn_on_visualization:
                    saliency_maps = model.saliency_map.data.cpu().numpy()
                    patch_locations = model.patch_locations
                    patch_imgs = model.patches
                    patch_attentions = model.patch_attns[0, :].data.cpu().numpy()
                    save_dir = os.path.join(
                        parameters["output_path"], "visualization",
                        "{0}.png".format(short_file_path))
                    visualize_example(loaded_image, saliency_maps,
                                      [benign_seg, malignant_seg],
                                      patch_locations, patch_imgs,
                                      patch_attentions, save_dir,
                                      parameters)
                # propagate holders (labels stay at 'no input' since no ground truth is provided)
                pred_dict["image_index"].append(short_file_path)
                pred_dict["benign_pred"].append(benign_pred)
                pred_dict["malignant_pred"].append(malignant_pred)
    return pd.DataFrame(pred_dict)
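
A hypothetical driver for `run_model` might look like this; the model class, exam-list pickle, and parameter values are assumptions standing in for whatever the surrounding repository actually provides.

import os
import pickle

parameters = {
    "device_type": "cpu",                              # or "gpu" with a valid "gpu_number"
    "gpu_number": 0,
    "image_path": "sample_data/cropped_images",        # placeholder
    "segmentation_path": "sample_data/segmentation",   # placeholder
    "output_path": "sample_output",                    # placeholder
}

with open("sample_data/exam_list.pkl", "rb") as f:     # placeholder exam list
    exam_list = pickle.load(f)

model = SomeBreastCancerModel()  # hypothetical model class from the repository
predictions = run_model(model, exam_list, parameters, turn_on_visualization=False)
predictions.to_csv(os.path.join(parameters["output_path"], "predictions.csv"), index=False)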