def run(parameters):
    """
    Runs the model and prints the predictions as JSON to stdout.
    """
    random_number_generator = np.random.RandomState(parameters["seed"])
    model, device = load_model(parameters)

    model_input = load_inputs(
        image_path=parameters["cropped_mammogram_path"],
        metadata_path=parameters["metadata_path"],
        use_heatmaps=parameters["use_heatmaps"],
        benign_heatmap_path=parameters["heatmap_path_benign"],
        malignant_heatmap_path=parameters["heatmap_path_malignant"],
    )
    assert model_input.metadata["full_view"] == parameters["view"]

    all_predictions = []

    # Set up a forward hook to capture intermediate activations
    # (here: the output of the all-views average-pooling layer).
    activation = {'out_resnet': []}
    handle = model.all_views_avg_pool.register_forward_hook(
        tools.get_activation(activation, 'out_resnet'))
    # Alternative hook target, e.g. an inner conv layer:
    # handle = model.view_resnet.layer_list[4][1].conv2.register_forward_hook(
    #     tools.get_activation(activation, 'out_resnet'))

    for data_batch in tools.partition_batch(range(parameters["num_epochs"]),
                                            parameters["batch_size"]):
        batch = []
        for _ in data_batch:
            batch.append(
                process_augment_inputs(
                    model_input=model_input,
                    random_number_generator=random_number_generator,
                    parameters=parameters,
                ))

        tensor_batch = batch_to_tensor(batch, device)
        with torch.no_grad():
            y_hat = model(tensor_batch)
        predictions = np.exp(y_hat.cpu().detach().numpy())[:, :2, 1]
        all_predictions.append(predictions)

    # Detach the hook once inference is done.
    handle.remove()
    agg_predictions = np.concatenate(all_predictions, axis=0).mean(0)
    predictions_dict = {
        "benign": float(agg_predictions[0]),
        "malignant": float(agg_predictions[1]),
    }
    print(json.dumps(predictions_dict))
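
A helper referenced above, tools.get_activation, is not shown in the snippet. A minimal sketch of what such a forward-hook factory plausibly looks like (the name, signature, and behavior are assumptions inferred from the call site, not the repo's actual code):

def get_activation(activation, name):
    # Hypothetical sketch: build a forward hook that records every output
    # of the hooked module into the shared activation[name] list.
    def hook(module, inputs, output):
        # Detach and move to CPU so stored features hold no autograd graph
        # or GPU memory.
        activation[name].append(output.detach().cpu())
    return hook

Under this reading, activation['out_resnet'] accumulates one pooled-feature tensor per forward pass, and handle.remove() detaches the hook once inference is finished.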
Example no. 2
def get_all_prob(all_patches, minibatch_size, model, device, parameters):
    """
    Gets predictions for all sampled patches
    """
    all_prob = np.zeros((len(all_patches), parameters['number_of_classes']))

    for i, minibatch in enumerate(tools.partition_batch(all_patches, minibatch_size)):
        minibatch_prob = prediction_by_batch(minibatch, model, device, parameters)
        all_prob[i * minibatch_size: i * minibatch_size + minibatch_prob.shape[0]] = minibatch_prob
                
    return all_prob.astype(np.float32)
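
tools.partition_batch is used by every example here but never defined in them. A minimal sketch of the chunking behavior the call sites imply (an assumption, not the library's actual implementation):

def partition_batch(iterable, batch_size):
    # Hypothetical sketch: split a sequence into consecutive chunks of at
    # most batch_size elements; the final chunk may be shorter.
    items = list(iterable)
    return [items[i:i + batch_size] for i in range(0, len(items), batch_size)]

# partition_batch(range(10), 4) -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]

This matches the row slicing in get_all_prob above: minibatch i fills rows i * minibatch_size onward, and a short final minibatch still lines up because the slice width comes from minibatch_prob.shape[0].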
Example no. 3
def run(parameters):
    """
    Runs the model, prints the predictions as JSON, and returns the JSON string.
    """
    random_number_generator = np.random.RandomState(parameters["seed"])
    model, device = load_model(parameters)

    model_input = load_inputs(
        image_path=parameters["cropped_mammogram_path"],
        metadata_path=parameters["metadata_path"],
        use_heatmaps=parameters["use_heatmaps"],
        benign_heatmap_path=parameters["heatmap_path_benign"],
        malignant_heatmap_path=parameters["heatmap_path_malignant"],
    )
    assert model_input.metadata["full_view"] == parameters["view"]

    all_predictions = []
    for data_batch in tools.partition_batch(range(parameters["num_epochs"]),
                                            parameters["batch_size"]):
        batch = []
        for _ in data_batch:
            batch.append(
                process_augment_inputs(
                    model_input=model_input,
                    random_number_generator=random_number_generator,
                    parameters=parameters,
                ))
        tensor_batch = batch_to_tensor(batch, device)
        with torch.no_grad():
            y_hat = model(tensor_batch)
        predictions = np.exp(y_hat.cpu().detach().numpy())[:, :2, 1]
        all_predictions.append(predictions)
    agg_predictions = np.concatenate(all_predictions, axis=0).mean(0)
    predictions_dict = {
        "benign": float(agg_predictions[0]),
        "malignant": float(agg_predictions[1]),
    }
    print(json.dumps(predictions_dict))
    return json.dumps(predictions_dict)
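
The indexing np.exp(y_hat...)[:, :2, 1] assumes the model emits log-probabilities shaped (batch, labels, 2 classes): exp recovers probabilities, and column 1 keeps the positive class for the first two labels (benign, malignant). A toy illustration with made-up numbers:

import numpy as np

# Hypothetical log-softmax output: batch of 2 samples, 2 labels, 2 classes.
y_hat = np.log(np.array([
    [[0.9, 0.1], [0.7, 0.3]],  # sample 0: [benign, malignant]
    [[0.8, 0.2], [0.6, 0.4]],  # sample 1
]))
probs = np.exp(y_hat)[:, :2, 1]  # positive-class probability per label
print(probs)  # [[0.1 0.3]
              #  [0.2 0.4]]

Averaging such rows over all augmented copies yields the two scalars stored in predictions_dict.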
Example no. 4
def run(parameters):
    """
    Runs the model and prints the predictions as JSON to stdout.
    """
    random_number_generator = np.random.RandomState(parameters["seed"])
    sess, x, y = load_model(parameters)

    model_input = load_inputs(
        image_path=parameters["cropped_mammogram_path"],
        metadata_path=parameters["metadata_path"],
        use_heatmaps=parameters["use_heatmaps"],
        benign_heatmap_path=parameters["heatmap_path_benign"],
        malignant_heatmap_path=parameters["heatmap_path_malignant"],
    )
    assert model_input.metadata["full_view"] == parameters["view"]

    all_predictions = []
    for data_batch in tools.partition_batch(range(parameters["num_epochs"]),
                                            parameters["batch_size"]):
        batch = []
        for _ in data_batch:
            batch.append(
                process_augment_inputs(
                    model_input=model_input,
                    random_number_generator=random_number_generator,
                    parameters=parameters,
                ))
        x_data = batch_to_inputs(batch)
        with sess.as_default():
            y_hat = sess.run(y, feed_dict={x: x_data})
        predictions = np.exp(y_hat)[:, :, 1]
        all_predictions.append(predictions)
    agg_predictions = np.concatenate(all_predictions, axis=0).mean(0)
    predictions_dict = {
        "benign": float(agg_predictions[0]),
        "malignant": float(agg_predictions[1]),
    }
    print(json.dumps(predictions_dict))
Example no. 5
def run_model(model, device, exam_list, parameters):
    """
    Returns predictions of image only model or image+heatmaps model.
    Prediction for each exam is averaged over a given number of epochs.
    """
    random_number_generator = np.random.RandomState(parameters["seed"])

    image_extension = ".hdf5" if parameters["use_hdf5"] else ".png"

    with torch.no_grad():
        predictions_ls = []
        for datum in tqdm.tqdm(exam_list):
            predictions_for_datum = []
            loaded_image_dict = {view: [] for view in VIEWS.LIST}
            loaded_heatmaps_dict = {view: [] for view in VIEWS.LIST}
            for view in VIEWS.LIST:
                for short_file_path in datum[view]:
                    loaded_image = loading.load_image(
                        image_path=os.path.join(
                            parameters["image_path"],
                            short_file_path + image_extension),
                        view=view,
                        horizontal_flip=datum["horizontal_flip"],
                    )
                    if parameters["use_heatmaps"]:
                        loaded_heatmaps = loading.load_heatmaps(
                            benign_heatmap_path=os.path.join(
                                parameters["heatmaps_path"], "heatmap_benign",
                                short_file_path + ".hdf5"),
                            malignant_heatmap_path=os.path.join(
                                parameters["heatmaps_path"],
                                "heatmap_malignant",
                                short_file_path + ".hdf5"),
                            view=view,
                            horizontal_flip=datum["horizontal_flip"],
                        )
                    else:
                        loaded_heatmaps = None

                    loaded_image_dict[view].append(loaded_image)
                    loaded_heatmaps_dict[view].append(loaded_heatmaps)
            for data_batch in tools.partition_batch(
                    range(parameters["num_epochs"]), parameters["batch_size"]):
                batch_dict = {view: [] for view in VIEWS.LIST}
                for _ in data_batch:
                    for view in VIEWS.LIST:
                        image_index = 0
                        if parameters["augmentation"]:
                            image_index = random_number_generator.randint(
                                low=0, high=len(datum[view]))
                        cropped_image, cropped_heatmaps = loading.augment_and_normalize_image(
                            image=loaded_image_dict[view][image_index],
                            auxiliary_image=loaded_heatmaps_dict[view][image_index],
                            view=view,
                            best_center=datum["best_center"][view][image_index],
                            random_number_generator=random_number_generator,
                            augmentation=parameters["augmentation"],
                            max_crop_noise=parameters["max_crop_noise"],
                            max_crop_size_noise=parameters["max_crop_size_noise"],
                        )
                        if loaded_heatmaps_dict[view][image_index] is None:
                            batch_dict[view].append(cropped_image[:, :, np.newaxis])
                        else:
                            batch_dict[view].append(np.concatenate([
                                cropped_image[:, :, np.newaxis],
                                cropped_heatmaps,
                            ], axis=2))

                tensor_batch = {
                    view: torch.tensor(np.stack(batch_dict[view])).permute(
                        0, 3, 1, 2).to(device)
                    for view in VIEWS.LIST
                }
                output = model(tensor_batch)
                batch_predictions = compute_batch_predictions(
                    output, mode=parameters["model_mode"])
                pred_df = pd.DataFrame(
                    {k: v[:, 1]
                     for k, v in batch_predictions.items()})
                pred_df.columns.names = ["label", "view_angle"]
                predictions = pred_df.T.reset_index().groupby(
                    "label").mean().T[LABELS.LIST].values
                predictions_for_datum.append(predictions)
            predictions_ls.append(
                np.mean(np.concatenate(predictions_for_datum, axis=0), axis=0))

    return np.array(predictions_ls)
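
The tensor_batch construction above turns a list of H x W x C arrays into the N x C x H x W layout PyTorch convolutions expect. A self-contained shape check with made-up sizes:

import numpy as np
import torch

# Four augmented copies of one view, 8x6 pixels, 3 channels
# (image + benign heatmap + malignant heatmap), stored as H x W x C.
batch_list = [np.zeros((8, 6, 3), dtype=np.float32) for _ in range(4)]
stacked = np.stack(batch_list)                      # (4, 8, 6, 3): N, H, W, C
tensor = torch.tensor(stacked).permute(0, 3, 1, 2)  # (4, 3, 8, 6): N, C, H, W
print(tensor.shape)  # torch.Size([4, 3, 8, 6])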
Example no. 6

def run_sub_model(model, exam_list, parameters):
    """
    Returns predictions of the image-only model or the image+heatmaps model.
    Prediction for each exam is averaged over a given number of epochs.
    """
    if (parameters["device_type"] == "gpu") and torch.has_cudnn:
        device = torch.device(f"cuda:{parameters["gpu_number"]}")
    else:
        device = torch.device("cpu")
    sub_model = sub_model.to(device)
    # F: sets model in evaluation mode. It has an effect in certain modules: e.g. Dropout or BatchNorm Layers
    sub_model.eval()

    random_number_generator = np.random.RandomState(parameters["seed"])

    image_extension = ".hdf5" if parameters["use_hdf5"] else ".png"

    with torch.no_grad():
        predictions_ls = []
        for datum in tqdm.tqdm(exam_list):
            predictions_for_datum = []
            loaded_image_dict = {view: [] for view in VIEWS.LIST}
            loaded_heatmaps_dict = {view: [] for view in VIEWS.LIST}
            for view in VIEWS.LIST:
                # F: for one exam, all images of a specific view
                for short_file_path in datum[view]:
                    loaded_image = loading.load_image(
                        image_path=os.path.join(parameters["image_path"], short_file_path + image_extension),
                        view=view,
                        horizontal_flip=datum["horizontal_flip"],
                    )

                    loaded_image_dict[view].append(loaded_image)
                    # F: this variant loads no heatmaps; keep the parallel list aligned with None
                    loaded_heatmaps_dict[view].append(None)
            print(f"length loaded_image: {len(loaded_image_dict)}")
            for data_batch in tools.partition_batch(range(parameters["num_epochs"]), parameters["batch_size"]):
                print(f"num_epochs: {parameters['num_epochs']}")
                print(f"batch_size: {parameters['batch_size']}")
                tmp = tools.partition_batch(range(parameters["num_epochs"]), parameters["batch_size"])
                print(f"partition_batch: {tmp}")
                batch_dict = {view: [] for view in VIEWS.LIST}
                for _ in data_batch:
                    for view in VIEWS.LIST:
                        image_index = 0
                        # F: they use different augmentation for each view
                        if parameters["augmentation"]:
                            image_index = random_number_generator.randint(low=0, high=len(datum[view]))
                        cropped_image, cropped_heatmaps = loading.augment_and_normalize_image(
                            image=loaded_image_dict[view][image_index], 
                            auxiliary_image=loaded_heatmaps_dict[view][image_index],
                            view=view,
                            best_center=datum["best_center"][view][image_index],
                            random_number_generator=random_number_generator,
                            augmentation=parameters["augmentation"],
                            max_crop_noise=parameters["max_crop_noise"],
                            max_crop_size_noise=parameters["max_crop_size_noise"],
                        )
                        # print(f"cropped_image: {image_index} of m in minibatch: {_} size: {cropped_image.shape}")

                        else:
                            # F: e.g. batch_dict[view][:,:,1] is the first heatmap 
                            batch_dict[view].append(np.concatenate([
                                cropped_image[:, :, np.newaxis],
                                cropped_heatmaps,
                            ], axis=2))

                        # print(f"batch_dict_view: {len(batch_dict[view])}")
                        # print(f"batch_img_size: {batch_dict[view][_].shape}")


                tensor_batch = {
                    # F: result of np.stack has one more dimension:
                    # F: 4 dimensions: batch_data_i, y_pixels, x_pixels, channels 
                    view: torch.tensor(np.stack(batch_dict[view])).permute(0, 3, 1, 2).to(device)
                    for view in VIEWS.LIST
                }


                # print(f"layer_names: {model.state_dict().keys()}")
                # Print model's state_dict
                output = model(tensor_batch)
                batch_predictions = compute_batch_predictions(output)
                print(f"batch_predictions: \n {batch_predictions}")
                print(len(batch_predictions.keys()))
                # F: they keep column 1 (the positive-class probability); column 0 is its complement
                pred_df = pd.DataFrame({k: v[:, 1] for k, v in batch_predictions.items()})
                pred_df.columns.names = ["label", "view_angle"]
                # print(f"pred_df.head: {pred_df.head()}")
                # F: complicated way of grouping by label and calculating the mean                
                predictions = pred_df.T.reset_index().groupby("label").mean().T[LABELS.LIST].values
                predictions_for_datum.append(predictions)
                print(f"predictions: {predictions}")
            predictions_ls.append(np.mean(np.concatenate(predictions_for_datum, axis=0), axis=0))

    return np.array(predictions_ls)
Example no. 7
def run_model(model, device, exam_list, parameters):
    """
    Returns predictions of image only model or image+heatmaps model.
    Prediction for each exam is averaged over a given number of epochs.
    """
    random_number_generator = np.random.RandomState(parameters["seed"])

    image_extension = ".hdf5" if parameters["use_hdf5"] else ".png"

    with torch.no_grad():
        predictions_ls = []
        for datum in tqdm.tqdm(exam_list):
            predictions_for_datum = []
            # F: VIEWS is an adhoc class
            # F: VIEWS.LIST : list of views as string
            loaded_image_dict = {view: [] for view in VIEWS.LIST}
            loaded_heatmaps_dict = {view: [] for view in VIEWS.LIST}
            for view in VIEWS.LIST:
                # F: for one exam, all images of a specific view
                for short_file_path in datum[view]:
                    loaded_image = loading.load_image(
                        image_path=os.path.join(
                            parameters["image_path"],
                            short_file_path + image_extension),
                        view=view,
                        horizontal_flip=datum["horizontal_flip"],
                    )
                    if parameters["use_heatmaps"]:
                        loaded_heatmaps = loading.load_heatmaps(
                            benign_heatmap_path=os.path.join(
                                parameters["heatmaps_path"], "heatmap_benign",
                                short_file_path + ".hdf5"),
                            malignant_heatmap_path=os.path.join(
                                parameters["heatmaps_path"],
                                "heatmap_malignant",
                                short_file_path + ".hdf5"),
                            view=view,
                            horizontal_flip=datum["horizontal_flip"],
                        )
                    else:
                        loaded_heatmaps = None

                    loaded_image_dict[view].append(loaded_image)
                    loaded_heatmaps_dict[view].append(loaded_heatmaps)
            # print(f"length loaded_image: {len(loaded_image_dict)}")
            for data_batch in tools.partition_batch(
                    range(parameters["num_epochs"]), parameters["batch_size"]):
                # print(f"num_epochs: {parameters['num_epochs']}")
                # print(f"batch_size: {parameters['batch_size']}")
                tmp = tools.partition_batch(range(parameters["num_epochs"]),
                                            parameters["batch_size"])
                # print(f"partition_batch: {tmp}")
                batch_dict = {view: [] for view in VIEWS.LIST}
                for _ in data_batch:
                    for view in VIEWS.LIST:
                        image_index = 0
                        # F: they use different augmentation for each view
                        if parameters["augmentation"]:
                            image_index = random_number_generator.randint(
                                low=0, high=len(datum[view]))

                        cropped_image, cropped_heatmaps = loading.augment_and_normalize_image(
                            image=loaded_image_dict[view][image_index],
                            auxiliary_image=loaded_heatmaps_dict[view][image_index],
                            view=view,
                            best_center=datum["best_center"][view][image_index],
                            random_number_generator=random_number_generator,
                            augmentation=parameters["augmentation"],
                            max_crop_noise=parameters["max_crop_noise"],
                            max_crop_size_noise=parameters["max_crop_size_noise"],
                        )
                        # print(f"cropped_image: {image_index} of m in minibatch: {_} size: {cropped_image.shape}")

                        if loaded_heatmaps_dict[view][image_index] is None:
                            # F: e.g. batch_dict[view][_].shape = (2974, 1748, 1)
                            batch_dict[view].append(cropped_image[:, :, np.newaxis])
                        else:
                            # F: e.g. batch_dict[view][:,:,1] is the first heatmap
                            batch_dict[view].append(np.concatenate([
                                cropped_image[:, :, np.newaxis],
                                cropped_heatmaps,
                            ], axis=2))

                        # print(f"batch_dict_view: {len(batch_dict[view])}")
                        # print(f"batch_img_size: {batch_dict[view][_].shape}")

                tensor_batch = {
                    # F: result of np.stack has one more dimension:
                    # F: 4 dimensions: batch_data_i, y_pixels, x_pixels, channels
                    view: torch.tensor(np.stack(batch_dict[view])).permute(
                        0, 3, 1, 2).to(device)
                    for view in VIEWS.LIST
                }

                # print(f"layer_names: {model.state_dict().keys()}")
                # Print model's state_dict
                # print("Model's state_dict:")
                # for param_tensor in model.state_dict():
                # print(param_tensor, "\t", model.state_dict()[param_tensor].size())
                output = model(tensor_batch)
                batch_predictions = compute_batch_predictions(
                    output, mode=parameters["model_mode"])
                # print(f"batch_predictions: \n {batch_predictions}")
                # print(len(batch_predictions.keys()))
                # F: they keep column 1 (the positive-class probability); column 0 is its complement
                pred_df = pd.DataFrame(
                    {k: v[:, 1]
                     for k, v in batch_predictions.items()})
                pred_df.columns.names = ["label", "view_angle"]
                # print(f"pred_df.head: {pred_df.head()}")
                # F: complicated way of grouping by label and calculating the mean
                predictions = pred_df.T.reset_index().groupby(
                    "label").mean().T[LABELS.LIST].values
                predictions_for_datum.append(predictions)
                # print(f"predictions: {predictions}")
            predictions_ls.append(
                np.mean(np.concatenate(predictions_for_datum, axis=0), axis=0))

    return np.array(predictions_ls)
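
The pandas chain the comments call a "complicated way of grouping by label" can be reproduced on toy data. The view angles and probabilities below are made up, and LABELS.LIST is assumed to be ["benign", "malignant"]:

import pandas as pd

columns = pd.MultiIndex.from_tuples(
    [("benign", "cc"), ("benign", "mlo"),
     ("malignant", "cc"), ("malignant", "mlo")],
    names=["label", "view_angle"])
pred_df = pd.DataFrame([[0.1, 0.3, 0.2, 0.4],
                        [0.2, 0.4, 0.3, 0.5]], columns=columns)
# Equivalent, simpler grouping: average over view angles within each label.
predictions = pred_df.T.groupby(level="label").mean().T[["benign", "malignant"]].values
print(predictions)  # [[0.2 0.3]
                    #  [0.3 0.4]]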