Example #1
    def save_predictions(self):
        """
        Saves the model's predicted images in the results directory
        """
        print("Save image predictions")

        # Prepare model for inference
        self.model.eval()
        inference_agent = UNetInferenceAgent(model=self.model, device=self.device)
        # Get first test data volume
        first_test_data = self.test_data[0]
        # Get the model predictions
        pred_label = inference_agent.single_volume_inference(first_test_data["image"])
        # Calculate the middle slice index
        axial_middle_index = pred_label.shape[0] // 2
        # Create middle-slice images of the MRI volume, the target labels, and the prediction for this epoch
        image = (first_test_data["image"][axial_middle_index] * 255).astype(np.uint8)
        label = (first_test_data["seg"][axial_middle_index] * 255).astype(np.uint8)
        prediction = (pred_label[axial_middle_index] * 255).astype(np.uint8)
        # Convert from numpy array to image objects
        image = Image.fromarray(image)
        label = Image.fromarray(label)
        prediction = Image.fromarray(prediction)
        # Save images (cmap is a matplotlib argument; PIL's save() ignores it, so it is dropped)
        image.save(self.out_images_dir + '/Epoch' + str(self.epoch) + '-image.png')
        label.save(self.out_images_dir + '/Epoch' + str(self.epoch) + '-label.png')
        prediction.save(self.out_images_dir + '/Epoch' + str(self.epoch) + '-prediction.png')

    def run_test(self):
        """
        This runs the test cycle on the test dataset.
        Note that the process and evaluations are quite different from
        training and validation: here we compute a lot more metrics and
        return a dictionary that could later be persisted as JSON.
        """
        print("Testing...")
        self.model.eval()

        # In this method we will be computing metrics that are relevant to the task of 3D volume
        # segmentation. Therefore, unlike train and validation methods, we will do inferences
        # on full 3D volumes, much like we will be doing it when we deploy the model in the 
        # clinical environment.

        # Instantiate inference agent
        inference_agent = UNetInferenceAgent(model=self.model, device=self.device)

        out_dict = {}
        out_dict["volume_stats"] = []
        dc_list = []
        jc_list = []

        # for every volume in the test set
        for i, x in enumerate(self.test_data):
            pred_label = inference_agent.single_volume_inference(x["image"])

            # We compute and report Dice and Jaccard similarity coefficients which 
            # assess how close our volumes are to each other

            
            # You can look up the definition of the Jaccard index
            # on Wikipedia. If you completed it
            # correctly (and if you picked your train/val/test split right ;)),
            # your average Jaccard on your test set should be around 0.80

            dc = Dice3d(pred_label, x["seg"])
            jc = Jaccard3d(pred_label, x["seg"])
            dc_list.append(dc)
            jc_list.append(jc)

            # STAND-OUT SUGGESTION: By way of exercise, consider also outputting:
            # * Sensitivity and specificity (and explain semantic meaning in terms of 
            #   under/over segmenting)
            # * Dice-per-slice and render combined slices with lowest and highest DpS
            # * Dice per class (anterior/posterior)

            out_dict["volume_stats"].append({
                "filename": x['filename'],
                "dice": dc,
                "jaccard": jc
                })
            print(f"{x['filename']} Dice {dc:.4f}. {100*(i+1)/len(self.test_data):.2f}% complete")

        out_dict["overall"] = {
            "mean_dice": np.mean(dc_list),
            "mean_jaccard": np.mean(jc_list)}

        print("\nTesting complete.")
        return out_dict
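
# The Dice3d and Jaccard3d helpers called above are left as a TASK in the
# starter code, so implementations vary. A minimal sketch, assuming both
# arguments are 3D label volumes in which any non-zero voxel is foreground
# (names and argument order follow the calls above):
import numpy as np

def Dice3d(a, b):
    """Dice similarity coefficient: 2*|A∩B| / (|A| + |B|)."""
    a_fg = np.asarray(a) > 0
    b_fg = np.asarray(b) > 0
    if a_fg.shape != b_fg.shape:
        raise ValueError(f"Shape mismatch: {a_fg.shape} vs {b_fg.shape}")
    denom = a_fg.sum() + b_fg.sum()
    # Convention: flag the degenerate all-background case instead of dividing by zero
    return 2.0 * np.logical_and(a_fg, b_fg).sum() / denom if denom > 0 else -1.0

def Jaccard3d(a, b):
    """Jaccard index (intersection over union): |A∩B| / |A∪B|."""
    a_fg = np.asarray(a) > 0
    b_fg = np.asarray(b) > 0
    union = np.logical_or(a_fg, b_fg).sum()
    return np.logical_and(a_fg, b_fg).sum() / union if union > 0 else -1.0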
Example #3
    def run_test(self):
        """
        This runs the test cycle on the test dataset.
        Note that the process and evaluations are quite different from
        training and validation: here we compute a lot more metrics and
        return a dictionary that could later be persisted as JSON.
        """
        print("Testing...")
        self.model.eval()

        # In this method we will be computing metrics that are relevant to the task of 3D volume
        # segmentation. Therefore, unlike train and validation methods, we will do inferences
        # on full 3D volumes, much like we will be doing it when we deploy the model in the
        # clinical environment.

        # Instantiate inference agent
        inference_agent = UNetInferenceAgent(model=self.model, device=self.device)

        out_dict = {}
        out_dict["volume_stats"] = []
        dc_list = []
        jc_list = []

        # for every volume in the test set
        for i, x in enumerate(self.test_data):
            pred_label = inference_agent.single_volume_inference(x["image"])

            # We compute and report Dice and Jaccard similarity coefficients which
            # assess how close our volumes are to each other

            dc = Dice3d(pred_label, x["seg"])
            jc = Jaccard3d(pred_label, x["seg"])
            dc_list.append(dc)
            jc_list.append(jc)

            out_dict["volume_stats"].append({
                "filename": x['filename'],
                "dice": dc,
                "jaccard": jc
                })
            print(f"{x['filename']} Dice {dc:.4f} Jaccard {dc:.4f} {100*(i+1)/len(self.test_data):.2f}% complete")

        mean_dice = np.mean(dc_list)
        mean_jaccard = np.mean(jc_list)

        print(f" Mean Dice {mean_dice:.4f} Mean Jaccard {mean_jaccard:.4f}")

        out_dict["overall"] = {
            "mean_dice": mean_dice,
            "mean_jaccard": mean_jaccard}

        print("\nTesting complete.")
        return out_dict
Example #4
    def run_test(self):
        """
        This runs the test cycle on the test dataset.
        Note that the process and evaluations are quite different from
        training and validation: here we compute a lot more metrics and
        return a dictionary that could later be persisted as JSON.
        """
        print("Testing...")
        self.model.eval()

        inference_agent = UNetInferenceAgent(model=self.model,
                                             device=self.device)

        out_dict = {}
        out_dict["volume_stats"] = []
        dc_list = []
        jc_list = []

        # for every volume in the test set
        for i, x in enumerate(self.test_data):
            pred_label = inference_agent.single_volume_inference(x["image"])

            # We compute and report Dice and Jaccard similarity coefficients which
            # assess how close our volumes are to each other

            dc = Dice3d(pred_label, x["seg"])
            jc = Jaccard3d(pred_label, x["seg"])
            dc_list.append(dc)
            jc_list.append(jc)

            # STAND-OUT SUGGESTION: By way of exercise, consider also outputting:
            # * Sensitivity and specificity (and explain semantic meaning in terms of
            #   under/over segmenting)
            # * Dice-per-slice and render combined slices with lowest and highest DpS
            # * Dice per class (anterior/posterior)

            out_dict["volume_stats"].append({
                "filename": x['filename'],
                "dice": dc,
                "jaccard": jc
            })
            print(
                f"{x['filename']} Dice {dc:.4f}. {100*(i+1)/len(self.test_data):.2f}% complete"
            )

        out_dict["overall"] = {
            "mean_dice": np.mean(dc_list),
            "mean_jaccard": np.mean(jc_list)
        }

        print("\nTesting complete.")
        return out_dict
Example #5
    # one subdirectory contains a full study
    subdirs = [os.path.join(sys.argv[1], d) for d in os.listdir(sys.argv[1])
               if os.path.isdir(os.path.join(sys.argv[1], d))]
    # Get the latest directory
    study_dir = sorted(subdirs,
                       key=lambda dir: os.stat(dir).st_mtime,
                       reverse=True)[0]
    print(
        f"Looking for series to run inference on in directory {study_dir}...")
    # TASK: get_series_for_inference is not complete. Go and complete it
    volume, header = load_dicom_volume_as_numpy_from_list(
        get_series_for_inference(study_dir))
    print(f"Found series of {volume.shape[2]} axial slices")
    print("HippoVolume.AI: Running inference...")
    # TASK: Use the UNetInferenceAgent class and model parameter file from the previous section
    inference_agent = UNetInferenceAgent(
        device="cpu",
        parameter_file_path=r"/home/workspace/src/model/model.pth")
    # Run inference
    # TASK: single_volume_inference_unpadded takes a volume of arbitrary size
    # and reshapes y and z dimensions to the patch size used by the model before
    # running inference. Your job is to implement it.
    pred_label = inference_agent.single_volume_inference_unpadded(
        np.array(volume))
    # TASK: get_predicted_volumes is not complete. Go and complete it
    pred_volumes = get_predicted_volumes(pred_label)
    # Create and save the report
    print("Creating and pushing report...")
    report_save_path = r"/home/workspace/out/report.dcm"
    # TASK: create_report is not complete. Go and complete it.
    # STAND OUT SUGGESTION: save_report_as_dcm has some suggestions if you want to expand your
    # knowledge of DICOM format

    def run_test(self):
        """
        This runs the test cycle on the test dataset.
        Note that the process and evaluations are quite different from
        training and validation: here we compute a lot more metrics and
        return a dictionary that could later be persisted as JSON.
        """
        print("Testing...")
        self.model.eval()

        # In this method we will be computing metrics that are relevant to the task of 3D volume
        # segmentation. Therefore, unlike train and validation methods, we will do inferences
        # on full 3D volumes, much like we will be doing it when we deploy the model in the
        # clinical environment.

        # TASK: Inference Agent is not complete. Go and finish it. Feel free to test the class
        # in a module of your own by running it against one of the data samples
        if self.weights_name:
            param_file_path = self.weights_name
        else:
            param_file_path = self.model_name + ".pth"

        inference_agent = UNetInferenceAgent(parameter_file_path=os.path.join(
            self.out_dir, param_file_path),
                                             model=self.model,
                                             device=self.device)

        out_dict = {}
        out_dict["volume_stats"] = []
        dice_list = []
        jaccard_list = []
        sensitivity_list = []
        specificity_list = []
        # for every volume in the test set
        for i, x in enumerate(self.test_data):
            pred_label = inference_agent.single_volume_inference(x["image"])

            # We compute and report Dice and Jaccard similarity coefficients which
            # assess how close our volumes are to each other

            # TASK: Dice3d and Jaccard3d functions are not implemented.
            # Complete the implementation as we discussed in one of the course
            # lessons; you can look up the definition of the Jaccard index
            # on Wikipedia. If you completed it correctly (and if you picked
            # your train/val/test split right ;)), your average Jaccard on
            # your test set should be around 0.80

            results = perf_metrics(pred_label, x["seg"])

            dice = results["dice"]
            jaccard = results["jaccard"]
            sens = results["sens"]
            spec = results["spec"]

            dice_list.append(dice)
            jaccard_list.append(jaccard)
            sensitivity_list.append(sens)
            specificity_list.append(spec)

            out_dict["volume_stats"].append({
                "filename": x['filename'],
                "dice": dice,
                "jaccard": jaccard,
                "sensitivty": sens,
                "specificity": spec
            })

            print(
                f"{x['filename']} Dice {dice:.4f}. {100*(i+1)/len(self.test_data):.2f}% complete"
            )

        out_dict["overall"] = {
            "mean_dice": np.mean(dice_list),
            "mean_jaccard": np.mean(jaccard_list),
            "mean_sensitivity": np.mean(sensitivity_list),
            "mean_specificity": np.mean(specificity_list)
        }

        print("\nTesting complete.")
        return out_dict
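
# perf_metrics above is this student's own helper and its body is not shown.
# A minimal sketch, assuming it bundles the four values consumed above (keys
# "dice", "jaccard", "sens", "spec") computed from binary foreground masks
# with non-empty foreground and background:
import numpy as np

def perf_metrics(pred, gt):
    p = np.asarray(pred) > 0
    g = np.asarray(gt) > 0
    tp = np.sum(p & g)    # true positives
    fp = np.sum(p & ~g)   # false positives
    fn = np.sum(~p & g)   # false negatives
    tn = np.sum(~p & ~g)  # true negatives
    return {
        "dice": 2 * tp / (2 * tp + fp + fn),
        "jaccard": tp / (tp + fp + fn),
        "sens": tp / (tp + fn),
        "spec": tn / (tn + fp),
    }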
    study_dir = sorted(subdirs, key=lambda dir: os.stat(dir).st_mtime, reverse=True)[0]

    print(f"Looking for series to run inference on in directory {study_dir}...")

    # DONE: get_series_for_inference is not complete. Go and complete it
    volume, header = load_dicom_volume_as_numpy_from_list(get_series_for_inference(study_dir))
    print(f"Found series of {volume.shape[2]} axial slices")

    print("HippoVolume.AI: Running inference...")
    # DONE: Use the UNetInferenceAgent class and model parameter file from the previous section
    modelpath = '/home/workspace/udacity_hippocampal/section2/out/model.pth'
    if not os.path.exists(modelpath):
        raise FileNotFoundError(modelpath)

    inference_agent = UNetInferenceAgent(
        device="cpu",
        parameter_file_path=modelpath)

    # Run inference
    # DONE: single_volume_inference_unpadded takes a volume of arbitrary size 
    # and reshapes y and z dimensions to the patch size used by the model before 
    # running inference. Your job is to implement it.
    pred_label = inference_agent.single_volume_inference_unpadded(np.array(volume))
    # DONE: get_predicted_volumes is not complete. Go and complete it
    pred_volumes = get_predicted_volumes(pred_label)

    # Create and save the report
    print("Creating and pushing report...")
    report_save_path = r"/home/workspace/udacity_hippocampal/section3/out/report.dcm"
    # DONE: create_report is not complete. Go and complete it. 
    # STAND OUT SUGGESTION: save_report_as_dcm has some suggestions if you want to expand your
    # knowledge of DICOM format
    # one subdirectory contains a full study
    subdirs = [os.path.join(sys.argv[1], d) for d in os.listdir(sys.argv[1]) if
                os.path.isdir(os.path.join(sys.argv[1], d))]

    # Get the latest directory
    study_dir = sorted(subdirs, key=lambda dir: os.stat(dir).st_mtime, reverse=True)[0]

    print(f"Looking for series to run inference on in directory {study_dir}...")

    volume, header = load_dicom_volume_as_numpy_from_list(get_series_for_inference(study_dir))
    print(f"Found series of {volume.shape[2]} axial slices")

    print("HippoVolume.AI: Running inference...")
    # Use the UNetInferenceAgent class and model parameter file from the previous section
    inference_agent = UNetInferenceAgent(
        device="cpu",
        parameter_file_path=r"/home/workspace/section2/out/final_model/model.pth")

    # Run inference
    # single_volume_inference_unpadded takes a volume of arbitrary size
    # and reshapes y and z dimensions to the patch size used by the model before
    # running inference.
    pred_label = inference_agent.single_volume_inference_unpadded(np.array(volume))
    pred_volumes = get_predicted_volumes(pred_label)

    # Create and save the report
    print("Creating and pushing report...")
    report_save_path = r"/home/workspace/reports/report.dcm"
    report_img = create_report(pred_volumes, header, volume, pred_label)
    save_report_as_dcm(header, report_img, report_save_path)
    study_dir = sorted(subdirs,
                       key=lambda dir: os.stat(dir).st_mtime,
                       reverse=True)[0]

    print(
        f"Looking for series to run inference on in directory {study_dir}...")

    # TASK: get_series_for_inference is not complete. Go and complete it
    volume, header = load_dicom_volume_as_numpy_from_list(
        get_series_for_inference(study_dir))
    print(f"Found series of {volume.shape[2]} axial slices")

    print("HippoVolume.AI: Running inference...")
    # TASK: Use the UNetInferenceAgent class and model parameter file from the previous section
    inference_agent = UNetInferenceAgent(
        device="cpu",
        parameter_file_path=r"./2020-06-11_1327_Basic_unet/model.pth")

    # Run inference
    # TASK: single_volume_inference_unpadded takes a volume of arbitrary size
    # and reshapes y and z dimensions to the patch size used by the model before
    # running inference. Your job is to implement it.
    pred_label = inference_agent.single_volume_inference_unpadded(
        np.array(volume))
    # TASK: get_predicted_volumes is not complete. Go and complete it
    pred_volumes = get_predicted_volumes(pred_label)

    # Create and save the report
    print("Creating and pushing report...")
    report_save_path = r"./report.dcm"
    # TASK: create_report is not complete. Go and complete it.
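
# A short sketch of the remaining report steps, following the call pattern the
# completed fragments elsewhere in this collection use (create_report presumably
# renders a summary image from the predicted volumes; save_report_as_dcm writes
# it out as a DICOM object):
report_img = create_report(pred_volumes, header, volume, pred_label)
save_report_as_dcm(header, report_img, report_save_path)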
Example #10
    sp.communicate()


if __name__ == "__main__":

    study_dir = "/data7/common/inqlee0704/HippoSeg/data/TestVolumes/Study1/13_HCropVolume"

    print(
        f"Looking for series to run inference on in directory {study_dir}...")

    volume, header = load_dicom_volume_as_numpy_from_list(
        get_series_for_inference(study_dir))
    print(f"Found series of {volume.shape[2]} axial slices")

    print("HippoVolume.AI: Running inference...")
    inference_agent = UNetInferenceAgent(
        device="cpu",
        parameter_file_path=
        r"/data7/common/inqlee0704/HippoSeg/train/RESULTS/2020-07-16_0717_Basic_unet/model.pth"
    )

    pred_label = inference_agent.single_volume_inference_unpadded(
        np.array(volume))
    pred_volumes = get_predicted_volumes(pred_label)

    # new_volume = med_reshape(volume,pred_label.shape)
    # print(new_volume.shape)
    # print(pred_label.shape)
    # img = nib.Nifti1Image(new_volume,affine=np.eye(4))
    # img.to_filename('YOUR PATH/volume.nii.gz')
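
# single_volume_inference_unpadded is the recurring TASK above: accept a volume
# of arbitrary size, pad the y and z dimensions up to the model's patch size,
# and run slice-wise inference. A minimal sketch, written as a free function
# over the agent for self-containment (in the project it is a method of
# UNetInferenceAgent) and assuming y/z extents no larger than patch_size:
import numpy as np
import torch

def single_volume_inference_unpadded(agent, volume, patch_size=64):
    """Zero-pad y/z to patch_size, then predict each axial slice with agent.model."""
    padded = np.zeros((volume.shape[0], patch_size, patch_size), dtype=np.float32)
    padded[:, :volume.shape[1], :volume.shape[2]] = volume
    scale = float(padded.max()) if padded.max() > 0 else 1.0
    pred = np.zeros(padded.shape, dtype=np.int64)
    agent.model.eval()
    with torch.no_grad():
        for i, slc in enumerate(padded):
            # (1, 1, patch_size, patch_size) tensor with intensities scaled to [0, 1]
            t = torch.from_numpy(slc / scale).unsqueeze(0).unsqueeze(0)
            out = agent.model(t.to(agent.device))
            pred[i] = torch.argmax(out, dim=1).cpu().numpy()[0]
    return pred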
Example #11
    def run_test(self):
        """
        This runs the test cycle on the test dataset.
        Note that the process and evaluations are quite different from
        training and validation: here we compute a lot more metrics and
        return a dictionary that could later be persisted as JSON.
        """
        print("Testing...")

        model_dir = "C:/Data"
        self.load_model_parameters(path=model_dir)
        self.model.eval()

        # In this method we will be computing metrics that are relevant to the task of 3D volume
        # segmentation. Therefore, unlike train and validation methods, we will do inferences
        # on full 3D volumes, much like we will be doing it when we deploy the model in the
        # clinical environment.

        # TASK: Inference Agent is not complete. Go and finish it. Feel free to test the class
        # in a module of your own by running it against one of the data samples
        inference_agent = UNetInferenceAgent(model=self.model,
                                             device=self.device)
        print("Testing...2")
        out_dict = {}
        out_dict["volume_stats"] = []
        dc_list = []
        jc_list = []
        lr_list = []
        print(len((self.test_data)))
        # for every in test set
        for i, x in enumerate(self.test_data):
            print("Testing...loop")
            pred_label = inference_agent.single_volume_inference(x["image"])
            #print(np.nonzero(x["seg"]))
            #print(np.nonzero(pred_label))
            # We compute and report Dice and Jaccard similarity coefficients which
            # assess how close our volumes are to each other

            # TASK: Dice3D and Jaccard3D functions are not implemented.
            #  Complete the implementation as we discussed
            # in one of the course lessons, you can look up definition of Jaccard index
            # on Wikipedia. If you completed it
            # correctly (and if you picked your train/val/test split right ;)),
            # your average Jaccard on your test set should be around 0.80

            dc = Dice3d(pred_label, x["seg"])
            jc = Jaccard3d(pred_label, x["seg"])
            lr = Likelihoodratio(pred_label, x["seg"])
            dc_list.append(dc)
            jc_list.append(jc)
            lr_list.append(lr)

            # STAND-OUT SUGGESTION: By way of exercise, consider also outputting:
            # * Sensitivity and specificity (and explain semantic meaning in terms of
            #   under/over segmenting)
            # * Dice-per-slice and render combined slices with lowest and highest DpS
            # * Dice per class (anterior/posterior)

            out_dict["volume_stats"].append({
                "filename": x['filename'],
                "dice": dc,
                "jaccard": jc,
                "likelihood": lr
            })
            print(
                f"{x['filename']} Dice {dc:.4f}. {100*(i+1)/len(self.test_data):.2f}% complete"
            )

        out_dict["overall"] = {
            "mean_dice": np.mean(dc_list),
            "mean_jaccard": np.mean(jc_list),
            "mean_likelihood": np.mean(lr_list)
        }

        print("\nTesting complete.")
        return out_dict
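
# Likelihoodratio above is this student's additional metric and its definition
# is not shown. A plausible sketch, assuming it means the positive likelihood
# ratio LR+ = sensitivity / (1 - specificity) over binary foreground masks:
import numpy as np

def Likelihoodratio(pred, gt):
    pred_fg = np.asarray(pred) > 0
    gt_fg = np.asarray(gt) > 0
    sensitivity = np.sum(pred_fg & gt_fg) / np.sum(gt_fg)
    specificity = np.sum(~pred_fg & ~gt_fg) / np.sum(~gt_fg)
    return sensitivity / (1.0 - specificity) if specificity < 1.0 else np.inf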
Example #12
                       key=lambda dir: os.stat(dir).st_mtime,
                       reverse=True)[1]  # sorted by modification time, descending
    print(f"study:{study_dir}")
    # /data/TestVolumes/Study1/37916-T2reg-88910
    print(
        f"Looking for series to run inference on in directory {study_dir}...")

    # get_series_for_inference is not complete. Go and complete it
    volume, header = load_dicom_volume_as_numpy_from_list(
        get_series_for_inference(study_dir))
    print(f"Found series of {volume.shape[2]} axial slices")

    print("HippoVolume.AI: Running inference...")
    # Use the UNetInferenceAgent class and model parameter file from the previous section
    inference_agent = UNetInferenceAgent(
        device="cpu",
        parameter_file_path=
        r"section2/out/Result/2020-08-02_1436_Basic_unet/model.pth")

    # Run inference
    # single_volume_inference_unpadded takes a volume of arbitrary size
    # and reshapes y and z dimensions to the patch size used by the model before
    # running inference.
    pred_label = inference_agent.single_volume_inference_unpadded(
        np.array(volume))
    # get_predicted_volumes is not complete. Go and complete it
    pred_volumes = get_predicted_volumes(pred_label)

    # Create and save the report
    print("Creating and pushing report...")
    report_save_path = r"/home/workspace/out/report.dcm"
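
# get_predicted_volumes is referenced as a TASK in several fragments in this
# collection. A minimal sketch, assuming the project's label convention of
# 1 = anterior and 2 = posterior hippocampus, returning raw voxel counts:
import numpy as np

def get_predicted_volumes(pred):
    """Count anterior, posterior, and total labeled voxels in a predicted label volume."""
    pred = np.asarray(pred)
    volume_ant = int(np.sum(pred == 1))
    volume_post = int(np.sum(pred == 2))
    total_volume = int(np.sum(pred > 0))
    return {"anterior": volume_ant, "posterior": volume_post, "total": total_volume}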
Example #13
    # Get the latest directory
    study_dir = sorted(subdirs,
                       key=lambda dir: os.stat(dir).st_mtime,
                       reverse=True)[0]

    print(
        f"Looking for series to run inference on in directory {study_dir}...")

    # TASK: get_series_for_inference is not complete. Go and complete it
    volume, header = load_dicom_volume_as_numpy_from_list(
        get_series_for_inference(study_dir))
    print(f"Found series of {volume.shape[2]} axial slices")

    print("HippoVolume.AI: Running inference...")
    # TASK: Use the UNetInferenceAgent class and model parameter file from the previous section
    inference_agent = UNetInferenceAgent(
        device="cpu", parameter_file_path=r"<PATH TO PARAMETER FILE>")

    # Run inference
    # TASK: single_volume_inference_unpadded takes a volume of arbitrary size
    # and reshapes y and z dimensions to the patch size used by the model before
    # running inference. Your job is to implement it.
    pred_label = inference_agent.single_volume_inference_unpadded(
        np.array(volume))
    # TASK: get_predicted_volumes is not complete. Go and complete it
    pred_volumes = get_predicted_volumes(pred_label)

    # Create and save the report
    print("Creating and pushing report...")
    report_save_path = r"<TEMPORARY PATH TO SAVE YOUR REPORT FILE>"
    # TASK: create_report is not complete. Go and complete it.
    # STAND OUT SUGGESTION: save_report_as_dcm has some suggestions if you want to expand your
    # knowledge of DICOM format
Example #14
    study_dir = sorted(subdirs,
                       key=lambda dir: os.stat(dir).st_mtime,
                       reverse=True)[0]

    print(
        f"Looking for series to run inference on in directory {study_dir}...")

    # TASK: get_series_for_inference is not complete. Go and complete it
    volume, header = load_dicom_volume_as_numpy_from_list(
        get_series_for_inference(study_dir))
    print(f"Found series of {volume.shape[2]} axial slices")

    print("HippoVolume.AI: Running inference...")
    # TASK: Use the UNetInferenceAgent class and model parameter file from the previous section
    inference_agent = UNetInferenceAgent(
        device="cpu",
        parameter_file_path=os.path.abspath(
            os.path.join(*['.', 'model', 'model.pth'])))

    # Run inference
    # TASK: single_volume_inference_unpadded takes a volume of arbitrary size
    # and reshapes y and z dimensions to the patch size used by the model before
    # running inference. Your job is to implement it.
    pred_label = inference_agent.single_volume_inference_unpadded(
        np.array(volume))
    # TASK: get_predicted_volumes is not complete. Go and complete it
    pred_volumes = get_predicted_volumes(pred_label)

    # Create and save the report
    print("Creating and pushing report...")
    file_name = f'report_{time.strftime("%Y-%m-%d_%H%M", time.localtime())}.dcm'
    report_save_path = os.path.abspath(os.path.join(*['..', 'out', file_name]))
    # Get the latest directory
    study_dir = sorted(subdirs, key=lambda dir: os.stat(dir).st_mtime, reverse=True)
    study_dir = [dir for dir in study_dir if 'HCropVolume' in dir][0]
    print(study_dir)

    print(f"Looking for series to run inference on in directory {study_dir}...")

    # TASK_COMPLETE: get_series_for_inference is not complete. Go and complete it
#     study_dir = [dir for dir, subdirs, files in os.walk(path) if '13_HCropVolume' in dir]
    volume, header = load_dicom_volume_as_numpy_from_list(get_series_for_inference(study_dir))
    print(f"Found series of {volume.shape[2]} axial slices")

    print("HippoVome.AI: Running inference...")
    # TASK_COMPLETE : Use the UNetInferenceAgent class and model paramet file from the previous section
    inference_agent = UNetInferenceAgent(
        device="cpu",
        parameter_file_path=r"")

    # Run inference
    # TASK_COMPLETE: single_volume_inference_unpadded takes a volume of arbitrary size
    # and reshapes y and z dimensions to the patch size used by the model before
    # running inference. Your job is to implement it.
    pred_label = inference_agent.single_volume_inference_unpadded(np.array(volume), patch_size=64)
    # TASK_COMPLETE: get_predicted_volumes is not complete. Go and complete it
    pred_volumes = get_predicted_volumes(pred_label)

    # Create and save the report
    print("Creating and pushing report...")
    report_save_path = r"../out/report.dcm"
    # TASK_COMPLETE: create_report is not complete. Go and complete it.
    # STAND OUT SUGGESTION: save_report_as_dcm has some suggestions if you want to expand your
    # knowledge of DICOM format
Example #16
    def run_test(self):
        """
        This runs the test cycle on the test dataset.
        Note that the process and evaluations are quite different from
        training and validation: here we compute a lot more metrics and
        return a dictionary that could later be persisted as JSON.
        """
        print("Testing...")
        # load_model_parameters('/home/dev/Documents/github/nd320-c3-3d-imaging-starter/section2/src/2020-06-08_1647_Basic_unet/model.pth')

        self.model.eval()

        # In this method we will be computing metrics that are relevant to the task of 3D volume
        # segmentation. Therefore, unlike train and validation methods, we will do inferences
        # on full 3D volumes, much like we will be doing it when we deploy the model in the
        # clinical environment.

        # TASK: Inference Agent is not complete. Go and finish it. Feel free to test the class
        # in a module of your own by running it against one of the data samples
        inference_agent = UNetInferenceAgent(model=self.model,
                                             device=self.device)

        out_dict = {}
        out_dict["volume_stats"] = []
        dc_list = []
        jc_list = []
        # print('self.test_data.shape: ', self.test_data.shape)
        # for every volume in the test set
        for i, x in enumerate(self.test_data):
            print('filename being tested: ', x["filename"])
            if (x["filename"] == 'hippocampus_150.nii.gz'):

                print('1')

                pred_label = inference_agent.single_volume_inference(
                    x["image"])

                pickle.dump(x["image"], open("image_150.p", "wb"))
                pickle.dump(pred_label, open("label_150.p", "wb"))

                # We compute and report Dice and Jaccard similarity coefficients which
                # assess how close our volumes are to each other

                # TASK: Dice3D and Jaccard3D functions are not implemented.
                #  Complete the implementation as we discussed
                # in one of the course lessons, you can look up definition of Jaccard index
                # on Wikipedia. If you completed it
                # correctly (and if you picked your train/val/test split right ;)),
                # your average Jaccard on your test set should be around 0.80

                dc = Dice3d(pred_label, x["seg"])
                jc = Jaccard3d(pred_label, x["seg"])
                dc_list.append(dc)
                jc_list.append(jc)

                # STAND-OUT SUGGESTION: By way of exercise, consider also outputting:
                # * Sensitivity and specificity (and explain semantic meaning in terms of
                #   under/over segmenting)
                # * Dice-per-slice and render combined slices with lowest and highest DpS
                # * Dice per class (anterior/posterior)

                out_dict["volume_stats"].append({
                    "filename": x['filename'],
                    "dice": dc,
                    "jaccard": jc
                })
                print(
                    f"{x['filename']} Dice {dc:.4f} and Jaccard {jc:.4f}. {100*(i+1)/len(self.test_data):.2f}% complete"
                )

        # Compute the overall metrics once, after the loop
        out_dict["overall"] = {
            "mean_dice": np.mean(dc_list),
            "mean_jaccard": np.mean(jc_list)
        }

        print("\nTesting complete.")
        return out_dict
Example #17
    def run_test(self):
        """
        This runs the test cycle on the test dataset.
        Note that the process and evaluations are quite different from
        training and validation: here we compute a lot more metrics and
        return a dictionary that could later be persisted as JSON.
        """
        self.model.eval()
        # In this method we will be computing metrics that are relevant to the task of 3D volume
        # segmentation. Therefore, unlike train and validation methods, we will do inferences
        # on full 3D volumes, much like we will be doing it when we deploy the model in the
        # clinical environment.

        # TASK: Inference Agent is not complete. Go and finish it. Feel free to test the class
        # in a module of your own by running it against one of the data samples

        inference_agent = UNetInferenceAgent(model=self.model,
                                             device=self.device)

        out_dict = {}
        out_dict["volume_stats"] = []
        dc_list = []
        jc_list = []
        sens_list = []
        spec_list = []
        # f1_list = []

        # for every volume in the test set
        for i, x in enumerate(self.test_data):

            gt = x["seg"]  # test image ground truth
            ti = x["image"]  # test image data
            original_filename = x['filename']  # test image file name
            pred_filename = 'predicted_' + x['filename']  # predicted label file name

            file_path = os.path.join("..", "data", "images", original_filename)

            original_images = nib.load(file_path)

            pred = inference_agent.single_volume_inference(ti)
            mask3d = np.array(torch.argmax(pred, dim=1))

            # Save predicted labels to local environment for further verification
            # with the original image NIFTI coordinate system
            pred_coord = nib.Nifti1Image(mask3d, original_images.affine)
            pred_out_path = os.path.join("..", "data", "preds")
            pred_out_file = os.path.join(pred_out_path, pred_filename)

            if not os.path.exists(pred_out_path):
                os.makedirs(pred_out_path)

            nib.save(pred_coord, pred_out_file)

            # We compute and report Dice and Jaccard similarity coefficients which
            # assess how close our volumes are to each other

            # TASK: Dice3D and Jaccard3D functions are not implemented.
            # Complete the implementation as we discussed
            # in one of the course lessons, you can look up definition of Jaccard index
            # on Wikipedia. If you completed it
            # correctly (and if you picked your train/val/test split right ;)),
            # your average Jaccard on your test set should be around 0.80

            # a - prediction
            # b - ground truth
            dc = Dice3d(mask3d, gt)
            dc_list.append(dc)

            jc = Jaccard3d(mask3d, gt)
            jc_list.append(jc)

            sens = Sensitivity(mask3d, gt)
            sens_list.append(sens)

            spec = Specificity(mask3d, gt)
            spec_list.append(spec)

            # f1 = F1_score(mask3d, gt)
            # f1_list.append(f1)

            # STAND-OUT SUGGESTION: By way of exercise, consider also outputting:
            # * Sensitivity and specificity (and explain semantic meaning in terms of
            #   under/over segmenting)
            # * Dice-per-slice and render combined slices with lowest and highest DpS
            # * Dice per class (anterior/posterior)

            out_dict["volume_stats"].append({
                "filename": x['filename'],
                "dice": dc,
                "jaccard": jc,
                "sensitivity": sens,
                "specificity": spec,
                # "f1": f1,
            })

            print(
                f"{x['filename']} Dice {dc:.4f}, Jaccard {jc:.4f}, Sensitivity {sens:.4f}, and Specificity {spec:.4f}. {100*(i+1)/len(self.test_data):.2f}% complete"
            )

        avg_dc = np.mean(dc_list)
        avg_jc = np.mean(jc_list)
        avg_sens = np.mean(sens_list)
        avg_spec = np.mean(spec_list)
        # avg_f1 = np.mean(f1_list)

        out_dict["overall"] = {
            "mean_dice": avg_dc,
            "mean_jaccard": avg_jc,
            "mean_sensitivity": avg_sens,
            "mean_specificity": avg_spec,
            # "mean_f1": avg_f1,
        }

        print("\nTesting complete.")
        print("------------------------------")
        print(
            f"Average Dice {avg_dc:.4f}, Average Jaccard {avg_jc:.4f}, Average Sensitivity {avg_sens:.4f}, and Average Specificity {avg_spec:.4f}"
        )

        return out_dict
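
# The Sensitivity and Specificity helpers used in the example above match the
# stand-out suggestion in the earlier examples; the comments there confirm the
# argument order (prediction first, ground truth second). A minimal sketch over
# binary foreground masks:
import numpy as np

def Sensitivity(pred, gt):
    """True positive rate; low values indicate under-segmentation."""
    pred_fg = np.asarray(pred) > 0
    gt_fg = np.asarray(gt) > 0
    tp = np.sum(pred_fg & gt_fg)
    fn = np.sum(~pred_fg & gt_fg)
    return tp / (tp + fn) if (tp + fn) > 0 else -1.0

def Specificity(pred, gt):
    """True negative rate; low values indicate over-segmentation."""
    pred_fg = np.asarray(pred) > 0
    gt_fg = np.asarray(gt) > 0
    tn = np.sum(~pred_fg & ~gt_fg)
    fp = np.sum(pred_fg & ~gt_fg)
    return tn / (tn + fp) if (tn + fp) > 0 else -1.0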
Example #18
    # one subdirectory contains a full study
    subdirs = [os.path.join(sys.argv[1], d) for d in os.listdir(sys.argv[1]) if
                os.path.isdir(os.path.join(sys.argv[1], d))]

    # Get the latest directory
    study_dir = sorted(subdirs, key=lambda dir: os.stat(dir).st_mtime, reverse=True)[0]

    print(f"Looking for series to run inference on in directory {study_dir}...")

    volume, header = load_dicom_volume_as_numpy_from_list(get_series_for_inference(study_dir))
    print(f"Found series of {volume.shape[2]} axial slices")

    print("HippoVolume.AI: Running inference...")
    # Use the UNetInferenceAgent class and model parameter file from the previous section
    inference_agent = UNetInferenceAgent(
        device="cpu",
        parameter_file_path=r"../../section2/out/model.pth")

    # Run inference
    # single_volume_inference_unpadded takes a volume of arbitrary size 
    # and reshapes y and z dimensions to the patch size used by the model before 
    # running inference. Your job is to implement it.
    t_start = time.time()
    pred_label = inference_agent.single_volume_inference_unpadded(np.array(volume))
    pred_volumes = get_predicted_volumes(pred_label)

    # Create and save the report
    print("Creating and pushing report...")
    report_save_path = r"../out/report.dcm"
    
    # STAND OUT SUGGESTION: save_report_as_dcm has some suggestions if you want to expand your
    # knowledge of DICOM format
Example #19
    # Get the latest directory
    study_dir = sorted(subdirs,
                       key=lambda dir: os.stat(dir).st_mtime,
                       reverse=True)[0]

    print(
        f"Looking for series to run inference on in directory {study_dir}...")

    volume, header = load_dicom_volume_as_numpy_from_list(
        get_series_for_inference(study_dir))
    print(f"Found series of {volume.shape[2]} axial slices")

    print("HippoVolume.AI: Running inference...")
    # Use the UNetInferenceAgent class and model parameter file from the previous section
    inference_agent = UNetInferenceAgent(device="cpu",
                                         parameter_file_path=r"model.pth")

    # Run inference
    pred_label = inference_agent.single_volume_inference_unpadded(volume)

    pred_volumes = get_predicted_volumes(pred_label)

    # Create and save the report
    print("Creating and pushing report...")
    report_save_path = r"./inference_report.dcm"
    report_img = create_report(pred_volumes, header, volume, pred_label)
    save_report_as_dcm(header, report_img, report_save_path)

    # Send report to our storage archive
    # Command line string that will issue a DICOM C-STORE request to send our report
    # to our Orthanc server (that runs on port 4242 of the local machine), using storescu tool
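
# A minimal sketch of that C-STORE push, assuming DCMTK's storescu is installed
# and Orthanc accepts DICOM associations on localhost:4242 as described above
# (the dangling sp.communicate() fragment in Example #10 is the tail of this
# same pattern):
import subprocess

os_command = ["storescu", "-v", "localhost", "4242", report_save_path]
sp = subprocess.Popen(os_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
sp.communicate()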