def run(self):

        time_start = ph.start_timing()

        # if no mask is provided, use unity stacks for all masks
        is_unity_mask = np.all([s.is_unity_mask() for s in self._stacks])
        if is_unity_mask:
            ph.print_info(
                "Keep unity masks for all stacks. "
                "It is recommended to provide anatomical masks for increased "
                "accuracy.")

        # Segmentation propagation
        if self._segmentation_propagator is not None and not is_unity_mask:

            stacks_to_propagate_indices = []
            for i in range(0, self._N_stacks):
                if self._stacks[i].is_unity_mask():
                    stacks_to_propagate_indices.append(i)

            stacks_to_propagate_indices = \
                list(set(stacks_to_propagate_indices) -
                     set([self._target_stack_index]))

            # Set target mask
            target = self._stacks[self._target_stack_index]

            # Propagate masks
            self._segmentation_propagator.set_template(target)
            for i in stacks_to_propagate_indices:
                ph.print_info(
                    "Propagate mask from stack '%s' to '%s'" %
                    (target.get_filename(), self._stacks[i].get_filename()))
                self._segmentation_propagator.set_stack(self._stacks[i])
                self._segmentation_propagator.run_segmentation_propagation()
                self._stacks[i] = \
                    self._segmentation_propagator.get_segmented_stack()

                # self._stacks[i].show(1)

        # Crop to mask
        if self._use_cropping_to_mask and not is_unity_mask:
            ph.print_info("Crop stacks to their masks")

            for i in range(0, self._N_stacks):
                self._stacks[i] = \
                    self._stacks[i].get_cropped_stack_based_on_mask(
                        boundary_i=self._boundary_i,
                        boundary_j=self._boundary_j,
                        boundary_k=self._boundary_k,
                        unit=self._unit)

        # N4 Bias Field Correction
        if self._use_N4BiasFieldCorrector:
            bias_field_corrector = n4bfc.N4BiasFieldCorrection()

            for i in range(0, self._N_stacks):
                ph.print_info(
                    "Perform N4 Bias Field Correction for stack %d ... " %
                    (i + 1),
                    newline=False)
                bias_field_corrector.set_stack(self._stacks[i])
                bias_field_corrector.run_bias_field_correction()
                self._stacks[i] = \
                    bias_field_corrector.get_bias_field_corrected_stack()
                print("done")

        # Linear Intensity Correction
        if self._use_intensity_correction:
            # Define the reference for intensity correction here; otherwise
            # 'target' is only set within the mask propagation branch above
            # and would raise a NameError in this branch.
            target = self._stacks[self._target_stack_index]
            stacks_to_intensity_correct = list(
                set(range(0, self._N_stacks)) -
                set([self._target_stack_index]))

            intensity_corrector = ic.IntensityCorrection()
            intensity_corrector.use_individual_slice_correction(False)
            intensity_corrector.use_reference_mask(True)
            intensity_corrector.use_verbose(True)

            for i in stacks_to_intensity_correct:
                stack = self._stacks[i]
                intensity_corrector.set_stack(stack)
                intensity_corrector.set_reference(
                    target.get_resampled_stack(resampling_grid=stack.sitk))
                # intensity_corrector.run_affine_intensity_correction()
                intensity_corrector.run_linear_intensity_correction()
                self._stacks[i] = \
                    intensity_corrector.get_intensity_corrected_stack()
        self._computational_time = ph.stop_timing(time_start)
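
A minimal usage sketch for the run() method above, mirroring how Example #6 below wires up the preprocessing step; the input paths are illustrative and the niftymic module paths are stated here as assumptions:

import niftymic.base.stack as st
import niftymic.utilities.data_preprocessing as dp
import niftymic.utilities.segmentation_propagation as segprop

# Illustrative input stack with its anatomical mask
stacks = [st.Stack.from_filename("axial.nii.gz", "axial_mask.nii.gz")]

data_preprocessing = dp.DataPreprocessing(
    stacks=stacks,
    segmentation_propagator=segprop.SegmentationPropagation(
        dilation_radius=3, dilation_kernel="Ball"),
    use_cropping_to_mask=True,
    use_N4BiasFieldCorrector=False,
    target_stack_index=0,
    boundary_i=10,
    boundary_j=10,
    boundary_k=0,
    unit="mm",
)
data_preprocessing.run()
stacks = data_preprocessing.get_preprocessed_stacks()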
Example #2
def main():

    input_parser = InputArgparser(description="Convert NIfTI to DICOM image", )
    input_parser.add_filename(required=True)
    input_parser.add_option(
        option_string="--template",
        type=str,
        required=True,
        help="Template DICOM to extract relevant DICOM tags.",
    )
    input_parser.add_dir_output(required=True)
    input_parser.add_label(
        help="Label used for series description of DICOM output.",
        default="SRR_NiftyMIC")
    input_parser.add_argument(
        "--volume",
        "-volume",
        action='store_true',
        help="If given, the output DICOM file is combined as 3D volume")
    args = input_parser.parse_args()
    input_parser.print_arguments(args)

    # Prepare for final DICOM output
    ph.create_directory(args.dir_output)

    if args.volume:
        dir_output_2d_slices = os.path.join(DIR_TMP, "dicom_slices")
    else:
        dir_output_2d_slices = os.path.join(args.dir_output, args.label)
    ph.create_directory(dir_output_2d_slices, delete_files=True)

    # Create set of 2D DICOM slices from 3D NIfTI image
    # (correct image orientation!)
    ph.print_title("Create set of 2D DICOM slices from 3D NIfTI image")
    cmd_args = ["nifti2dicom"]
    cmd_args.append("-i '%s'" % args.filename)
    cmd_args.append("-o '%s'" % dir_output_2d_slices)
    cmd_args.append("-d '%s'" % args.template)
    cmd_args.append("--prefix ''")
    cmd_args.append("--seriesdescription '%s'" % args.label)
    cmd_args.append("--accessionnumber '%s'" % ACCESSION_NUMBER)
    cmd_args.append("--seriesnumber '%s'" % SERIES_NUMBER)
    cmd_args.append("--institutionname '%s'" % INSTITUTION_NAME)

    # Overwrite default "nifti2dicom" tags which would be added otherwise
    # (neither deletion nor an update with an empty string '' would be
    # sufficient to overwrite them)
    cmd_args.append("--manufacturersmodelname '%s'" % IMAGE_COMMENTS)
    cmd_args.append("--protocolname '%s'" % IMAGE_COMMENTS)

    cmd_args.append("-y")
    ph.execute_command(" ".join(cmd_args))

    if args.volume:
        path_to_output = os.path.join(args.dir_output, "%s.dcm" % args.label)
        # Combine set of 2D DICOM slices to form 3D DICOM image
        # (image orientation stays correct)
        ph.print_title("Combine set of 2D DICOM slices to form 3D DICOM image")
        cmd_args = ["medcon"]
        cmd_args.append("-f '%s'/*.dcm" % dir_output_2d_slices)
        cmd_args.append("-o '%s'" % path_to_output)
        cmd_args.append("-c dicom")
        cmd_args.append("-stack3d")
        cmd_args.append("-n")
        cmd_args.append("-qc")
        cmd_args.append("-w")
        ph.execute_command(" ".join(cmd_args))

        # Update all relevant DICOM tags accordingly
        ph.print_title("Update all relevant DICOM tags accordingly")
        print("")
        dataset_template = pydicom.dcmread(args.template)
        dataset = pydicom.dcmread(path_to_output)

        # Copy tags from template (to guarantee grouping with original data)
        update_dicom_tags = {}
        for tag in COPY_DICOM_TAGS:
            try:
                update_dicom_tags[tag] = getattr(dataset_template, tag)
            except AttributeError:
                update_dicom_tags[tag] = ""

        # Additional tags
        update_dicom_tags["SeriesDescription"] = args.label
        update_dicom_tags["InstitutionName"] = INSTITUTION_NAME
        update_dicom_tags["ImageComments"] = IMAGE_COMMENTS
        update_dicom_tags["AccessionNumber"] = ACCESSION_NUMBER
        update_dicom_tags["SeriesNumber"] = SERIES_NUMBER

        for tag in sorted(update_dicom_tags.keys()):
            value = update_dicom_tags[tag]
            setattr(dataset, tag, value)
            ph.print_info("%s: '%s'" % (tag, value))

        dataset.save_as(path_to_output)
        print("")
        ph.print_info("3D DICOM image written to '%s'" % path_to_output)

    else:
        ph.print_info("DICOM images written to '%s'" % dir_output_2d_slices)

    return 0
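
As a quick sanity check on the combined 3D DICOM, the updated tags can be read back with pydicom; the output path below is hypothetical, following the '%s.dcm' naming used above:

import pydicom

dataset = pydicom.dcmread("output/SRR_NiftyMIC.dcm")  # hypothetical path
for tag in ["SeriesDescription", "InstitutionName", "AccessionNumber",
            "SeriesNumber", "ImageComments"]:
    print("%s: '%s'" % (tag, getattr(dataset, tag, "")))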
Example #3
def main():

    time_start = ph.start_timing()

    # Set print options
    np.set_printoptions(precision=3)
    pd.set_option('display.width', 1000)

    input_parser = InputArgparser(
        description="Evaluate slice residual similarity of motion-corrected "
        "stacks of 2D slices against a reference volume.")
    input_parser.add_filenames()
    input_parser.add_filenames_masks()
    input_parser.add_dir_input_mc()
    input_parser.add_suffix_mask(default="_mask")
    input_parser.add_reference(required=True)
    input_parser.add_reference_mask()
    input_parser.add_dir_output(required=False)
    input_parser.add_log_config(default=1)
    input_parser.add_measures(default=["PSNR", "RMSE", "SSIM", "NCC", "NMI"])
    input_parser.add_verbose(default=0)
    input_parser.add_slice_thicknesses(default=None)
    input_parser.add_option(option_string="--use-reference-mask",
                            type=int,
                            default=1)
    input_parser.add_option(option_string="--use-slice-masks",
                            type=int,
                            default=1)

    args = input_parser.parse_args()
    input_parser.print_arguments(args)

    if args.log_config:
        input_parser.log_config(os.path.abspath(__file__))

    # --------------------------------Read Data--------------------------------
    ph.print_title("Read Data")

    data_reader = dr.MultipleImagesReader(
        file_paths=args.filenames,
        file_paths_masks=args.filenames_masks,
        suffix_mask=args.suffix_mask,
        dir_motion_correction=args.dir_input_mc,
        stacks_slice_thicknesses=args.slice_thicknesses,
    )

    data_reader.read_data()
    stacks = data_reader.get_data()
    ph.print_info("%d input stacks read for further processing" % len(stacks))

    reference = st.Stack.from_filename(args.reference, args.reference_mask)

    ph.print_title("Slice Residual Similarity")
    residual_evaluator = res_ev.ResidualEvaluator(
        stacks=stacks,
        reference=reference,
        measures=args.measures,
        use_reference_mask=args.use_reference_mask,
        use_slice_masks=args.use_slice_masks,
    )
    residual_evaluator.compute_slice_projections()
    residual_evaluator.evaluate_slice_similarities()
    residual_evaluator.write_slice_similarities(args.dir_output)

    elapsed_time = ph.stop_timing(time_start)
    ph.print_title("Summary")
    print("Computational Time for Slice Residual Evaluation: %s" %
          (elapsed_time))

    return 0
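
For intuition, the "NCC" entry in the default measures corresponds to normalized cross-correlation; a minimal NumPy sketch of the underlying quantity, independent of the ResidualEvaluator API:

import numpy as np

def ncc(x, y):
    # Normalized cross-correlation of two equally sized arrays; in [-1, 1]
    x = (x - x.mean()) / x.std()
    y = (y - y.mean()) / y.std()
    return float(np.mean(x * y))

print(ncc(np.arange(10.), np.arange(10.)))  # 1.0 for identical inputs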
Example #4
    def _print_info_text(self):

        ph.print_subtitle("Tikhonov Solver:")
        ph.print_info("Chosen regularization type: ", newline=False)
        if self._reg_type in ["TK0"]:
            print("Zeroth-order Tikhonov")

        else:
            print("First-order Tikhonov")

        if self._deconvolution_mode in ["only_in_plane"]:
            ph.print_info("(Only in-plane deconvolution is performed)")

        elif self._deconvolution_mode in ["predefined_covariance"]:
            ph.print_info("(Predefined covariance used: cov = %s)"
                          % (np.diag(self._predefined_covariance)))

        if self._data_loss in ["huber"]:
            ph.print_info("Loss function: %s (gamma = %g)" %
                          (self._data_loss, self._huber_gamma))
        else:
            ph.print_info("Loss function: %s" % (self._data_loss))

        if self._data_loss != "linear":
            ph.print_info("Loss function scale: %g" % (self._data_loss_scale))

        ph.print_info("Regularization parameter: " + str(self._alpha))
        ph.print_info("Minimizer: " + self._minimizer)
        ph.print_info(
            "Maximum number of iterations: " + str(self._iter_max))
Example #5
def main():

    time_start = ph.start_timing()

    # Set print options
    np.set_printoptions(precision=3)
    pd.set_option('display.width', 1000)

    input_parser = InputArgparser(
        description="Evaluate slice residual similarity of intensity-"
        "corrected stacks of 2D slices against a reference volume.",
    )
    input_parser.add_filenames()
    input_parser.add_filenames_masks()
    input_parser.add_dir_input_mc()
    input_parser.add_suffix_mask(default="_mask")
    input_parser.add_reference(required=True)
    input_parser.add_reference_mask()
    input_parser.add_dir_output(required=False)
    input_parser.add_log_config(default=1)
    input_parser.add_measures(
        default=["PSNR", "MAE", "RMSE", "SSIM", "NCC", "NMI"])
    input_parser.add_verbose(default=0)
    input_parser.add_target_stack(default=None)
    input_parser.add_intensity_correction(default=1)
    input_parser.add_slice_thicknesses(default=None)
    input_parser.add_option(
        option_string="--use-reference-mask", type=int, default=1)
    input_parser.add_option(
        option_string="--use-slice-masks", type=int, default=1)

    args = input_parser.parse_args()
    input_parser.print_arguments(args)

    if args.log_config:
        input_parser.log_config(os.path.abspath(__file__))

    # --------------------------------Read Data--------------------------------
    ph.print_title("Read Data")

    data_reader = dr.MultipleImagesReader(
        file_paths=args.filenames,
        file_paths_masks=args.filenames_masks,
        suffix_mask=args.suffix_mask,
        dir_motion_correction=args.dir_input_mc,
        stacks_slice_thicknesses=args.slice_thicknesses,
    )

    data_reader.read_data()
    stacks = data_reader.get_data()
    ph.print_info("%d input stacks read for further processing" % len(stacks))

    # Specify target stack for intensity correction and reconstruction space
    if args.target_stack is None:
        target_stack_index = 0
    else:
        filenames = ["%s.nii.gz" % s.get_filename() for s in stacks]
        filename_target_stack = os.path.basename(args.target_stack)
        try:
            target_stack_index = filenames.index(filename_target_stack)
        except ValueError:
            raise ValueError(
                "--target-stack must correspond to an image as provided by "
                "--filenames")

    # ---------------------------Intensity Correction--------------------------
    if args.intensity_correction:
        ph.print_title("Intensity Correction")
        intensity_corrector = ic.IntensityCorrection()
        intensity_corrector.use_individual_slice_correction(False)
        intensity_corrector.use_stack_mask(True)
        intensity_corrector.use_reference_mask(True)
        intensity_corrector.use_verbose(False)

        for i, stack in enumerate(stacks):
            if i == target_stack_index:
                ph.print_info("Stack %d (%s): Reference image. Skipped." % (
                    i + 1, stack.get_filename()))
                continue
            else:
                ph.print_info("Stack %d (%s): Intensity Correction ... " % (
                    i + 1, stack.get_filename()), newline=False)
            intensity_corrector.set_stack(stack)
            intensity_corrector.set_reference(
                stacks[target_stack_index].get_resampled_stack(
                    resampling_grid=stack.sitk,
                    interpolator="NearestNeighbor",
                ))
            intensity_corrector.run_linear_intensity_correction()
            stacks[i] = intensity_corrector.get_intensity_corrected_stack()
            print("done (c1 = %g) " %
                  intensity_corrector.get_intensity_correction_coefficients())

    # ----------------------- Slice Residual Similarity -----------------------
    reference = st.Stack.from_filename(args.reference, args.reference_mask)

    ph.print_title("Slice Residual Similarity")
    residual_evaluator = res_ev.ResidualEvaluator(
        stacks=stacks,
        reference=reference,
        measures=args.measures,
        use_reference_mask=args.use_reference_mask,
        use_slice_masks=args.use_slice_masks,
    )
    residual_evaluator.compute_slice_projections()
    residual_evaluator.evaluate_slice_similarities()
    residual_evaluator.write_slice_similarities(args.dir_output)

    elapsed_time = ph.stop_timing(time_start)
    ph.print_title("Summary")
    print("Computational Time for Slice Residual Evaluation: %s" %
          (elapsed_time))

    return 0
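
The coefficient c1 printed after each stack stems from a linear intensity model; the following least-squares sketch shows what such a fit over the masked voxels looks like (an illustration of the model, not IntensityCorrection's exact implementation):

import numpy as np

def fit_linear_intensity(nda, nda_reference, nda_mask):
    # Least-squares fit of nda_reference ~ c1 * nda inside the mask
    x = nda[nda_mask > 0].astype(float)
    y = nda_reference[nda_mask > 0].astype(float)
    c1 = x.dot(y) / x.dot(x)
    return c1 * nda, c1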
Example #6
def main():

    time_start = ph.start_timing()

    # Set print options for numpy
    np.set_printoptions(precision=3)

    input_parser = InputArgparser(
        description="Volumetric MRI reconstruction framework to reconstruct "
        "an isotropic, high-resolution 3D volume from multiple stacks of 2D "
        "slices with motion correction. The resolution of the computed "
        "Super-Resolution Reconstruction (SRR) is given by the in-plane "
        "spacing of the selected target stack. A region of interest can be "
        "specified by providing a mask for the selected target stack. Only "
        "this region will then be reconstructed by the SRR algorithm which "
        "can substantially reduce the computational time.", )
    input_parser.add_filenames(required=True)
    input_parser.add_filenames_masks()
    input_parser.add_output(required=True)
    input_parser.add_suffix_mask(default="_mask")
    input_parser.add_target_stack(default=None)
    input_parser.add_search_angle(default=45)
    input_parser.add_multiresolution(default=0)
    input_parser.add_shrink_factors(default=[3, 2, 1])
    input_parser.add_smoothing_sigmas(default=[1.5, 1, 0])
    input_parser.add_sigma(default=1)
    input_parser.add_reconstruction_type(default="TK1L2")
    input_parser.add_iterations(default=15)
    input_parser.add_alpha(default=0.015)
    input_parser.add_alpha_first(default=0.2)
    input_parser.add_iter_max(default=10)
    input_parser.add_iter_max_first(default=5)
    input_parser.add_dilation_radius(default=3)
    input_parser.add_extra_frame_target(default=10)
    input_parser.add_bias_field_correction(default=0)
    input_parser.add_intensity_correction(default=1)
    input_parser.add_isotropic_resolution(default=1)
    input_parser.add_log_config(default=1)
    input_parser.add_subfolder_motion_correction()
    input_parser.add_write_motion_correction(default=1)
    input_parser.add_verbose(default=0)
    input_parser.add_two_step_cycles(default=3)
    input_parser.add_use_masks_srr(default=0)
    input_parser.add_boundary_stacks(default=[10, 10, 0])
    input_parser.add_metric(default="Correlation")
    input_parser.add_metric_radius(default=10)
    input_parser.add_reference()
    input_parser.add_reference_mask()
    input_parser.add_outlier_rejection(default=1)
    input_parser.add_threshold_first(default=0.5)
    input_parser.add_threshold(default=0.8)
    input_parser.add_interleave(default=3)
    input_parser.add_slice_thicknesses(default=None)
    input_parser.add_viewer(default="itksnap")
    input_parser.add_v2v_method(default="RegAladin")
    input_parser.add_argument(
        "--v2v-robust",
        "-v2v-robust",
        action='store_true',
        help="If given, a more robust volume-to-volume registration step is "
        "performed, i.e. four rigid registrations are performed using four "
        "rigid transform initializations based on "
        "principal component alignment of associated masks.")
    input_parser.add_argument(
        "--s2v-hierarchical",
        "-s2v-hierarchical",
        action='store_true',
        help="If given, a hierarchical approach for the first slice-to-volume "
        "registration cycle is used, i.e. sub-packages defined by the "
        "specified interleave (--interleave) are registered until each "
        "slice is registered independently.")
    input_parser.add_argument(
        "--sda",
        "-sda",
        action='store_true',
        help="If given, the volumetric reconstructions are performed using "
        "Scattered Data Approximation (Vercauteren et al., 2006). "
        "'alpha' is considered the final 'sigma' for the "
        "iterative adjustment. "
        "Recommended value is, e.g., --alpha 0.8")
    input_parser.add_option(
        option_string="--transforms-history",
        type=int,
        help="Write entire history of applied slice motion correction "
        "transformations to motion correction output directory",
        default=0,
    )

    args = input_parser.parse_args()
    input_parser.print_arguments(args)

    rejection_measure = "NCC"
    threshold_v2v = -2  # 0.3
    debug = False

    if args.v2v_method not in V2V_METHOD_OPTIONS:
        raise ValueError("v2v-method must be in {%s}" %
                         (", ".join(V2V_METHOD_OPTIONS)))

    if np.all([not args.output.endswith(t) for t in ALLOWED_EXTENSIONS]):
        raise ValueError(
            "output filename invalid; allowed extensions are: %s" %
            ", ".join(ALLOWED_EXTENSIONS))

    if args.alpha_first < args.alpha and not args.sda:
        raise ValueError("It must hold alpha-first >= alpha")

    if args.threshold_first > args.threshold:
        raise ValueError("It must hold threshold-first <= threshold")

    dir_output = os.path.dirname(args.output)
    ph.create_directory(dir_output)

    if args.log_config:
        input_parser.log_config(os.path.abspath(__file__))

    # --------------------------------Read Data--------------------------------
    ph.print_title("Read Data")
    data_reader = dr.MultipleImagesReader(
        file_paths=args.filenames,
        file_paths_masks=args.filenames_masks,
        suffix_mask=args.suffix_mask,
        stacks_slice_thicknesses=args.slice_thicknesses,
    )

    if len(args.boundary_stacks) != 3:
        raise IOError(
            "Provide exactly three values for '--boundary-stacks' to define "
            "cropping in i-, j-, and k-dimension of the input stacks")

    data_reader.read_data()
    stacks = data_reader.get_data()
    ph.print_info("%d input stacks read for further processing" % len(stacks))

    if all(s.is_unity_mask() for s in stacks):
        ph.print_warning("No mask is provided! "
                         "Generated reconstruction space may be very big!")
        ph.print_warning("Consider using a mask to speed up computations")

        # args.extra_frame_target = 0
        # ph.print_warning("Overwritten: extra-frame-target set to 0")

    # Specify target stack for intensity correction and reconstruction space
    if args.target_stack is None:
        target_stack_index = 0
    else:
        try:
            target_stack_index = args.filenames.index(args.target_stack)
        except ValueError:
            raise ValueError(
                "--target-stack must correspond to an image as provided by "
                "--filenames")

    # ---------------------------Data Preprocessing---------------------------
    ph.print_title("Data Preprocessing")

    segmentation_propagator = segprop.SegmentationPropagation(
        # registration_method=regflirt.FLIRT(use_verbose=args.verbose),
        dilation_radius=args.dilation_radius,
        dilation_kernel="Ball",
    )

    data_preprocessing = dp.DataPreprocessing(
        stacks=stacks,
        segmentation_propagator=segmentation_propagator,
        use_cropping_to_mask=True,
        use_N4BiasFieldCorrector=args.bias_field_correction,
        target_stack_index=target_stack_index,
        boundary_i=args.boundary_stacks[0],
        boundary_j=args.boundary_stacks[1],
        boundary_k=args.boundary_stacks[2],
        unit="mm",
    )
    data_preprocessing.run()
    time_data_preprocessing = data_preprocessing.get_computational_time()

    # Get preprocessed stacks
    stacks = data_preprocessing.get_preprocessed_stacks()

    # Define reference/target stack for registration and reconstruction
    if args.reference is not None:
        reference = st.Stack.from_filename(file_path=args.reference,
                                           file_path_mask=args.reference_mask,
                                           extract_slices=False)

    else:
        reference = st.Stack.from_stack(stacks[target_stack_index])

    # ------------------------Volume-to-Volume Registration--------------------
    if args.two_step_cycles > 0 and len(stacks) > 1:

        if args.v2v_method == "FLIRT":
            # Define search angle ranges for FLIRT in all three dimensions
            search_angles = [
                "-searchr%s -%d %d" % (x, args.search_angle, args.search_angle)
                for x in ["x", "y", "z"]
            ]
            options = (" ").join(search_angles)
            # options += " -noresample"

            vol_registration = regflirt.FLIRT(
                registration_type="Rigid",
                use_fixed_mask=True,
                use_moving_mask=True,
                options=options,
                use_verbose=False,
            )
        else:
            vol_registration = niftyreg.RegAladin(
                registration_type="Rigid",
                use_fixed_mask=True,
                use_moving_mask=True,
                # options="-ln 2 -voff",
                use_verbose=False,
            )
        v2vreg = pipeline.VolumeToVolumeRegistration(
            stacks=stacks,
            reference=reference,
            registration_method=vol_registration,
            verbose=debug,
            robust=args.v2v_robust,
        )
        v2vreg.run()
        stacks = v2vreg.get_stacks()
        time_registration = v2vreg.get_computational_time()

    else:
        time_registration = ph.get_zero_time()

    # ---------------------------Intensity Correction--------------------------
    if args.intensity_correction:
        ph.print_title("Intensity Correction")
        intensity_corrector = ic.IntensityCorrection()
        intensity_corrector.use_individual_slice_correction(False)
        intensity_corrector.use_reference_mask(True)
        intensity_corrector.use_stack_mask(True)
        intensity_corrector.use_verbose(False)

        for i, stack in enumerate(stacks):
            if i == target_stack_index:
                ph.print_info("Stack %d (%s): Reference image. Skipped." %
                              (i + 1, stack.get_filename()))
                continue
            else:
                ph.print_info("Stack %d (%s): Intensity Correction ... " %
                              (i + 1, stack.get_filename()),
                              newline=False)
            intensity_corrector.set_stack(stack)
            intensity_corrector.set_reference(
                stacks[target_stack_index].get_resampled_stack(
                    resampling_grid=stack.sitk,
                    interpolator="NearestNeighbor",
                ))
            intensity_corrector.run_linear_intensity_correction()
            stacks[i] = intensity_corrector.get_intensity_corrected_stack()
            print("done (c1 = %g) " %
                  intensity_corrector.get_intensity_correction_coefficients())

    # ---------------------------Create first volume---------------------------
    time_tmp = ph.start_timing()

    # Isotropic resampling to define HR target space
    ph.print_title("Reconstruction Space Generation")
    HR_volume = reference.get_isotropically_resampled_stack(
        resolution=args.isotropic_resolution)
    ph.print_info(
        "Isotropic reconstruction space with %g mm resolution is created" %
        HR_volume.sitk.GetSpacing()[0])

    if args.reference is None:
        # Create joint image mask in target space
        joint_image_mask_builder = imb.JointImageMaskBuilder(
            stacks=stacks,
            target=HR_volume,
            dilation_radius=1,
        )
        joint_image_mask_builder.run()
        HR_volume = joint_image_mask_builder.get_stack()
        ph.print_info("Isotropic reconstruction space is centered around "
                      "joint stack masks. ")

        # Crop to space defined by mask (plus extra margin)
        HR_volume = HR_volume.get_cropped_stack_based_on_mask(
            boundary_i=args.extra_frame_target,
            boundary_j=args.extra_frame_target,
            boundary_k=args.extra_frame_target,
            unit="mm",
        )

        # Create first volume
        # If outlier rejection is activated, eliminate obvious outliers early
        # from stack and re-run SDA to get initial volume without them
        ph.print_title("First Estimate of HR Volume")
        if args.outlier_rejection and threshold_v2v > -1:
            ph.print_subtitle("SDA Approximation")
            SDA = sda.ScatteredDataApproximation(stacks,
                                                 HR_volume,
                                                 sigma=args.sigma)
            SDA.run()
            HR_volume = SDA.get_reconstruction()

            # Identify and reject outliers
            ph.print_subtitle("Eliminate slice outliers (%s < %g)" %
                              (rejection_measure, threshold_v2v))
            outlier_rejector = outre.OutlierRejector(
                stacks=stacks,
                reference=HR_volume,
                threshold=threshold_v2v,
                measure=rejection_measure,
                verbose=True,
            )
            outlier_rejector.run()
            stacks = outlier_rejector.get_stacks()

        ph.print_subtitle("SDA Approximation Image")
        SDA = sda.ScatteredDataApproximation(stacks,
                                             HR_volume,
                                             sigma=args.sigma)
        SDA.run()
        HR_volume = SDA.get_reconstruction()

        ph.print_subtitle("SDA Approximation Image Mask")
        SDA = sda.ScatteredDataApproximation(stacks,
                                             HR_volume,
                                             sigma=args.sigma,
                                             sda_mask=True)
        SDA.run()
        # HR volume contains updated mask based on SDA
        HR_volume = SDA.get_reconstruction()

        HR_volume.set_filename(SDA.get_setting_specific_filename())

    time_reconstruction = ph.stop_timing(time_tmp)

    if args.verbose:
        tmp = list(stacks)
        tmp.insert(0, HR_volume)
        sitkh.show_stacks(tmp, segmentation=HR_volume, viewer=args.viewer)

    # -----------Two-step Slice-to-Volume Registration-Reconstruction----------
    if args.two_step_cycles > 0:

        # Slice-to-volume registration set-up
        if args.metric == "ANTSNeighborhoodCorrelation":
            metric_params = {"radius": args.metric_radius}
        else:
            metric_params = None
        registration = regsitk.SimpleItkRegistration(
            moving=HR_volume,
            use_fixed_mask=True,
            use_moving_mask=True,
            interpolator="Linear",
            metric=args.metric,
            metric_params=metric_params,
            use_multiresolution_framework=args.multiresolution,
            shrink_factors=args.shrink_factors,
            smoothing_sigmas=args.smoothing_sigmas,
            initializer_type="SelfGEOMETRY",
            optimizer="ConjugateGradientLineSearch",
            optimizer_params={
                "learningRate": 1,
                "numberOfIterations": 100,
                "lineSearchUpperLimit": 2,
            },
            scales_estimator="Jacobian",
            use_verbose=debug,
        )

        # Volumetric reconstruction set-up
        if args.sda:
            recon_method = sda.ScatteredDataApproximation(
                stacks,
                HR_volume,
                sigma=args.sigma,
                use_masks=args.use_masks_srr,
            )
            alpha_range = [args.sigma, args.alpha]
        else:
            recon_method = tk.TikhonovSolver(
                stacks=stacks,
                reconstruction=HR_volume,
                reg_type="TK1",
                minimizer="lsmr",
                alpha=args.alpha_first,
                iter_max=np.min([args.iter_max_first, args.iter_max]),
                verbose=True,
                use_masks=args.use_masks_srr,
            )
            alpha_range = [args.alpha_first, args.alpha]

        # Define the regularization parameters for the individual
        # reconstruction steps in the two-step cycles
        alphas = np.linspace(alpha_range[0], alpha_range[1],
                             args.two_step_cycles)

        # Define outlier rejection threshold after each S2V-reg step
        thresholds = np.linspace(args.threshold_first, args.threshold,
                                 args.two_step_cycles)

        two_step_s2v_reg_recon = \
            pipeline.TwoStepSliceToVolumeRegistrationReconstruction(
                stacks=stacks,
                reference=HR_volume,
                registration_method=registration,
                reconstruction_method=recon_method,
                cycles=args.two_step_cycles,
                alphas=alphas[0:args.two_step_cycles - 1],
                outlier_rejection=args.outlier_rejection,
                threshold_measure=rejection_measure,
                thresholds=thresholds,
                interleave=args.interleave,
                viewer=args.viewer,
                verbose=args.verbose,
                use_hierarchical_registration=args.s2v_hierarchical,
            )
        two_step_s2v_reg_recon.run()
        HR_volume_iterations = \
            two_step_s2v_reg_recon.get_iterative_reconstructions()
        time_registration += \
            two_step_s2v_reg_recon.get_computational_time_registration()
        time_reconstruction += \
            two_step_s2v_reg_recon.get_computational_time_reconstruction()
        stacks = two_step_s2v_reg_recon.get_stacks()

    # no two-step s2v-registration/reconstruction iterations
    else:
        HR_volume_iterations = []

    # Write motion-correction results
    ph.print_title("Write Motion Correction Results")
    if args.write_motion_correction:
        dir_output_mc = os.path.join(dir_output,
                                     args.subfolder_motion_correction)
        ph.clear_directory(dir_output_mc)

        for stack in stacks:
            stack.write(
                dir_output_mc,
                write_stack=False,
                write_mask=False,
                write_slices=False,
                write_transforms=True,
                write_transforms_history=args.transforms_history,
            )

        if args.outlier_rejection:
            deleted_slices_dic = {}
            for i, stack in enumerate(stacks):
                deleted_slices = stack.get_deleted_slice_numbers()
                deleted_slices_dic[stack.get_filename()] = deleted_slices

            # check whether any stack was removed entirely
            stacks0 = data_preprocessing.get_preprocessed_stacks()
            if len(stacks) != len(stacks0):
                stacks_remain = [s.get_filename() for s in stacks]
                for stack in stacks0:
                    if stack.get_filename() in stacks_remain:
                        continue

                    # add info that all slices of this stack were rejected
                    deleted_slices = [
                        s.get_slice_number() for s in stack.get_slices()
                    ]
                    deleted_slices_dic[stack.get_filename()] = deleted_slices

            ph.write_dictionary_to_json(
                deleted_slices_dic,
                os.path.join(dir_output, args.subfolder_motion_correction,
                             "rejected_slices.json"))

    # ---------------------Final Volumetric Reconstruction---------------------
    ph.print_title("Final Volumetric Reconstruction")
    if args.sda:
        recon_method = sda.ScatteredDataApproximation(
            stacks,
            HR_volume,
            sigma=args.alpha,
            use_masks=args.use_masks_srr,
        )
    else:
        if args.reconstruction_type in ["TVL2", "HuberL2"]:
            recon_method = pd.PrimalDualSolver(
                stacks=stacks,
                reconstruction=HR_volume,
                reg_type="TV"
                if args.reconstruction_type == "TVL2" else "huber",
                iterations=args.iterations,
                use_masks=args.use_masks_srr,
            )
        else:
            recon_method = tk.TikhonovSolver(
                stacks=stacks,
                reconstruction=HR_volume,
                reg_type="TK1"
                if args.reconstruction_type == "TK1L2" else "TK0",
                use_masks=args.use_masks_srr,
            )
        recon_method.set_alpha(args.alpha)
        recon_method.set_iter_max(args.iter_max)
        recon_method.set_verbose(True)
    recon_method.run()
    time_reconstruction += recon_method.get_computational_time()
    HR_volume_final = recon_method.get_reconstruction()

    ph.print_subtitle("Final SDA Approximation Image Mask")
    SDA = sda.ScatteredDataApproximation(stacks,
                                         HR_volume_final,
                                         sigma=args.sigma,
                                         sda_mask=True)
    SDA.run()
    # HR volume contains updated mask based on SDA
    HR_volume_final = SDA.get_reconstruction()
    time_reconstruction += SDA.get_computational_time()

    elapsed_time_total = ph.stop_timing(time_start)

    # Write SRR result
    filename = recon_method.get_setting_specific_filename()
    HR_volume_final.set_filename(filename)
    dw.DataWriter.write_image(HR_volume_final.sitk,
                              args.output,
                              description=filename)
    dw.DataWriter.write_mask(HR_volume_final.sitk_mask,
                             ph.append_to_filename(args.output, "_mask"),
                             description=SDA.get_setting_specific_filename())

    HR_volume_iterations.insert(0, HR_volume_final)
    for stack in stacks:
        HR_volume_iterations.append(stack)

    if args.verbose:
        sitkh.show_stacks(
            HR_volume_iterations,
            segmentation=HR_volume_final,
            viewer=args.viewer,
        )

    # Summary
    ph.print_title("Summary")
    exe_file_info = os.path.basename(os.path.abspath(__file__)).split(".")[0]
    print("%s | Computational Time for Data Preprocessing: %s" %
          (exe_file_info, time_data_preprocessing))
    print("%s | Computational Time for Registrations: %s" %
          (exe_file_info, time_registration))
    print("%s | Computational Time for Reconstructions: %s" %
          (exe_file_info, time_reconstruction))
    print("%s | Computational Time for Entire Reconstruction Pipeline: %s" %
          (exe_file_info, elapsed_time_total))

    ph.print_line_separator()

    return 0
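
The alpha and threshold schedules above interpolate linearly across the two-step cycles; with the defaults (--alpha-first 0.2, --alpha 0.015, --threshold-first 0.5, --threshold 0.8, three cycles) they evaluate to:

import numpy as np

alphas = np.linspace(0.2, 0.015, 3)    # -> [0.2, 0.1075, 0.015]
thresholds = np.linspace(0.5, 0.8, 3)  # -> [0.5, 0.65, 0.8]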
def main():

    time_start = ph.start_timing()

    # Set print options for numpy
    np.set_printoptions(precision=3)

    # Read input
    input_parser = InputArgparser(
        description="Volumetric MRI reconstruction framework to reconstruct "
        "an isotropic, high-resolution 3D volume from multiple "
        "motion-corrected (or static) stacks of low-resolution slices.", )
    input_parser.add_filenames(required=True)
    input_parser.add_filenames_masks()
    input_parser.add_dir_input_mc()
    input_parser.add_output(required=True)
    input_parser.add_suffix_mask(default="_mask")
    input_parser.add_target_stack(default=None)
    input_parser.add_extra_frame_target(default=10)
    input_parser.add_isotropic_resolution(default=None)
    input_parser.add_intensity_correction(default=1)
    input_parser.add_reconstruction_space(default=None)
    input_parser.add_minimizer(default="lsmr")
    input_parser.add_iter_max(default=10)
    input_parser.add_reconstruction_type(default="TK1L2")
    input_parser.add_data_loss(default="linear")
    input_parser.add_data_loss_scale(default=1)
    input_parser.add_alpha(
        default=0.01  # TK1L2 @ isotropic_resolution = 0.8
        # default=0.006  #TVL2, HuberL2 @ isotropic_resolution = 0.8
    )
    input_parser.add_rho(default=0.1)
    input_parser.add_tv_solver(default="PD")
    input_parser.add_pd_alg_type(default="ALG2")
    input_parser.add_iterations(default=15)
    input_parser.add_log_config(default=1)
    input_parser.add_use_masks_srr(default=0)
    input_parser.add_slice_thicknesses(default=None)
    input_parser.add_verbose(default=0)
    input_parser.add_viewer(default="itksnap")
    input_parser.add_argument(
        "--mask",
        "-mask",
        action='store_true',
        help="If given, input images are interpreted as image masks. "
        "Obtained volumetric reconstruction will be exported in uint8 format.")
    input_parser.add_argument(
        "--sda",
        "-sda",
        action='store_true',
        help="If given, the volume is reconstructed using "
        "Scattered Data Approximation (Vercauteren et al., 2006). "
        "--alpha is considered the value for the standard deviation then. "
        "Recommended value is, e.g., --alpha 0.8")

    args = input_parser.parse_args()
    input_parser.print_arguments(args)

    if args.reconstruction_type not in ["TK1L2", "TVL2", "HuberL2"]:
        raise IOError("Reconstruction type unknown")

    if np.all([not args.output.endswith(t) for t in ALLOWED_EXTENSIONS]):
        raise ValueError("output filename '%s' invalid; "
                         "allowed image extensions are: %s" %
                         (args.output, ", ".join(ALLOWED_EXTENSIONS)))

    dir_output = os.path.dirname(args.output)
    ph.create_directory(dir_output)

    debug = 0

    if args.log_config:
        input_parser.log_config(os.path.abspath(__file__))

    if args.verbose:
        show_niftis = []
        # show_niftis = [f for f in args.filenames]

    # --------------------------------Read Data--------------------------------
    ph.print_title("Read Data")

    if args.mask:
        filenames_masks = args.filenames
    else:
        filenames_masks = args.filenames_masks

    data_reader = dr.MultipleImagesReader(
        file_paths=args.filenames,
        file_paths_masks=filenames_masks,
        suffix_mask=args.suffix_mask,
        dir_motion_correction=args.dir_input_mc,
        stacks_slice_thicknesses=args.slice_thicknesses,
    )
    data_reader.read_data()
    stacks = data_reader.get_data()

    ph.print_info("%d input stacks read for further processing" % len(stacks))

    # Specify target stack for intensity correction and reconstruction space
    if args.target_stack is None:
        target_stack_index = 0
    else:
        # TODO: deal with case when target stack got rejected in previous step
        filenames = ["%s.nii.gz" % s.get_filename() for s in stacks]
        filename_target_stack = os.path.basename(args.target_stack)
        try:
            target_stack_index = filenames.index(filename_target_stack)
        except ValueError:
            raise ValueError(
                "--target-stack must correspond to an image as provided by "
                "--filenames")

    # ---------------------------Intensity Correction--------------------------
    if args.intensity_correction and not args.mask:
        ph.print_title("Intensity Correction")
        intensity_corrector = ic.IntensityCorrection()
        intensity_corrector.use_individual_slice_correction(False)
        intensity_corrector.use_stack_mask(True)
        intensity_corrector.use_reference_mask(True)
        intensity_corrector.use_verbose(False)

        for i, stack in enumerate(stacks):
            if i == target_stack_index:
                ph.print_info("Stack %d (%s): Reference image. Skipped." %
                              (i + 1, stack.get_filename()))
                continue
            else:
                ph.print_info("Stack %d (%s): Intensity Correction ... " %
                              (i + 1, stack.get_filename()),
                              newline=False)
            intensity_corrector.set_stack(stack)
            intensity_corrector.set_reference(
                stacks[target_stack_index].get_resampled_stack(
                    resampling_grid=stack.sitk,
                    interpolator="NearestNeighbor",
                ))
            intensity_corrector.run_linear_intensity_correction()
            stacks[i] = intensity_corrector.get_intensity_corrected_stack()
            print("done (c1 = %g) " %
                  intensity_corrector.get_intensity_correction_coefficients())

    # -------------------------Volumetric Reconstruction-----------------------
    ph.print_title("Volumetric Reconstruction")

    # Reconstruction space defined by isotropically resampled,
    # bounding box-cropped target stack
    if args.reconstruction_space is None:
        recon0 = stacks[target_stack_index].get_isotropically_resampled_stack(
            resolution=args.isotropic_resolution,
            extra_frame=args.extra_frame_target,
        )
        recon0 = recon0.get_cropped_stack_based_on_mask(
            boundary_i=args.extra_frame_target,
            boundary_j=args.extra_frame_target,
            boundary_k=args.extra_frame_target,
            unit="mm",
        )

    # Reconstruction space was provided by user
    else:
        recon0 = st.Stack.from_filename(args.reconstruction_space,
                                        extract_slices=False)

        # Change resolution for isotropic resolution if provided by user
        if args.isotropic_resolution is not None:
            recon0 = recon0.get_isotropically_resampled_stack(
                args.isotropic_resolution)

        # Use image information of selected target stack as recon0 serves
        # as initial value for reconstruction
        recon0 = stacks[target_stack_index].get_resampled_stack(recon0.sitk)
        recon0 = recon0.get_stack_multiplied_with_mask()

    ph.print_info("Reconstruction space defined with %s mm3 resolution" %
                  " x ".join(["%.2f" % s for s in recon0.sitk.GetSpacing()]))

    if debug:
        # visualize (intensity corrected) data alongside recon0 init
        show = [st.Stack.from_stack(s) for s in stacks]
        show.insert(0, recon0)
        sitkh.show_stacks(show)

    if args.sda:
        ph.print_title("Compute SDA reconstruction")
        SDA = sda.ScatteredDataApproximation(stacks,
                                             recon0,
                                             sigma=args.alpha,
                                             sda_mask=args.mask)
        SDA.run()
        recon = SDA.get_reconstruction()
        filename = SDA.get_setting_specific_filename()
        if args.mask:
            dw.DataWriter.write_mask(recon.sitk_mask,
                                     args.output,
                                     description=filename)
        else:
            dw.DataWriter.write_image(recon.sitk,
                                      args.output,
                                      description=filename)

        if args.verbose:
            show_niftis.insert(0, args.output)

    else:
        if args.reconstruction_type in ["TVL2", "HuberL2"]:
            ph.print_title("Compute Initial value for %s" %
                           args.reconstruction_type)
            SRR0 = sda.ScatteredDataApproximation(stacks, recon0, sigma=0.8)
        else:
            ph.print_title("Compute %s reconstruction" %
                           args.reconstruction_type)
            SRR0 = tk.TikhonovSolver(
                stacks=stacks,
                reconstruction=recon0,
                alpha=args.alpha,
                iter_max=args.iter_max,
                reg_type="TK1",
                minimizer=args.minimizer,
                data_loss=args.data_loss,
                data_loss_scale=args.data_loss_scale,
                use_masks=args.use_masks_srr,
                # verbose=args.verbose,
            )
        SRR0.run()

        recon = SRR0.get_reconstruction()
        filename = SRR0.get_setting_specific_filename()

        if args.verbose and args.reconstruction_type in ["TVL2", "HuberL2"]:
            output = ph.append_to_filename(args.output, "_init")

            if args.mask:
                mask_estimator = bm.BinaryMaskFromMaskSRREstimator(recon.sitk)
                mask_estimator.run()
                mask_sitk = mask_estimator.get_mask_sitk()
                dw.DataWriter.write_mask(mask_sitk,
                                         output,
                                         description=filename)
            else:
                dw.DataWriter.write_image(recon.sitk,
                                          output,
                                          description=filename)

            show_niftis.insert(0, output)

        if args.reconstruction_type in ["TVL2", "HuberL2"]:
            ph.print_title("Compute %s reconstruction" %
                           args.reconstruction_type)
            if args.tv_solver == "ADMM":
                SRR = admm.ADMMSolver(
                    stacks=stacks,
                    reconstruction=st.Stack.from_stack(
                        SRR0.get_reconstruction()),
                    minimizer=args.minimizer,
                    alpha=args.alpha,
                    iter_max=args.iter_max,
                    rho=args.rho,
                    data_loss=args.data_loss,
                    iterations=args.iterations,
                    use_masks=args.use_masks_srr,
                    verbose=args.verbose,
                )

            else:
                SRR = pd.PrimalDualSolver(
                    stacks=stacks,
                    reconstruction=st.Stack.from_stack(
                        SRR0.get_reconstruction()),
                    minimizer=args.minimizer,
                    alpha=args.alpha,
                    iter_max=args.iter_max,
                    iterations=args.iterations,
                    alg_type=args.pd_alg_type,
                    reg_type="TV"
                    if args.reconstruction_type == "TVL2" else "huber",
                    data_loss=args.data_loss,
                    use_masks=args.use_masks_srr,
                    verbose=args.verbose,
                )
            SRR.run()
            recon = SRR.get_reconstruction()
            filename = SRR.get_setting_specific_filename()

        if args.mask:
            mask_estimator = bm.BinaryMaskFromMaskSRREstimator(recon.sitk)
            mask_estimator.run()
            mask_sitk = mask_estimator.get_mask_sitk()
            dw.DataWriter.write_mask(mask_sitk,
                                     args.output,
                                     description=filename)

        else:
            dw.DataWriter.write_image(recon.sitk,
                                      args.output,
                                      description=filename)

        if args.verbose:
            show_niftis.insert(0, args.output)

    if args.verbose:
        ph.show_niftis(show_niftis, viewer=args.viewer)

    ph.print_line_separator()

    elapsed_time = ph.stop_timing(time_start)
    ph.print_title("Summary")
    exe_file_info = os.path.basename(os.path.abspath(__file__)).split(".")[0]
    print("%s | Computational Time for Volumetric Reconstruction: %s" %
          (exe_file_info, elapsed_time))

    return 0
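
For reference, the TVL2 reconstruction type minimizes a total-variation-regularized least-squares objective; schematically, for a 1D signal (an illustration of the objective, not the PrimalDualSolver internals):

import numpy as np

def tv_l2_objective(A, x, y, alpha):
    # 0.5 * ||A x - y||^2 + alpha * ||D x||_1, with D a forward difference
    residual = A.dot(x) - y
    return 0.5 * residual.dot(residual) + alpha * np.abs(np.diff(x)).sum()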
    def _print_info_text(self):
        ph.print_subtitle("Primal-Dual Solver:")
        ph.print_info("Chosen regularization type: %s" % (self._reg_type),
                      newline=False)
        if self._reg_type == "huber":
            print(" (gamma = %g)" % (self._reg_huber_gamma))
        else:
            print("")
        ph.print_info("Strategy for parameter update: %s" % (self._alg_type))
        ph.print_info("Regularization parameter alpha: %g" % (self._alpha))
        if self._data_loss in ["huber"]:
            ph.print_info("Loss function: %s (gamma = %g)" %
                          (self._data_loss, self._huber_gamma))
        else:
            ph.print_info("Loss function: %s" % (self._data_loss))
        ph.print_info("Number of Primal-Dual iterations: %d" %
                      (self._iterations))
        ph.print_info("Minimizer: %s" % (self._minimizer))
        ph.print_info("Maximum number of iterations: %d" % (self._iter_max))
    def _run_intensity_correction(self, correction_model):

        N_slices = self._stack.get_number_of_slices()

        if correction_model in ["linear"]:
            correction_coefficients = np.zeros((N_slices, 1))
        elif correction_model in ["affine"]:
            correction_coefficients = np.zeros((N_slices, 2))

        # Get the required data arrays to perform intensity correction
        nda, nda_reference, nda_mask, nda_additional_stack = \
            self._get_data_arrays_prior_to_intensity_correction()

        if self._use_individual_slice_correction:
            if self._use_verbose:
                ph.print_info(
                    "Run " + correction_model +
                    " intensity correction for each slice individually")
            for i in range(0, N_slices):
                if self._use_verbose:
                    sys.stdout.write(
                        "Slice %2d/%d: " %
                        (i, self._stack.get_number_of_slices() - 1))
                    sys.stdout.flush()
                if self._additional_stack is None:
                    nda[i, :, :], correction_coefficients[i, :] = \
                        self._apply_intensity_correction[correction_model](
                            nda[i, :, :],
                            nda_reference[i, :, :],
                            nda_mask[i, :, :])
                else:
                    nda[i, :, :], correction_coefficients[i, :], \
                        nda_additional_stack[i, :, :] = \
                        self._apply_intensity_correction[correction_model](
                            nda[i, :, :],
                            nda_reference[i, :, :],
                            nda_mask[i, :, :],
                            nda_additional_stack[i, :, :])
        else:
            if self._use_verbose:
                ph.print_info(
                    "Run " + correction_model +
                    " intensity correction uniformly for entire stack")
            if self._additional_stack is None:
                nda, cc = \
                    self._apply_intensity_correction[
                        correction_model](nda, nda_reference, nda_mask)
            else:
                nda, cc, nda_additional_stack = \
                    self._apply_intensity_correction[correction_model](
                        nda, nda_reference, nda_mask, nda_additional_stack)
            correction_coefficients = np.tile(cc, (N_slices, 1))

        # Create Stack instance with correct image header information
        if self._additional_stack is None:
            return self._create_stack_from_corrected_intensity_array(
                nda, self._stack), correction_coefficients, None
        else:
            return self._create_stack_from_corrected_intensity_array(
                nda, self._stack
            ), correction_coefficients, self._create_stack_from_corrected_intensity_array(
                nda_additional_stack, self._additional_stack)
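
Analogously to the linear case, the "affine" branch fits two coefficients per slice (a scale and an offset); a least-squares sketch of such a model, illustrative rather than the class's exact implementation:

import numpy as np

def fit_affine_intensity(nda, nda_reference, nda_mask):
    # Least-squares fit of nda_reference ~ c1 * nda + c0 inside the mask
    x = nda[nda_mask > 0].astype(float)
    y = nda_reference[nda_mask > 0].astype(float)
    c1, c0 = np.polyfit(x, y, 1)
    return c1 * nda + c0, np.array([c1, c0])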