    def _run(self, debug=0):

        for i, stack in enumerate(self._stacks):
            slices = stack.get_slices()
            for indices in self._slice_index_sets_of_stacks[i]:
                txt = "%sSliceSet-to-Volume Registration -- " \
                    "Stack %d/%d -- Slices %s" % (
                        self._print_prefix,
                        i + 1, len(self._stacks),
                        str(indices))
                if self._verbose:
                    ph.print_subtitle(txt)
                else:
                    ph.print_info(txt)

                image = self._get_stack_subgroup(stack, indices)

                if debug:
                    ph.killall_itksnap()
                    image.show()
                    stack.get_slice(indices[1]).show()

                self._registration_method.set_fixed(image)
                self._registration_method.run()
                transform_sitk = self._registration_method.\
                    get_registration_transform_sitk()

                for j in indices:
                    slices[j].update_motion_correction(transform_sitk)
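# NOTE: `_get_stack_subgroup` is not part of this snippet. A minimal
# sketch of the idea -- extracting the sub-volume spanned by a sorted,
# contiguous set of slice indices -- could look as follows (hypothetical
# standalone helper operating on a plain `sitk.Image`, not the actual
# class method, which returns a Stack wrapper):
import SimpleITK as sitk

def get_stack_subgroup(stack_sitk, indices):
    # Slicing a sitk.Image preserves origin, spacing and direction,
    # so the sub-volume stays aligned with the original stack
    return stack_sitk[:, :, indices[0]:indices[-1] + 1]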
Example #2
    def _run(self):

        ph.print_title("Slice-to-Volume Registration")

        self._registration_method.set_moving(self._reference)

        for i, stack in enumerate(self._stacks):
            slices = stack.get_slices()

            transforms_sitk = {}

            for j, slice_j in enumerate(slices):

                txt = "%sSlice-to-Volume Registration -- " \
                    "Stack %d/%d (%s) -- Slice %d/%d" % (
                        self._print_prefix,
                        i + 1, len(self._stacks), stack.get_filename(),
                        j + 1, len(slices))
                if self._verbose:
                    ph.print_subtitle(txt)
                else:
                    ph.print_info(txt)

                self._registration_method.set_fixed(slice_j)
                self._registration_method.run()

                # Store information on registration transform
                transform_sitk = \
                    self._registration_method.get_registration_transform_sitk()
                transforms_sitk[slice_j.get_slice_number()] = transform_sitk

            # Update position of slice
            for slice in slices:
                slice_number = slice.get_slice_number()
                slice.update_motion_correction(transforms_sitk[slice_number])
    def _run(self):

        ph.print_title("Slice-to-Volume Registration")

        self._registration_method.set_moving(self._reference)

        for i in range(0, len(self._stacks)):
            stack = self._stacks[i]
            slices = stack.get_slices()
            for j in range(0, len(slices)):

                txt = "%sSlice-to-Volume Registration -- " \
                    "Stack %d/%d -- Slice %d/%d" % (
                        self._print_prefix,
                        i+1, len(self._stacks),
                        j+1, len(slices))
                if self._verbose:
                    ph.print_subtitle(txt)
                else:
                    ph.print_info(txt)

                self._registration_method.set_fixed(slices[j])
                self._registration_method.run()

                # Update position of slice
                transform_sitk = \
                    self._registration_method.\
                    get_registration_transform_sitk()
                slices[j].update_motion_correction(transform_sitk)
Example #4
    def _print_info_text(self):

        ph.print_subtitle("Tikhonov Solver:")
        ph.print_info("Chosen regularization type: ", newline=False)
        if self._reg_type in ["TK0"]:
            print("Zeroth-order Tikhonov")

        else:
            print("First-order Tikhonov")

        if self._deconvolution_mode in ["only_in_plane"]:
            ph.print_info("(Only in-plane deconvolution is performed)")

        elif self._deconvolution_mode in ["predefined_covariance"]:
            ph.print_info("(Predefined covariance used: cov = %s)" %
                          (np.diag(self._predefined_covariance)))

        if self._data_loss in ["huber"]:
            ph.print_info("Loss function: %s (gamma = %g)" %
                          (self._data_loss, self._huber_gamma))
        else:
            ph.print_info("Loss function: %s" % (self._data_loss))

        if self._data_loss != "linear":
            ph.print_info("Loss function scale: %g" % (self._data_loss_scale))

        ph.print_info("Regularization parameter: " + str(self._alpha))
        ph.print_info("Minimizer: " + self._minimizer)
        ph.print_info("Maximum number of iterations: " + str(self._iter_max))
Example #5
    def _run(self):

        ph.print_title("Volume-to-Volume Registration")

        for i in range(0, len(self._stacks)):
            txt = "Volume-to-Volume Registration -- " \
                "Stack %d/%d" % (i + 1, len(self._stacks))
            if self._verbose:
                ph.print_subtitle(txt)
            else:
                ph.print_info(txt)

            if self._robust:
                transform_initializer = tinit.TransformInitializer(
                    fixed=self._reference,
                    moving=self._stacks[i],
                    similarity_measure="NCC",
                    refine_pca_initializations=True,
                )
                transform_initializer.run()
                transform_sitk = transform_initializer.get_transform_sitk()
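                # The initializer was run with fixed=reference and
                # moving=stack; invert the result so it matches the
                # convention of the registration branch below
                # (fixed=stack, moving=reference)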
                transform_sitk = sitk.AffineTransform(
                    transform_sitk.GetInverse())

            else:
                self._registration_method.set_moving(self._reference)
                self._registration_method.set_fixed(self._stacks[i])
                self._registration_method.run()
                transform_sitk = self._registration_method.get_registration_transform_sitk()

            # Update position of stack
            self._stacks[i].update_motion_correction(transform_sitk)
Example #6
    def _print_info_text(self):
        ph.print_subtitle("ADMM Solver:")
        ph.print_info("Chosen regularization type: TV")
        ph.print_info("Regularization parameter alpha: " + str(self._alpha))
        ph.print_info(
            "Regularization parameter of augmented Lagrangian term rho: "
            + str(self._rho))
        ph.print_info("Number of ADMM iterations: " + str(self._iterations))
        ph.print_info(
            "Maximum number of TK1 solver iterations: " + str(self._iter_max))
    def _run(self):

        ph.print_title("Multi-Component Reconstruction")

        self._reconstructions = [None] * len(self._stacks)

        for i in range(0, len(self._stacks)):
            ph.print_subtitle("Multi-Component Reconstruction -- "
                              "Stack %d/%d" % (i + 1, len(self._stacks)))
            stack = self._stacks[i]
            self._reconstruction_method.set_stacks([stack])
            self._reconstruction_method.run()
            self._reconstructions[i] = st.Stack.from_stack(
                self._reconstruction_method.get_reconstruction())
            self._reconstructions[i].set_filename(stack.get_filename() +
                                                  self._suffix)
    def _print_info_text(self):
        ph.print_subtitle("Primal-Dual Solver:")
        ph.print_info("Chosen regularization type: %s" % (self._reg_type),
                      newline=False)
        if self._reg_type == "huber":
            print(" (gamma = %g)" % (self._reg_huber_gamma))
        else:
            print("")
        ph.print_info("Strategy for parameter update: %s" % (self._alg_type))
        ph.print_info("Regularization parameter alpha: %g" % (self._alpha))
        if self._data_loss in ["huber"]:
            ph.print_info("Loss function: %s (gamma = %g)" %
                          (self._data_loss, self._huber_gamma))
        else:
            ph.print_info("Loss function: %s" % (self._data_loss))
        ph.print_info("Number of Primal-Dual iterations: %d" %
                      (self._iterations))
        ph.print_info("Minimizer: %s" % (self._minimizer))
        ph.print_info("Maximum number of iterations: %d" % (self._iter_max))
    def _run(self):

        ph.print_title("Volume-to-Volume Registration")

        self._registration_method.set_moving(self._reference)

        for i in range(0, len(self._stacks)):
            txt = "Volume-to-Volume Registration -- " \
                "Stack %d/%d" % (i + 1, len(self._stacks))
            if self._verbose:
                ph.print_subtitle(txt)
            else:
                ph.print_info(txt)

            self._registration_method.set_fixed(self._stacks[i])
            self._registration_method.run()

            # Update position of stack
            transform_sitk = \
                self._registration_method.\
                get_registration_transform_sitk()
            self._stacks[i].update_motion_correction(transform_sitk)
Example #10
    def _run(self, debug=0):
        ph.print_title(
            "Hierarchical SliceSet2V-Registration")

        N_stacks = len(self._stacks)

        self._registration_method.set_moving(self._reference)

        for i_stack, stack in enumerate(self._stacks):
            n_slices = stack.get_number_of_slices()
            for i in range(self._interleave):
                package = list(np.arange(i, n_slices, self._interleave))
                if len(package) / 2 >= self._min_slices:
                    indices_splits = self._recursive_split(
                        package, [], self._min_slices)
                else:
                    indices_splits = [package]

                prefix = "Hierarchical S2V-Reg: " \
                    "Stack %d/%d (%s) -- Interleave %d/%d --" % (
                        i_stack + 1, len(self._stacks), stack.get_filename(),
                        i + 1, self._interleave,
                    )
                if debug:
                    ph.print_subtitle(
                        "%s %d splits: %s" % (
                            prefix, len(indices_splits), indices_splits),
                    )

                ss2vreg = SliceSetToVolumeRegistration(
                    print_prefix=prefix,
                    stack=stack,
                    reference=self._reference,
                    registration_method=self._registration_method,
                    slice_set_indices=indices_splits,
                    verbose=self._verbose,
                )
                ss2vreg.run()
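# NOTE: `_recursive_split` is not shown above. Given the call site, one
# plausible coarse-to-fine sketch is to register the full index set first
# and then recurse on ever smaller halves until `min_slices` is reached
# (hypothetical helper, mirroring the guard used by the caller):
def recursive_split(indices, splits, min_slices):
    splits.append(indices)
    if len(indices) / 2 >= min_slices:
        mid = len(indices) // 2
        recursive_split(indices[:mid], splits, min_slices)
        recursive_split(indices[mid:], splits, min_slices)
    return splits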
Example #11
    def _run(self, debug=1):

        stack = self._stacks[0]
        slices = stack.get_slices()
        for i, indices in enumerate(self._slice_set_indices):
            txt = "%s Split %d/%d -- Slices %s" % (
                self._print_prefix, i + 1,
                len(self._slice_set_indices), str(indices))
            if self._verbose:
                ph.print_subtitle(txt)
            else:
                ph.print_info(txt)

            image = self._get_stack_subgroup(indices)

            if debug:
                first = np.linalg.norm(
                    stack.get_slice(indices[0]).sitk.GetOrigin() -
                    np.array(image.sitk[:, :, 0:1].GetOrigin()))
                last = np.linalg.norm(
                    stack.get_slice(indices[-1]).sitk.GetOrigin() -
                    np.array(image.sitk[:, :, -1:].GetOrigin()))
                if first > 1e-6:
                    raise RuntimeError(
                        "Hierarchical S2V: first slice position flawed")
                if last > 1e-6:
                    raise RuntimeError(
                        "Hierarchical S2V: last slice position flawed")

            self._registration_method.set_fixed(image)
            self._registration_method.run()
            transform_sitk = self._registration_method.\
                get_registration_transform_sitk()

            for j in indices:
                slices[j].update_motion_correction(transform_sitk)
Example #12
    def _run(self):

        ph.print_title("Two-step S2V-Registration and SRR Reconstruction")

        s2vreg = SliceToVolumeRegistration(
            stacks=self._stacks,
            reference=self._reference,
            registration_method=self._registration_method,
            verbose=False,
            interleave=self._interleave,
        )

        reference = self._reference

        for cycle in range(0, self._cycles):

            if cycle == 0 and self._use_hierarchical_registration:
                hs2vreg = HieararchicalSliceSetRegistration(
                    stacks=self._stacks,
                    reference=reference,
                    registration_method=self._registration_method,
                    interleave=self._interleave,
                    viewer=self._viewer,
                    min_slices=1,
                    verbose=False,
                )
                hs2vreg.run()
                self._computational_time_registration += \
                    hs2vreg.get_computational_time()
            else:
                # Slice-to-volume registration step
                s2vreg.set_reference(reference)
                s2vreg.set_print_prefix("Cycle %d/%d: " %
                                        (cycle + 1, self._cycles))
                s2vreg.run()

                self._computational_time_registration += \
                    s2vreg.get_computational_time()

            # Reject misregistered slices
            if self._outlier_rejection:
                ph.print_subtitle("Slice Outlier Rejection (%s < %g)" % (
                    self._threshold_measure, self._thresholds[cycle]))
                outlier_rejector = outre.OutlierRejector(
                    stacks=self._stacks,
                    reference=self._reference,
                    threshold=self._thresholds[cycle],
                    measure=self._threshold_measure,
                    verbose=True,
                )
                outlier_rejector.run()
                self._stacks = outlier_rejector.get_stacks()
                self._reconstruction_method.set_stacks(self._stacks)

                if len(self._stacks) == 0:
                    raise RuntimeError(
                        "All slices of all stacks were rejected "
                        "as outliers. Volumetric reconstruction is aborted.")

            # SRR step
            if cycle < self._cycles - 1:
                # ---------------- Perform Image Reconstruction ---------------
                ph.print_subtitle("Volumetric Image Reconstruction")
                if isinstance(
                    self._reconstruction_method,
                    sda.ScatteredDataApproximation
                ):
                    self._reconstruction_method.set_sigma(self._alphas[cycle])
                else:
                    self._reconstruction_method.set_alpha(self._alphas[cycle])
                self._reconstruction_method.run()

                self._computational_time_reconstruction += \
                    self._reconstruction_method.get_computational_time()

                reference = self._reconstruction_method.get_reconstruction()

                # ------------------ Perform Image Mask SDA -------------------
                ph.print_subtitle("Volumetric Image Mask Reconstruction")
                SDA = sda.ScatteredDataApproximation(
                    self._stacks,
                    reference,
                    sigma=self._sigma_sda_mask,
                    sda_mask=True,
                )
                SDA.run()

                # reference contains updated mask based on SDA
                reference = SDA.get_reconstruction()

                # -------------------- Store Reconstruction -------------------
                filename = "Iter%d_%s" % (
                    cycle + 1,
                    self._reconstruction_method.get_setting_specific_filename()
                )
                self._reconstructions.insert(0, st.Stack.from_stack(
                    reference, filename=filename))

                if self._verbose:
                    sitkh.show_stacks(self._reconstructions,
                                      segmentation=self._reference,
                                      viewer=self._viewer)
    def _run(self):

        ph.print_title("Two-step S2V-Registration and SRR Reconstruction")

        # Use linearly spaced alphas for the intermediate SRR steps; the
        # alpha of the final SRR step is set separately
        alphas = np.linspace(self._alpha_range[0], self._alpha_range[1],
                             self._cycles)

        thresholds = np.linspace(self._threshold_range[0],
                                 self._threshold_range[1], self._cycles)

        s2vreg = SliceToVolumeRegistration(
            stacks=self._stacks,
            reference=self._reference,
            registration_method=self._registration_method,
            verbose=self._verbose,
            threshold_measure=self._threshold_measure,
            interleave=self._interleave,
        )

        reference = self._reference

        for cycle in range(0, self._cycles):

            # Slice-to-volume registration step
            s2vreg.set_reference(reference)
            s2vreg.set_print_prefix("Cycle %d/%d: " %
                                    (cycle + 1, self._cycles))
            if self._outlier_rejection:
                s2vreg.set_threshold(thresholds[cycle])
            if self._use_robust_registration and cycle == 0:
                s2vreg.set_s2v_smoothing(self._s2v_smoothing)
            else:
                s2vreg.set_s2v_smoothing(None)
            s2vreg.run()

            self._computational_time_registration += \
                s2vreg.get_computational_time()

            # SRR step
            if cycle < self._cycles - 1:
                # ---------------- Perform Image Reconstruction ---------------
                ph.print_subtitle("Volumetric Image Reconstruction")
                if isinstance(self._reconstruction_method,
                              sda.ScatteredDataApproximation):
                    self._reconstruction_method.set_sigma(alphas[cycle])
                else:
                    self._reconstruction_method.set_alpha(alphas[cycle])
                self._reconstruction_method.run()

                self._computational_time_reconstruction += \
                    self._reconstruction_method.get_computational_time()

                reference = self._reconstruction_method.get_reconstruction()

                # ------------------ Perform Image Mask SDA -------------------
                ph.print_subtitle("Volumetric Image Mask Reconstruction")
                SDA = sda.ScatteredDataApproximation(
                    self._stacks,
                    reference,
                    sigma=self._sigma_sda_mask,
                    sda_mask=True,
                )
                SDA.run()

                # reference contains updated mask based on SDA
                reference = SDA.get_reconstruction()

                # -------------------- Store Reconstruction -------------------
                filename = "Iter%d_%s" % (cycle + 1,
                                          self._reconstruction_method.
                                          get_setting_specific_filename())
                self._reconstructions.insert(
                    0, st.Stack.from_stack(reference, filename=filename))

                if self._verbose:
                    sitkh.show_stacks(self._reconstructions,
                                      segmentation=self._reference,
                                      viewer=self._viewer)
Example #14
def main():

    # Set print options
    np.set_printoptions(precision=3)
    pd.set_option('display.width', 1000)

    input_parser = InputArgparser(
        description="Evaluate similarity measures between the provided "
        "images and a reference image.")
    input_parser.add_filenames(required=True)
    input_parser.add_reference(required=True)
    input_parser.add_reference_mask()
    input_parser.add_dir_output(required=True)
    input_parser.add_measures(
        default=["PSNR", "RMSE", "MAE", "SSIM", "NCC", "NMI"])
    input_parser.add_verbose(default=0)
    args = input_parser.parse_args()
    input_parser.print_arguments(args)

    ph.print_title("Image similarity")
    data_reader = dr.MultipleImagesReader(args.filenames)
    data_reader.read_data()
    stacks = data_reader.get_data()

    reference = st.Stack.from_filename(args.reference, args.reference_mask)

    for stack in stacks:
        try:
            stack.sitk - reference.sitk
        except RuntimeError:
            raise IOError(
                "All provided images must occupy the same image space")

    x_ref = sitk.GetArrayFromImage(reference.sitk)

    if args.reference_mask is None:
        indices = np.where(x_ref != np.inf)
    else:
        x_ref_mask = sitk.GetArrayFromImage(reference.sitk_mask)
        indices = np.where(x_ref_mask > 0)

    measures_dic = {
        m: lambda x, m=m: SimilarityMeasures.similarity_measures[m]
        (x[indices], x_ref[indices])
        # SimilarityMeasures.similarity_measures[m](x, x_ref)
        for m in args.measures
    }

    observer = obs.Observer()
    observer.set_measures(measures_dic)
    for stack in stacks:
        nda = sitk.GetArrayFromImage(stack.sitk)
        observer.add_x(nda)

    if args.verbose:
        stacks_comparison = [s for s in stacks]
        stacks_comparison.insert(0, reference)
        sitkh.show_stacks(
            stacks_comparison,
            segmentation=reference,
        )

    observer.compute_measures()
    measures = observer.get_measures()

    # Store information in array
    error = np.zeros((len(stacks), len(measures)))
    cols = measures
    rows = []
    for i_stack, stack in enumerate(stacks):
        error[i_stack, :] = np.array([measures[m][i_stack] for m in measures])
        rows.append(stack.get_filename())

    header = "# Ref: %s, Ref-Mask: %d, %s \n" % (
        reference.get_filename(),
        args.reference_mask is None,
        ph.get_time_stamp(),
    )
    header += "# %s\n" % ("\t").join(measures)

    path_to_file_filenames = os.path.join(args.dir_output, "filenames.txt")
    path_to_file_similarities = os.path.join(args.dir_output,
                                             "similarities.txt")

    # Write to files
    ph.write_to_file(path_to_file_similarities, header)
    ph.write_array_to_file(path_to_file_similarities, error, verbose=False)
    text = header
    text += "%s\n" % "\n".join(rows)
    ph.write_to_file(path_to_file_filenames, text)

    # Print to screen
    ph.print_subtitle("Computed Similarities")
    df = pd.DataFrame(error, rows, cols)
    print(df)

    return 0
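# NOTE: `SimilarityMeasures.similarity_measures` is used as a black box
# above. For illustration, two of the listed measures on flattened,
# masked voxel vectors might be implemented as follows (sketch, assuming
# numpy arrays of equal shape):
import numpy as np

def rmse(x, x_ref):
    return np.sqrt(np.mean((x - x_ref) ** 2))

def ncc(x, x_ref):
    # Normalized cross-correlation in [-1, 1]
    xm, rm = x - x.mean(), x_ref - x_ref.mean()
    return np.sum(xm * rm) / (np.linalg.norm(xm) * np.linalg.norm(rm))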
    def _run(self):

        ph.print_title("Slice-to-Volume Registration")

        self._registration_method.set_moving(self._reference)

        for i, stack in enumerate(self._stacks):
            slices = stack.get_slices()
            transforms_sitk = [None] * len(slices)

            for j, slice_j in enumerate(slices):

                txt = "%sSlice-to-Volume Registration -- " \
                    "Stack %d/%d -- Slice %d/%d" % (
                        self._print_prefix,
                        i + 1, len(self._stacks),
                        j + 1, len(slices))
                if self._verbose:
                    ph.print_subtitle(txt)
                else:
                    ph.print_info(txt)

                self._registration_method.set_fixed(slice_j)
                self._registration_method.run()

                # Store information on registration transform
                transform_sitk = \
                    self._registration_method.\
                    get_registration_transform_sitk()
                transforms_sitk[j] = transform_sitk

            # Avoid slice misregistrations
            if self._s2v_smoothing is not None:
                ph.print_subtitle("Robust slice motion estimation "
                                  "(GP smoothing = %g, interleave = %d)" %
                                  (self._s2v_smoothing, self._interleave))
                robust_motion_estimator = rme.RobustMotionEstimator(
                    transforms_sitk=transforms_sitk,
                    interleave=self._interleave)
                robust_motion_estimator.run_gaussian_process_smoothing(
                    self._s2v_smoothing)
                transforms_sitk = \
                    robust_motion_estimator.get_robust_transforms_sitk()

                # Export figures
                # title = "%s_Stack%d%s" % (
                #     self._print_prefix, i, stack.get_filename())
                # title = ph.replace_string_for_print(title)
                # robust_motion_estimator.show_estimated_transform_parameters(
                #     dir_output="/tmp/fetal_brain/figs", title=title)

            # dir_output = "/tmp/fetal/figs"
            # motion_evaluator = me.MotionEvaluator(transforms_sitk)
            # motion_evaluator.run()
            # motion_evaluator.display(dir_output=dir_output, title=title)
            # motion_evaluator.show(dir_output=dir_output, title=title)

            # Update position of slice
            for j, slice_j in enumerate(slices):
                slice_j.update_motion_correction(transforms_sitk[j])

        # Reject misregistered slices
        if self._threshold is not None:
            ph.print_subtitle("Slice Outlier Rejection (%s < %g)" %
                              (self._threshold_measure, self._threshold))
            outlier_rejector = outre.OutlierRejector(
                stacks=self._stacks,
                reference=self._reference,
                threshold=self._threshold,
                measure=self._threshold_measure,
                verbose=True,
            )
            outlier_rejector.run()
            self._stacks = outlier_rejector.get_stacks()

            if len(self._stacks) == 0:
                raise RuntimeError(
                    "All slices of all stacks were rejected "
                    "as outliers. Volumetric reconstruction is aborted.")
Example #16
def main():

    input_parser = InputArgparser(
        description="Script to export a side-by-side comparison of originally "
        "acquired and simulated/projected slice given the estimated "
        "volumetric reconstruction."
        "This function takes the result of "
        "simulate_stacks_from_reconstruction.py as input.", )
    input_parser.add_filenames(required=True)
    input_parser.add_dir_output(required=True)
    input_parser.add_option(
        option_string="--prefix-simulated",
        type=str,
        help="Specify the prefix of the simulated stacks to distinguish them "
        "from the original data.",
        default="Simulated_",
    )
    input_parser.add_option(
        option_string="--dir-input-simulated",
        type=str,
        help="Specify the directory where the simulated stacks are. "
        "If not given, it is assumed that they are in the same directory "
        "as the original ones.",
        default=None)
    input_parser.add_option(
        option_string="--resize",
        type=float,
        help="Factor to resize images (otherwise they might be very small "
        "depending on the FOV)",
        default=3)

    args = input_parser.parse_args()
    input_parser.print_arguments(args)

    # --------------------------------Read Data--------------------------------
    ph.print_title("Read Data")

    # Read original data
    filenames_original = args.filenames
    data_reader = dr.MultipleImagesReader(filenames_original)
    data_reader.read_data()
    stacks_original = data_reader.get_data()

    # Read data simulated from obtained reconstruction
    if args.dir_input_simulated is None:
        dir_input_simulated = os.path.dirname(filenames_original[0])
    else:
        dir_input_simulated = args.dir_input_simulated
    filenames_simulated = [
        os.path.join("%s", "%s%s") %
        (dir_input_simulated, args.prefix_simulated, os.path.basename(f))
        for f in filenames_original
    ]
    data_reader = dr.MultipleImagesReader(filenames_simulated)
    data_reader.read_data()
    stacks_simulated = data_reader.get_data()

    ph.create_directory(args.dir_output)

    for i in range(len(stacks_original)):
        try:
            stacks_original[i].sitk - stacks_simulated[i].sitk
        except RuntimeError:
            raise IOError(
                "Images '%s' and '%s' do not occupy the same space!" %
                (filenames_original[i], filenames_simulated[i]))

    # ---------------------Create side-by-side comparisons---------------------
    ph.print_title("Create side-by-side comparisons")
    intensity_max = 255
    intensity_min = 0
    for i in range(len(stacks_original)):
        ph.print_subtitle("Stack %d/%d" % (i + 1, len(stacks_original)))
        nda_3D_original = sitk.GetArrayFromImage(stacks_original[i].sitk)
        nda_3D_simulated = sitk.GetArrayFromImage(stacks_simulated[i].sitk)

        # Scale uniformly between 0 and 255 according to the simulated stack
        # for export to png
        scale = np.max(nda_3D_simulated)
        nda_3D_original = intensity_max * nda_3D_original / scale
        nda_3D_simulated = intensity_max * nda_3D_simulated / scale

        nda_3D_simulated = np.clip(nda_3D_simulated, intensity_min,
                                   intensity_max)
        nda_3D_original = np.clip(nda_3D_original, intensity_min,
                                  intensity_max)

        filename = stacks_original[i].get_filename()
        path_to_file = os.path.join(args.dir_output, "%s.pdf" % filename)

        # Export side-by-side comparison of each stack to a pdf file
        export_comparison_to_file(nda_3D_original,
                                  nda_3D_simulated,
                                  path_to_file,
                                  resize=args.resize)
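# NOTE: `export_comparison_to_file` is defined elsewhere in the script.
# A minimal matplotlib-based sketch of such a side-by-side export,
# assuming (slices, y, x) arrays already scaled to [0, 255]:
import matplotlib.pyplot as plt

def export_comparison_to_file(nda_original, nda_simulated, path_to_file,
                              resize=3):
    n_slices = nda_original.shape[0]
    fig, axes = plt.subplots(n_slices, 2, squeeze=False,
                             figsize=(2 * resize, resize * n_slices))
    for k in range(n_slices):
        for ax, nda, title in zip(axes[k],
                                  (nda_original, nda_simulated),
                                  ("original", "simulated")):
            ax.imshow(nda[k], cmap="gray", vmin=0, vmax=255)
            ax.set_title("%s, slice %d" % (title, k))
            ax.axis("off")
    fig.savefig(path_to_file)  # a .pdf suffix yields a PDF file
    plt.close(fig)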
Example #17
def main():

    time_start = ph.start_timing()

    # Read input
    parser = argparse.ArgumentParser(
        description="Perform rigid registration using landmarks",
        prog=None,
        epilog="Author: Michael Ebner ([email protected])",
    )
    parser.add_argument(
        "-f",
        "--fixed",
        help="Path to fixed image landmarks.",
        type=str,
        required=True,
    )
    parser.add_argument(
        "-m",
        "--moving",
        help="Path to moving image landmarks.",
        type=str,
        required=True,
    )
    parser.add_argument(
        "-o",
        "--output",
        help="Path for obtained SimpleITK registration transform (.txt)",
        type=str,
        required=True,
    )
    parser.add_argument(
        "-v",
        "--verbose",
        help="Turn on/off verbose output",
        type=int,
        required=False,
        default=0,
    )
    parser.add_argument(
        "--pca",
        "-pca",
        action="store_true",
        help="If given, principal component analysis (PCA) is used "
        "to test various initializations for the point based registrations.")

    args = parser.parse_args()

    landmarks_fixed_nda = dr.DataReader.read_landmarks(args.fixed)
    landmarks_moving_nda = dr.DataReader.read_landmarks(args.moving)

    if args.pca:
        ph.print_subtitle("Use PCA to initialize registrations")
        pca_fixed = pca.PrincipalComponentAnalysis(landmarks_fixed_nda)
        pca_fixed.run()
        eigvec_fixed = pca_fixed.get_eigvec()
        mean_fixed = pca_fixed.get_mean()

        pca_moving = pca.PrincipalComponentAnalysis(landmarks_moving_nda)
        pca_moving.run()
        eigvec_moving = pca_moving.get_eigvec()
        mean_moving = pca_moving.get_mean()

        # test different initializations based on eigenvector orientations
        orientations = [
            [1, 1],
            [1, -1],
            [-1, 1],
            [-1, -1],
        ]
        error = np.inf
        for i_o, orientation in enumerate(orientations):
            eigvec_moving_o = np.array(eigvec_moving)
            eigvec_moving_o[:, 0] *= orientation[0]
            eigvec_moving_o[:, 1] *= orientation[1]

            # get right-handed coordinate system
            cross = np.cross(eigvec_moving_o[:, 0], eigvec_moving_o[:, 1])
            eigvec_moving_o[:, 2] = cross

            # transformation to align fixed with moving eigenbasis
            R = eigvec_moving_o.dot(eigvec_fixed.transpose())
            t = mean_moving - R.dot(mean_fixed)

            ph.print_info(
                "Registration based on PCA eigenvector initialization "
                "%d/%d ... " % (i_o + 1, len(orientations)),
                newline=False)
            reg = pycpd.rigid_registration(
                **{
                    "Y": landmarks_fixed_nda,
                    "X": landmarks_moving_nda,
                    "max_iterations": 100,
                    "R": R,
                    "t": t,
                })
            reg.register()
            params = reg.get_registration_parameters()
            scale, R, t = params
            error_o = reg.err
            print("done. Error: %.2f" % error_o)
            if error_o < error:
                error = error_o
                rotation_matrix_nda = np.array(R)
                translation_nda = np.array(t)
                ph.print_info("Currently best estimate")

    else:
        reg = pycpd.rigid_registration(
            **{
                "Y": landmarks_fixed_nda,
                "X": landmarks_moving_nda,
                "max_iterations": 100,
            })
        if args.verbose:
            fig = plt.figure()
            ax = fig.add_subplot(111, projection='3d')
            callback = partial(visualize, ax=ax)
        else:
            callback = None
        ph.print_info("Registration ... ", newline=False)
        reg.register(callback)
        if args.verbose:
            plt.show(block=False)
        # reg.register()
        scale, R, t = reg.get_registration_parameters()
        rotation_matrix_nda = R
        translation_nda = t
        print("done. Error: %.2f" % reg.err)

    rigid_transform_sitk = sitk.Euler3DTransform()
    rigid_transform_sitk.SetMatrix(rotation_matrix_nda.flatten())
    rigid_transform_sitk.SetTranslation(translation_nda)

    dw.DataWriter.write_transform(rigid_transform_sitk,
                                  args.output,
                                  verbose=True)

    elapsed_time_total = ph.stop_timing(time_start)
    ph.print_info("Computational Time: %s" % elapsed_time_total)

    return 0
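# NOTE: The script above uses coherent point drift (pycpd), which does
# not require known point correspondences. When landmarks are paired,
# the rigid transform also has a closed-form solution (Kabsch/Umeyama);
# a minimal sketch for comparison:
import numpy as np

def kabsch(fixed, moving):
    # Returns R, t such that moving ~= R @ fixed_point + t for paired
    # (n, 3) landmark arrays
    mu_f, mu_m = fixed.mean(axis=0), moving.mean(axis=0)
    H = (fixed - mu_f).T @ (moving - mu_m)
    U, _, Vt = np.linalg.svd(H)
    d = np.sign(np.linalg.det(Vt.T @ U.T))  # guard against reflections
    R = Vt.T @ np.diag([1.0, 1.0, d]) @ U.T
    t = mu_m - R @ mu_f
    return R, t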
Example #18
    def _run(self):

        ph.print_title("Slice-to-Volume Registration")

        self._registration_method.set_moving(self._reference)

        for i, stack in enumerate(self._stacks):
            slices = stack.get_slices()

            transforms_sitk = {}

            for j, slice_j in enumerate(slices):

                txt = "%sSlice-to-Volume Registration -- " \
                    "Stack %d/%d (%s) -- Slice %d/%d" % (
                        self._print_prefix,
                        i + 1, len(self._stacks), stack.get_filename(),
                        j + 1, len(slices))
                if self._verbose:
                    ph.print_subtitle(txt)
                else:
                    ph.print_info(txt)

                self._registration_method.set_fixed(slice_j)
                self._registration_method.run()

                # Store information on registration transform
                transform_sitk = \
                    self._registration_method.get_registration_transform_sitk()
                transforms_sitk[slice_j.get_slice_number()] = transform_sitk

            # Avoid slice misregistrations
            if self._s2v_smoothing is not None:
                # import os
                # for slice_number in transforms_sitk.keys():
                #     path_to_file = os.path.join(
                #         "/tmp/fetal_brain", "%s_slice%d.tfm" % (
                #             stack.get_filename(), slice_number))
                #     sitk.WriteTransform(
                #         transforms_sitk[slice_number], path_to_file)
                ph.print_subtitle("Robust slice motion estimation "
                                  "(GP smoothing = %g, interleave = %d)" %
                                  (self._s2v_smoothing, self._interleave))
                robust_motion_estimator = rme.RobustMotionEstimator(
                    transforms_sitk=transforms_sitk,
                    interleave=self._interleave)
                robust_motion_estimator.run(self._s2v_smoothing)
                transforms_sitk = \
                    robust_motion_estimator.get_robust_transforms_sitk()

                # Update position of slice
                for slice in slices:
                    slice_number = slice.get_slice_number()
                    slice.update_motion_correction(
                        transforms_sitk[slice_number])

                # Run s2v-reg again
                for j, slice_j in enumerate(slices):
                    txt = "%sSlice-to-Volume Registration -- " \
                        "Stack %d/%d -- Slice %d/%d (after GP init)" % (
                            self._print_prefix,
                            i + 1, len(self._stacks),
                            j + 1, len(slices))
                    if self._verbose:
                        ph.print_subtitle(txt)
                    else:
                        ph.print_info(txt)

                    self._registration_method.set_fixed(slice_j)
                    self._registration_method.run()

                    # Store information on registration transform
                    transform_sitk = \
                        self._registration_method.get_registration_transform_sitk()
                    transforms_sitk[
                        slice_j.get_slice_number()] = transform_sitk

                # Export figures
                # title = "%s_Stack%d%s" % (
                #     self._print_prefix, i, stack.get_filename())
                # title = ph.replace_string_for_print(title)
                # robust_motion_estimator.show_estimated_transform_parameters(
                #     dir_output="/tmp/fetal_brain/figs", title=title)

            # dir_output = "/tmp/fetal/figs"
            # motion_evaluator = me.MotionEvaluator(transforms_sitk)
            # motion_evaluator.run()
            # motion_evaluator.display(dir_output=dir_output, title=title)
            # motion_evaluator.show(dir_output=dir_output, title=title)

            # Update position of slice
            for slice in slices:
                slice_number = slice.get_slice_number()
                slice.update_motion_correction(transforms_sitk[slice_number])
Example #19
def main():

    time_start = ph.start_timing()

    flag_individual_cases_only = 1

    flag_batch_script = 0
    batch_ctr = [32]

    flag_correct_bias_field = 0
    # flag_correct_intensities = 0

    flag_collect_segmentations = 0
    flag_select_images_segmentations = 0

    flag_reconstruct_volume_subject_space = 0
    flag_reconstruct_volume_subject_space_irtk = 0
    flag_reconstruct_volume_subject_space_show_comparison = 0
    flag_register_to_template = 0
    flag_register_to_template_irtk = 0
    flag_show_srr_template_space = 0
    flag_reconstruct_volume_template_space = 0
    flag_collect_volumetric_reconstruction_results = 0
    flag_show_volumetric_reconstruction_results = 0

    flag_rsync_stuff = 0

    # Analysis
    flag_remove_failed_cases_for_analysis = 1
    flag_postop = 2  # 0... preop, 1...postop, 2... pre+postop

    flag_evaluate_image_similarities = 0
    flag_analyse_image_similarities = 1

    flag_evaluate_slice_residual_similarities = 0
    flag_analyse_slice_residual_similarities = 0

    flag_analyse_stacks = 0
    flag_analyse_qualitative_assessment = 0

    flag_collect_data_blinded_analysis = 0
    flag_anonymize_data_blinded_analysis = 0

    provide_comparison = 0
    intensity_correction = 1
    isotropic_resolution = 0.75
    alpha = 0.02
    outlier_rejection = 1
    threshold = 0.7
    threshold_first = 0.6

    # metric = "ANTSNeighborhoodCorrelation"
    # metric_radius = 5
    # multiresolution = 0

    prefix_srr = "srr_"
    prefix_srr_qa = "masked_"

    # ----------------------------------Set Up---------------------------------
    if flag_correct_bias_field:
        dir_batch = os.path.join(utils.DIR_BATCH_ROOT, "BiasFieldCorrection")
    elif flag_reconstruct_volume_subject_space:
        dir_batch = os.path.join(utils.DIR_BATCH_ROOT,
                                 "VolumetricReconstructionSubjectSpace")
    elif flag_register_to_template:
        dir_batch = os.path.join(utils.DIR_BATCH_ROOT,
                                 "VolumetricReconstructionRegisterToTemplate")
    elif flag_reconstruct_volume_template_space:
        dir_batch = os.path.join(utils.DIR_BATCH_ROOT,
                                 "VolumetricReconstructionTemplateSpace")
    else:
        dir_batch = os.path.join(utils.DIR_BATCH_ROOT, "foo")
    file_prefix_batch = os.path.join(dir_batch, "command")

    if flag_batch_script:
        verbose = 0
    else:
        verbose = 1

    data_reader = dr.ExcelSheetDataReader(utils.EXCEL_FILE)
    data_reader.read_data()
    cases = data_reader.get_data()

    if flag_analyse_qualitative_assessment:
        data_reader = dr.ExcelSheetQualitativeAssessmentReader(utils.QA_FILE)
        data_reader.read_data()
        qualitative_assessment = data_reader.get_data()

        statistical_evaluation = se.StatisticalEvaluation(
            qualitative_assessment)
        statistical_evaluation.run_tests(ref="seg_manual")
        ph.exit()

    cases_similarities = []
    cases_stacks = []

    if flag_individual_cases_only:
        N_cases = len(INDIVIDUAL_CASE_IDS)
    else:
        N_cases = len(cases.keys())

    i_case = 0
    for case_id in sorted(cases.keys()):
        if flag_individual_cases_only and case_id not in INDIVIDUAL_CASE_IDS:
            continue
        if not flag_analyse_image_similarities and \
                not flag_analyse_slice_residual_similarities:
            i_case += 1
            ph.print_title("%d/%d: %s" % (i_case, N_cases, case_id))

        if flag_rsync_stuff:
            dir_output = utils.get_directory_case_recon_seg_mode(
                case_id=case_id, recon_space="template_space", seg_mode="")

            dir_input = re.sub("Volumes/spina/",
                               "Volumes/medic-volumetric_res/SpinaBifida/",
                               dir_output)
            cmd = "rsync -avuhn --exclude 'motion_correction' %sseg_manual %s" % (
                dir_input, dir_output)
            ph.print_execution(cmd)
            # ph.execute_command(cmd)

        # -------------------------Correct Bias Field--------------------------
        if flag_correct_bias_field:
            filenames = utils.get_filenames_preprocessing_bias_field(case_id)
            paths_to_filenames = [
                os.path.join(utils.get_directory_case_original(case_id), f)
                for f in filenames
            ]
            dir_output = utils.get_directory_case_preprocessing(
                case_id, stage="01_N4ITK")

            # no image found matching the pattern
            if len(paths_to_filenames) == 0:
                continue

            cmd_args = []
            cmd_args.append("--filenames %s" % " ".join(paths_to_filenames))
            cmd_args.append("--dir-output %s" % dir_output)
            cmd_args.append("--prefix-output ''")
            cmd = "niftymic_correct_bias_field %s" % (" ").join(cmd_args)

            ph.execute_command(cmd,
                               flag_print_to_file=flag_batch_script,
                               path_to_file="%s%d.txt" %
                               (file_prefix_batch, ph.add_one(batch_ctr)))

        # # Skip case in case segmentations have not been provided yet
        # if not ph.directory_exists(utils.get_directory_case_segmentation(
        #         case_id, utils.SEGMENTATION_INIT, SEG_MODES[0])):
        #     continue

        # ------------------------Collect Segmentations------------------------
        if flag_collect_segmentations:
            # Skip case in case segmentations have been collected already
            if ph.directory_exists(
                    utils.get_directory_case_segmentation(
                        case_id, utils.SEGMENTATION_SELECTED, SEG_MODES[0])):
                ph.print_info("skipped")
                continue

            filenames = utils.get_segmented_image_filenames(
                case_id, subfolder=utils.SEGMENTATION_INIT)

            for i_seg_mode, seg_mode in enumerate(SEG_MODES):
                directory_selected = utils.get_directory_case_segmentation(
                    case_id, utils.SEGMENTATION_SELECTED, seg_mode)
                ph.create_directory(directory_selected)
                paths_to_filenames_init = [
                    os.path.join(
                        utils.get_directory_case_segmentation(
                            case_id, utils.SEGMENTATION_INIT, seg_mode), f)
                    for f in filenames
                ]
                paths_to_filenames_selected = [
                    os.path.join(directory_selected, f) for f in filenames
                ]
                for i in range(len(filenames)):
                    cmd = "cp -p %s %s" % (paths_to_filenames_init[i],
                                           paths_to_filenames_selected[i])
                    # ph.print_execution(cmd)
                    ph.execute_command(cmd)

        if flag_select_images_segmentations:
            filenames = utils.get_segmented_image_filenames(
                case_id, subfolder=utils.SEGMENTATION_SELECTED)
            paths_to_filenames = [
                os.path.join(
                    utils.get_directory_case_preprocessing(case_id,
                                                           stage="01_N4ITK"),
                    f) for f in filenames
            ]
            paths_to_filenames_masks = [
                os.path.join(
                    utils.get_directory_case_segmentation(
                        case_id, utils.SEGMENTATION_SELECTED, "seg_manual"), f)
                for f in filenames
            ]
            for i in range(len(filenames)):
                ph.show_niftis(
                    [paths_to_filenames[i]],
                    segmentation=paths_to_filenames_masks[i],
                    # viewer="fsleyes",
                )
                ph.pause()
                ph.killall_itksnap()

        # # -------------------------Correct Intensities-----------------------
        # if flag_correct_intensities:
        #     filenames = utils.get_segmented_image_filenames(case_id)
        #     paths_to_filenames_bias = [os.path.join(
        #         utils.get_directory_case_preprocessing(
        #             case_id, stage="01_N4ITK"), f) for f in filenames]
        #     print paths_to_filenames_bias

        # -----------------Reconstruct Volume in Subject Space-----------------
        if flag_reconstruct_volume_subject_space:

            filenames = utils.get_segmented_image_filenames(
                case_id, subfolder=utils.SEGMENTATION_SELECTED)
            # filenames = filenames[0:2]

            paths_to_filenames = [
                os.path.join(
                    utils.get_directory_case_preprocessing(case_id,
                                                           stage="01_N4ITK"),
                    f) for f in filenames
            ]

            # Estimate target stack
            target_stack_index = utils.get_target_stack_index(
                case_id, utils.SEGMENTATION_SELECTED, "seg_auto", filenames)

            for i, seg_mode in enumerate(SEG_MODES):
                # Get mask filenames
                paths_to_filenames_masks = [
                    os.path.join(
                        utils.get_directory_case_segmentation(
                            case_id, utils.SEGMENTATION_SELECTED, seg_mode), f)
                    for f in filenames
                ]

                if flag_reconstruct_volume_subject_space_irtk:
                    if seg_mode != "seg_manual":
                        continue
                    utils.export_irtk_call_to_workstation(
                        case_id=case_id,
                        filenames=filenames,
                        seg_mode=seg_mode,
                        isotropic_resolution=isotropic_resolution,
                        target_stack_index=target_stack_index,
                        kernel_mask_dilation=(15, 15, 4))

                else:
                    dir_output = utils.get_directory_case_recon_seg_mode(
                        case_id=case_id,
                        recon_space="subject_space",
                        seg_mode=seg_mode)
                    # dir_output = "/tmp/foo"

                    cmd_args = []
                    cmd_args.append("--filenames %s" %
                                    " ".join(paths_to_filenames))
                    cmd_args.append("--filenames-masks %s" %
                                    " ".join(paths_to_filenames_masks))
                    cmd_args.append("--dir-output %s" % dir_output)
                    cmd_args.append("--use-masks-srr 0")
                    cmd_args.append("--isotropic-resolution %f" %
                                    isotropic_resolution)
                    cmd_args.append("--target-stack-index %d" %
                                    target_stack_index)
                    cmd_args.append("--intensity-correction %d" %
                                    intensity_correction)
                    cmd_args.append("--outlier-rejection %d" %
                                    outlier_rejection)
                    cmd_args.append("--threshold-first %f" % threshold_first)
                    cmd_args.append("--threshold %f" % threshold)
                    # cmd_args.append("--metric %s" % metric)
                    # cmd_args.append("--multiresolution %d" % multiresolution)
                    # cmd_args.append("--metric-radius %s" % metric_radius)
                    # if i > 0:
                    #     cmd_args.append("--reconstruction-space %s" % (
                    #         utils.get_path_to_recon(
                    #             utils.get_directory_case_recon_seg_mode(
                    #                 case_id, "seg_manual"))))
                    # cmd_args.append("--two-step-cycles 0")
                    cmd_args.append("--verbose %d" % verbose)
                    cmd_args.append("--provide-comparison %d" %
                                    provide_comparison)
                    # cmd_args.append("--iter-max 1")

                    cmd = "niftymic_reconstruct_volume %s" % (
                        " ").join(cmd_args)

                    ph.execute_command(
                        cmd,
                        flag_print_to_file=flag_batch_script,
                        path_to_file="%s%d.txt" %
                        (file_prefix_batch, ph.add_one(batch_ctr)))

        if flag_reconstruct_volume_subject_space_show_comparison:
            recon_paths = []
            for seg_mode in SEG_MODES:
                path_to_recon = utils.get_path_to_recon(
                    utils.get_directory_case_recon_seg_mode(
                        case_id=case_id,
                        recon_space="subject_space",
                        seg_mode=seg_mode))
                recon_paths.append(path_to_recon)
            recon_path_irtk = os.path.join(
                utils.get_directory_case_recon_seg_mode(
                    case_id=case_id,
                    recon_space="subject_space",
                    seg_mode="IRTK"), "IRTK_SRR.nii.gz")
            show_modes = list(SEG_MODES)
            if ph.file_exists(recon_path_irtk):
                recon_paths.append(recon_path_irtk)
                show_modes.append("irtk")
            ph.show_niftis(recon_paths)
            ph.print_info("Sequence: %s" % (" -- ").join(show_modes))
            ph.pause()
            ph.killall_itksnap()

        # -------------------------Register to template------------------------
        if flag_register_to_template:
            for seg_mode in SEG_MODES:

                cmd_args = []
                # register seg_auto-recon to template space
                if seg_mode == "seg_auto":

                    path_to_recon = utils.get_path_to_recon(
                        utils.get_directory_case_recon_seg_mode(
                            case_id=case_id,
                            recon_space="subject_space",
                            seg_mode=seg_mode))

                    template_stack_estimator = \
                        tse.TemplateStackEstimator.from_mask(
                            ph.append_to_filename(path_to_recon, "_mask"))
                    path_to_reference = \
                        template_stack_estimator.get_path_to_template()

                    dir_input_motion_correction = os.path.join(
                        utils.get_directory_case_recon_seg_mode(
                            case_id=case_id,
                            recon_space="subject_space",
                            seg_mode=seg_mode), "motion_correction")

                    dir_output = utils.get_directory_case_recon_seg_mode(
                        case_id=case_id,
                        recon_space="template_space",
                        seg_mode=seg_mode)
                    # dir_output = "/home/mebner/tmp"
                    # # ------- DELETE -----
                    # dir_output = re.sub("data", "foo+1", dir_output)
                    # dir_output = re.sub(
                    #     "volumetric_reconstruction/20180126/template_space/seg_auto",
                    #     "", dir_output)
                    # # -------
                    # cmd_args.append("--use-fixed-mask 1")
                    cmd_args.append("--use-moving-mask 1")

                    # HACK
                    path_to_initial_transform = os.path.join(
                        utils.DIR_INPUT_ROOT_DATA, case_id,
                        "volumetric_reconstruction", "20180126",
                        "template_space", "seg_manual",
                        "registration_transform_sitk.txt")
                    cmd_args.append("--initial-transform %s" %
                                    path_to_initial_transform)
                    cmd_args.append("--use-flirt 0")
                    cmd_args.append("--use-regaladin 1")
                    cmd_args.append("--test-ap-flip 0")

                # register remaining recons to registered seg_auto-recon
                else:
                    path_to_reference = utils.get_path_to_recon(
                        utils.get_directory_case_recon_seg_mode(
                            case_id=case_id,
                            recon_space="template_space",
                            seg_mode="seg_auto"),
                        suffix="ResamplingToTemplateSpace",
                    )
                    path_to_initial_transform = os.path.join(
                        utils.get_directory_case_recon_seg_mode(
                            case_id=case_id,
                            recon_space="template_space",
                            seg_mode="seg_auto"),
                        "registration_transform_sitk.txt")

                    path_to_recon = utils.get_path_to_recon(
                        utils.get_directory_case_recon_seg_mode(
                            case_id=case_id,
                            recon_space="subject_space",
                            seg_mode=seg_mode))
                    dir_input_motion_correction = os.path.join(
                        utils.get_directory_case_recon_seg_mode(
                            case_id=case_id,
                            recon_space="subject_space",
                            seg_mode=seg_mode), "motion_correction")
                    dir_output = utils.get_directory_case_recon_seg_mode(
                        case_id=case_id,
                        recon_space="template_space",
                        seg_mode=seg_mode)

                    cmd_args.append("--use-fixed-mask 0")
                    cmd_args.append("--use-moving-mask 0")
                    cmd_args.append("--initial-transform %s" %
                                    path_to_initial_transform)
                    cmd_args.append("--use-flirt 0")
                    cmd_args.append("--use-regaladin 1")
                    cmd_args.append("--test-ap-flip 0")

                cmd_args.append("--moving %s" % path_to_recon)
                cmd_args.append("--fixed %s" % path_to_reference)
                cmd_args.append("--dir-input %s" % dir_input_motion_correction)
                cmd_args.append("--dir-output %s" % dir_output)
                cmd_args.append("--write-transform 1")
                cmd_args.append("--verbose %d" % verbose)
                cmd = "niftymic_register_image %s" % (" ").join(cmd_args)

                ph.execute_command(cmd,
                                   flag_print_to_file=flag_batch_script,
                                   path_to_file="%s%d.txt" %
                                   (file_prefix_batch, ph.add_one(batch_ctr)))

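        # ------------Register IRTK Reconstruction to Template Space-----------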
        if flag_register_to_template_irtk:
            dir_input = utils.get_directory_case_recon_seg_mode(
                case_id=case_id, recon_space="subject_space", seg_mode="IRTK")
            dir_output = utils.get_directory_case_recon_seg_mode(
                case_id=case_id, recon_space="template_space", seg_mode="IRTK")
            path_to_recon = os.path.join(dir_input, "IRTK_SRR.nii.gz")
            path_to_reference = utils.get_path_to_recon(
                utils.get_directory_case_recon_seg_mode(
                    case_id=case_id,
                    recon_space="template_space",
                    seg_mode="seg_manual"),
                suffix="ResamplingToTemplateSpace",
            )
            path_to_initial_transform = os.path.join(
                utils.get_directory_case_recon_seg_mode(
                    case_id=case_id,
                    recon_space="template_space",
                    seg_mode="seg_manual"), "registration_transform_sitk.txt")

            cmd_args = []
            cmd_args.append("--fixed %s" % path_to_reference)
            cmd_args.append("--moving %s" % path_to_recon)
            cmd_args.append("--initial-transform %s" %
                            path_to_initial_transform)
            cmd_args.append("--use-fixed-mask 0")
            cmd_args.append("--use-moving-mask 0")
            cmd_args.append("--use-flirt 0")
            cmd_args.append("--use-regaladin 1")
            cmd_args.append("--test-ap-flip 0")
            cmd_args.append("--dir-output %s" % dir_output)
            cmd_args.append("--verbose %d" % verbose)
            cmd = "niftymic_register_image %s" % (" ").join(cmd_args)
            ph.execute_command(cmd)

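        # -----------------------Show SRRs in Template Space-------------------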
        if flag_show_srr_template_space:
            recon_paths = []
            show_modes = list(SEG_MODES)
            # show_modes.append("IRTK")
            for seg_mode in show_modes:
                dir_input = utils.get_directory_case_recon_seg_mode(
                    case_id=case_id,
                    recon_space="template_space",
                    seg_mode=seg_mode)
                # # ------- DELETE -----
                # dir_input = re.sub("data", "foo+1", dir_input)
                # dir_input = re.sub(
                #     "volumetric_reconstruction/20180126/template_space/seg_auto",
                #     "", dir_input)
                # # -------
                path_to_recon_space = utils.get_path_to_recon(
                    dir_input,
                    suffix="ResamplingToTemplateSpace",
                )
                recon_paths.append(path_to_recon_space)
            ph.show_niftis(recon_paths)
            ph.print_info("Sequence: %s" % (" -- ").join(show_modes))
            ph.pause()
            ph.killall_itksnap()

        # -----------------Reconstruct Volume in Template Space----------------
        if flag_reconstruct_volume_template_space:
            for seg_mode in SEG_MODES:
                path_to_recon_space = utils.get_path_to_recon(
                    utils.get_directory_case_recon_seg_mode(
                        case_id=case_id,
                        recon_space="template_space",
                        seg_mode=seg_mode),
                    suffix="ResamplingToTemplateSpace",
                )
                dir_input = os.path.join(
                    utils.get_directory_case_recon_seg_mode(
                        case_id=case_id,
                        recon_space="template_space",
                        seg_mode=seg_mode), "motion_correction")
                dir_output = utils.get_directory_case_recon_seg_mode(
                    case_id=case_id,
                    recon_space="template_space",
                    seg_mode=seg_mode)
                # dir_output = os.path.join("/tmp/spina/template_space/%s-%s" % (
                #     case_id, seg_mode))

                cmd_args = []
                cmd_args.append("--dir-input %s" % dir_input)
                cmd_args.append("--dir-output %s" % dir_output)
                cmd_args.append("--reconstruction-space %s" %
                                path_to_recon_space)
                cmd_args.append("--alpha %s" % alpha)
                cmd_args.append("--verbose %s" % verbose)
                cmd_args.append("--use-masks-srr 0")

                # cmd_args.append("--minimizer L-BFGS-B")
                # cmd_args.append("--alpha 0.006")
                # cmd_args.append("--reconstruction-type HuberL2")
                # cmd_args.append("--data-loss arctan")
                # cmd_args.append("--iterations 5")
                # cmd_args.append("--data-loss-scale 0.7")

                cmd = "niftymic_reconstruct_volume_from_slices %s" % \
                    (" ").join(cmd_args)
                ph.execute_command(cmd,
                                   flag_print_to_file=flag_batch_script,
                                   path_to_file="%s%d.txt" %
                                   (file_prefix_batch, ph.add_one(batch_ctr)))

        # ----------------Collect SRR results in Template Space----------------
        if flag_collect_volumetric_reconstruction_results:
            directory = utils.get_directory_case_recon_summary(case_id)
            ph.create_directory(directory)

            # clear potentially existing files
            cmd = "rm -f %s/*.nii.gz" % (directory)
            ph.execute_command(cmd)

            # Collect SRRs
            for seg_mode in SEG_MODES:
                path_to_recon_src = utils.get_path_to_recon(
                    utils.get_directory_case_recon_seg_mode(
                        case_id=case_id,
                        recon_space="template_space",
                        seg_mode=seg_mode), )
                path_to_recon = os.path.join(
                    directory, "%s%s.nii.gz" % (prefix_srr, seg_mode))

                cmd = "cp -p %s %s" % (path_to_recon_src, path_to_recon)
                ph.execute_command(cmd)

            # Collect IRTK recon
            path_to_recon_src = os.path.join(
                utils.get_directory_case_recon_seg_mode(
                    case_id=case_id,
                    recon_space="template_space",
                    seg_mode="IRTK"),
                "IRTK_SRR_LinearResamplingToTemplateSpace.nii.gz")

            path_to_recon = os.path.join(directory,
                                         "%s%s.nii.gz" % (prefix_srr, "irtk"))

            cmd = "cp -p %s %s" % (path_to_recon_src, path_to_recon)
            ph.execute_command(cmd)

            # Collect evaluation mask
            path_to_recon = utils.get_path_to_recon(
                utils.get_directory_case_recon_seg_mode(
                    case_id=case_id,
                    recon_space="subject_space",
                    seg_mode="seg_auto"))

            template_stack_estimator = \
                tse.TemplateStackEstimator.from_mask(
                    ph.append_to_filename(path_to_recon, "_mask"))
            path_to_template = \
                template_stack_estimator.get_path_to_template()
            path_to_template_mask_src = ph.append_to_filename(
                path_to_template, "_mask_dil")
            path_to_template_mask = "%s/" % directory

            cmd = "cp -p %s %s" % (path_to_template_mask_src,
                                   path_to_template_mask)
            ph.execute_command(cmd)

        if flag_show_volumetric_reconstruction_results:
            dir_output = utils.get_directory_case_recon_summary(case_id)
            paths_to_recons = []
            for seg_mode in RECON_MODES:
                path_to_recon = os.path.join(
                    dir_output, "%s%s.nii.gz" % (prefix_srr, seg_mode))
                paths_to_recons.append(path_to_recon)
            path_to_mask = "%s/STA*.nii.gz" % dir_output
            cmd = ph.show_niftis(paths_to_recons, segmentation=path_to_mask)
            sitkh.write_executable_file([cmd], dir_output=dir_output)
            ph.pause()
            ph.killall_itksnap()

        # ---------------------Evaluate Image Similarities---------------------
        if flag_evaluate_image_similarities:
            dir_input = utils.get_directory_case_recon_summary(case_id)
            dir_output = utils.get_directory_case_recon_similarities(case_id)
            paths_to_recons = []
            for seg_mode in ["seg_auto", "detect", "irtk"]:
                path_to_recon = os.path.join(
                    dir_input, "%s%s.nii.gz" % (prefix_srr, seg_mode))
                paths_to_recons.append(path_to_recon)
            path_to_reference = os.path.join(
                dir_input, "%s%s.nii.gz" % (prefix_srr, "seg_manual"))
            path_to_reference_mask = utils.get_path_to_mask(dir_input)

            cmd_args = []
            cmd_args.append("--filenames %s" % " ".join(paths_to_recons))
            cmd_args.append("--reference %s" % path_to_reference)
            cmd_args.append("--reference-mask %s" % path_to_reference_mask)
            # cmd_args.append("--verbose 1")
            cmd_args.append("--dir-output %s" % dir_output)

            exe = re.sub("pyc", "py",
                         os.path.abspath(evaluate_image_similarity.__file__))
            cmd_args.insert(0, exe)

            # clear potentially existing files
            cmd = "rm -f %s/*.txt" % (dir_output)
            ph.execute_command(cmd)

            cmd = "python %s" % " ".join(cmd_args)
            ph.execute_command(cmd)

        # -----------------Evaluate Slice Residual Similarities----------------
        if flag_evaluate_slice_residual_similarities:

            path_to_reference_mask = utils.get_path_to_mask(
                utils.get_directory_case_recon_summary(case_id))

            dir_output_root = \
                utils.get_directory_case_slice_residual_similarities(case_id)

            # clear potentially existing files
            # cmd = "rm -f %s/*.txt" % (dir_output_root)
            # ph.execute_command(cmd)

            for seg_mode in SEG_MODES:
                dir_input = os.path.join(
                    utils.get_directory_case_recon_seg_mode(
                        case_id=case_id,
                        recon_space="template_space",
                        seg_mode=seg_mode,
                    ), "motion_correction")
                path_to_reference = os.path.join(
                    utils.get_directory_case_recon_summary(case_id),
                    "%s%s.nii.gz" % (prefix_srr, seg_mode))
                dir_output = os.path.join(dir_output_root, seg_mode)

                cmd_args = []
                cmd_args.append("--dir-input %s" % dir_input)
                cmd_args.append("--reference %s" % path_to_reference)
                cmd_args.append("--reference-mask %s" % path_to_reference_mask)
                cmd_args.append("--use-reference-mask 1")
                cmd_args.append("--use-slice-masks 0")
                # cmd_args.append("--verbose 1")
                cmd_args.append("--dir-output %s" % dir_output)

                exe = re.sub("pyc", "py", os.path.abspath(esrs.__file__))
                cmd_args.insert(0, exe)

                cmd = "python %s" % " ".join(cmd_args)
                ph.execute_command(cmd)

        # -----------------Collect Data for Blinded Analysis-------------------
        if flag_collect_data_blinded_analysis:
            if flag_remove_failed_cases_for_analysis and case_id in RECON_FAILED_CASE_IDS:
                continue

            dir_input = utils.get_directory_case_recon_summary(case_id)
            # pattern = "STA([0-9]+)[_]mask.nii.gz"
            pattern = "STA([0-9]+)[_]mask_dil.nii.gz"
            p = re.compile(pattern)
            gw = [
                p.match(f).group(1) for f in os.listdir(dir_input)
                if p.match(f)
            ][0]

            dir_output = os.path.join(
                utils.get_directory_blinded_analysis(case_id, "open"), case_id)

            exe = re.sub("pyc", "py", os.path.abspath(mswm.__file__))

            recons = []

            for seg_mode in RECON_MODES:
                path_to_recon = os.path.join(
                    dir_input, "%s%s.nii.gz" % (prefix_srr, seg_mode))

                cmd_args = []
                cmd_args.append("--filename %s" % path_to_recon)
                cmd_args.append("--gestational-age %s" % gw)
                cmd_args.append("--dir-output %s" % dir_output)
                cmd_args.append("--prefix-output %s" % prefix_srr_qa)
                cmd_args.append("--verbose 0")
                cmd_args.insert(0, exe)

                cmd = "python %s" % " ".join(cmd_args)
                # ph.execute_command(cmd)

                recon = "%s%s" % (prefix_srr_qa,
                                  os.path.basename(path_to_recon))
                recons.append(recon)
            ph.write_show_niftis_exe(recons, dir_output)

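        # ----------------Anonymize Data for Blinded Analysis------------------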
        if flag_anonymize_data_blinded_analysis:
            dir_input = os.path.join(
                utils.get_directory_blinded_analysis(case_id, "open"), case_id)
            dir_output_dictionaries = utils.get_directory_anonymized_dictionares(
                case_id)
            dir_output_anonymized_images = os.path.join(
                utils.get_directory_blinded_analysis(case_id, "anonymized"),
                case_id)

            if not ph.directory_exists(dir_input):
                continue
            ph.create_directory(dir_output_dictionaries)
            ph.create_directory(dir_output_anonymized_images)

            data_anonymizer = da.DataAnonymizer()
            # Create random dictionary (only required once)
            # data_anonymizer.set_prefix_identifiers("%s_" % case_id)
            # data_anonymizer.read_nifti_filenames_from_directory(dir_input)
            # data_anonymizer.generate_identifiers()
            # data_anonymizer.generate_randomized_dictionary()
            # data_anonymizer.write_dictionary(
            #     dir_output_dictionaries, "dictionary_%s" % case_id)

            # Read dictionary
            data_anonymizer.read_dictionary(dir_output_dictionaries,
                                            "dictionary_%s" % case_id)

            # Anonymize files (flip to True to re-run the anonymization)
            if False:
                ph.clear_directory(dir_output_anonymized_images)
                data_anonymizer.anonymize_files(dir_input,
                                                dir_output_anonymized_images)

                # Write executable script
                filenames = [
                    "%s.nii.gz" % f
                    for f in sorted(data_anonymizer.get_identifiers())
                ]
                ph.write_show_niftis_exe(filenames,
                                         dir_output_anonymized_images)

            # Reveal anonymized files
            if True:
                filenames = data_anonymizer.reveal_anonymized_files(
                    dir_output_anonymized_images)
                filenames = sorted(["%s" % f for f in filenames])
                ph.write_show_niftis_exe(filenames,
                                         dir_output_anonymized_images)

            # Reveal additional, original files
            # data_anonymizer.reveal_original_files(dir_output)

            # relative_directory = re.sub(
            #     utils.get_directory_blinded_analysis(case_id, "anonymized"),
            #     ".",
            #     dir_output_anonymized_images)
            # paths_to_filenames = [os.path.join(
            #     relative_directory, f) for f in filenames]

        # ---------------------Analyse Image Similarities---------------------
        if flag_analyse_image_similarities or \
                flag_analyse_slice_residual_similarities or \
                flag_analyse_stacks:
            if flag_remove_failed_cases_for_analysis:
                if case_id in RECON_FAILED_CASE_IDS:
                    continue
            if cases[case_id]['postrep'] == flag_postop or flag_postop == 2:
                cases_similarities.append(case_id)
                cases_stacks.append(
                    utils.get_segmented_image_filenames(
                        case_id,
                        # subfolder=utils.SEGMENTATION_INIT,
                        subfolder=utils.SEGMENTATION_SELECTED,
                    ))

        dir_output_analysis = os.path.join(
            # "/Users/mebner/UCL/UCL/Publications",
            "/home/mebner/Dropbox/UCL/Publications",
            "2018_MICCAI/brain_reconstruction_paper")

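    # The analyses below run once, across all cases, after the per-case loop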
    if flag_analyse_image_similarities:
        dir_inputs = []
        filename = "image_similarities_postop%d.txt" % flag_postop
        for case_id in cases_similarities:
            dir_inputs.append(
                utils.get_directory_case_recon_similarities(case_id))
        cmd_args = []
        cmd_args.append("--dir-inputs %s" % " ".join(dir_inputs))
        cmd_args.append("--dir-output %s" % dir_output_analysis)
        cmd_args.append("--filename %s" % filename)

        exe = re.sub("pyc", "py",
                     os.path.abspath(src.analyse_image_similarities.__file__))
        cmd_args.insert(0, exe)

        cmd = "python %s" % " ".join(cmd_args)
        ph.execute_command(cmd)

    if flag_analyse_slice_residual_similarities:
        dir_inputs = []
        filename = "slice_residuals_postop%d.txt" % flag_postop
        for case_id in cases_similarities:
            dir_inputs.append(
                utils.get_directory_case_slice_residual_similarities(case_id))
        cmd_args = []
        cmd_args.append("--dir-inputs %s" % " ".join(dir_inputs))
        cmd_args.append("--subfolder %s" % " ".join(SEG_MODES))
        cmd_args.append("--dir-output %s" % dir_output_analysis)
        cmd_args.append("--filename %s" % filename)

        exe = re.sub(
            "pyc", "py",
            os.path.abspath(src.analyse_slice_residual_similarities.__file__))
        cmd_args.insert(0, exe)

        cmd = "python %s" % " ".join(cmd_args)
        # print len(cases_similarities)
        # print cases_similarities
        ph.execute_command(cmd)

    if flag_analyse_stacks:
        cases_stacks_N = [len(s) for s in cases_stacks]
        ph.print_subtitle("%d cases -- Number of stacks" % len(cases_stacks))
        ph.print_info("min: %g" % np.min(cases_stacks_N))
        ph.print_info("mean: %g" % np.mean(cases_stacks_N))
        ph.print_info("median: %g" % np.median(cases_stacks_N))
        ph.print_info("max: %g" % np.max(cases_stacks_N))

    elapsed_time = ph.stop_timing(time_start)
    ph.print_title("Summary")
    print("Computational Time for Pipeline: %s" % (elapsed_time))

    return 0
Пример #20
def main():

    time_start = ph.start_timing()

    # Set print options for numpy
    np.set_printoptions(precision=3)

    input_parser = InputArgparser(
        description="Volumetric MRI reconstruction framework to reconstruct "
        "an isotropic, high-resolution 3D volume from multiple stacks of 2D "
        "slices with motion correction. The resolution of the computed "
        "Super-Resolution Reconstruction (SRR) is given by the in-plane "
        "spacing of the selected target stack. A region of interest can be "
        "specified by providing a mask for the selected target stack. Only "
        "this region will then be reconstructed by the SRR algorithm which "
        "can substantially reduce the computational time.",
    )
    input_parser.add_filenames(required=True)
    input_parser.add_filenames_masks()
    input_parser.add_output(required=True)
    input_parser.add_suffix_mask(default="_mask")
    input_parser.add_target_stack(default=None)
    input_parser.add_search_angle(default=45)
    input_parser.add_multiresolution(default=0)
    input_parser.add_shrink_factors(default=[3, 2, 1])
    input_parser.add_smoothing_sigmas(default=[1.5, 1, 0])
    input_parser.add_sigma(default=1)
    input_parser.add_reconstruction_type(default="TK1L2")
    input_parser.add_iterations(default=15)
    input_parser.add_alpha(default=0.015)
    input_parser.add_alpha_first(default=0.2)
    input_parser.add_iter_max(default=10)
    input_parser.add_iter_max_first(default=5)
    input_parser.add_dilation_radius(default=3)
    input_parser.add_extra_frame_target(default=10)
    input_parser.add_bias_field_correction(default=0)
    input_parser.add_intensity_correction(default=1)
    input_parser.add_isotropic_resolution(default=1)
    input_parser.add_log_config(default=1)
    input_parser.add_subfolder_motion_correction()
    input_parser.add_write_motion_correction(default=1)
    input_parser.add_verbose(default=0)
    input_parser.add_two_step_cycles(default=3)
    input_parser.add_use_masks_srr(default=0)
    input_parser.add_boundary_stacks(default=[10, 10, 0])
    input_parser.add_metric(default="Correlation")
    input_parser.add_metric_radius(default=10)
    input_parser.add_reference()
    input_parser.add_reference_mask()
    input_parser.add_outlier_rejection(default=1)
    input_parser.add_threshold_first(default=0.5)
    input_parser.add_threshold(default=0.8)
    input_parser.add_interleave(default=3)
    input_parser.add_slice_thicknesses(default=None)
    input_parser.add_viewer(default="itksnap")
    input_parser.add_v2v_method(default="RegAladin")
    input_parser.add_argument(
        "--v2v-robust", "-v2v-robust",
        action='store_true',
        help="If given, a more robust volume-to-volume registration step is "
        "performed, i.e. four rigid registrations are performed using four "
        "rigid transform initializations based on "
        "principal component alignment of associated masks."
    )
    input_parser.add_argument(
        "--s2v-hierarchical", "-s2v-hierarchical",
        action='store_true',
        help="If given, a hierarchical approach for the first slice-to-volume "
        "registration cycle is used, i.e. sub-packages defined by the "
        "specified interleave (--interleave) are registered until each "
        "slice is registered independently."
    )
    input_parser.add_argument(
        "--sda", "-sda",
        action='store_true',
        help="If given, the volumetric reconstructions are performed using "
        "Scattered Data Approximation (Vercauteren et al., 2006). "
        "'alpha' is considered the final 'sigma' for the "
        "iterative adjustment. "
        "Recommended value is, e.g., --alpha 0.8"
    )
    input_parser.add_option(
        option_string="--transforms-history",
        type=int,
        help="Write entire history of applied slice motion correction "
        "transformations to motion correction output directory",
        default=0,
    )

    args = input_parser.parse_args()
    input_parser.print_arguments(args)

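    # Fixed pipeline settings: slice outliers are measured via NCC;
    # threshold_v2v <= -1 disables the extra outlier-rejection pass before
    # the first volume estimate (see below)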
    rejection_measure = "NCC"
    threshold_v2v = -2  # 0.3
    debug = False

    if args.v2v_method not in V2V_METHOD_OPTIONS:
        raise ValueError("v2v-method must be in {%s}" % (
            ", ".join(V2V_METHOD_OPTIONS)))

    if not any(args.output.endswith(t) for t in ALLOWED_EXTENSIONS):
        raise ValueError(
            "output filename invalid; allowed extensions are: %s" %
            ", ".join(ALLOWED_EXTENSIONS))

    if args.alpha_first < args.alpha and not args.sda:
        raise ValueError("alpha-first must be greater than or equal to alpha")

    if args.threshold_first > args.threshold:
        raise ValueError(
            "threshold-first must be less than or equal to threshold")

    dir_output = os.path.dirname(args.output)
    ph.create_directory(dir_output)

    if args.log_config:
        input_parser.log_config(os.path.abspath(__file__))

    # --------------------------------Read Data--------------------------------
    ph.print_title("Read Data")
    data_reader = dr.MultipleImagesReader(
        file_paths=args.filenames,
        file_paths_masks=args.filenames_masks,
        suffix_mask=args.suffix_mask,
        stacks_slice_thicknesses=args.slice_thicknesses,
    )

    if len(args.boundary_stacks) != 3:
        raise IOError(
            "Provide exactly three values for '--boundary-stacks' to define "
            "cropping in i-, j-, and k-dimension of the input stacks")

    data_reader.read_data()
    stacks = data_reader.get_data()
    ph.print_info("%d input stacks read for further processing" % len(stacks))

    if all(s.is_unity_mask() for s in stacks):
        ph.print_warning("No mask is provided! "
                         "Generated reconstruction space may be very big!")
        ph.print_warning("Consider using a mask to speed up computations")

        # args.extra_frame_target = 0
        # ph.print_warning("Overwritten: extra-frame-target set to 0")

    # Specify target stack for intensity correction and reconstruction space
    if args.target_stack is None:
        target_stack_index = 0
    else:
        try:
            target_stack_index = args.filenames.index(args.target_stack)
        except ValueError:
            raise ValueError(
                "--target-stack must correspond to an image as provided by "
                "--filenames")

    # ---------------------------Data Preprocessing---------------------------
    ph.print_title("Data Preprocessing")

    segmentation_propagator = segprop.SegmentationPropagation(
        # registration_method=regflirt.FLIRT(use_verbose=args.verbose),
        # registration_method=niftyreg.RegAladin(use_verbose=False),
        dilation_radius=args.dilation_radius,
        dilation_kernel="Ball",
    )

    data_preprocessing = dp.DataPreprocessing(
        stacks=stacks,
        segmentation_propagator=segmentation_propagator,
        use_cropping_to_mask=True,
        use_N4BiasFieldCorrector=args.bias_field_correction,
        target_stack_index=target_stack_index,
        boundary_i=args.boundary_stacks[0],
        boundary_j=args.boundary_stacks[1],
        boundary_k=args.boundary_stacks[2],
        unit="mm",
    )
    data_preprocessing.run()
    time_data_preprocessing = data_preprocessing.get_computational_time()

    # Get preprocessed stacks
    stacks = data_preprocessing.get_preprocessed_stacks()

    # Define reference/target stack for registration and reconstruction
    if args.reference is not None:
        reference = st.Stack.from_filename(
            file_path=args.reference,
            file_path_mask=args.reference_mask,
            extract_slices=False)

    else:
        reference = st.Stack.from_stack(stacks[target_stack_index])

    # ------------------------Volume-to-Volume Registration--------------------
    if len(stacks) > 1:

        if args.v2v_method == "FLIRT":
            # Define search angle ranges for FLIRT in all three dimensions
            search_angles = ["-searchr%s -%d %d" %
                             (x, args.search_angle, args.search_angle)
                             for x in ["x", "y", "z"]]
            options = (" ").join(search_angles)
            # options += " -noresample"

            vol_registration = regflirt.FLIRT(
                registration_type="Rigid",
                use_fixed_mask=True,
                use_moving_mask=True,
                options=options,
                use_verbose=False,
            )
        else:
            vol_registration = niftyreg.RegAladin(
                registration_type="Rigid",
                use_fixed_mask=True,
                use_moving_mask=True,
                # options="-ln 2 -voff",
                use_verbose=False,
            )
        v2vreg = pipeline.VolumeToVolumeRegistration(
            stacks=stacks,
            reference=reference,
            registration_method=vol_registration,
            verbose=debug,
            robust=args.v2v_robust,
        )
        v2vreg.run()
        stacks = v2vreg.get_stacks()
        time_registration = v2vreg.get_computational_time()

    else:
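        # Only a single stack given; skip volume-to-volume registration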
        time_registration = ph.get_zero_time()

    # ---------------------------Intensity Correction--------------------------
    if args.intensity_correction:
        ph.print_title("Intensity Correction")
        intensity_corrector = ic.IntensityCorrection()
        intensity_corrector.use_individual_slice_correction(False)
        intensity_corrector.use_reference_mask(True)
        intensity_corrector.use_stack_mask(True)
        intensity_corrector.use_verbose(False)

        for i, stack in enumerate(stacks):
            if i == target_stack_index:
                ph.print_info("Stack %d (%s): Reference image. Skipped." % (
                    i + 1, stack.get_filename()))
                continue
            else:
                ph.print_info("Stack %d (%s): Intensity Correction ... " % (
                    i + 1, stack.get_filename()), newline=False)
            intensity_corrector.set_stack(stack)
            intensity_corrector.set_reference(
                stacks[target_stack_index].get_resampled_stack(
                    resampling_grid=stack.sitk,
                    interpolator="NearestNeighbor",
                ))
            intensity_corrector.run_linear_intensity_correction()
            stacks[i] = intensity_corrector.get_intensity_corrected_stack()
            print("done (c1 = %g) " %
                  intensity_corrector.get_intensity_correction_coefficients())

    # ---------------------------Create first volume---------------------------
    time_tmp = ph.start_timing()

    # Isotropic resampling to define HR target space
    ph.print_title("Reconstruction Space Generation")
    HR_volume = reference.get_isotropically_resampled_stack(
        resolution=args.isotropic_resolution)
    ph.print_info(
        "Isotropic reconstruction space with %g mm resolution is created" %
        HR_volume.sitk.GetSpacing()[0])

    if args.reference is None:
        # Create joint image mask in target space
        joint_image_mask_builder = imb.JointImageMaskBuilder(
            stacks=stacks,
            target=HR_volume,
            dilation_radius=1,
        )
        joint_image_mask_builder.run()
        HR_volume = joint_image_mask_builder.get_stack()
        ph.print_info(
            "Isotropic reconstruction space is centered around "
            "joint stack masks. ")

        # Crop to space defined by mask (plus extra margin)
        HR_volume = HR_volume.get_cropped_stack_based_on_mask(
            boundary_i=args.extra_frame_target,
            boundary_j=args.extra_frame_target,
            boundary_k=args.extra_frame_target,
            unit="mm",
        )

        # Create first volume
        # If outlier rejection is activated, eliminate obvious outliers early
        # from stack and re-run SDA to get initial volume without them
        ph.print_title("First Estimate of HR Volume")
        if args.outlier_rejection and threshold_v2v > -1:
            ph.print_subtitle("SDA Approximation")
            SDA = sda.ScatteredDataApproximation(
                stacks, HR_volume, sigma=args.sigma)
            SDA.run()
            HR_volume = SDA.get_reconstruction()

            # Identify and reject outliers
            ph.print_subtitle("Eliminate slice outliers (%s < %g)" % (
                rejection_measure, threshold_v2v))
            outlier_rejector = outre.OutlierRejector(
                stacks=stacks,
                reference=HR_volume,
                threshold=threshold_v2v,
                measure=rejection_measure,
                verbose=True,
            )
            outlier_rejector.run()
            stacks = outlier_rejector.get_stacks()

        ph.print_subtitle("SDA Approximation Image")
        SDA = sda.ScatteredDataApproximation(
            stacks, HR_volume, sigma=args.sigma)
        SDA.run()
        HR_volume = SDA.get_reconstruction()

        ph.print_subtitle("SDA Approximation Image Mask")
        SDA = sda.ScatteredDataApproximation(
            stacks, HR_volume, sigma=args.sigma, sda_mask=True)
        SDA.run()
        # HR volume contains updated mask based on SDA
        HR_volume = SDA.get_reconstruction()

        HR_volume.set_filename(SDA.get_setting_specific_filename())

    time_reconstruction = ph.stop_timing(time_tmp)

    if args.verbose:
        tmp = list(stacks)
        tmp.insert(0, HR_volume)
        sitkh.show_stacks(tmp, segmentation=HR_volume, viewer=args.viewer)

    # -----------Two-step Slice-to-Volume Registration-Reconstruction----------
    if args.two_step_cycles > 0:

        # Slice-to-volume registration set-up
        if args.metric == "ANTSNeighborhoodCorrelation":
            metric_params = {"radius": args.metric_radius}
        else:
            metric_params = None
        registration = regsitk.SimpleItkRegistration(
            moving=HR_volume,
            use_fixed_mask=True,
            use_moving_mask=True,
            interpolator="Linear",
            metric=args.metric,
            metric_params=metric_params,
            use_multiresolution_framework=args.multiresolution,
            shrink_factors=args.shrink_factors,
            smoothing_sigmas=args.smoothing_sigmas,
            initializer_type="SelfGEOMETRY",
            optimizer="ConjugateGradientLineSearch",
            optimizer_params={
                "learningRate": 1,
                "numberOfIterations": 100,
                "lineSearchUpperLimit": 2,
            },
            scales_estimator="Jacobian",
            use_verbose=debug,
        )

        # Volumetric reconstruction set-up
        if args.sda:
            recon_method = sda.ScatteredDataApproximation(
                stacks,
                HR_volume,
                sigma=args.sigma,
                use_masks=args.use_masks_srr,
            )
            alpha_range = [args.sigma, args.alpha]
        else:
            recon_method = tk.TikhonovSolver(
                stacks=stacks,
                reconstruction=HR_volume,
                reg_type="TK1",
                minimizer="lsmr",
                alpha=args.alpha_first,
                iter_max=min(args.iter_max_first, args.iter_max),
                verbose=True,
                use_masks=args.use_masks_srr,
            )
            alpha_range = [args.alpha_first, args.alpha]

        # Define the regularization parameters for the individual
        # reconstruction steps in the two-step cycles
        alphas = np.linspace(
            alpha_range[0], alpha_range[1], args.two_step_cycles)

        # Define outlier rejection threshold after each S2V-reg step
        thresholds = np.linspace(
            args.threshold_first, args.threshold, args.two_step_cycles)

        two_step_s2v_reg_recon = \
            pipeline.TwoStepSliceToVolumeRegistrationReconstruction(
                stacks=stacks,
                reference=HR_volume,
                registration_method=registration,
                reconstruction_method=recon_method,
                cycles=args.two_step_cycles,
                alphas=alphas[0:args.two_step_cycles - 1],
                outlier_rejection=args.outlier_rejection,
                threshold_measure=rejection_measure,
                thresholds=thresholds,
                interleave=args.interleave,
                viewer=args.viewer,
                verbose=args.verbose,
                use_hierarchical_registration=args.s2v_hierarchical,
            )
        two_step_s2v_reg_recon.run()
        HR_volume_iterations = \
            two_step_s2v_reg_recon.get_iterative_reconstructions()
        time_registration += \
            two_step_s2v_reg_recon.get_computational_time_registration()
        time_reconstruction += \
            two_step_s2v_reg_recon.get_computational_time_reconstruction()
        stacks = two_step_s2v_reg_recon.get_stacks()

    # no two-step s2v-registration/reconstruction iterations
    else:
        HR_volume_iterations = []

    # Write motion-correction results
    ph.print_title("Write Motion Correction Results")
    if args.write_motion_correction:
        dir_output_mc = os.path.join(
            dir_output, args.subfolder_motion_correction)
        ph.clear_directory(dir_output_mc)

        for stack in stacks:
            stack.write(
                dir_output_mc,
                write_stack=False,
                write_mask=False,
                write_slices=False,
                write_transforms=True,
                write_transforms_history=args.transforms_history,
            )

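        # Record which slices were rejected during motion correction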
        if args.outlier_rejection:
            deleted_slices_dic = {}
            for i, stack in enumerate(stacks):
                deleted_slices = stack.get_deleted_slice_numbers()
                deleted_slices_dic[stack.get_filename()] = deleted_slices

            # check whether any stack was removed entirely
            stacks0 = data_preprocessing.get_preprocessed_stacks()
            if len(stacks) != len(stacks0):
                stacks_remain = [s.get_filename() for s in stacks]
                for stack in stacks0:
                    if stack.get_filename() in stacks_remain:
                        continue

                    # add info that all slices of this stack were rejected
                    deleted_slices = [
                        s.get_slice_number()
                        for s in stack.get_slices()
                    ]
                    deleted_slices_dic[stack.get_filename()] = deleted_slices
                    ph.print_info(
                        "Stack '%s' was rejected entirely; its slices were "
                        "added to the record." % stack.get_filename())

            ph.write_dictionary_to_json(
                deleted_slices_dic,
                os.path.join(
                    dir_output,
                    args.subfolder_motion_correction,
                    "rejected_slices.json"
                )
            )

    # ---------------------Final Volumetric Reconstruction---------------------
    ph.print_title("Final Volumetric Reconstruction")
    if args.sda:
        recon_method = sda.ScatteredDataApproximation(
            stacks,
            HR_volume,
            sigma=args.alpha,
            use_masks=args.use_masks_srr,
        )
    else:
        if args.reconstruction_type in ["TVL2", "HuberL2"]:
            recon_method = pd.PrimalDualSolver(
                stacks=stacks,
                reconstruction=HR_volume,
                reg_type="TV" if args.reconstruction_type == "TVL2" else "huber",
                iterations=args.iterations,
                use_masks=args.use_masks_srr,
            )
        else:
            recon_method = tk.TikhonovSolver(
                stacks=stacks,
                reconstruction=HR_volume,
                reg_type="TK1" if args.reconstruction_type == "TK1L2" else "TK0",
                use_masks=args.use_masks_srr,
            )
        recon_method.set_alpha(args.alpha)
        recon_method.set_iter_max(args.iter_max)
        recon_method.set_verbose(True)
    recon_method.run()
    time_reconstruction += recon_method.get_computational_time()
    HR_volume_final = recon_method.get_reconstruction()

    ph.print_subtitle("Final SDA Approximation Image Mask")
    SDA = sda.ScatteredDataApproximation(
        stacks, HR_volume_final, sigma=args.sigma, sda_mask=True)
    SDA.run()
    # HR volume contains updated mask based on SDA
    HR_volume_final = SDA.get_reconstruction()
    time_reconstruction += SDA.get_computational_time()

    elapsed_time_total = ph.stop_timing(time_start)

    # Write SRR result
    filename = recon_method.get_setting_specific_filename()
    HR_volume_final.set_filename(filename)
    dw.DataWriter.write_image(
        HR_volume_final.sitk,
        args.output,
        description=filename)
    dw.DataWriter.write_mask(
        HR_volume_final.sitk_mask,
        ph.append_to_filename(args.output, "_mask"),
        description=SDA.get_setting_specific_filename())

    HR_volume_iterations.insert(0, HR_volume_final)
    HR_volume_iterations.extend(stacks)

    if args.verbose:
        sitkh.show_stacks(
            HR_volume_iterations,
            segmentation=HR_volume_final,
            viewer=args.viewer,
        )

    # Summary
    ph.print_title("Summary")
    exe_file_info = os.path.basename(os.path.abspath(__file__)).split(".")[0]
    print("%s | Computational Time for Data Preprocessing: %s" %
          (exe_file_info, time_data_preprocessing))
    print("%s | Computational Time for Registrations: %s" %
          (exe_file_info, time_registration))
    print("%s | Computational Time for Reconstructions: %s" %
          (exe_file_info, time_reconstruction))
    print("%s | Computational Time for Entire Reconstruction Pipeline: %s" %
          (exe_file_info, elapsed_time_total))

    ph.print_line_separator()

    return 0