Example #1
    def write_similarities(self, directory):

        # Store information in array
        similarities_nda = np.zeros((len(self._stacks), len(self._measures)))
        filenames = []
        for i_stack, stack in enumerate(self._stacks):
            similarities_nda[i_stack, :] = np.array(
                [self._similarities[m][i_stack] for m in self._measures])
            filenames.append(stack.get_filename())

        # Build header of files
        header = "# Ref: %s, Ref-Mask: %d, %s \n" % (
            self._reference.get_filename(),
            self._use_reference_mask,
            ph.get_time_stamp(),
        )
        header += "# %s\n" % ("\t").join(self._measures)

        # Get filename paths
        path_to_file_filenames, path_to_file_similarities = self._get_filename_paths(
            directory)

        # Write similarities
        ph.write_to_file(path_to_file_similarities, header)
        ph.write_array_to_file(
            path_to_file_similarities, similarities_nda, verbose=self._verbose)

        # Write stack filenames
        text = header
        text += "%s\n" % "\n".join(filenames)
        ph.write_to_file(path_to_file_filenames, text, verbose=self._verbose)

    def write_slice_similarities(self, directory):
        for i_stack, stack in enumerate(self._stacks):
            stack_name = stack.get_filename()
            path_to_file = os.path.join(directory, "%s.txt" % stack_name)

            # Write header info
            header = "# %s, %s\n" % (stack.get_filename(), ph.get_time_stamp())
            header += "# %s\n" % ("\t").join(self._measures)
            ph.write_to_file(path_to_file, header, verbose=self._verbose)

            # Write array information
            N_slices = self._get_original_number_of_slices(stack)
            array = np.ones((N_slices, len(self._measures))) * self._init_value
            for i_m, m in enumerate(self._measures):
                array[:, i_m] = self._slice_similarities[stack_name][m]
            ph.write_array_to_file(path_to_file, array, verbose=self._verbose)
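
Both writers follow the same two-step pattern: ph.write_to_file writes the comment header, then ph.write_array_to_file writes the numeric rows beneath it. A minimal standalone sketch of that pattern, assuming ph is pysitk's python_helper module and that write_array_to_file appends to the existing file, as the call order above implies:

import numpy as np
import pysitk.python_helper as ph  # assumed import path for 'ph'

measures = ["NCC", "PSNR", "SSIM"]
# Hypothetical similarity values: one row per stack, one column per measure
similarities_nda = np.array([[0.91, 24.3, 0.82],
                             [0.88, 22.7, 0.79]])

header = "# %s\n" % "\t".join(measures)
ph.write_to_file("similarities.txt", header)                   # write header
ph.write_array_to_file("similarities.txt", similarities_nda)   # append values
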
Example #3
def write_function_call_to_file(function_call, path_to_file):
    text = "#!/bin/zsh\n\n%s" % function_call
    ph.write_to_file(path_to_file, text, verbose=False)
    ph.execute_command("chmod +x %s" % path_to_file, verbose=False)
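
A hedged usage sketch; the command string and script path below are made up for illustration:

cmd = "python reconstruct.py --input stack.nii.gz --output recon.nii.gz"  # hypothetical command
write_function_call_to_file(cmd, "./run_reconstruction.sh")
# The written script starts with '#!/bin/zsh' and is marked executable via chmod +x.
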
Example #4
def write_array_to_latex(
        path_to_file,
        nda,
        nda_std=None,
        nda_sig=None,
        rows=None,
        cols=None,
        row_title=None,
        decimal_places=2,
        compact=False,
        nda_bold=None,
        mark_best=None,
        verbose=True,
):

    lines = []
    sep = " & "
    newline = " \\\\\n"
    nan_symbol = "---"

    if mark_best is not None and nda_bold is not None:
        raise ValueError("Either 'mark_best' or 'nda_bold' but not both.")

    # Mark best row values bold (ties are allowed)
    if mark_best is not None:
        e = "'mark_best' must be either a list with " \
            "'min' or 'max' as elements with len(mark_best) == len(cols), " \
            "or a single string being either 'min' or 'max'"
        if type(mark_best) is not list:
            if mark_best != "min" and mark_best != "max":
                raise ValueError(e)
            mark_best = [mark_best] * len(cols)
        else:
            if len(mark_best) != len(cols):
                raise ValueError(e)
            if not all(m in ["min", "max"] for m in mark_best):
                raise ValueError(e)

        # round to output decimal places
        nda_ = nda.round(decimal_places)

        # find best values along rows per column
        i_best = []
        for j in range(nda.shape[1]):
            topper = getattr(np, "arg%s" % mark_best[j])(nda_[:, j])
            i_best.append(topper)

        # find 'ties' that have the same value within each column
        j_i_best = {
            j: np.where(nda_[:, j] == nda_[i_best[j], j])[0]
            for j in range(nda.shape[1])
        }

        # reverse mapping for easier access later
        i_j_best = {i: [] for i in range(nda.shape[0])}
        for j, i_list in six.iteritems(j_i_best):
            for i in i_list:
                i_j_best[i].append(j)

    # Statistical significance (only printed if nda_sig given)
    sym = "$^*$"
    # Unicode string array (np.chararray would yield bytes entries on Python 3)
    nda_sym = np.full(nda.shape, "", dtype="<U8")
    if nda_sig is not None:
        for index, val in np.ndenumerate(nda_sig):
            if val:
                nda_sym[index] = sym
            else:
                nda_sym[index] = ""
    else:
        nda_sym[:] = ""

    # \begin{tabular}{tabular_options}
    tabular_options = 'c' * nda.shape[1]
    if rows is not None:
        tabular_options = 'l' + tabular_options
    lines.append("\\begin{tabular}{%s}\n" % tabular_options)

    # Header: column titles of table
    line_args = ["\\bf %s" % c for c in cols]
    if rows is not None:
        if row_title is None:
            line_args.insert(0, "")
        else:
            line_args.insert(0, "\\bf %s" % row_title)
    lines.append("%s%s" % (sep.join(line_args), newline))
    lines.append("\\hline\n")

    # Entries of the table
    for i_row in range(nda.shape[0]):
        if nda_std is None:
            line_args = [
                # "\\num{%s}" % (
                "%s%s" % (
                    '{:.{prec}f}'.format(f, prec=decimal_places),
                    p,
                ) if not np.isnan(f)
                else nan_symbol
                for (f, p) in zip(nda[i_row, :], nda_sym[i_row, :])
            ]
        else:
            line_args = [
                # "\\num{%s \\pm %s}" % (
                "%s $\\pm$ %s%s" % (
                    '{:.{prec}f}'.format(m, prec=decimal_places),
                    '{:.{prec}f}'.format(s, prec=decimal_places),
                    p,
                ) if not np.isnan(m)
                else nan_symbol
                for (m, s, p) in zip(nda[i_row, :], nda_std[i_row, :], nda_sym[i_row, :])
            ]
            if compact:
                # remove white spaces
                line_args = [re.sub(" ", "", l) for l in line_args]

        if mark_best:
            for j in i_j_best[i_row]:
                line_args[j] = "\\bf %s" % line_args[j]

        if nda_bold is not None:
            for j in range(nda_bold.shape[1]):
                if nda_bold[i_row, j]:
                    line_args[j] = "\\bf %s" % line_args[j]

        if rows is not None:
            line_args.insert(0, "\\bf %s" % rows[i_row])
        lines.append("%s%s" % (sep.join(line_args), newline))

    # \end{tabular}
    lines.append("\\end{tabular}")

    text = "".join(lines)
    if verbose:
        print(text)
    ph.write_to_file(path_to_file, text, access_mode="w", verbose=True)
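
A minimal usage sketch, assuming the function above and its ph helper are importable; the values, labels, and output path are made up. With mark_best="max", the largest rounded value in each column is set in bold (ties are all bolded):

import numpy as np

# Hypothetical scores: rows = methods, columns = similarity measures
nda = np.array([[0.912, 0.801],
                [0.934, 0.765]])

write_array_to_latex(
    "table.tex",                     # illustrative output path
    nda,
    rows=["Method A", "Method B"],
    cols=["NCC", "SSIM"],
    row_title="Method",
    decimal_places=3,
    mark_best="max",                 # bold the best (largest) value per column
)
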
Example #5
def main():

    # Set print options
    np.set_printoptions(precision=3)
    pd.set_option('display.width', 1000)

    input_parser = InputArgparser(description=".", )
    input_parser.add_filenames(required=True)
    input_parser.add_reference(required=True)
    input_parser.add_reference_mask()
    input_parser.add_dir_output(required=False)
    input_parser.add_measures(
        default=["PSNR", "RMSE", "MAE", "SSIM", "NCC", "NMI"])
    input_parser.add_verbose(default=0)
    args = input_parser.parse_args()
    input_parser.print_arguments(args)

    ph.print_title("Image similarity")
    data_reader = dr.MultipleImagesReader(args.filenames)
    data_reader.read_data()
    stacks = data_reader.get_data()

    reference = st.Stack.from_filename(args.reference, args.reference_mask)

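    # SimpleITK raises an exception if two images do not occupy the same
    # physical space; the dummy subtraction below is a cheap consistency check.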
    for stack in stacks:
        try:
            stack.sitk - reference.sitk
        except RuntimeError as e:
            raise IOError(
                "All provided images must be at the same image space")

    x_ref = sitk.GetArrayFromImage(reference.sitk)

    if args.reference_mask is None:
        indices = np.where(x_ref != np.inf)
    else:
        x_ref_mask = sitk.GetArrayFromImage(reference.sitk_mask)
        indices = np.where(x_ref_mask > 0)

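    # Bind each measure name via a default argument (m=m) so every lambda
    # evaluates its own measure rather than the loop's last value.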
    measures_dic = {
        m: lambda x, m=m: SimilarityMeasures.similarity_measures[m]
        (x[indices], x_ref[indices])
        # SimilarityMeasures.similarity_measures[m](x, x_ref)
        for m in args.measures
    }

    observer = obs.Observer()
    observer.set_measures(measures_dic)
    for stack in stacks:
        nda = sitk.GetArrayFromImage(stack.sitk)
        observer.add_x(nda)

    if args.verbose:
        stacks_comparison = [s for s in stacks]
        stacks_comparison.insert(0, reference)
        sitkh.show_stacks(
            stacks_comparison,
            segmentation=reference,
        )

    observer.compute_measures()
    measures = observer.get_measures()

    # Store information in array
    error = np.zeros((len(stacks), len(measures)))
    cols = measures
    rows = []
    for i_stack, stack in enumerate(stacks):
        error[i_stack, :] = np.array([measures[m][i_stack] for m in measures])
        rows.append(stack.get_filename())

    header = "# Ref: %s, Ref-Mask: %d, %s \n" % (
        reference.get_filename(),
        args.reference_mask is not None,
        ph.get_time_stamp(),
    )
    header += "# %s\n" % ("\t").join(measures)

    path_to_file_filenames = os.path.join(args.dir_output, "filenames.txt")
    path_to_file_similarities = os.path.join(args.dir_output,
                                             "similarities.txt")

    # Write to files
    ph.write_to_file(path_to_file_similarities, header)
    ph.write_array_to_file(path_to_file_similarities, error, verbose=False)
    text = header
    text += "%s\n" % "\n".join(rows)
    ph.write_to_file(path_to_file_filenames, text)

    # Print to screen
    ph.print_subtitle("Computed Similarities")
    df = pd.DataFrame(error, rows, cols)
    print(df)

    return 0

def main():

    # Read input
    input_parser = InputArgparser(
        description="Script to evaluate the similarity of simulated stack "
        "from obtained reconstruction against the original stack. "
        "This function takes the result of "
        "simulate_stacks_from_reconstruction.py as input.", )
    input_parser.add_filenames(required=True)
    input_parser.add_filenames_masks()
    input_parser.add_dir_output(required=True)
    input_parser.add_suffix_mask(default="_mask")
    input_parser.add_measures(default=["NCC", "SSIM"])
    input_parser.add_option(
        option_string="--prefix-simulated",
        type=str,
        help="Specify the prefix of the simulated stacks to distinguish them "
        "from the original data.",
        default="Simulated_",
    )
    input_parser.add_option(
        option_string="--dir-input-simulated",
        type=str,
        help="Specify the directory where the simulated stacks are. "
        "If not given, it is assumed that they are in the same directory "
        "as the original ones.",
        default=None)
    input_parser.add_slice_thicknesses(default=None)

    args = input_parser.parse_args()
    input_parser.print_arguments(args)

    # --------------------------------Read Data--------------------------------
    ph.print_title("Read Data")

    # Read original data
    filenames_original = args.filenames
    data_reader = dr.MultipleImagesReader(
        file_paths=filenames_original,
        file_paths_masks=args.filenames_masks,
        suffix_mask=args.suffix_mask,
        stacks_slice_thicknesses=args.slice_thicknesses,
    )
    data_reader.read_data()
    stacks_original = data_reader.get_data()

    # Read data simulated from obtained reconstruction
    if args.dir_input_simulated is None:
        dir_input_simulated = os.path.dirname(filenames_original[0])
    else:
        dir_input_simulated = args.dir_input_simulated
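    # Simulated stacks are expected at
    # <dir-input-simulated>/<prefix-simulated><basename of original file>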
    filenames_simulated = [
        os.path.join("%s", "%s%s") %
        (dir_input_simulated, args.prefix_simulated, os.path.basename(f))
        for f in filenames_original
    ]
    data_reader = dr.MultipleImagesReader(filenames_simulated,
                                          suffix_mask=args.suffix_mask)
    data_reader.read_data()
    stacks_simulated = data_reader.get_data()

    for i in range(len(stacks_original)):
        try:
            stacks_original[i].sitk - stacks_simulated[i].sitk
        except RuntimeError:
            raise IOError(
                "Images '%s' and '%s' do not occupy the same space!" %
                (filenames_original[i], filenames_simulated[i]))

    similarity_measures = {
        m: SimilarityMeasures.similarity_measures[m]
        for m in args.measures
    }
    similarities = np.zeros(len(args.measures))

    for i in range(len(stacks_original)):
        nda_3D_original = sitk.GetArrayFromImage(stacks_original[i].sitk)
        nda_3D_simulated = sitk.GetArrayFromImage(stacks_simulated[i].sitk)
        nda_3D_mask = sitk.GetArrayFromImage(stacks_original[i].sitk_mask)

        path_to_file = os.path.join(
            args.dir_output,
            "Similarity_%s.txt" % stacks_original[i].get_filename())
        text = "# Similarity: %s vs %s (%s)." % (os.path.basename(
            filenames_original[i]), os.path.basename(
                filenames_simulated[i]), ph.get_time_stamp())
        text += "\n#\t" + ("\t").join(args.measures)
        text += "\n"
        ph.write_to_file(path_to_file, text, "w")
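        # Append one row of similarity values per slice; rejected (all-zero)
        # slices and slices with empty masks produce NaN entries.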
        for k in range(nda_3D_original.shape[0]):
            x_2D_original = nda_3D_original[k, :, :]
            x_2D_simulated = nda_3D_simulated[k, :, :]

            # zero slice, i.e. rejected during motion correction
            if np.abs(x_2D_simulated).sum() < 1e-6:
                x_2D_simulated[:] = np.nan
            x_2D_mask = nda_3D_mask[k, :, :]

            indices = np.where(x_2D_mask > 0)

            for m, measure in enumerate(args.measures):
                if len(indices[0]) > 0:
                    similarities[m] = similarity_measures[measure](
                        x_2D_original[indices], x_2D_simulated[indices])
                else:
                    similarities[m] = np.nan
            ph.write_array_to_file(path_to_file, similarities.reshape(1, -1))

    return 0