Example #1
def generate_neurons_io_handler(input_filename, output_filename,
                                parameters_filename):
    """
        Uses generate_neurons to process an input_filename (HDF5 dataset)
        and write results to an output_filename (HDF5 dataset).

        Args:
            input_filename          HDF5 filename to read from (should be a path to an h5py.Dataset)
            output_filename         HDF5 filename to write to (should be a path to an h5py.Group)
            parameters_filename     JSON filename with parameters.
    """

    # Extract and validate file extensions.

    # Parse parameter filename and validate that the name is acceptable
    parameters_filename_details = pathHelpers.PathComponents(
        parameters_filename)
    # Clean up the extension so it fits the standard.
    if (parameters_filename_details.extension.lower().lstrip(os.extsep)
            not in ["json"]):
        raise Exception(
            "Parameter file with filename: \"" + parameters_filename + "\"" +
            " provided with an unknown file extension: \"" +
            parameters_filename_details.extension +
            "\". If it is a supported format, please run the given file through nanshe_converter first before proceeding."
        )

    # Parse the parameters from the json file.
    parameters = xjson.read_parameters(parameters_filename)

    if (len(parameters) == 1) and ("generate_neurons_blocks" in parameters):
        generate_neurons_blocks(input_filename, output_filename,
                                **parameters["generate_neurons_blocks"])
    else:
        generate_neurons_a_block(input_filename, output_filename, **parameters)
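
A minimal usage sketch (all file names here are hypothetical): the handler dispatches on the shape of the parsed JSON, so a parameters file whose single top-level key is "generate_neurons_blocks" selects the blocked pipeline, while any other layout is forwarded verbatim to generate_neurons_a_block.

# Hypothetical paths: per the docstring, each HDF5 "filename" also carries
# the internal path to the h5py.Dataset (input) or h5py.Group (output).
generate_neurons_io_handler(
    "data.h5/images",       # input dataset (hypothetical)
    "results.h5/neurons",   # output group (hypothetical)
    "params.json"           # must carry a .json extension or an Exception is raised
)

# A params.json selecting the blocked pipeline:
#     {"generate_neurons_blocks": { ...block parameters... }}
# Any other top-level layout is passed as **parameters to generate_neurons_a_block.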
Example #2
def generate_neurons_io_handler(input_filename, output_filename, parameters_filename):
    """
        Uses generate_neurons to process an input_filename (HDF5 dataset) and write results to an output_filename (HDF5 dataset).

        Args:
            input_filename          HDF5 filename to read from (should be a path to an h5py.Dataset)
            output_filename         HDF5 filename to write to (should be a path to an h5py.Group)
            parameters_filename     JSON filename with parameters.
    """


    # Extract and validate file extensions.

    # Parse parameter filename and validate that the name is acceptable
    parameters_filename_details = pathHelpers.PathComponents(parameters_filename)
    # Clean up the extension so it fits the standard.
    if ( parameters_filename_details.extension.lower().lstrip(os.extsep) not in ["json"] ):
        raise Exception("Parameter file with filename: \"" + parameters_filename + "\"" + " provided with an unknown file extension: \"" + parameters_filename_details.extension + "\". If it is a supported format, please run the given file through nanshe_converter first before proceeding.")

    # Parse the parameters from the json file.
    parameters = xjson.read_parameters(parameters_filename)

    if (len(parameters) == 1) and ("generate_neurons_blocks" in parameters):
        generate_neurons_blocks(input_filename, output_filename, **parameters["generate_neurons_blocks"])
    else:
        generate_neurons_a_block(input_filename, output_filename, **parameters)
Example #3
def main(*argv):
    """
        Simple main function (like in C). Takes all arguments (as from sys.argv) and returns an exit status.

        Args:
            argv(list):     arguments (includes the program name, as in sys.argv).

        Returns:
            int:            exit code (0 if success)
    """

    # Only needed when running main (normally when invoked from the command line). No point in importing otherwise.
    import argparse

    argv = list(argv)

    # Creates command line parser
    parser = argparse.ArgumentParser(description = "Parses input from the command line for a batch job.")

    parser.add_argument("format",
                        choices = ["tiff"],
                        help = "Format to convert from to HDF5.",
    )

    parser.add_argument("config_filename",
                        metavar = "CONFIG_FILE",
                        type = str,
                        help = "JSON file that provides configuration options for how to import TIFF(s)."
    )
    parser.add_argument("input_files",
                        metavar = "INPUT_FILE",
                        type = str,
                        nargs = "+",
                        help = "TIFF file paths (with optional regex e.g. \"./*.tif\")."
    )

    parser.add_argument("output_file",
                        metavar = "OUTPUT_FILE",
                        type = str,
                        nargs = 1,
                        help = "HDF5 file to export (this should include a path to where the internal dataset should be stored)."
    )

    # Results of parsing arguments (ignore the first one as it is the program name).
    parsed_args = parser.parse_args(argv[1:])

    # Go ahead and stuff in parameters with the other parsed_args
    parsed_args.parameters = xjson.read_parameters(parsed_args.config_filename)

    if parsed_args.format == "tiff":
        xtiff.convert_tiffs(parsed_args.input_files, parsed_args.output_file[0], **parsed_args.parameters)

    return 0
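
A hedged sketch of driving this entry point programmatically (argument values are hypothetical); argv mirrors sys.argv, so the first element is the program name and parse_args(argv[1:]) skips it.

import sys

# Equivalent to the shell invocation:
#     <program> tiff config.json frame_0.tif frame_1.tif out.h5/data
status = main(
    "converter",     # argv[0]: program name, discarded
    "tiff",          # format (only "tiff" is accepted)
    "config.json",   # CONFIG_FILE with the conversion parameters
    "frame_0.tif",   # INPUT_FILE... (one or more TIFF paths)
    "frame_1.tif",
    "out.h5/data",   # OUTPUT_FILE: HDF5 file plus internal dataset path
)
sys.exit(status)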
Example #4
def main(*argv):
    """
        Simple main function (like in C). Takes all arguments (as from sys.argv) and returns an exit status.

        Args:
            argv(list):     arguments (includes the program name, as in sys.argv).

        Returns:
            int:            exit code (0 if success)
    """

    # Only needed when running main (normally when invoked from the command line). No point in importing otherwise.
    import argparse

    argv = list(argv)

    # Creates command line parser
    parser = argparse.ArgumentParser(description = "Parses input from the command line for a registration job.")

    parser.add_argument("config_filename",
                        metavar = "CONFIG_FILE",
                        type = str,
                        help = "JSON file that provides configuration options for how to import TIFF(s)."
    )
    parser.add_argument("input_filenames",
                        metavar = "INPUT_FILE",
                        type = str,
                        nargs = "+",
                        help = "HDF5 file to import (this should include a path to where the internal dataset should be stored)."
    )

    parser.add_argument("output_filenames",
                        metavar = "OUTPUT_FILE",
                        type = str,
                        nargs = 1,
                        help = "HDF5 file to export (this should include a path to where the internal dataset should be stored)."
    )

    # Results of parsing arguments (ignore the first one as it is the program name).
    parsed_args = parser.parse_args(argv[1:])

    # Go ahead and stuff in parameters with the other parsed_args
    parsed_args.parameters = xjson.read_parameters(parsed_args.config_filename)

    parsed_args.input_file_components = []
    for each_input_filename in parsed_args.input_filenames:
        parsed_args.input_file_components.append(PathComponents(each_input_filename))

    parsed_args.output_file_components = []
    for each_output_filename in parsed_args.output_filenames:
        parsed_args.output_file_components.append(PathComponents(each_output_filename))


    for each_input_filename_components, each_output_filename_components in itertools.izip(parsed_args.input_file_components, parsed_args.output_file_components):
        with h5py.File(each_input_filename_components.externalPath, "r") as input_file:
            with h5py.File(each_output_filename_components.externalPath, "a") as output_file:
                data = input_file[each_input_filename_components.internalPath]
                result_filename = registration.register_mean_offsets(
                    data, to_truncate=True, **parsed_args.parameters
                )
                with h5py.File(result_filename, "r") as result_file:
                    result_file.copy(
                        "reg_frames",
                        output_file[each_output_filename_components.internalDirectory],
                        name=each_output_filename_components.internalDatasetName
                    )
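                # Note: removedirs also prunes empty parent directories, not just the leaf directory.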
                os.remove(result_filename)
                os.removedirs(os.path.dirname(result_filename))

    return 0
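
A hedged invocation sketch (hypothetical paths), assuming the PathComponents convention used above: each argument names the external HDF5 file plus the internal dataset path, split into externalPath and internalPath.

# Registers the dataset in in.h5 and writes the result into out.h5.
main("register", "config.json", "in.h5/images", "out.h5/reg_frames")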
Example #5
def main(*argv):
    """
        Simple main function (like in C). Takes all arguments (as from sys.argv) and returns an exit status.

        Args:
            argv(list):     arguments (includes the program name, as in sys.argv).

        Returns:
            int:            exit code (0 if success)
    """

    # Only needed when running main (normally when invoked from the command line). No point in importing otherwise.
    import argparse

    argv = list(argv)

    # Creates command line parser
    parser = argparse.ArgumentParser(
        description="Parses input from the command line for a batch job.")

    parser.add_argument(
        "format",
        choices=["tiff"],
        help="Format to convert from to HDF5.",
    )

    parser.add_argument(
        "config_filename",
        metavar="CONFIG_FILE",
        type=str,
        help="JSON file that provides configuration options for how to import TIFF(s)."
    )
    parser.add_argument(
        "input_files",
        metavar="INPUT_FILE",
        type=str,
        nargs="+",
        help="TIFF file paths (with optional regex e.g. \"./*.tif\").")

    parser.add_argument(
        "output_file",
        metavar="OUTPUT_FILE",
        type=str,
        nargs=1,
        help="HDF5 file to export (this should include a path to where the internal dataset should be stored)."
    )

    # Results of parsing arguments (ignore the first one as it is the program name).
    parsed_args = parser.parse_args(argv[1:])

    # Go ahead and stuff in parameters with the other parsed_args
    parsed_args.parameters = xjson.read_parameters(parsed_args.config_filename)

    if parsed_args.format == "tiff":
        xtiff.convert_tiffs(parsed_args.input_files,
                            parsed_args.output_file[0],
                            **parsed_args.parameters)

    return 0
Example #6
def main(*argv):
    """
        Simple main function (like in C). Takes all arguments (as from
        sys.argv) and returns an exit status.

        Args:
            argv(list):     arguments (includes the program name, as in sys.argv).

        Returns:
            int:            exit code (0 if success)
    """

    # Only needed when running main (normally when invoked from the command
    # line). No point in importing otherwise.
    import argparse

    argv = list(argv)

    # Creates command line parser
    parser = argparse.ArgumentParser(
        description="Parses input from the command line " +
                    "for a registration job."
    )

    parser.add_argument("config_filename",
                        metavar="CONFIG_FILE",
                        type=str,
                        help="JSON file that provides configuration options " +
                             "for how to import TIFF(s)."
    )
    parser.add_argument("input_filenames",
                        metavar="INPUT_FILE",
                        type=str,
                        nargs=1,
                        help="HDF5 file to import (this should include a " +
                             "path to where the internal dataset should be " +
                             "stored)."
    )

    parser.add_argument("output_filenames",
                        metavar="OUTPUT_FILE",
                        type=str,
                        nargs=1,
                        help="HDF5 file to export (this should include a " +
                             "path to where the internal dataset should be " +
                             "stored)."
    )

    # Results of parsing arguments
    # (ignore the first one as it is the program name).
    parsed_args = parser.parse_args(argv[1:])

    # Go ahead and stuff in parameters with the other parsed_args
    parsed_args.parameters = xjson.read_parameters(parsed_args.config_filename)

    parsed_args.input_file_components = []
    for each_input_filename in parsed_args.input_filenames:
        parsed_args.input_file_components.append(
            hdf5.serializers.split_hdf5_path(each_input_filename)
        )

    parsed_args.output_file_components = []
    for each_output_filename in parsed_args.output_filenames:
        parsed_args.output_file_components.append(
            hdf5.serializers.split_hdf5_path(each_output_filename)
        )

    for each_input_filename_components, each_output_filename_components in iters.izip(
            parsed_args.input_file_components, parsed_args.output_file_components):
        with h5py.File(each_input_filename_components[0], "r") as input_file:
            with h5py.File(each_output_filename_components[0], "a") as output_file:
                data = input_file[each_input_filename_components[1]]
                result_filename = registration.register_mean_offsets(
                    data, to_truncate=True, **parsed_args.parameters
                )
                with h5py.File(result_filename, "r") as result_file:
                    result_file.copy(
                        "reg_frames",
                        output_file[os.path.dirname(each_output_filename_components[1])],
                        name=each_output_filename_components[1]
                    )

                    if parsed_args.parameters.get("include_shift", False):
                        result_file.copy(
                            "space_shift",
                            output_file[os.path.dirname(each_output_filename_components[1])],
                            name=each_output_filename_components[1] + "_shift"
                        )

                # Copy all attributes from raw data to the final result.
                output = output_file[
                    each_output_filename_components[1]
                ]
                for each_attr_name in data.attrs:
                    output.attrs[each_attr_name] = data.attrs[each_attr_name]

                # Only remove the directory if our input or output files are
                # not stored there.
                os.remove(result_filename)
                in_out_dirnames = set(
                    os.path.dirname(os.path.abspath(_.filename)) for _ in [
                        input_file, output_file
                    ]
                )
                result_dirname = os.path.dirname(result_filename)
                if result_dirname not in in_out_dirnames:
                    os.rmdir(result_dirname)

    return 0
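
The cleanup at the end deletes the scratch file, then removes its directory only when neither the input nor the output file lives there. A standalone sketch of that guard (the function name and paths are ours, not nanshe's):

import os

def cleanup_scratch(result_filename, in_out_filenames):
    # Remove the scratch file, then its directory, but only when no
    # input/output file is stored in that same directory.
    os.remove(result_filename)
    in_out_dirnames = set(
        os.path.dirname(os.path.abspath(name)) for name in in_out_filenames
    )
    result_dirname = os.path.dirname(result_filename)
    if result_dirname not in in_out_dirnames:
        os.rmdir(result_dirname)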
Example #7
def main(*argv):
    """
        Simple main function (like in C). Takes all arguments (as from
        sys.argv) and returns an exit status.

        Args:
            argv(list):     arguments (includes the program name, as in sys.argv).

        Returns:
            int:            exit code (0 if success)
    """

    # Only needed when running main (normally when invoked from the command
    # line). No point in importing otherwise.
    import argparse

    argv = list(argv)

    # Creates command line parser
    parser = argparse.ArgumentParser(
        description="Parses input from the command line " +
        "for a registration job.")

    parser.add_argument("config_filename",
                        metavar="CONFIG_FILE",
                        type=str,
                        help="JSON file that provides configuration options " +
                        "for how to import TIFF(s).")
    parser.add_argument("input_filenames",
                        metavar="INPUT_FILE",
                        type=str,
                        nargs=1,
                        help="HDF5 file to import (this should include a " +
                        "path to where the internal dataset should be " +
                        "stored).")

    parser.add_argument("output_filenames",
                        metavar="OUTPUT_FILE",
                        type=str,
                        nargs=1,
                        help="HDF5 file to export (this should include a " +
                        "path to where the internal dataset should be " +
                        "stored).")

    # Results of parsing arguments
    # (ignore the first one as it is the program name).
    parsed_args = parser.parse_args(argv[1:])

    # Go ahead and stuff in parameters with the other parsed_args
    parsed_args.parameters = xjson.read_parameters(parsed_args.config_filename)

    parsed_args.input_file_components = []
    for each_input_filename in parsed_args.input_filenames:
        parsed_args.input_file_components.append(
            hdf5.serializers.split_hdf5_path(each_input_filename))

    parsed_args.output_file_components = []
    for each_output_filename in parsed_args.output_filenames:
        parsed_args.output_file_components.append(
            hdf5.serializers.split_hdf5_path(each_output_filename))

    for each_input_filename_components, each_output_filename_components in iters.izip(
            parsed_args.input_file_components,
            parsed_args.output_file_components):
        with h5py.File(each_input_filename_components[0], "r") as input_file:
            with h5py.File(each_output_filename_components[0],
                           "a") as output_file:
                data = input_file[each_input_filename_components[1]]
                result_filename = registration.register_mean_offsets(
                    data, to_truncate=True, **parsed_args.parameters)
                with h5py.File(result_filename, "r") as result_file:
                    result_file.copy("reg_frames",
                                     output_file[os.path.dirname(
                                         each_output_filename_components[1])],
                                     name=each_output_filename_components[1])

                    if parsed_args.parameters.get("include_shift", False):
                        result_file.copy(
                            "space_shift",
                            output_file[os.path.dirname(
                                each_output_filename_components[1])],
                            name=each_output_filename_components[1] + "_shift")

                # Copy all attributes from raw data to the final result.
                output = output_file[each_output_filename_components[1]]
                for each_attr_name in data.attrs:
                    output.attrs[each_attr_name] = data.attrs[each_attr_name]

                # Only remove the directory if our input or output files are
                # not stored there.
                os.remove(result_filename)
                in_out_dirnames = set(
                    os.path.dirname(os.path.abspath(_.filename))
                    for _ in [input_file, output_file])
                result_dirname = os.path.dirname(result_filename)
                if result_dirname not in in_out_dirnames:
                    os.rmdir(result_dirname)

    return 0
Example #8
def main(*argv):
    """
        Simple main function (like in C). Takes all arguments (as from sys.argv) and returns an exit status.

        Args:
            argv(list):     arguments (includes the program name, as in sys.argv).

        Returns:
            int:            exit code (0 if success)
    """

    # Only needed when running main (normally when invoked from the command line). No point in importing otherwise.
    import argparse

    argv = list(argv)

    # Creates command line parser
    parser = argparse.ArgumentParser(
        description="Parses input from the command line for a registration job."
    )

    parser.add_argument(
        "config_filename",
        metavar="CONFIG_FILE",
        type=str,
        help="JSON file that provides configuration options for the registration job."
    )
    parser.add_argument(
        "input_filenames",
        metavar="INPUT_FILE",
        type=str,
        nargs="+",
        help="HDF5 file to import (this should include a path to where the internal dataset is stored)."
    )

    parser.add_argument(
        "output_filenames",
        metavar="OUTPUT_FILE",
        type=str,
        nargs=1,
        help="HDF5 file to export (this should include a path to where the internal dataset should be stored)."
    )

    # Results of parsing arguments (ignore the first one as it is the program name).
    parsed_args = parser.parse_args(argv[1:])

    # Go ahead and stuff in parameters with the other parsed_args
    parsed_args.parameters = xjson.read_parameters(parsed_args.config_filename)

    parsed_args.input_file_components = []
    for each_input_filename in parsed_args.input_filenames:
        parsed_args.input_file_components.append(
            PathComponents(each_input_filename))

    parsed_args.output_file_components = []
    for each_output_filename in parsed_args.output_filenames:
        parsed_args.output_file_components.append(
            PathComponents(each_output_filename))

    for each_input_filename_components, each_output_filename_components in itertools.izip(
            parsed_args.input_file_components,
            parsed_args.output_file_components):
        with h5py.File(each_input_filename_components.externalPath,
                       "r") as input_file:
            with h5py.File(each_output_filename_components.externalPath,
                           "a") as output_file:
                data = input_file[each_input_filename_components.internalPath]
                result_filename = registration.register_mean_offsets(
                    data, to_truncate=True, **parsed_args.parameters)
                with h5py.File(result_filename, "r") as result_file:
                    result_file.copy(
                        "reg_frames",
                        output_file[
                            each_output_filename_components.internalDirectory],
                        name=each_output_filename_components.
                        internalDatasetName)
                os.remove(result_filename)
                os.removedirs(os.path.dirname(result_filename))

    return 0
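
Worth noting: input_filenames is declared with nargs="+" while output_filenames takes nargs=1, and itertools.izip (zip in Python 3) stops at the shorter sequence, so only the first input file is ever paired with the single output file:

# izip/zip truncates to the shorter argument:
list(zip(["a.h5/x", "b.h5/x"], ["out.h5/y"]))  # -> [("a.h5/x", "out.h5/y")]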
Example #9
    def _import_configuration(self):
        from PyQt4.QtGui import QFileDialog

        filename = QFileDialog.getOpenFileName(caption="Import Configuration",
                                               filter="*.json")
        filename = str(filename)

        config_all = read_parameters(filename)

        config = config_all

        if "generate_neurons_blocks" in config:
            config = config["generate_neurons_blocks"]

        config = config["generate_neurons"]

        preprocess_config = config["preprocess_data"]
        dictionary_config = config["generate_dictionary"]["spams.trainDL"]
        postprocess_config = config["postprocess_data"]

        if "remove_zeroed_lines" in preprocess_config:
            local_config = preprocess_config["remove_zeroed_lines"]

            erosion_shape = local_config["erosion_shape"]
            dilation_shape = local_config["dilation_shape"]

            self.nanshePreprocessingApplet.topLevelOperator.ToRemoveZeroedLines.setValue(
                True)
            self.nanshePreprocessingApplet.topLevelOperator.ErosionShape.setValue(
                erosion_shape)
            self.nanshePreprocessingApplet.topLevelOperator.DilationShape.setValue(
                dilation_shape)
        else:
            self.nanshePreprocessingApplet.topLevelOperator.ToRemoveZeroedLines.setValue(
                False)

        if "extract_f0" in preprocess_config:
            local_config = preprocess_config["extract_f0"]

            bias = local_config.get("bias")
            temporal_smoothing_gaussian_filter_stdev = local_config[
                "temporal_smoothing_gaussian_filter_stdev"]
            temporal_smoothing_gaussian_filter_window_size = local_config[
                "temporal_smoothing_gaussian_filter_window_size"]
            half_window_size = local_config["half_window_size"]
            which_quantile = local_config["which_quantile"]
            spatial_smoothing_gaussian_filter_stdev = local_config[
                "spatial_smoothing_gaussian_filter_stdev"]
            spatial_smoothing_gaussian_filter_window_size = local_config[
                "spatial_smoothing_gaussian_filter_window_size"]

            self.nanshePreprocessingApplet.topLevelOperator.ToExtractF0.setValue(
                True)

            if bias is not None:
                self.nanshePreprocessingApplet.topLevelOperator.BiasEnabled.setValue(
                    True)
                self.nanshePreprocessingApplet.topLevelOperator.Bias.setValue(
                    bias)
            else:
                self.nanshePreprocessingApplet.topLevelOperator.BiasEnabled.setValue(
                    False)

            self.nanshePreprocessingApplet.topLevelOperator.TemporalSmoothingGaussianFilterStdev.setValue(
                temporal_smoothing_gaussian_filter_stdev)
            self.nanshePreprocessingApplet.topLevelOperator.TemporalSmoothingGaussianFilterWindowSize.setValue(
                temporal_smoothing_gaussian_filter_window_size)
            self.nanshePreprocessingApplet.topLevelOperator.HalfWindowSize.setValue(
                half_window_size)
            self.nanshePreprocessingApplet.topLevelOperator.WhichQuantile.setValue(
                which_quantile)
            self.nanshePreprocessingApplet.topLevelOperator.SpatialSmoothingGaussianFilterStdev.setValue(
                spatial_smoothing_gaussian_filter_stdev)
            self.nanshePreprocessingApplet.topLevelOperator.SpatialSmoothingGaussianFilterWindowSize.setValue(
                spatial_smoothing_gaussian_filter_window_size)
        else:
            self.nanshePreprocessingApplet.topLevelOperator.ToExtractF0.setValue(
                False)

        if "wavelet_transform" in preprocess_config:
            local_config = preprocess_config["wavelet_transform"]
            scale = local_config["scale"]

            self.nanshePreprocessingApplet.topLevelOperator.ToWaveletTransform.setValue(
                True)
            self.nanshePreprocessingApplet.topLevelOperator.Scale.setValue(
                local_config["scale"])
        else:
            self.nanshePreprocessingApplet.topLevelOperator.ToWaveletTransform.setValue(
                False)

        norm = preprocess_config["normalize_data"]["renormalized_images"].get(
            "ord", 2)
        self.nansheDictionaryLearningApplet.topLevelOperator.Ord.setValue(norm)

        self.nansheDictionaryLearningApplet.topLevelOperator.K.setValue(
            dictionary_config["K"])
        self.nansheDictionaryLearningApplet.topLevelOperator.Gamma1.setValue(
            dictionary_config["gamma1"])
        self.nansheDictionaryLearningApplet.topLevelOperator.Gamma2.setValue(
            dictionary_config["gamma2"])
        self.nansheDictionaryLearningApplet.topLevelOperator.NumThreads.setValue(
            dictionary_config["numThreads"])
        self.nansheDictionaryLearningApplet.topLevelOperator.Batchsize.setValue(
            dictionary_config["batchsize"])
        self.nansheDictionaryLearningApplet.topLevelOperator.NumIter.setValue(
            dictionary_config["iter"])
        self.nansheDictionaryLearningApplet.topLevelOperator.Lambda1.setValue(
            dictionary_config["lambda1"])
        self.nansheDictionaryLearningApplet.topLevelOperator.Lambda2.setValue(
            dictionary_config["lambda2"])
        self.nansheDictionaryLearningApplet.topLevelOperator.PosAlpha.setValue(
            dictionary_config["posAlpha"])
        self.nansheDictionaryLearningApplet.topLevelOperator.PosD.setValue(
            dictionary_config["posD"])
        self.nansheDictionaryLearningApplet.topLevelOperator.Clean.setValue(
            dictionary_config["clean"])
        self.nansheDictionaryLearningApplet.topLevelOperator.Mode.setValue(
            dictionary_config["mode"])
        self.nansheDictionaryLearningApplet.topLevelOperator.ModeD.setValue(
            dictionary_config["modeD"])

        self.nanshePostprocessingApplet.topLevelOperator.SignificanceThreshold.setValue(
            postprocess_config["wavelet_denoising"]["estimate_noise"]
            ["significance_threshold"])
        self.nanshePostprocessingApplet.topLevelOperator.WaveletTransformScale.setValue(
            postprocess_config["wavelet_denoising"]["wavelet.transform"]
            ["scale"])
        self.nanshePostprocessingApplet.topLevelOperator.NoiseThreshold.setValue(
            postprocess_config["wavelet_denoising"]["significant_mask"]
            ["noise_threshold"])

        major_axis_length_config = postprocess_config["wavelet_denoising"][
            "accepted_region_shape_constraints"].get("major_axis_length")
        if major_axis_length_config is not None:
            min = major_axis_length_config.get("min")
            max = major_axis_length_config.get("max")

            if min is not None:
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedRegionShapeConstraints_MajorAxisLength_Min_Enabled.setValue(
                    True)
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedRegionShapeConstraints_MajorAxisLength_Min.setValue(
                    min)
            else:
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedRegionShapeConstraints_MajorAxisLength_Min_Enabled.setValue(
                    False)

            if max is not None:
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedRegionShapeConstraints_MajorAxisLength_Max_Enabled.setValue(
                    True)
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedRegionShapeConstraints_MajorAxisLength_Max.setValue(
                    max)
            else:
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedRegionShapeConstraints_MajorAxisLength_Max_Enabled.setValue(
                    False)

        self.nanshePostprocessingApplet.topLevelOperator.PercentagePixelsBelowMax.setValue(
            postprocess_config["wavelet_denoising"]
            ["remove_low_intensity_local_maxima"]
            ["percentage_pixels_below_max"])
        self.nanshePostprocessingApplet.topLevelOperator.MinLocalMaxDistance.setValue(
            postprocess_config["wavelet_denoising"]
            ["remove_too_close_local_maxima"]["min_local_max_distance"])

        area_config = postprocess_config["wavelet_denoising"][
            "accepted_neuron_shape_constraints"].get("area")
        if area_config is not None:
            min = area_config.get("min")
            max = area_config.get("max")

            if min is not None:
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedNeuronShapeConstraints_Area_Min_Enabled.setValue(
                    True)
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedNeuronShapeConstraints_Area_Min.setValue(
                    min)
            else:
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedNeuronShapeConstraints_Area_Min_Enabled.setValue(
                    False)

            if max is not None:
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedNeuronShapeConstraints_Area_Max_Enabled.setValue(
                    True)
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedNeuronShapeConstraints_Area_Max.setValue(
                    max)
            else:
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedNeuronShapeConstraints_Area_Max_Enabled.setValue(
                    False)

        eccentricity_config = postprocess_config["wavelet_denoising"][
            "accepted_neuron_shape_constraints"].get("eccentricity")
        if eccentricity_config is not None:
            min = eccentricity_config.get("min")
            max = eccentricity_config.get("max")

            if min is not None:
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedNeuronShapeConstraints_Eccentricity_Min_Enabled.setValue(
                    True)
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedNeuronShapeConstraints_Eccentricity_Min.setValue(
                    min)
            else:
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedNeuronShapeConstraints_Eccentricity_Min_Enabled.setValue(
                    False)

            if max is not None:
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedNeuronShapeConstraints_Eccentricity_Max_Enabled.setValue(
                    True)
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedNeuronShapeConstraints_Eccentricity_Max.setValue(
                    max)
            else:
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedNeuronShapeConstraints_Eccentricity_Max_Enabled.setValue(
                    False)

        self.nanshePostprocessingApplet.topLevelOperator.AlignmentMinThreshold.setValue(
            postprocess_config["merge_neuron_sets"]["alignment_min_threshold"])
        self.nanshePostprocessingApplet.topLevelOperator.OverlapMinThreshold.setValue(
            postprocess_config["merge_neuron_sets"]["overlap_min_threshold"])
        self.nanshePostprocessingApplet.topLevelOperator.Fuse_FractionMeanNeuronMaxThreshold.setValue(
            postprocess_config["merge_neuron_sets"]["fuse_neurons"]
            ["fraction_mean_neuron_max_threshold"])
Example #10
    def _import_configuration(self):
        from PyQt4.QtGui import QFileDialog

        filename = QFileDialog.getOpenFileName(caption="Import Configuration", filter="*.json")
        filename = str(filename)

        config_all = read_parameters(filename)

        config = config_all


        if "generate_neurons_blocks" in config:
            config = config["generate_neurons_blocks"]

        config = config["generate_neurons"]


        preprocess_config = config["preprocess_data"]
        dictionary_config = config["generate_dictionary"]["spams.trainDL"]
        postprocess_config = config["postprocess_data"]

        if "remove_zeroed_lines" in preprocess_config:
            local_config = preprocess_config["remove_zeroed_lines"]

            erosion_shape = local_config["erosion_shape"]
            dilation_shape = local_config["dilation_shape"]

            self.nanshePreprocessingApplet.topLevelOperator.ToRemoveZeroedLines.setValue(True)
            self.nanshePreprocessingApplet.topLevelOperator.ErosionShape.setValue(erosion_shape)
            self.nanshePreprocessingApplet.topLevelOperator.DilationShape.setValue(dilation_shape)
        else:
            self.nanshePreprocessingApplet.topLevelOperator.ToRemoveZeroedLines.setValue(False)

        if "extract_f0" in preprocess_config:
            local_config = preprocess_config["extract_f0"]

            bias = local_config.get("bias")
            temporal_smoothing_gaussian_filter_stdev = local_config["temporal_smoothing_gaussian_filter_stdev"]
            temporal_smoothing_gaussian_filter_window_size = local_config["temporal_smoothing_gaussian_filter_window_size"]
            half_window_size = local_config["half_window_size"]
            which_quantile = local_config["which_quantile"]
            spatial_smoothing_gaussian_filter_stdev = local_config["spatial_smoothing_gaussian_filter_stdev"]
            spatial_smoothing_gaussian_filter_window_size = local_config["spatial_smoothing_gaussian_filter_window_size"]


            self.nanshePreprocessingApplet.topLevelOperator.ToExtractF0.setValue(True)

            if bias is not None:
                self.nanshePreprocessingApplet.topLevelOperator.BiasEnabled.setValue(True)
                self.nanshePreprocessingApplet.topLevelOperator.Bias.setValue(bias)
            else:
                self.nanshePreprocessingApplet.topLevelOperator.BiasEnabled.setValue(False)

            self.nanshePreprocessingApplet.topLevelOperator.TemporalSmoothingGaussianFilterStdev.setValue(temporal_smoothing_gaussian_filter_stdev)
            self.nanshePreprocessingApplet.topLevelOperator.TemporalSmoothingGaussianFilterWindowSize.setValue(temporal_smoothing_gaussian_filter_window_size)
            self.nanshePreprocessingApplet.topLevelOperator.HalfWindowSize.setValue(half_window_size)
            self.nanshePreprocessingApplet.topLevelOperator.WhichQuantile.setValue(which_quantile)
            self.nanshePreprocessingApplet.topLevelOperator.SpatialSmoothingGaussianFilterStdev.setValue(spatial_smoothing_gaussian_filter_stdev)
            self.nanshePreprocessingApplet.topLevelOperator.SpatialSmoothingGaussianFilterWindowSize.setValue(spatial_smoothing_gaussian_filter_window_size)
        else:
            self.nanshePreprocessingApplet.topLevelOperator.ToExtractF0.setValue(False)

        if "wavelet_transform" in preprocess_config:
            local_config = preprocess_config["wavelet_transform"]
            scale = local_config["scale"]

            self.nanshePreprocessingApplet.topLevelOperator.ToWaveletTransform.setValue(True)
            self.nanshePreprocessingApplet.topLevelOperator.Scale.setValue(scale)
        else:
            self.nanshePreprocessingApplet.topLevelOperator.ToWaveletTransform.setValue(False)


        norm = preprocess_config["normalize_data"]["renormalized_images"].get("ord", 2)
        self.nansheDictionaryLearningApplet.topLevelOperator.Ord.setValue(norm)

        self.nansheDictionaryLearningApplet.topLevelOperator.K.setValue(dictionary_config["K"])
        self.nansheDictionaryLearningApplet.topLevelOperator.Gamma1.setValue(dictionary_config["gamma1"])
        self.nansheDictionaryLearningApplet.topLevelOperator.Gamma2.setValue(dictionary_config["gamma2"])
        self.nansheDictionaryLearningApplet.topLevelOperator.NumThreads.setValue(dictionary_config["numThreads"])
        self.nansheDictionaryLearningApplet.topLevelOperator.Batchsize.setValue(dictionary_config["batchsize"])
        self.nansheDictionaryLearningApplet.topLevelOperator.NumIter.setValue(dictionary_config["iter"])
        self.nansheDictionaryLearningApplet.topLevelOperator.Lambda1.setValue(dictionary_config["lambda1"])
        self.nansheDictionaryLearningApplet.topLevelOperator.Lambda2.setValue(dictionary_config["lambda2"])
        self.nansheDictionaryLearningApplet.topLevelOperator.PosAlpha.setValue(dictionary_config["posAlpha"])
        self.nansheDictionaryLearningApplet.topLevelOperator.PosD.setValue(dictionary_config["posD"])
        self.nansheDictionaryLearningApplet.topLevelOperator.Clean.setValue(dictionary_config["clean"])
        self.nansheDictionaryLearningApplet.topLevelOperator.Mode.setValue(dictionary_config["mode"])
        self.nansheDictionaryLearningApplet.topLevelOperator.ModeD.setValue(dictionary_config["modeD"])


        self.nanshePostprocessingApplet.topLevelOperator.SignificanceThreshold.setValue(
            postprocess_config["wavelet_denoising"]["estimate_noise"]["significance_threshold"]
        )
        self.nanshePostprocessingApplet.topLevelOperator.WaveletTransformScale.setValue(
            postprocess_config["wavelet_denoising"]["wavelet.transform"]["scale"]
        )
        self.nanshePostprocessingApplet.topLevelOperator.NoiseThreshold.setValue(
            postprocess_config["wavelet_denoising"]["significant_mask"]["noise_threshold"]
        )

        major_axis_length_config = postprocess_config["wavelet_denoising"]["accepted_region_shape_constraints"].get("major_axis_length")
        if major_axis_length_config is not None:
            min = major_axis_length_config.get("min")
            max = major_axis_length_config.get("max")

            if min is not None:
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedRegionShapeConstraints_MajorAxisLength_Min_Enabled.setValue(True)
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedRegionShapeConstraints_MajorAxisLength_Min.setValue(min)
            else:
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedRegionShapeConstraints_MajorAxisLength_Min_Enabled.setValue(False)

            if max is not None:
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedRegionShapeConstraints_MajorAxisLength_Max_Enabled.setValue(True)
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedRegionShapeConstraints_MajorAxisLength_Max.setValue(max)
            else:
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedRegionShapeConstraints_MajorAxisLength_Max_Enabled.setValue(False)

        self.nanshePostprocessingApplet.topLevelOperator.PercentagePixelsBelowMax.setValue(
            postprocess_config["wavelet_denoising"]["remove_low_intensity_local_maxima"]["percentage_pixels_below_max"]
        )
        self.nanshePostprocessingApplet.topLevelOperator.MinLocalMaxDistance.setValue(
            postprocess_config["wavelet_denoising"]["remove_too_close_local_maxima"]["min_local_max_distance"]
        )

        area_config = postprocess_config["wavelet_denoising"]["accepted_neuron_shape_constraints"].get("area")
        if area_config is not None:
            min = area_config.get("min")
            max = area_config.get("max")

            if min is not None:
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedNeuronShapeConstraints_Area_Min_Enabled.setValue(True)
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedNeuronShapeConstraints_Area_Min.setValue(min)
            else:
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedNeuronShapeConstraints_Area_Min_Enabled.setValue(False)

            if max is not None:
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedNeuronShapeConstraints_Area_Max_Enabled.setValue(True)
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedNeuronShapeConstraints_Area_Max.setValue(max)
            else:
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedNeuronShapeConstraints_Area_Max_Enabled.setValue(False)

        eccentricity_config = postprocess_config["wavelet_denoising"]["accepted_neuron_shape_constraints"].get("eccentricity")
        if eccentricity_config is not None:
            min = eccentricity_config.get("min")
            max = eccentricity_config.get("max")

            if min is not None:
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedNeuronShapeConstraints_Eccentricity_Min_Enabled.setValue(True)
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedNeuronShapeConstraints_Eccentricity_Min.setValue(min)
            else:
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedNeuronShapeConstraints_Eccentricity_Min_Enabled.setValue(False)

            if max is not None:
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedNeuronShapeConstraints_Eccentricity_Max_Enabled.setValue(True)
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedNeuronShapeConstraints_Eccentricity_Max.setValue(max)
            else:
                self.nanshePostprocessingApplet.topLevelOperator.AcceptedNeuronShapeConstraints_Eccentricity_Max_Enabled.setValue(False)

        self.nanshePostprocessingApplet.topLevelOperator.AlignmentMinThreshold.setValue(
            postprocess_config["merge_neuron_sets"]["alignment_min_threshold"]
        )
        self.nanshePostprocessingApplet.topLevelOperator.OverlapMinThreshold.setValue(
            postprocess_config["merge_neuron_sets"]["overlap_min_threshold"]
        )
        self.nanshePostprocessingApplet.topLevelOperator.Fuse_FractionMeanNeuronMaxThreshold.setValue(
            postprocess_config["merge_neuron_sets"]["fuse_neurons"]["fraction_mean_neuron_max_threshold"]
        )