Example #1
def load(filenames=None,
         signal_type=None,
         stack=False,
         stack_axis=None,
         new_axis_name="stack_element",
         lazy=False,
         **kwds):
    """Load supported files into a pyxem structure.

    Supported formats: hspy (HDF5), Medipix (hdr + mib), blockfile, Gatan dm3,
    msa, Bruker bcf, FEI ser and emi, SEMPER unf, EMD, EDAX spd/spc, tif,
    and a number of image formats.

    Additional keyword arguments are passed to the corresponding reader. For
    available options see their individual documentation, which may be found in
    either pyxem or hyperspy.

    Parameters
    ----------
    filenames :  None, str or list of strings
        The filename to be loaded. If None, a window will open to select
        a file to load. If a valid filename is passed in, that single
        file is loaded. If multiple file names are passed in
        a list, a list of objects or a single object containing the data
        of the individual files stacked is returned. This behaviour is
        controlled by the `stack` parameter (see below). Multiple
        files can be loaded by using simple shell-style wildcards,
        e.g. 'my_file*.msa' loads all the files that start
        with 'my_file' and have the '.msa' extension.
    signal_type : {None, "electron_diffraction", "diffraction_profile",
                   "diffraction_vectors", "crystallographic_map", str}
        The acronym that identifies the signal type.
        The value provided may determine the Signal subclass assigned to the
        data.
        If None the value is read/guessed from the file. Any other value
        overrides the value stored in the file if any.
        For electron energy-loss spectroscopy use "EELS".
        For energy-dispersive x-rays use "EDS_TEM"
        if acquired from an electron-transparent sample (as is usually
        the case in a transmission electron microscope, TEM), or
        "EDS_SEM" if acquired from a non-electron-transparent sample
        (as is usually the case in a scanning electron microscope, SEM).
        If "" (empty string) the value is not read from the file and is
        considered undefined.
    stack : bool
        If True and multiple filenames are passed in, stacking all
        the data into a single object is attempted. All files must match
        in shape. If each file contains multiple (N) signals, N stacks will be
        created, with the requirement that each file contains the same number
        of signals.
    stack_axis : {None, int, str}
        If None, the signals are stacked over a new axis. The data must
        have the same dimensions. Otherwise the
        signals are stacked over the axis given by its integer index or
        its name. The data must have the same shape, except in the dimension
        corresponding to `axis`.
    new_axis_name : string
        The name of the new axis when `axis` is None.
        If an axis with this name already
        exists, it automatically appends '-i', where `i` is an integer,
        until it finds a name that is not yet in use.
    lazy : bool
        Open the data lazily, i.e. without actually reading the data from
        disk until required. Allows opening arbitrary-sized datasets. The
        default is `False`.
    print_info : bool
        For SEMPER unf and EMD (Berkeley) files, if True (default is False)
        additional information read during loading is printed for a quick
        overview.
    downsample : int (1–4095)
        For Bruker bcf files, if set to an integer >= 2 (default 1), the
        bcf data are parsed into an array down-sampled by the given factor:
        values from the original bcf pixels are summed to form each
        down-sampled pixel. This improves the signal and conserves memory
        at the cost of lower resolution.
    cutoff_at_kV : {None, int, float}
        For Bruker bcf files, if set to a number (default is None), the
        bcf is parsed into an array with a depth cutoff at the corresponding
        energy. This conserves memory by cutting off the unused tail of the
        spectra, or can force an enlargement of the spectral range.
    select_type : {'spectrum', 'image', None}
        For Bruker bcf files, if one of 'spectrum' or 'image' (default is
        None), the loader returns either only the hypermap or only the
        SEM/TEM electron images.

    Returns
    -------
    Signal instance or list of signal instances

    Examples
    --------
    Loading a single file providing the signal type:

    >>> d = hs.load('file.dm3', signal_type="EDS_TEM")

    Loading multiple files:

    >>> d = hs.load(['file1.dm3', 'file2.dm3'])

    Loading multiple files matching the pattern:

    >>> d = hs.load('file*.dm3')

    Loading (potentially larger than the available memory) files lazily and
    stacking:

    >>> s = hs.load('file*.blo', lazy=True, stack=True)

    """
    deprecated = ['mmap_dir', 'load_to_memory']
    warn_str = "'{}' argument is deprecated, please use 'lazy' instead"
    for k in deprecated:
        if k in kwds:
            lazy = True
            warnings.warn(warn_str.format(k), VisibleDeprecationWarning)
            del kwds[k]
    kwds['signal_type'] = signal_type

    if filenames is None:
        from hyperspy.signal_tools import Load
        load_ui = Load()
        get_gui(load_ui, toolkey="load")
        if load_ui.filename:
            filenames = load_ui.filename
            lazy = load_ui.lazy
        if filenames is None:
            raise ValueError("No file provided to reader")

    if isinstance(filenames, str):
        filenames = natsorted(
            [f for f in glob.glob(filenames) if os.path.isfile(f)])
        if not filenames:
            raise ValueError('No file name matches this pattern')
    elif not isinstance(filenames, (list, tuple)):
        raise ValueError(
            'The filenames parameter must be a list, tuple, string or None')
    if not filenames:
        raise ValueError('No file provided to reader.')
    else:
        if len(filenames) > 1:
            _logger.info('Loading individual files')
        if stack is True:
            # We are loading a stack!
            # Note that while each file might contain several signals, all
            # files are required to contain the same number of signals. We
            # therefore use the first file to determine the number of signals.
            for i, filename in enumerate(filenames):
                obj = load_single_file(filename, lazy=lazy, **kwds)
                if i == 0:
                    # First iteration, determine number of signals, if several:
                    if isinstance(obj, (list, tuple)):
                        n = len(obj)
                    else:
                        n = 1
                    # Initialize signal 2D list:
                    signals = [[] for j in range(n)]
                else:
                    # Check that number of signals per file doesn't change
                    # for other files:
                    if isinstance(obj, (list, tuple)):
                        if n != len(obj):
                            raise ValueError(
                                "The number of sub-signals per file does not "
                                "match:\n" + (f_error_fmt %
                                              (1, n, filenames[0])) +
                                (f_error_fmt % (i, len(obj), filename)))
                    elif n != 1:
                        raise ValueError(
                            "The number of sub-signals per file does not "
                            "match:\n" + (f_error_fmt % (1, n, filenames[0])) +
                            (f_error_fmt % (i, len(obj), filename)))
                # Append loaded signals to 2D list:
                if n == 1:
                    signals[0].append(obj)
                elif n > 1:
                    for j in range(n):
                        signals[j].append(obj[j])
            # Next, merge the signals in the `stack_axis` direction:
            # When each file had N signals, we create N stacks!
            objects = []
            for i in range(n):
                signal = signals[i]  # Sublist, with len = len(filenames)
                signal = stack_method(signal,
                                      axis=stack_axis,
                                      new_axis_name=new_axis_name,
                                      lazy=lazy)
                signal.metadata.General.title = os.path.split(
                    os.path.split(os.path.abspath(filenames[0]))[0])[1]
                _logger.info('Individual files loaded correctly')
                _logger.info(signal._summary())
                objects.append(signal)
        else:
            # No stack, so we simply load all signals in all files separately
            objects = [
                load_single_file(filename, lazy=lazy, **kwds)
                for filename in filenames
            ]

        if len(objects) == 1:
            objects = objects[0]
    return objects
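
The stacking branch above proceeds in two passes: it first groups the
i-th sub-signal of every file into its own sublist, and only then stacks
each sublist along `stack_axis`. A minimal standalone sketch of the
grouping pass, using plain lists in place of signal objects (`fake_load`
is a hypothetical stand-in for `load_single_file`):

def group_subsignals(filenames, fake_load):
    # Group the i-th sub-signal of every file into its own sublist,
    # so that each sublist can later be stacked into one signal.
    signals = []
    n = 1
    for i, filename in enumerate(filenames):
        obj = fake_load(filename)
        items = obj if isinstance(obj, (list, tuple)) else [obj]
        if i == 0:
            n = len(items)
            signals = [[] for _ in range(n)]
        elif len(items) != n:
            raise ValueError(
                "The number of sub-signals per file does not match")
        for j in range(n):
            signals[j].append(items[j])
    return signals  # n sublists, each of length len(filenames)

# Two files with two sub-signals each yield two stacks:
data = {'a.hspy': ['a0', 'a1'], 'b.hspy': ['b0', 'b1']}
print(group_subsignals(['a.hspy', 'b.hspy'], data.__getitem__))
# [['a0', 'b0'], ['a1', 'b1']]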
Example #2
def load(filenames=None,
         signal_type=None,
         stack=False,
         stack_axis=None,
         new_axis_name='stack_element',
         lazy=False,
         convert_units=False,
         **kwargs):
    """Load potentially multiple supported files into a KikuchiPy
    structure.

    Any extra keyword is passed to the corresponding reader. For
    available options see their individual documentation, which may be
    found in either KikuchiPy or HyperSpy.

    Parameters
    ----------
    filenames : {None, str or list of strings}, optional
        The filenames to be loaded. If None, a window will open to
        select a file to load. If a valid filename is passed in, that
        single file is loaded. If multiple file names are passed in a
        list, a list of objects or a single object containing the data
        of the individual files stacked is returned. This behaviour is
        controlled by the `stack` parameter (see below). Multiple
        files can be loaded by using simple shell-style wildcards,
        e.g. 'my_file*.dat' loads all the files that start
        with 'my_file' and have the '.dat' extension.
    signal_type : {None, 'EBSD', str}, optional
        The name or acronym that identifies the signal type. The value
        provided may determine the Signal subclass assigned to the
        data. If None the value is read/guessed from the file. Any
        other value overrides the value stored in the file if any. If ''
        (empty string) the value is not read from the file and is
        considered undefined.
    stack : bool, optional
        If True and multiple filenames are passed in, stacking all
        the data into a single object is attempted. All files must
        match in shape. If each file contains multiple (N) signals, N
        stacks will be created, with the requirement that each file
        contains the same number of signals.
    stack_axis : {None, int, str}, optional
        If None, the signals are stacked over a new axis. The data must
        have the same dimensions. Otherwise the signals are stacked
        over the axis given by its integer index or its name. The data
        must have the same shape, except in the dimension corresponding
        to `axis`.
    new_axis_name : str, optional
        The name of the new axis when `axis` is None. If an axis with
        this name already exists, it automatically appends '-i', where
        `i` is an integer, until it finds a name that is not yet in use.
    lazy : bool, optional
        Open the data lazily - i.e. without actually reading the data
        from the disk until required. Allows opening arbitrary-sized
        datasets. The default is `False`.
    convert_units : bool, optional
        If True, convert the units using the `convert_to_units` method
        of the `axes_manager`. If False (default), nothing is done.
    **kwargs :
        Keyword arguments passed to the corresponding reader.

    Returns
    -------
    objects : signal instance or list of signal instances

    Examples
    --------
    Loading a single file providing the signal type:

    >>> s = kp.load('Pattern.h5', signal_type='EBSD')

    Loading multiple scans from a single file:

    >>> s1, s2 = kp.load('Pattern.h5', scans=[1, 2])
    """
    kwargs['signal_type'] = signal_type
    kwargs['convert_units'] = convert_units

    if filenames is None:
        from hyperspy.signal_tools import Load
        load_ui = Load()
        get_gui(load_ui, toolkey='load')
        if load_ui.filename:
            filenames = load_ui.filename
            lazy = load_ui.lazy
        if filenames is None:
            raise IOError("No file provided to reader.")

    if isinstance(filenames, str):
        filenames = natsorted(
            [f for f in glob.glob(filenames) if os.path.isfile(f)])
        if not filenames:
            raise ValueError("No file name matches this pattern.")
    elif not isinstance(filenames, (list, tuple)):
        raise ValueError("The filenames parameter must be a list, tuple, "
                         "string or None.")

    if stack is True:
        # We are loading a stack!
        # Note that while each file might contain several signals, all
        # files are required to contain the same number of signals. We
        # therefore use the first file to determine the number of signals.
        for i, filename in enumerate(filenames):
            obj = load_single_file(filename, lazy=lazy, **kwargs)
            if i == 0:
                # First iteration, determine number of signals, if several
                n = 1
                if isinstance(obj, (list, tuple)):
                    n = len(obj)
                # Initialize signal 2D list
                signals = [[] for j in range(n)]
            else:
                # Check that number of signals per file doesn't change for other
                # files
                if isinstance(obj, (list, tuple)):
                    if n != len(obj):
                        raise ValueError(
                            "The number of signals per file does not match:\n"
                            + (f_error_fmt % (1, n, filenames[0])) +
                            (f_error_fmt % (i, len(obj), filename)))
                elif n != 1:
                    raise ValueError(
                        "The number of signals per file does not match:\n"
                        + (f_error_fmt % (1, n, filenames[0])) +
                        (f_error_fmt % (i, 1, filename)))
            # Append loaded signals to 2D list
            if n == 1:
                signals[0].append(obj)
            elif n > 1:
                for j in range(n):
                    signals[j].append(obj[j])
        # Next, merge the signals in the `stack_axis` direction.
        # When each file has N signals, we create N stacks.
        objects = []
        for i in range(n):
            signal = signals[i]  # Sublist, with len = len(filenames)
            signal = stack_method(signal,
                                  axis=stack_axis,
                                  new_axis_name=new_axis_name,
                                  lazy=lazy)
            signal.metadata.General.title = os.path.split(
                os.path.split(os.path.abspath(filenames[0]))[0])[1]
            objects.append(signal)
    else:  # No stack, so simply load all signals in all files separately
        objects = [
            load_single_file(filename, lazy=lazy, **kwargs)
            for filename in filenames
        ]

    if len(objects) == 1:
        objects = objects[0]

    return objects
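
All of these loaders expand a string `filenames` argument the same way:
shell-style wildcard matching with `glob`, followed by natural sorting so
that, for example, 'file10' comes after 'file9'. A minimal sketch of that
expansion step (assuming the third-party natsort package the loaders
import `natsorted` from; the pattern is illustrative):

import glob
import os

from natsort import natsorted

def expand_pattern(pattern):
    # Expand shell-style wildcards, keep regular files only, and sort
    # them in natural (human) order rather than lexicographic order.
    matches = natsorted(f for f in glob.glob(pattern) if os.path.isfile(f))
    if not matches:
        raise ValueError(f'No file name matches the pattern "{pattern}"')
    return matches

# With natsorted, 'file10.msa' sorts after 'file9.msa'; plain sorted()
# would place it between 'file1.msa' and 'file2.msa'.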
Example #3
def load(filenames=None,
         signal_type=None,
         stack=False,
         stack_axis=None,
         new_axis_name="stack_element",
         lazy=False,
         convert_units=False,
         escape_square_brackets=False,
         **kwds):
    """Load potentially multiple supported files into HyperSpy.

    Supported formats: hspy (HDF5), msa, Gatan dm3, Ripple (rpl+raw),
    Bruker bcf and spx, FEI ser and emi, SEMPER unf, EMD, EDAX spd/spc,
    tif, and a number of image formats.

    Depending on the number of datasets to load in the file, this function will
    return a HyperSpy signal instance or list of HyperSpy signal instances.

    Any extra keywords are passed to the corresponding reader. For
    available options, see their individual documentation.

    Parameters
    ----------
    filenames :  None or str or list(str) or pathlib.Path or list(pathlib.Path)
        The filename to be loaded. If None, a window will open to select
        a file to load. If a valid filename is passed in, that single
        file is loaded. If multiple file names are passed in
        a list, a list of objects or a single object containing the data
        of the individual files stacked is returned. This behaviour is
        controlled by the `stack` parameter (see below). Multiple
        files can be loaded by using simple shell-style wildcards,
        e.g. 'my_file*.msa' loads all the files that start
        with 'my_file' and have the '.msa' extension.
    signal_type : {None, "EELS", "EDS_SEM", "EDS_TEM", "", str}
        The acronym that identifies the signal type.
        The value provided may determine the Signal subclass assigned to the
        data.
        If None the value is read/guessed from the file. Any other value
        overrides the value stored in the file if any.
        For electron energy-loss spectroscopy use "EELS".
        For energy-dispersive x-rays use "EDS_TEM"
        if acquired from an electron-transparent sample (as is usually
        the case in a transmission electron microscope, TEM), or
        "EDS_SEM" if acquired from a non-electron-transparent sample
        (as is usually the case in a scanning electron microscope, SEM).
        If "" (empty string) the value is not read from the file and is
        considered undefined.
    stack : bool
        If True and multiple filenames are passed in, stacking all
        the data into a single object is attempted. All files must match
        in shape. If each file contains multiple (N) signals, N stacks will be
        created, with the requirement that each file contains the same number
        of signals.
    stack_axis : {None, int, str}
        If None, the signals are stacked over a new axis. The data must
        have the same dimensions. Otherwise the
        signals are stacked over the axis given by its integer index or
        its name. The data must have the same shape, except in the dimension
        corresponding to `axis`.
    new_axis_name : string
        The name of the new axis when `axis` is None.
        If an axis with this name already
        exists, it automatically appends '-i', where `i` is an integer,
        until it finds a name that is not yet in use.
    lazy : bool
        Open the data lazily, i.e. without actually reading the data from
        disk until required. Allows opening arbitrary-sized datasets. The
        default is `False`.
    convert_units : bool
        If True, convert the units using the `convert_to_units` method of
        the `axes_manager`. If False, does nothing. The default is False.
    escape_square_brackets : bool, default False
        If True, and ``filenames`` is a str containing square brackets,
        then square brackets are escaped before wildcard matching with
        ``glob.glob()``. If False, square brackets are used to represent
        character classes (e.g. ``[a-z]`` matches lowercase letters).
    reader : None or str or custom file reader object, default None
        Specify the file reader to use when loading the file(s). If None,
        will use the file extension to infer the file type and appropriate
        reader. If str, will select the appropriate file reader from the
        list of available readers in HyperSpy. If a custom reader object,
        it should implement the ``file_reader`` function, which returns
        a dictionary containing the data and metadata for conversion to
        a HyperSpy signal.
    print_info : bool, default False
        For SEMPER unf and EMD (Berkeley) files, if True,
        additional information read during loading is printed for a quick
        overview.
    downsample : int (1–4095)
        For Bruker bcf files, if set to an integer >= 2 (default 1), the
        bcf data are parsed into an array down-sampled by the given factor:
        values from the original bcf pixels are summed to form each
        down-sampled pixel. This improves the signal and conserves memory
        at the cost of lower resolution.
    cutoff_at_kV : {None, int, float}
        For Bruker bcf files, if set to a number (default is None), the
        bcf is parsed into an array with a depth cutoff at the corresponding
        energy. This conserves memory by cutting off the unused tail of the
        spectra, or can force an enlargement of the spectral range.
    select_type : {'spectrum_image', 'image', 'single_spectrum', None}
        If `None` (default), all data are loaded.
        For Bruker bcf and Velox emd files: if one of 'spectrum_image',
        'image' or 'single_spectrum', the loader returns either only the
        spectrum image, only the images (including EDS maps for Velox emd
        files), or only the single spectra (for Velox emd files).
    first_frame : int (default 0)
        Only for Velox emd files: load only the data acquired after the
        specified frame.
    last_frame : None or int (default None)
        Only for Velox emd files: load only the data acquired up to the
        specified frame. If None, load the data up to the end.
    sum_frames : bool (default is True)
        Only for Velox emd files: if False, load each EDS frame individually.
    sum_EDS_detectors : bool (default is True)
        Only for Velox emd files: if True, the signals from the different
        detectors are summed. If False, a distinct signal is returned for
        each EDS detector.
    rebin_energy : int, a divisor of the length of the energy dimension (default 1)
        Only for Velox emd files: rebin the energy axis by the integer provided
        during loading in order to save memory space.
    SI_dtype : numpy.dtype
        Only for Velox emd files: set the dtype of the spectrum image data in
        order to save memory space. If None, the default dtype from the Velox emd
        file is used.
    load_SI_image_stack : bool (default False)
        Only for Velox emd files: if True, load the stack of STEM images
        acquired simultaneously as the EDS spectrum image.
    dataset_path : None, str or list of str, optional
        For filetypes which support several datasets in the same file, this
        will only load the specified dataset. Several datasets can be loaded
        by using a list of strings. Only for EMD (NCEM) and hdf5 (USID) files.
    stack_group : bool, optional
        Only for EMD NCEM. Stack datasets of groups with common name. Relevant
        for emd file version >= 0.5 where groups can be named 'group0000',
        'group0001', etc.
    ignore_non_linear_dims : bool, default is True
        Only for HDF5 USID files. If True, all non-linearly varied
        parameters will be treated as linearly varied parameters and a
        Signal object will be generated. Else, parameters that were varied
        non-linearly in the desired dataset will result in an Exception.
    only_valid_data : bool, optional
        Only for FEI emi/ser files, in the case of a series or linescan
        with the acquisition stopped before the end: if True, load only
        the acquired data. If False, fill the empty data with zeros. The
        default is False, and this default value will change to True in
        version 2.0.

    Returns
    -------
    Signal instance or list of signal instances

    Examples
    --------
    Loading a single file providing the signal type:

    >>> d = hs.load('file.dm3', signal_type="EDS_TEM")

    Loading multiple files:

    >>> d = hs.load(['file1.dm3', 'file2.dm3'])

    Loading multiple files matching the pattern:

    >>> d = hs.load('file*.dm3')

    Loading multiple files containing square brackets:

    >>> d = hs.load('file[*].dm3', escape_square_brackets=True)

    Loading (potentially larger than the available memory) files lazily and
    stacking:

    >>> s = hs.load('file*.blo', lazy=True, stack=True)

    Specifying the file reader to use:

    >>> s = hs.load('a_nexus_file.h5', reader='nxs')

    """
    deprecated = ['mmap_dir', 'load_to_memory']
    warn_str = "'{}' argument is deprecated, please use 'lazy' instead"
    for k in deprecated:
        if k in kwds:
            lazy = True
            warnings.warn(warn_str.format(k), VisibleDeprecationWarning)
            del kwds[k]
    kwds['signal_type'] = signal_type
    kwds['convert_units'] = convert_units
    if filenames is None:
        from hyperspy.signal_tools import Load
        load_ui = Load()
        get_gui(load_ui, toolkey="hyperspy.load")
        if load_ui.filename:
            filenames = load_ui.filename
            lazy = load_ui.lazy
        if filenames is None:
            raise ValueError("No file provided to reader")

    if isinstance(filenames, str):
        pattern = filenames
        if escape_square_brackets:
            filenames = _escape_square_brackets(filenames)

        filenames = natsorted([f for f in glob.glob(filenames)
                               if os.path.isfile(f)])

        if not filenames:
            raise ValueError(f'No filename matches the pattern "{pattern}"')

    elif isinstance(filenames, Path):
        # Just convert to list for now, pathlib.Path not
        # fully supported in io_plugins
        filenames = [f for f in [filenames] if f.is_file()]

    elif isgenerator(filenames):
        filenames = list(filenames)

    elif not isinstance(filenames, (list, tuple)):
        raise ValueError(
            'The filenames parameter must be a list, tuple, '
            f'string or None, not {type(filenames)}'
        )

    if not filenames:
        raise ValueError('No file(s) provided to reader.')

    # pathlib.Path not fully supported in io_plugins,
    # so convert to str here to maintain compatibility
    filenames = [str(f) if isinstance(f, Path) else f for f in filenames]

    if len(filenames) > 1:
        _logger.info('Loading individual files')

    if stack is True:
        # We are loading a stack!
        # Note that while each file might contain several signals, all
        # files are required to contain the same number of signals. We
        # therefore use the first file to determine the number of signals.
        for i, filename in enumerate(filenames):
            obj = load_single_file(filename, lazy=lazy, **kwds)

            if i == 0:
                # First iteration, determine number of signals, if several:
                n = len(obj) if isinstance(obj, (list, tuple)) else 1

                # Initialize signal 2D list:
                signals = [[] for j in range(n)]
            else:
                # Check that number of signals per file doesn't change
                # for other files:
                if isinstance(obj, (list, tuple)):
                    if n != len(obj):
                        raise ValueError(
                            "The number of sub-signals per file does not match:\n" +
                            (f_error_fmt % (1, n, filenames[0])) +
                            (f_error_fmt % (i, len(obj), filename))
                        )
                elif n != 1:
                    raise ValueError(
                        "The number of sub-signals per file does not match:\n" +
                        (f_error_fmt % (1, n, filenames[0])) +
                        (f_error_fmt % (i, len(obj), filename))
                    )

            # Append loaded signals to 2D list:
            if n == 1:
                signals[0].append(obj)
            elif n > 1:
                for j in range(n):
                    signals[j].append(obj[j])

        # Next, merge the signals in the `stack_axis` direction:
        # When each file had N signals, we create N stacks!
        objects = []
        for i in range(n):
            signal = signals[i]   # Sublist, with len = len(filenames)
            signal = stack_method(
                signal,
                axis=stack_axis,
                new_axis_name=new_axis_name,
                lazy=lazy,
            )
            signal.metadata.General.title = Path(filenames[0]).parent.stem
            _logger.info('Individual files loaded correctly')
            _logger.info(signal._summary())
            objects.append(signal)
    else:
        # No stack, so we simply load all signals in all files separately
        objects = [load_single_file(filename, lazy=lazy, **kwds)
                   for filename in filenames]

    if len(objects) == 1:
        objects = objects[0]

    return objects
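
The `_escape_square_brackets` helper used above is not shown in this
snippet. A plausible minimal implementation (an assumption, not
necessarily HyperSpy's exact code) wraps each bracket in its own
one-character class, so that `glob.glob()` matches it literally instead
of treating it as the start or end of a character class:

import re

def _escape_square_brackets(pattern):
    # '[' -> '[[]' and ']' -> '[]]': each becomes a character class
    # containing only the bracket itself, which glob/fnmatch then
    # match literally.
    return re.sub(r"[\[\]]", lambda m: "[" + m.group(0) + "]", pattern)

assert _escape_square_brackets("file[*].dm3") == "file[[]*[]].dm3"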
Example #4
def file_reader(filename, *args, **kwds):
    """Load data into CLSEMSpectrum lumispy object.
    Parameters
    ----------
    filename : str, None
        The HYPCard.bin filepath for the file to be loaded, created by AttoLight software.
        Please, state the directory.
        If None, a pop-up window will be loaded.
    lazy : bool
        If True the file will be opened lazily, i.e. without actually reading
        the data from the disk until required. Allows datasets much larger than
        available memory to be loaded.
    metadata_file_name: str
        By default, AttoLight software names it 'MicroscopeStatus.txt'.
        Otherwise, specify.
    attolight_acquisition_system : str
        Specify which acquisition system the HYPCard was taken with, from the
        attolight_systems dictionary file. By default, it assumes it is
        the Cambridge Attolight SEM system.
    Returns
    -------
    s : Signal
        A CLSEMSpectrum lumispy object containing loaded data.
    """
    # Read the kwds, else return their default
    lazy = kwds.pop('lazy', False)
    attolight_acquisition_system = kwds.pop('attolight_acquisition_system', 'cambridge_uk_attolight')
    metadata_file_name = kwds.pop('metadata_file_name',
                                  attolight_systems[attolight_acquisition_system]['metadata_file_name'])

    def _get_metadata(filename, md_file_name, attolight_acquisition_system):
        """Import the metadata from the MicroscopeStatus.txt file.
        Returns binning, nx, ny, FOV, grating and central_wavelength.
        Parameters
        ----------
        filename : str
            The absolute folder path where the md_file_name exists.
        """
        path = os.path.join(filename, md_file_name)
        with open(path, encoding='windows-1252') as status:
            for line in status:
                if 'Horizontal Binning:' in line:
                    binning = int(line[line.find(':') + 1:-1])  # binning = binning status
                if 'Resolution_X' in line:
                    nx = int(line[line.find(':') + 1:-7])
                    # nx = pixel in x-direction
                if 'Resolution_Y' in line:
                    ny = int(line[line.find(':') + 1:-7])
                    # ny = pixel in y-direction
                if 'Real Magnification' in line:
                    FOV = float(line[line.find(':') + 1:-1])
                if 'Grating - Groove Density:' in line:
                    grating = float(line[line.find(':') + 1:-6])
                if 'Central wavelength:' in line:
                    central_wavelength_nm = float(line[line.find(':') + 1:-4])
                if 'Channels:' in line:
                    total_channels = int(line[line.find(':') + 1:-1])
                if 'Signal Amplification:' in line:
                    amplification = int(line[line.find(':x') + 2:-1])
                if 'Readout Rate (horizontal pixel shift):' in line:
                    readout_rate_khz = int(line[line.find(':') + 1:-4])

                if 'Exposure Time:' in line:
                    exposure_time_ccd_s = float(line[line.find(':') + 1:-3])
                if 'HYP Dwelltime:' in line:
                    dwell_time_scan_s = float(line[line.find(':') + 1:-4]) / 1000
                if 'Beam Energy:' in line:
                    beam_acc_voltage_kv = float(line[line.find(':') + 1:-3]) / 1000
                if 'Gun Lens:' in line:
                    gun_lens_amps = float(line[line.find(':') + 1:-3])
                if 'Objective Lens:' in line:
                    obj_lens_amps = float(line[line.find(':') + 1:-3])
                if 'Aperture:' in line:
                    aperture_um = float(line[line.find(':') + 1:-4])
                if 'Aperture Chamber Pressure:' in line:
                    chamber_pressure_torr = float(line[line.find(':') + 1:-6])
                if 'Real Magnification:' in line:
                    real_magnification = float(line[line.find(':') + 1:-3])

        # Correct channels to the actual value, accounting for binning. Get
        # the channels on the detector used (if channels are not defined,
        # assume 1024)
        try:
            total_channels
        except NameError:
            total_channels = attolight_systems[
                attolight_acquisition_system]['channels']
        channels = total_channels // binning

        # Return metadata
        return (binning, nx, ny, FOV, grating, central_wavelength_nm,
                channels, amplification, readout_rate_khz,
                exposure_time_ccd_s, dwell_time_scan_s, beam_acc_voltage_kv,
                gun_lens_amps, obj_lens_amps, aperture_um,
                chamber_pressure_torr, real_magnification)

    def _store_metadata(cl_object, hypcard_folder, md_file_name,
                        attolight_acquisition_system):
        """Store metadata in the CLSpectrum object metadata property.

        Stores binning, nx, ny, FOV, grating, central_wavelength and the
        other acquisition parameters.

        Parameters
        ----------
        cl_object : CLSpectrum object
            The CLSpectrum object where to save the metadata.
        hypcard_folder : str
            The absolute folder path where the metadata_file_name exists.
        """
        # Get metadata
        (binning, nx, ny, FOV, grating, central_wavelength_nm, channels,
         amplification, readout_rate_khz, exposure_time_ccd_s,
         dwell_time_scan_s, beam_acc_voltage_kv, gun_lens_amps,
         obj_lens_amps, aperture_um, chamber_pressure_torr,
         real_magnification) = _get_metadata(
            hypcard_folder, md_file_name, attolight_acquisition_system)

        # Store metadata
        cl_object.metadata.set_item("Acquisition_instrument.Spectrometer.grating",
                                    grating)
        cl_object.metadata.set_item("Acquisition_instrument.Spectrometer.central_wavelength_nm",
                                    central_wavelength_nm)
        cl_object.metadata.set_item("Acquisition_instrument.SEM.resolution_x",
                                    nx)
        cl_object.metadata.set_item("Acquisition_instrument.SEM.resolution_y",
                                    ny)
        cl_object.metadata.set_item("Acquisition_instrument.SEM.FOV", FOV)
        cl_object.metadata.set_item("Acquisition_instrument.CCD.binning",
                                    binning)
        cl_object.metadata.set_item("Acquisition_instrument.CCD.channels",
                                    channels)
        cl_object.metadata.set_item("Acquisition_instrument.acquisition_system",
                                    attolight_acquisition_system)
        cl_object.metadata.set_item("Acquisition_instrument.CCD.amplification", amplification)
        cl_object.metadata.set_item("Acquisition_instrument.CCD.readout_rate_khz", readout_rate_khz)
        cl_object.metadata.set_item("Acquisition_instrument.CCD.exposure_time_s", exposure_time_ccd_s)
        cl_object.metadata.set_item("Acquisition_instrument.SEM.dwell_time_scan_s", dwell_time_scan_s)
        cl_object.metadata.set_item("Acquisition_instrument.SEM.beam_acc_voltage_kv", beam_acc_voltage_kv)
        cl_object.metadata.set_item("Acquisition_instrument.SEM.gun_lens_amps", gun_lens_amps)
        cl_object.metadata.set_item("Acquisition_instrument.SEM.obj_lens_amps", obj_lens_amps)
        cl_object.metadata.set_item("Acquisition_instrument.SEM.aperture_um", aperture_um)
        cl_object.metadata.set_item("Acquisition_instrument.SEM.chamber_pressure_torr", chamber_pressure_torr)
        cl_object.metadata.set_item("Acquisition_instrument.SEM.real_magnification", real_magnification)
        cl_object.metadata.set_item("General.folder_path", hypcard_folder)

        return cl_object

    def _calibrate_signal_axis_wavelength(cl_object):
        """Calibrate the signal axis in wavelength (nm).

        Based on the Attolight software export function. Needs to be
        automated. Two calibrated sets show the trend:
        centre at 650 nm: spec_start = 377.436, spec_end = 925.122;
        centre at 750 nm: spec_start = 478.2, spec_end = 1024.2472.
        The start and end wavelengths are estimated here as
        [central_wavelength - 273, central_wavelength + 273].

        Returns
        -------
        cl_object : CLSpectrum object
            The object with its signal axis calibrated in nm.
        """
        # Get relevant parameters from metadata
        central_wavelength = cl_object.metadata.Acquisition_instrument.Spectrometer.central_wavelength_nm

        # Estimate start and end wavelengths
        spectra_offset_array = [central_wavelength - 273, central_wavelength + 273]

        # Apply calibration
        dx = cl_object.axes_manager.signal_axes[0]
        dx.name = 'Wavelength'
        dx.scale = (spectra_offset_array[1] - spectra_offset_array[0]) \
                   / cl_object.axes_manager.signal_size
        dx.offset = spectra_offset_array[0]
        dx.units = '$nm$'

        return cl_object

    def _calibrate_navigation_axis(cl_object):
        # Edit the navigation axes
        x = cl_object.axes_manager.navigation_axes[0]
        y = cl_object.axes_manager.navigation_axes[1]

        # Get relevant parameters from metadata and acquisition_systems
        # parameters
        attolight_acquisition_system \
            = cl_object.metadata.Acquisition_instrument.acquisition_system
        cal_factor_x_axis \
            = attolight_systems[attolight_acquisition_system]['cal_factor_x_axis']
        FOV = cl_object.metadata.Acquisition_instrument.SEM.FOV
        nx = cl_object.metadata.Acquisition_instrument.SEM.resolution_x

        # Get the calibrated scanning axis scale from the acquisition_systems
        # dictionary
        calax = cal_factor_x_axis / (FOV * nx)
        x.name = 'x'
        x.scale = calax * 1000
        # changes micrometer to nm, value for the size of 1 pixel
        x.units = 'nm'
        y.name = 'y'
        y.scale = calax * 1000
        # changes micrometer to nm, value for the size of 1 pixel
        y.units = 'nm'

        return cl_object

    def _save_background_metadata(cl_object, hypcard_folder,
                                  background_file_name='Background*.txt'):
        """Save the background in the metadata.

        Based on the Attolight background save function. If a background
        file is found in the folder, it is stored in the metadata.
        """
        # Get the absolute path
        path = os.path.join(hypcard_folder, background_file_name)

        # Try to load the file, if it exists.
        try:
            # Find the exact filename, using the * wildcard
            path = glob.glob(path)[0]
            # Load the file as a numpy array
            bkg = np.loadtxt(path)
            # The bkg file contains [wavelength, background]
            cl_object.metadata.set_item("Signal.background", bkg)
        except (IndexError, OSError, ValueError):
            # No background file found (IndexError) or the file could
            # not be read/parsed (OSError/ValueError)
            cl_object.metadata.set_item("Signal.background", None)
        return cl_object


    #################################

    # Loading function starts here

    # Check if a path has been given
    if filename is None:
        from hyperspy.signal_tools import Load
        from hyperspy.ui_registry import get_gui
        load_ui = Load()
        get_gui(load_ui, toolkey="hyperspy.load")
        if load_ui.filename:
            filename = load_ui.filename
            lazy = load_ui.lazy
        if filename is None:
            raise ValueError("No file provided to reader")

    # Import folder name
    hypcard_folder = os.path.split(os.path.abspath(filename))[0]

    # Import metadata (metadata_file_name was already resolved from the
    # keyword arguments above)

    (binning, nx, ny, FOV, grating, central_wavelength_nm, channels,
     amplification, readout_rate_khz, exposure_time_ccd_s,
     dwell_time_scan_s, beam_acc_voltage_kv, gun_lens_amps, obj_lens_amps,
     aperture_um, chamber_pressure_torr, real_magnification) = \
        _get_metadata(hypcard_folder, metadata_file_name,
                      attolight_acquisition_system)

    # Load file
    with open(filename, 'rb') as f:
        data = np.fromfile(f, dtype=[('bar', '<i4')], count=channels * nx * ny)
        array = np.reshape(data, [channels, nx, ny], order='F')

    # Swap x-y axes to get the right xy orientation
    sarray = np.swapaxes(array, 1, 2)

    # Make the CLSEMSpectrum object
    # Load the transposed data
    s = Signal2D(sarray).T
    s.change_dtype('float')
    s = CLSEMSpectrum(s)

    # Add all parameters as metadata
    _store_metadata(s, hypcard_folder, metadata_file_name, attolight_acquisition_system)

    # Add name as metadata
    experiment_name = os.path.basename(hypcard_folder)
    if attolight_acquisition_system == 'cambridge_uk_attolight':
        # CAUTION: specifically delimited by the Attolight default naming
        # system
        if len(experiment_name) > 37:
            name = experiment_name[:-37]
        else:
            name = experiment_name
    else:
        name = experiment_name
    s.metadata.General.title = name

    # Calibrate navigation axis
    _calibrate_navigation_axis(s)

    # Calibrate signal axis
    _calibrate_signal_axis_wavelength(s)

    # Save background file if existent (after calibrating the signal axis)
    _save_background_metadata(s, hypcard_folder)

    return s
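
The value extraction in `_get_metadata` relies on slicing each status
line between the colon and a fixed-width unit suffix. A minimal sketch of
that idiom on a synthetic line (the line format is inferred from the
reader above, not taken from AttoLight documentation):

# A synthetic MicroscopeStatus.txt line, as assumed by the reader above:
line = 'Central wavelength: 650.00 nm\n'
# Slice from just after the colon up to the last four characters
# (' nm' plus the trailing newline), then convert to float.
central_wavelength_nm = float(line[line.find(':') + 1:-4])
print(central_wavelength_nm)  # 650.0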