Example #1
    def _get_existing_datasets(self, index=-1):
        """
        Extracts references to the existing datasets that hold the results.

        :param index: which existing result group to use; -1 (default) selects
            the most recent one
        :type index: int
        """

        if not self.override:

            self.h5_results_grp = find_dataset(self.h5_main.parent,
                                               'Inst_Freq')[index].parent
            self.h5_new_spec_vals = self.h5_results_grp['Spectroscopic_Values']
            self.h5_tfp = self.h5_results_grp['tfp']
            self.h5_shift = self.h5_results_grp['shift']
            self.h5_if = self.h5_results_grp['Inst_Freq']
            self.h5_amp = self.h5_results_grp['Amplitude']
            self.h5_phase = self.h5_results_grp['Phase']
            try:
                self.h5_pwrdis = self.h5_results_grp['PowerDissipation']
                self.h5_tfp_cal = self.h5_results_grp['tfp_cal']
            except KeyError:
                # Older result groups may not contain these optional datasets
                pass
        return
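
The find_dataset(...)[index].parent idiom above is how the method jumps from a matching dataset to the group that holds the rest of the results. A minimal standalone sketch of the same pattern (the file name is hypothetical; the group layout is assumed to match the FF-trEFM results used above):

import h5py
from pyUSID.io.hdf_utils import find_dataset

with h5py.File('ffta_results.h5', 'r') as h5_f:  # hypothetical file
    # index=-1 selects the most recently written 'Inst_Freq' dataset
    h5_results_grp = find_dataset(h5_f, 'Inst_Freq')[-1].parent
    tfp = h5_results_grp['tfp'][()]      # time-to-first-peak map
    shift = h5_results_grp['shift'][()]  # frequency-shift map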
Example #2
    def test_legal(self):
        with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
            h5_group = h5_f['/Raw_Measurement/']
            expected_dsets = [
                h5_f['/Raw_Measurement/Spectroscopic_Indices'],
                h5_f['/Raw_Measurement/source_main-Fitter_000/Spectroscopic_Indices'],
                h5_f['/Raw_Measurement/source_main-Fitter_001/Spectroscopic_Indices'],
            ]
            ret_val = hdf_utils.find_dataset(h5_group, 'Spectroscopic_Indices')
            self.assertEqual(set(ret_val), set(expected_dsets))
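
The assertion relies on find_dataset descending recursively from the given group and returning every dataset whose name matches. A hedged sketch of that lookup on its own, reusing the same test fixture and imports as the test above:

with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
    for dset in hdf_utils.find_dataset(h5_f['/Raw_Measurement/'],
                                       'Spectroscopic_Indices'):
        print(dset.name)  # the top-level match plus both nested Fitter copies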
Example #3
    def is_valid_file(file_path):
        """
        Checks whether the provided file can be read by this translator

        Parameters
        ----------
        file_path : str
            Path to raw data file

        Returns
        -------
        file_path : str or None
            Absolute path to the file, which will be accepted by the
            translate() function if this translator is indeed capable of
            translating it. Otherwise, None.
        """
        if not isinstance(file_path, str):
            raise TypeError('file_path should be a string object')
        if not os.path.isfile(file_path):
            return None

        file_path = os.path.abspath(file_path)
        extension = os.path.splitext(file_path)[1][1:]
        if extension not in ['h5', 'hdf5']:
            return None
        try:
            # Open read-only; a validity check should not need write access
            h5_f = h5py.File(file_path, 'r')
        except (IOError, OSError):
            return None

        # TODO: Make this check a lot stronger. Currently brittle
        if 'DAQ_software_version_name' not in h5_f.attrs.keys():
            h5_f.close()
            return None

        if len(find_dataset(h5_f, 'Raw_Data')) < 1:
            h5_f.close()
            return None

        h5_f.close()
        return file_path
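
A hedged usage sketch (the translator class name and file path are hypothetical; since is_valid_file takes no self, it can be called on the class itself):

path = SomeBETranslator.is_valid_file('/data/be_line_0001.h5')  # hypothetical
if path is not None:
    h5_out = SomeBETranslator().translate(path)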
Example #4
def plot_svd(h5_main, savefig=False, num_plots=16, **kwargs):
    '''
    Replots the SVD results, showing the scree plot, abundance maps, and
    eigenvectors.
    If h5_main is a Dataset, it will default to the most recent SVD group
    from that Dataset.
    If h5_main is the results group, then it will plot the values for that
    group.

    :param h5_main: dataset or SVD results group to plot
    :type h5_main: USIDataset or h5py Dataset or h5py Group

    :param savefig: Saves the figures to disk with some default names
    :type savefig: bool, optional

    :param num_plots: Number of eigenvectors and abundance maps to show
    :type num_plots: int

    :param kwargs: keyword arguments passed on to the plotting functions
    :type kwargs: dict, optional
    '''

    if isinstance(h5_main, h5py.Group):

        _U = find_dataset(h5_main, 'U')[-1]
        _V = find_dataset(h5_main, 'V')[-1]
        units = 'arbitrary (a.u.)'
        h5_spec_vals = np.arange(_V.shape[1])
        h5_svd_group = _U.parent

    else:

        h5_svd_group = find_results_groups(h5_main, 'SVD')[-1]
        units = h5_main.attrs['quantity']
        h5_spec_vals = h5_main.get_spec_values('Time')

    h5_U = h5_svd_group['U']
    h5_V = h5_svd_group['V']
    h5_S = h5_svd_group['S']

    _U = USIDataset(h5_U)
    [num_rows, num_cols] = _U.pos_dim_sizes

    abun_maps = np.reshape(h5_U[:, :num_plots], (num_rows, num_cols, -1))
    eigen_vecs = h5_V[:num_plots, :]

    # Cumulative fraction of the total variance captured by each component
    scree_sum = np.cumsum(h5_S[()]) / np.sum(h5_S[()])

    plt.figure()
    plt.plot(scree_sum, 'bo')
    plt.title('Cumulative Variance')
    plt.xlabel('Total Components')
    plt.ylabel('Total variance ratio (a.u.)')

    if savefig:
        plt.savefig('Cumulative_variance_plot.png')

    fig_scree, axes = plot_utils.plot_scree(h5_S, title='Scree plot')
    fig_scree.tight_layout()

    if savefig:
        plt.savefig('Scree_plot.png')

    fig_abun, axes = plot_utils.plot_map_stack(abun_maps,
                                               num_comps=num_plots,
                                               title='SVD Abundance Maps',
                                               color_bar_mode='single',
                                               cmap='inferno',
                                               reverse_dims=True,
                                               fig_mult=(3.5, 3.5),
                                               facecolor='white',
                                               **kwargs)
    fig_abun.tight_layout()
    if savefig:
        plt.savefig('Abundance_maps.png')

    fig_eigvec, axes = plot_utils.plot_curves(h5_spec_vals * 1e3,
                                              eigen_vecs,
                                              use_rainbow_plots=False,
                                              x_label='Time (ms)',
                                              y_label=units,
                                              num_plots=num_plots,
                                              subtitle_prefix='Component',
                                              title='SVD Eigenvectors',
                                              evenly_spaced=False,
                                              **kwargs)
    fig_eigvec.tight_layout()
    if savefig:
        plt.savefig('Eigenvectors.png')

    return
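
A hedged usage sketch of plot_svd (the file name and dataset path are hypothetical; assumes SVD results already exist for the chosen dataset):

import h5py
from pyUSID import USIDataset

with h5py.File('scan_0001.h5', 'r') as h5_f:  # hypothetical file
    h5_main = USIDataset(h5_f['Measurement_000/Channel_000/Raw_Data'])
    plot_svd(h5_main, savefig=False, num_plots=9)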
Example #5
    def translate(self, h5_path, force_patch=False, **kwargs):
        """
        Add the needed references and attributes to the h5 file that are not
        created by the LabView data acquisition program.

        Parameters
        ----------
        h5_path : str
            path to the h5 file
        force_patch : bool, optional
            If True, apply the patch even if the file appears to have been
            patched already.  Default False.

        Returns
        -------
        h5_file : h5py.File
            patched hdf5 file

        """
        # Open the file and check if a patch is needed
        h5_file = h5py.File(os.path.abspath(h5_path), 'r+')
        if h5_file.attrs.get('translator') is not None and not force_patch:
            print('File is already Pycroscopy ready.')
            return h5_file
        '''
        Get the list of all Raw_Data Datasets
        Loop over the list and update the needed attributes
        '''
        raw_list = find_dataset(h5_file, 'Raw_Data')
        for h5_raw in raw_list:
            if 'quantity' not in h5_raw.attrs:
                h5_raw.attrs['quantity'] = 'quantity'
            if 'units' not in h5_raw.attrs:
                h5_raw.attrs['units'] = 'a.u.'

            # Grab the channel and measurement group of the data to check some needed attributes
            h5_chan = h5_raw.parent
            try:
                c_type = get_attr(h5_chan, 'channel_type')

            except KeyError:
                warn_str = "'channel_type' was not found as an attribute of {}.\n".format(
                    h5_chan.name)
                warn_str += "If this is BEPS or BELine data from the LabView acquisition software, " + \
                            "please run the following piece of code.  Afterwards, run this function again.\n" + \
                            "CODE: " \
                            "hdf.file['{}'].attrs['channel_type'] = 'BE'".format(h5_chan.name)
                warn(warn_str)
                return h5_file

            except:
                raise

            if c_type != 'BE':
                continue

            h5_meas = h5_chan.parent
            h5_meas.attrs['num_UDVS_steps'] = h5_meas.attrs['num_steps']

            # Get the object handles for the Indices and Values datasets
            h5_pos_inds = h5_chan['Position_Indices']
            h5_pos_vals = h5_chan['Position_Values']
            h5_spec_inds = h5_chan['Spectroscopic_Indices']
            h5_spec_vals = h5_chan['Spectroscopic_Values']

            # Make sure we have correct spectroscopic indices for the given values
            ds_spec_inds = create_spec_inds_from_vals(h5_spec_vals[()])
            if not np.allclose(ds_spec_inds, h5_spec_inds[()]):
                h5_spec_inds[:, :] = ds_spec_inds[:, :]
                h5_file.flush()

            # Get the labels and units for the Spectroscopic datasets
            h5_spec_labels = h5_spec_inds.attrs['labels']
            inds_and_vals = [
                h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals
            ]
            for dset in inds_and_vals:
                spec_labels = dset.attrs['labels']
                try:
                    spec_units = dset.attrs['units']

                    if len(spec_units) != len(spec_labels):
                        raise KeyError

                except KeyError:
                    # 'units' attribute missing or mismatched in length;
                    # write empty unit strings instead
                    dset.attrs['units'] = ['' for _ in spec_labels]

            for ilabel, label in enumerate(h5_spec_labels):
                label_slice = (slice(ilabel, ilabel + 1), slice(None))
                if label == '':
                    label = 'Step'
                h5_spec_inds.attrs[label] = h5_spec_inds.regionref[label_slice]
                h5_spec_vals.attrs[label] = h5_spec_vals.regionref[label_slice]

            # Link the references to the Indices and Values datasets to the Raw_Data
            link_as_main(h5_raw, h5_pos_inds, h5_pos_vals, h5_spec_inds,
                         h5_spec_vals)

            # Also link the Bin_Frequencies and Bin_Wfm_Type datasets
            h5_freqs = h5_chan['Bin_Frequencies']
            aux_dset_names = ['Bin_Frequencies']
            aux_dset_refs = [h5_freqs.ref]
            check_and_link_ancillary(h5_raw,
                                     aux_dset_names,
                                     anc_refs=aux_dset_refs)
            '''
            Get all SHO_Fit groups for the Raw_Data and loop over them
            Get the Guess and Spectroscopic Datasets for each SHO_Fit group
            '''
            sho_list = find_results_groups(h5_raw, 'SHO_Fit')
            for h5_sho in sho_list:
                h5_sho_guess = h5_sho['Guess']
                h5_sho_spec_inds = h5_sho['Spectroscopic_Indices']
                h5_sho_spec_vals = h5_sho['Spectroscopic_Values']

                # Make sure we have correct spectroscopic indices for the given values
                ds_sho_spec_inds = create_spec_inds_from_vals(
                    h5_sho_spec_inds[()])
                if not np.allclose(ds_sho_spec_inds, h5_sho_spec_inds[()]):
                    h5_sho_spec_inds[:, :] = ds_sho_spec_inds[:, :]

                # Get the labels and units for the Spectroscopic datasets
                h5_sho_spec_labels = get_attr(h5_sho_spec_inds, 'labels')
                link_as_main(h5_sho_guess, h5_pos_inds, h5_pos_vals,
                             h5_sho_spec_inds, h5_sho_spec_vals)
                sho_inds_and_vals = [h5_sho_spec_inds, h5_sho_spec_vals]

                for dset in sho_inds_and_vals:
                    spec_labels = get_attr(dset, 'labels')
                    try:
                        spec_units = get_attr(dset, 'units')

                        if len(spec_units) != len(spec_labels):
                            raise KeyError

                    except KeyError:
                        spec_units = [''.encode('utf-8') for _ in spec_labels]
                        dset.attrs['units'] = spec_units


                # Make region references in the Spectroscopic datasets for each label
                for ilabel, label in enumerate(h5_sho_spec_labels):
                    label_slice = (slice(ilabel, ilabel + 1), slice(None))
                    if label == '':
                        label = 'Step'.encode('utf-8')
                    h5_sho_spec_inds.attrs[label] = h5_sho_spec_inds.regionref[
                        label_slice]
                    h5_sho_spec_vals.attrs[label] = h5_sho_spec_vals.regionref[
                        label_slice]

            h5_file.flush()

        h5_file.attrs['translator'] = 'V3patcher'.encode('utf-8')

        return h5_file
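
A hedged usage sketch, assuming this translate() belongs to pycroscopy's LabViewH5Patcher (the import location and file name are assumptions):

from pycroscopy.io.translators import LabViewH5Patcher  # assumed location

patcher = LabViewH5Patcher()
h5_file = patcher.translate('BELine_0004.h5')  # hypothetical file
print(h5_file.attrs['translator'])
h5_file.close()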
Example #6
    def translate(self, h5_path, force_patch=False, **kwargs):
        """
        Add the needed references and attributes to the h5 file that are not
        created by the LabView data acquisition program.

        Parameters
        ----------
        h5_path : str
            path to the h5 file
        force_patch : bool, optional
            If True, apply the patch even if the file appears to have been
            patched already.  Default False.

        Returns
        -------
        h5_path : str
            path to the patched h5 file

        """
        # Open the file and check if a patch is needed
        h5_file = h5py.File(os.path.abspath(h5_path), 'r+')
        if h5_file.attrs.get('translator') is not None and not force_patch:
            print('File is already Pycroscopy ready.')
            h5_file.close()
            return h5_path
        '''
        Get the list of all Raw_Data Datasets
        Loop over the list and update the needed attributes
        '''
        raw_list = find_dataset(h5_file, 'Raw_Data')
        for h5_raw in raw_list:
            if 'quantity' not in h5_raw.attrs:
                h5_raw.attrs['quantity'] = 'quantity'
            if 'units' not in h5_raw.attrs:
                h5_raw.attrs['units'] = 'a.u.'

            # Grab the channel and measurement group of the data to check some needed attributes
            h5_chan = h5_raw.parent
            try:
                c_type = get_attr(h5_chan, 'channel_type')

            except KeyError:
                warn_str = "'channel_type' was not found as an attribute of {}.\n".format(
                    h5_chan.name)
                warn_str += "If this is BEPS or BELine data from the LabView acquisition software, " + \
                            "please run the following piece of code.  Afterwards, run this function again.\n" + \
                            "CODE: " \
                            "hdf.file['{}'].attrs['channel_type'] = 'BE'".format(h5_chan.name)
                warn(warn_str)
                h5_file.close()
                return h5_path

            except:
                raise

            if c_type != 'BE':
                continue

            h5_meas = h5_chan.parent
            h5_meas.attrs['num_UDVS_steps'] = h5_meas.attrs['num_steps']

            # Get the object handles for the Indices and Values datasets
            h5_pos_inds = h5_chan['Position_Indices']
            h5_pos_vals = h5_chan['Position_Values']
            h5_spec_inds = h5_chan['Spectroscopic_Indices']
            h5_spec_vals = h5_chan['Spectroscopic_Values']

            # Make sure we have correct spectroscopic indices for the given values
            ds_spec_inds = create_spec_inds_from_vals(h5_spec_vals[()])
            if not np.allclose(ds_spec_inds, h5_spec_inds[()]):
                h5_spec_inds[:, :] = ds_spec_inds[:, :]
                h5_file.flush()

            # Get the labels and units for the Spectroscopic datasets
            h5_spec_labels = h5_spec_inds.attrs['labels']
            inds_and_vals = [
                h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals
            ]
            for dset in inds_and_vals:
                spec_labels = dset.attrs['labels']
                try:
                    spec_units = dset.attrs['units']

                    if len(spec_units) != len(spec_labels):
                        raise KeyError

                except KeyError:
                    # 'units' attribute missing or mismatched in length;
                    # write empty unit strings instead
                    dset.attrs['units'] = ['' for _ in spec_labels]
            """"
            In early versions, too many spectroscopic dimension labels and 
            units were listed compared to the number of rows. Remove here:
            """
            remove_non_exist_spec_dim_labs(h5_spec_inds,
                                           h5_spec_vals,
                                           h5_meas,
                                           verbose=False)
            """
            Add back some standard metadata to be consistent with older
            BE data
            """
            missing_metadata = dict()
            if 'File_file_name' not in h5_meas.attrs.keys():
                missing_metadata['File_file_name'] = os.path.split(
                    h5_raw.file.filename)[-1].replace('.h5', '')
            if 'File_date_and_time' not in h5_meas.attrs.keys():
                try:
                    date_str = get_attr(h5_raw.file, 'date_string')
                    time_str = get_attr(h5_raw.file, 'time_string')
                    full_str = date_str.strip() + ' ' + time_str.strip()
                    """
                    convert:
                        date_string : 2018-12-05
                        time_string : 3:41:45 PM
                    to: 
                        File_date_and_time: 19-Jun-2009 18:44:56
                    """
                    try:
                        dt_obj = datetime.datetime.strptime(
                            full_str, "%Y-%m-%d %I:%M:%S %p")
                        missing_metadata[
                            'File_date_and_time'] = dt_obj.strftime(
                                '%d-%b-%Y %H:%M:%S')
                    except ValueError:
                        pass
                except KeyError:
                    pass
            # Now write to measurement group:
            if len(missing_metadata) > 0:
                write_simple_attrs(h5_meas, missing_metadata)

            # Link the references to the Indices and Values datasets to the Raw_Data
            link_as_main(h5_raw, h5_pos_inds, h5_pos_vals, h5_spec_inds,
                         h5_spec_vals)

            # Also link the Bin_Frequencies and Bin_Wfm_Type datasets
            h5_freqs = h5_chan['Bin_Frequencies']
            aux_dset_names = ['Bin_Frequencies']
            aux_dset_refs = [h5_freqs.ref]
            check_and_link_ancillary(h5_raw,
                                     aux_dset_names,
                                     anc_refs=aux_dset_refs)
            '''
            Get all SHO_Fit groups for the Raw_Data and loop over them
            Get the Guess and Spectroscopic Datasets for each SHO_Fit group
            '''
            sho_list = find_results_groups(h5_raw, 'SHO_Fit')
            for h5_sho in sho_list:
                h5_sho_guess = h5_sho['Guess']
                h5_sho_spec_inds = h5_sho['Spectroscopic_Indices']
                h5_sho_spec_vals = h5_sho['Spectroscopic_Values']

                # Make sure we have correct spectroscopic indices for the given values
                ds_sho_spec_inds = create_spec_inds_from_vals(
                    h5_sho_spec_inds[()])
                if not np.allclose(ds_sho_spec_inds, h5_sho_spec_inds[()]):
                    h5_sho_spec_inds[:, :] = ds_sho_spec_inds[:, :]

                # Get the labels and units for the Spectroscopic datasets
                h5_sho_spec_labels = get_attr(h5_sho_spec_inds, 'labels')
                link_as_main(h5_sho_guess, h5_pos_inds, h5_pos_vals,
                             h5_sho_spec_inds, h5_sho_spec_vals)
                sho_inds_and_vals = [h5_sho_spec_inds, h5_sho_spec_vals]

                for dset in sho_inds_and_vals:
                    spec_labels = get_attr(dset, 'labels')
                    try:
                        spec_units = get_attr(dset, 'units')

                        if len(spec_units) != len(spec_labels):
                            raise KeyError

                    except KeyError:
                        spec_units = [''.encode('utf-8') for _ in spec_labels]
                        dset.attrs['units'] = spec_units


            h5_file.flush()

        h5_file.attrs['translator'] = 'V3patcher'.encode('utf-8')

        h5_file.close()

        return h5_path
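
Because this variant closes the file and returns its path, a caller reopens the patched file itself (a sketch, names hypothetical as above):

h5_path = patcher.translate('BELine_0004.h5')
with h5py.File(h5_path, 'r') as h5_f:
    print(h5_f.attrs['translator'])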
Example #7
    def translate(self, h5_path, force_patch=False, **kwargs):
        """
        Add the needed references and attributes to the h5 file that are not
        created by the LabView data acquisition program.

        Parameters
        ----------
        h5_path : str
            path to the h5 file
        force_patch : bool, optional
            If True, apply the patch even if the file appears to have been
            patched already.  Default False.

        Returns
        -------
        h5_file : h5py.File
            patched hdf5 file

        """
        # Open the file and check if a patch is needed
        h5_file = h5py.File(os.path.abspath(h5_path), 'r+')
        if h5_file.attrs.get('translator') is not None and not force_patch:
            print('File is already Pycroscopy ready.')
            return h5_file

        '''
        Get the list of all Raw_Data Datasets
        Loop over the list and update the needed attributes
        '''
        raw_list = find_dataset(h5_file, 'Raw_Data')
        for h5_raw in raw_list:
            if 'quantity' not in h5_raw.attrs:
                h5_raw.attrs['quantity'] = 'quantity'
            if 'units' not in h5_raw.attrs:
                h5_raw.attrs['units'] = 'a.u.'

            # Grab the channel and measurement group of the data to check some needed attributes
            h5_chan = h5_raw.parent
            try:
                c_type = get_attr(h5_chan, 'channel_type')

            except KeyError:
                warn_str = "'channel_type' was not found as an attribute of {}.\n".format(h5_chan.name)
                warn_str += "If this is BEPS or BELine data from the LabView acquisition software, " + \
                            "please run the following piece of code.  Afterwards, run this function again.\n" + \
                            "CODE: " \
                            "hdf.file['{}'].attrs['channel_type'] = 'BE'".format(h5_chan.name)
                warn(warn_str)
                return h5_file

            except:
                raise

            if c_type != 'BE':
                continue

            h5_meas = h5_chan.parent
            h5_meas.attrs['num_UDVS_steps'] = h5_meas.attrs['num_steps']

            # Get the object handles for the Indices and Values datasets
            h5_pos_inds = h5_chan['Position_Indices']
            h5_pos_vals = h5_chan['Position_Values']
            h5_spec_inds = h5_chan['Spectroscopic_Indices']
            h5_spec_vals = h5_chan['Spectroscopic_Values']

            # Make sure we have correct spectroscopic indices for the given values
            ds_spec_inds = create_spec_inds_from_vals(h5_spec_vals[()])
            if not np.allclose(ds_spec_inds, h5_spec_inds[()]):
                h5_spec_inds[:, :] = ds_spec_inds[:, :]
                h5_file.flush()

            # Get the labels and units for the Spectroscopic datasets
            h5_spec_labels = h5_spec_inds.attrs['labels']
            inds_and_vals = [h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals]
            for dset in inds_and_vals:
                spec_labels = dset.attrs['labels']
                try:
                    spec_units = dset.attrs['units']

                    if len(spec_units) != len(spec_labels):
                        raise KeyError

                except KeyError:
                    # 'units' attribute missing or mismatched in length;
                    # write empty unit strings instead
                    dset.attrs['units'] = ['' for _ in spec_labels]

            for ilabel, label in enumerate(h5_spec_labels):
                label_slice = (slice(ilabel, ilabel + 1), slice(None))
                if label == '':
                    label = 'Step'
                h5_spec_inds.attrs[label] = h5_spec_inds.regionref[label_slice]
                h5_spec_vals.attrs[label] = h5_spec_vals.regionref[label_slice]

            # Link the references to the Indices and Values datasets to the Raw_Data
            link_as_main(h5_raw, h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals)

            # Also link the Bin_Frequencies and Bin_Wfm_Type datasets
            h5_freqs = h5_chan['Bin_Frequencies']
            aux_dset_names = ['Bin_Frequencies']
            aux_dset_refs = [h5_freqs.ref]
            check_and_link_ancillary(h5_raw, aux_dset_names, anc_refs=aux_dset_refs)

            '''
            Get all SHO_Fit groups for the Raw_Data and loop over them
            Get the Guess and Spectroscopic Datasets for each SHO_Fit group
            '''
            sho_list = find_results_groups(h5_raw, 'SHO_Fit')
            for h5_sho in sho_list:
                h5_sho_guess = h5_sho['Guess']
                h5_sho_spec_inds = h5_sho['Spectroscopic_Indices']
                h5_sho_spec_vals = h5_sho['Spectroscopic_Values']

                # Make sure we have correct spectroscopic indices for the given values
                ds_sho_spec_inds = create_spec_inds_from_vals(h5_sho_spec_inds[()])
                if not np.allclose(ds_sho_spec_inds, h5_sho_spec_inds[()]):
                    h5_sho_spec_inds[:, :] = ds_sho_spec_inds[:, :]

                # Get the labels and units for the Spectroscopic datasets
                h5_sho_spec_labels = get_attr(h5_sho_spec_inds, 'labels')
                link_as_main(h5_sho_guess, h5_pos_inds, h5_pos_vals, h5_sho_spec_inds, h5_sho_spec_vals)
                sho_inds_and_vals = [h5_sho_spec_inds, h5_sho_spec_vals]

                for dset in sho_inds_and_vals:
                    spec_labels = get_attr(dset, 'labels')
                    try:
                        spec_units = get_attr(dset, 'units')

                        if len(spec_units) != len(spec_labels):
                            raise KeyError

                    except KeyError:
                        spec_units = [''.encode('utf-8') for _ in spec_labels]
                        dset.attrs['units'] = spec_units


                # Make region references in the Spectroscopic datasets for each label
                for ilabel, label in enumerate(h5_sho_spec_labels):
                    label_slice = (slice(ilabel, ilabel + 1), slice(None))
                    if label == '':
                        label = 'Step'.encode('utf-8')
                    h5_sho_spec_inds.attrs[label] = h5_sho_spec_inds.regionref[label_slice]
                    h5_sho_spec_vals.attrs[label] = h5_sho_spec_vals.regionref[label_slice]

            h5_file.flush()

        h5_file.attrs['translator'] = 'V3patcher'.encode('utf-8')

        return h5_file
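
To re-apply the patch to a file that already carries the 'translator' attribute, pass force_patch=True (a sketch, names hypothetical as above):

h5_file = patcher.translate('BELine_0004.h5', force_patch=True)
h5_file.close()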
Example #8
def save_CSV_from_file(h5_file,
                       h5_path='/',
                       append='',
                       mirror=False,
                       offset=0):
    """
    Saves the tfp, shift, and fixed_tfp as CSV files
    
    :param h5_file: Reminder you can always type: h5_svd.file or h5_avg.file for this
    :type h5_file: H5Py file of FFtrEFM class
        
    :param h5_path: specific folder path to search for the tfp data. Usually not needed.
    :type h5_path: str, optional
        
    :param append: text to append to file name
    :type append: str, optional
    
    :param mirror:
    :type mirror: bool, optional
    
    :param offset: if calculating tFP with a fixed offset for fitting, this subtracts it out
    :type offset: float
        
    """

    h5_ff = h5_file

    if isinstance(h5_file, ffta.hdf_utils.process.FFtrEFM):
        print('Saving from FFtrEFM Class')
        h5_ff = h5_file.h5_main.file
        h5_path = h5_file.h5_results_grp.name

    elif not isinstance(h5_file, h5py.File):
        print('Saving from pyUSID object')
        h5_ff = h5_file.file

    tfp = find_dataset(h5_ff[h5_path], 'tfp')[0][()]
    shift = find_dataset(h5_ff[h5_path], 'shift')[0][()]

    try:
        tfp_cal = find_dataset(h5_ff[h5_path], 'tfp_cal')[0][()]
    except IndexError:
        # No calibrated tfp dataset in this file
        tfp_cal = None

    tfp_fixed, _ = badpixels.fix_array(tfp, threshold=2)
    tfp_fixed = np.array(tfp_fixed)

    if isinstance(tfp_cal, np.ndarray):

        tfp_cal_fixed, _ = badpixels.fix_array(tfp_cal, threshold=2)
        tfp_cal_fixed = np.array(tfp_cal_fixed)

    print(find_dataset(h5_ff[h5_path], 'shift')[0].parent.name)

    path = h5_ff.file.filename.replace('\\', '/')
    path = '/'.join(path.split('/')[:-1]) + '/'
    os.chdir(path)

    if mirror:
        np.savetxt('tfp-' + append + '.csv',
                   np.fliplr(tfp - offset).T,
                   delimiter=',')
        np.savetxt('shift-' + append + '.csv',
                   np.fliplr(shift).T,
                   delimiter=',')
        np.savetxt('tfp_fixed-' + append + '.csv',
                   np.fliplr(tfp_fixed - offset).T,
                   delimiter=',')
    else:
        np.savetxt('tfp-' + append + '.csv', (tfp - offset).T, delimiter=',')
        np.savetxt('shift-' + append + '.csv', shift.T, delimiter=',')
        np.savetxt('tfp_fixed-' + append + '.csv', (tfp_fixed - offset).T,
                   delimiter=',')

    if isinstance(tfp_cal, np.ndarray):

        if mirror:
            np.savetxt('tfp_cal-' + append + '.csv',
                       np.fliplr(tfp_cal - offset).T,
                       delimiter=',')
            np.savetxt('tfp_cal_fixed-' + append + '.csv',
                       np.fliplr(tfp_cal_fixed - offset).T,
                       delimiter=',')
        else:
            np.savetxt('tfp_cal-' + append + '.csv', (tfp_cal - offset).T,
                       delimiter=',')
            np.savetxt('tfp_cal_fixed-' + append + '.csv',
                       (tfp_cal_fixed - offset).T,
                       delimiter=',')

    return
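
A hedged usage sketch (the file name is hypothetical; note that the function chdirs into the file's folder and writes the CSVs there):

import h5py

with h5py.File('ffta_run.h5', 'r') as h5_f:  # hypothetical file
    save_CSV_from_file(h5_f, append='run1')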