Example 1
def baseplane_rmsd(error=0.0, spectrum_id=None, spin_id=None):
    """Set the peak intensity errors, as defined as the baseplane RMSD.

    @keyword error:         The peak intensity error value defined as the RMSD of the base plane
                            noise.
    @type error:            float
    @keyword spectrum_id:   The spectrum id.
    @type spectrum_id:      str
    @keyword spin_id:       The spin identification string.
    @type spin_id:          str
    """

    # Data checks.
    check_pipe()
    check_mol_res_spin_data()
    check_spectrum_id(spectrum_id)

    # The scaling by NC_proc.
    if hasattr(cdp, 'ncproc') and spectrum_id in cdp.ncproc:
        scale = 1.0 / 2**cdp.ncproc[spectrum_id]
    else:
        scale = 1.0

    # Loop over the spins.
    for spin in spin_loop(spin_id):
        # Skip deselected spins.
        if not spin.select:
            continue

        # Initialise the baseplane_rmsd data structure if necessary.
        if not hasattr(spin, 'baseplane_rmsd'):
            spin.baseplane_rmsd = {}

        # Set the error.
        spin.baseplane_rmsd[spectrum_id] = float(error) * scale
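A minimal usage sketch, assuming a data pipe with spin data and a hypothetical 'T1_1000ms' spectrum ID already set up:

    # Set a baseplane RMSD error of 1200.0 for one spectrum, restricted to the nitrogen spins.
    baseplane_rmsd(error=1200.0, spectrum_id='T1_1000ms', spin_id='@N')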
Example 2
def read_spins(file=None, dir=None, dim=1, spin_id_col=None, mol_name_col=None, res_num_col=None, res_name_col=None, spin_num_col=None, spin_name_col=None, sep=None, spin_id=None, verbose=True):
    """Read the peak intensity data.

    @keyword file:          The name of the file containing the peak intensities.
    @type file:             str
    @keyword dir:           The directory where the file is located.
    @type dir:              str
    @keyword dim:           The dimension of the peak list to associate the data with.
    @type dim:              int
    @keyword spin_id_col:   The column containing the spin ID strings (used by the generic intensity file format).  If supplied, the mol_name_col, res_name_col, res_num_col, spin_name_col, and spin_num_col arguments must be None.
    @type spin_id_col:      int or None
    @keyword mol_name_col:  The column containing the molecule name information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type mol_name_col:     int or None
    @keyword res_name_col:  The column containing the residue name information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type res_name_col:     int or None
    @keyword res_num_col:   The column containing the residue number information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type res_num_col:      int or None
    @keyword spin_name_col: The column containing the spin name information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type spin_name_col:    int or None
    @keyword spin_num_col:  The column containing the spin number information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type spin_num_col:     int or None
    @keyword sep:           The column separator which, if None, defaults to whitespace.
    @type sep:              str or None
    @keyword spin_id:       The spin ID string used to restrict data loading to a subset of all spins.  If 'auto' is provided for an NMRPipe seriesTab formatted file, the IDs are auto-generated in the form Z_Ai.
    @type spin_id:          None or str
    @keyword verbose:       A flag which if True will cause the data loaded to be printed out.
    @type verbose:          bool
    """

    # Data checks.
    check_pipe()

    # Check the file name.
    if file is None:
        raise RelaxError("The file name must be supplied.")

    # Read the peak list data.
    peak_list = read_peak_list(file=file, dir=dir, spin_id_col=spin_id_col, mol_name_col=mol_name_col, res_num_col=res_num_col, res_name_col=res_name_col, spin_num_col=spin_num_col, spin_name_col=spin_name_col, sep=sep, spin_id=spin_id)

    # Loop over the peak list assignments.
    for assign in peak_list:
        mol_name = assign.mol_names[dim-1]
        res_num = assign.res_nums[dim-1]
        res_name = assign.res_names[dim-1]
        spin_num = assign.spin_nums[dim-1]
        spin_name = assign.spin_names[dim-1]

        # Generate the spin_id.
        spin_id = generate_spin_id_unique(mol_name=mol_name, res_num=res_num, res_name=res_name, spin_name=spin_name)

        # Check if the spin already exists.
        if return_spin(spin_id=spin_id) is None:
            # Create the spin if it does not exist.
            create_spin(spin_num=spin_num, spin_name=spin_name, res_num=res_num, res_name=res_name, mol_name=mol_name)

    # Test that data exists.
    check_mol_res_spin_data()
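A hedged usage sketch (the file name and column layout are hypothetical, for a generic whitespace-separated peak list):

    # Create spins from the first dimension, using residue number, residue name and spin name columns.
    read_spins(file='peaks.list', dir='data', dim=1, res_num_col=1, res_name_col=2, spin_name_col=3)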
Example 3
def display(sep=None, mol_name_flag=True, res_num_flag=True, res_name_flag=True, spin_num_flag=True, spin_name_flag=True):
    """Display the current spin selections.

    @keyword sep:               The column separator which, if None, defaults to whitespace.
    @type sep:                  str or None
    @keyword mol_name_flag:     A flag which if True will cause the molecule name column to be written.
    @type mol_name_flag:        bool
    @keyword res_num_flag:      A flag which if True will cause the residue number column to be written.
    @type res_num_flag:         bool
    @keyword res_name_flag:     A flag which if True will cause the residue name column to be written.
    @type res_name_flag:        bool
    @keyword spin_num_flag:     A flag which if True will cause the spin number column to be written.
    @type spin_num_flag:        bool
    @keyword spin_name_flag:    A flag which if True will cause the spin name column to be written.
    @type spin_name_flag:       bool
    """

    # Test if the sequence data is loaded.
    check_mol_res_spin_data()

    # Init the data.
    mol_names = []
    res_nums = []
    res_names = []
    spin_nums = []
    spin_names = []
    selections = []

    # Spin loop.
    for spin, mol_name, res_num, res_name in spin_loop(full_info=True, skip_desel=False):
        mol_names.append(mol_name)
        res_nums.append(res_num)
        res_names.append(res_name)
        spin_nums.append(spin.num)
        spin_names.append(spin.name)
        selections.append(spin.select)

    # Remove unwanted data.
    if not mol_name_flag:
        mol_names = None
    if not res_num_flag:
        res_nums = None
    if not res_name_flag:
        res_names = None
    if not spin_num_flag:
        spin_nums = None
    if not spin_name_flag:
        spin_names = None

    # Write the data.
    write_spin_data(file=sys.stdout, sep=sep, mol_names=mol_names, res_nums=res_nums, res_names=res_names, spin_nums=spin_nums, spin_names=spin_names, data=selections, data_name="selection")
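A short usage sketch, assuming sequence data is already loaded into the current data pipe:

    # Print the selection state of all spins, comma-separated, without the molecule name column.
    display(sep=',', mol_name_flag=False)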
Example 4
def delete(spectrum_id=None):
    """Delete spectral data corresponding to the spectrum ID.

    @keyword spectrum_id:   The spectrum ID string.
    @type spectrum_id:      str
    """

    # Data checks.
    check_pipe()
    check_mol_res_spin_data()
    check_spectrum_id(spectrum_id)

    # Remove the ID.
    cdp.spectrum_ids.pop(cdp.spectrum_ids.index(spectrum_id))

    # The ncproc parameter.
    if hasattr(cdp, 'ncproc') and spectrum_id in cdp.ncproc:
        del cdp.ncproc[spectrum_id]

    # Replicates.
    if hasattr(cdp, 'replicates'):
        # Loop over the replicates.
        for i in range(len(cdp.replicates)):
            # The spectrum is replicated.
            if spectrum_id in cdp.replicates[i]:
                # Duplicate.
                if len(cdp.replicates[i]) == 2:
                    cdp.replicates.pop(i)

                # More than two spectra:
                else:
                    cdp.replicates[i].pop(cdp.replicates[i].index(spectrum_id))

                # No need to check further.
                break

    # Errors.
    if hasattr(cdp, 'sigma_I') and spectrum_id in cdp.sigma_I:
        del cdp.sigma_I[spectrum_id]
    if hasattr(cdp, 'var_I') and spectrum_id in cdp.var_I:
        del cdp.var_I[spectrum_id]

    # Loop over the spins.
    for spin in spin_loop():
        # Intensity data.
        if hasattr(spin, 'peak_intensity') and spectrum_id in spin.peak_intensity:
            del spin.peak_intensity[spectrum_id]
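A minimal sketch (the spectrum ID is hypothetical):

    # Remove all spectral data associated with one loaded spectrum ID.
    delete(spectrum_id='T1_1000ms')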
Example 5
    def overfit_deselect(self, data_check=True, verbose=True):
        """Deselect spins which have insufficient data to support minimisation.

        @keyword data_check:    A flag to signal if the presence of base data is to be checked for.
        @type data_check:       bool
        @keyword verbose:       A flag which if True will allow printouts.
        @type verbose:          bool
        """

        # Print out.
        if verbose:
            print("\nOver-fit spin deselection:")

        # Checks.
        check_mol_res_spin_data()
        check_model_setup()

        # Loop over spin data.
        deselect_flag = False
        for spin, spin_id in spin_loop(return_id=True):
            # Skip deselected spins.
            if not spin.select:
                continue

            # Check if data exists.
            if not hasattr(spin, 'peak_intensity'):
                warn(RelaxDeselectWarning(spin_id, 'missing intensity data'))
                spin.select = False
                deselect_flag = True
                continue

            # Require 3 or more data points.
            elif len(spin.peak_intensity) < 3:
                warn(RelaxDeselectWarning(spin_id, 'insufficient data, 3 or more data points are required'))
                spin.select = False
                deselect_flag = True
                continue

            # Check that the number of relaxation times is complete.
            if len(spin.peak_intensity) != len(cdp.relax_times):
                raise RelaxError("The %s peak intensity points of the spin '%s' does not match the expected number of %s (the IDs %s do not match %s)." % (len(spin.peak_intensity), spin_id, len(cdp.relax_times), sorted(spin.peak_intensity.keys()), sorted(cdp.relax_times.keys())))

        # Final printout.
        if verbose and not deselect_flag:
            print("No spins have been deselected.")
Example 6
def catia_input(file='Fit.catia', dir=None, output_dir='output', force=False):
    """Create the CATIA input files.

    @keyword file:          The main CATIA execution file.
    @type file:             str
    @keyword dir:           The optional directory to place the files into.  If None, then the files will be placed into the current directory.
    @type dir:              str or None
    @keyword output_dir:    The CATIA output directory, located within the directory specified by the dir argument.  This directory will be created.
    @type output_dir:       str
    @keyword force:         A flag which if True will cause all pre-existing files to be overwritten.
    @type force:            bool
    """

    # Data checks.
    check_pipe()
    check_mol_res_spin_data()
    check_spectra_id_setup()
    check_model_type()

    # Check that this is CPMG data.
    for id in cdp.spectrum_ids:
        if cdp.exp_type[id] != 'SQ CPMG':
            raise RelaxError("Only CPMG type data is supported.")

    # Directory creation.
    if dir is not None:
        mkdir_nofail(dir, verbosity=0)

    # Create the R2eff files.
    write_r2eff_files(input_dir='input_r2eff', base_dir=dir, force=force)

    # Create the parameter files.
    write_param_files(global_file="ParamGlobal.inp",
                      set_file="ParamSet1.inp",
                      dir=dir,
                      force=force)

    # Create the main execution file.
    write_main_file(file=file, dir=dir, output_dir=output_dir, force=force)

    # Create the output directory as needed by CATIA (it does not create it itself).
    if dir is None:
        mkdir_nofail(output_dir, verbosity=0)
    else:
        mkdir_nofail(dir + sep + output_dir, verbosity=0)
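A minimal sketch, assuming SQ CPMG data has already been loaded (the 'catia' directory name is hypothetical):

    # Write the CATIA input files into the 'catia' directory, overwriting any pre-existing files.
    catia_input(file='Fit.catia', dir='catia', output_dir='output', force=True)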
Example 7
def select_model(model=MODEL_R2EFF):
    """Set up the model for the relaxation dispersion analysis.

    @keyword model: The relaxation dispersion analysis type.
    @type model:    str
    """

    # Data checks.
    check_pipe()
    check_pipe_type()
    check_mol_res_spin_data()
    check_exp_type()

    # The curve type.
    curve_type = get_curve_type()
    if model == MODEL_R2EFF and curve_type == 'exponential':
        check_c_modules()

    # Invalid model.
    if model not in MODEL_DESC:
        raise RelaxError("The model '%s' must be one of %s." %
                         (model, MODEL_LIST_FULL))

    # R2eff/R1rho model.
    if model == MODEL_R2EFF:
        if curve_type == 'exponential':
            params = ['r2eff', 'i0']
        else:
            params = ['r2eff']

    # All other models.
    else:
        params = deepcopy(MODEL_PARAMS[model])

    # Printout.
    print(MODEL_DESC[model])

    # Set up the model.
    model_setup(model, params)
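A minimal sketch using the default model constant from the function signature:

    # Set up the R2eff model for the current data pipe.
    select_model(model=MODEL_R2EFF)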
Example 8
def error_analysis(subset=None):
    """Determine the peak intensity standard deviation.

    @keyword subset:    The list of spectrum ID strings to restrict the analysis to.
    @type subset:       list of str
    """

    # Tests.
    check_pipe()
    check_mol_res_spin_data()

    # Test if spectra have been loaded.
    if not hasattr(cdp, 'spectrum_ids'):
        raise RelaxError("Error analysis is not possible, no spectra have been loaded.")

    # Check the IDs.
    if subset:
        for id in subset:
            if id not in cdp.spectrum_ids:
                raise RelaxError("The spectrum ID '%s' has not been loaded into relax." % id)

    # Peak height category.
    if cdp.int_method == 'height':
        # Print out.
        print("Intensity measure:  Peak heights.")

        # Do we have replicated spectra?
        if hasattr(cdp, 'replicates'):
            # Print out.
            print("Replicated spectra:  Yes.")

            # Set the errors.
            __errors_repl(subset=subset)

        # No replicated spectra.
        else:
            # Print out.
            print("Replicated spectra:  No.")
            if subset:
                print("Spectra ID subset ignored.")

            # Set the errors.
            __errors_height_no_repl()

    # Peak volume category.
    if cdp.int_method == 'point sum':
        # Print out.
        print("Intensity measure:  Peak volumes.")

        # Do we have replicated spectra?
        if hasattr(cdp, 'replicates'):
            # Print out.
            print("Replicated spectra:  Yes.")

            # Set the errors.
            __errors_repl(subset=subset)

        # No replicated spectra.
        else:
            # Print out.
            print("Replicated spectra:  No.")

            # Not yet implemented.
            raise RelaxImplementError

            # Set the errors.
            __errors_vol_no_repl()
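A hedged sketch with hypothetical IDs for two replicates of the same spectrum:

    # Restrict the peak intensity error analysis to one pair of replicated spectra.
    error_analysis(subset=['T1_1000ms', 'T1_1000ms_rep'])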
Example 9
def read(file=None, dir=None, spectrum_id=None, dim=1, int_col=None, int_method=None, spin_id_col=None, mol_name_col=None, res_num_col=None, res_name_col=None, spin_num_col=None, spin_name_col=None, sep=None, spin_id=None, ncproc=None, verbose=True):
    """Read the peak intensity data.

    @keyword file:          The name of the file(s) containing the peak intensities.
    @type file:             str or list of str
    @keyword dir:           The directory where the file is located.
    @type dir:              str
    @keyword spectrum_id:   The spectrum identification string.
    @type spectrum_id:      str or list of str
    @keyword dim:           The dimension of the peak list to associate the data with.
    @type dim:              int
    @keyword int_col:       The column containing the peak intensity data (used by the generic intensity file format).
    @type int_col:          int or list of int
    @keyword int_method:    The integration method, one of 'height', 'point sum' or 'other'.
    @type int_method:       str
    @keyword spin_id_col:   The column containing the spin ID strings (used by the generic intensity file format).  If supplied, the mol_name_col, res_name_col, res_num_col, spin_name_col, and spin_num_col arguments must be None.
    @type spin_id_col:      int or None
    @keyword mol_name_col:  The column containing the molecule name information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type mol_name_col:     int or None
    @keyword res_name_col:  The column containing the residue name information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type res_name_col:     int or None
    @keyword res_num_col:   The column containing the residue number information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type res_num_col:      int or None
    @keyword spin_name_col: The column containing the spin name information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type spin_name_col:    int or None
    @keyword spin_num_col:  The column containing the spin number information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type spin_num_col:     int or None
    @keyword sep:           The column separator which, if None, defaults to whitespace.
    @type sep:              str or None
    @keyword spin_id:       The spin ID string used to restrict data loading to a subset of all spins.  If 'auto' is provided for an NMRPipe seriesTab formatted file, the IDs are auto-generated in the form Z_Ai.
    @type spin_id:          None or str
    @keyword ncproc:        The Bruker ncproc binary intensity scaling factor.
    @type ncproc:           int or None
    @keyword verbose:       A flag which if True will cause all peak intensity data loaded to be printed out.
    @type verbose:          bool
    """

    # Data checks.
    check_pipe()
    check_mol_res_spin_data()

    # Check the file name.
    if file is None:
        raise RelaxError("The file name must be supplied.")

    # Test that the intensity measures are identical.
    if hasattr(cdp, 'int_method') and cdp.int_method != int_method:
        raise RelaxError("The '%s' measure of peak intensities does not match '%s' of the previously loaded spectra." % (int_method, cdp.int_method))

    # Multiple ID flags.
    flag_multi = False
    flag_multi_file = False
    flag_multi_col = False
    if isinstance(spectrum_id, list) or spectrum_id == 'auto':
        flag_multi = True
    if isinstance(file, list):
        flag_multi_file = True
    if isinstance(int_col, list) or spectrum_id == 'auto':
        flag_multi_col = True

    # List argument checks.
    if flag_multi:
        # Too many lists.
        if flag_multi_file and flag_multi_col:
            raise RelaxError("If a list of spectrum IDs is supplied, the file names and intensity column arguments cannot both be lists.")

        # Not enough lists.
        if not flag_multi_file and not flag_multi_col:
            raise RelaxError("If a list of spectrum IDs is supplied, either the file name or intensity column arguments must be a list of equal length.")

        # List lengths for multiple files.
        if flag_multi_file and len(spectrum_id) != len(file):
            raise RelaxError("The file list %s and spectrum ID list %s do not have the same number of elements." % (file, spectrum_id))

        # List lengths for multiple intensity columns.
        if flag_multi_col and spectrum_id != 'auto' and len(spectrum_id) != len(int_col):
            raise RelaxError("The spectrum ID list %s and intensity column list %s do not have the same number of elements." % (spectrum_id, int_col))

    # More list argument checks (when only one spectrum ID is supplied).
    else:
        # Multiple files.
        if flag_multi_file:
            raise RelaxError("If multiple files are supplied, then multiple spectrum IDs must also be supplied.")

        # Multiple intensity columns.
        if flag_multi_col:
            raise RelaxError("If multiple intensity columns are supplied, then multiple spectrum IDs must also be supplied.")

    # Intensity column checks.
    if spectrum_id != 'auto' and not flag_multi and flag_multi_col:
        raise RelaxError("If a list of intensity columns is supplied, the spectrum ID argument must also be a list of equal length.")

    # Check the intensity measure.
    if int_method not in ['height', 'point sum', 'other']:
        raise RelaxError("The intensity measure '%s' is not one of 'height', 'point sum', 'other'." % int_method)

    # Set the peak intensity measure.
    cdp.int_method = int_method

    # Convert the file argument to a list if necessary.
    if not isinstance(file, list):
        file = [file]

    # Loop over all files.
    for file_index in range(len(file)):
        # Read the peak list data.
        peak_list = read_peak_list(file=file[file_index], dir=dir, int_col=int_col, spin_id_col=spin_id_col, mol_name_col=mol_name_col, res_num_col=res_num_col, res_name_col=res_name_col, spin_num_col=spin_num_col, spin_name_col=spin_name_col, sep=sep, spin_id=spin_id)

        # Automatic spectrum IDs.
        if spectrum_id == 'auto':
            spectrum_id = peak_list[0].intensity_name

        # Loop over the assignments.
        data = []
        data_flag = False
        for assign in peak_list:
            # Generate the spin_id.
            spin_id = generate_spin_id_unique(res_num=assign.res_nums[dim-1], spin_name=assign.spin_names[dim-1])

            # Convert the intensity data to a list if needed.
            intensity = assign.intensity
            if not isinstance(intensity, list):
                intensity = [intensity]

            # Loop over the intensity data.
            for int_index in range(len(intensity)):
                # Sanity check.
                if intensity[int_index] == 0.0:
                    warn(RelaxWarning("A peak intensity of zero has been encountered for the spin '%s' - this could be fatal later on." % spin_id))

                # Get the spin container.
                spin = return_spin(spin_id=spin_id)
                if not spin:
                    warn(RelaxNoSpinWarning(spin_id))
                    continue

                # Skip deselected spins.
                if not spin.select:
                    continue

                # Initialise.
                if not hasattr(spin, 'peak_intensity'):
                    spin.peak_intensity = {}

                # Intensity scaling.
                if ncproc is not None:
                    intensity[int_index] = intensity[int_index] / float(2**ncproc)

                # Add the data.
                if flag_multi_file:
                    id = spectrum_id[file_index]
                elif flag_multi_col:
                    id = spectrum_id[int_index]
                else:
                    id = spectrum_id
                spin.peak_intensity[id] = intensity[int_index]

                # Switch the flag.
                data_flag = True

                # Append the data for printing out.
                data.append([spin_id, repr(intensity[int_index])])

        # Add the spectrum id (and ncproc) to the relax data store.
        spectrum_ids = spectrum_id
        if isinstance(spectrum_id, str):
            spectrum_ids = [spectrum_id]
        if ncproc is not None and not hasattr(cdp, 'ncproc'):
            cdp.ncproc = {}
        for i in range(len(spectrum_ids)):
            add_spectrum_id(spectrum_ids[i])
            if ncproc is not None:
                cdp.ncproc[spectrum_ids[i]] = ncproc

        # No data.
        if not data_flag:
            # Delete all the data.
            delete(spectrum_id)

            # Raise the error.
            raise RelaxError("No data could be loaded from the peak list")

        # Printout.
        if verbose:
            print("\nThe following intensities have been loaded into the relax data store:\n")
            write_data(out=sys.stdout, headings=["Spin_ID", "Intensity"], data=data)
        print('')
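A minimal sketch for a single spectrum (the file name and spectrum ID are hypothetical):

    # Load peak heights from one peak list into the current data pipe.
    read(file='T1_1000ms.list', spectrum_id='T1_1000ms', int_method='height')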
Example 10
    def minimise(self, min_algor=None, min_options=None, func_tol=None, grad_tol=None, max_iterations=None, constraints=False, scaling_matrix=None, verbosity=0, sim_index=None, lower=None, upper=None, inc=None):
        """Relaxation curve fitting minimisation method.

        @keyword min_algor:         The minimisation algorithm to use.
        @type min_algor:            str
        @keyword min_options:       An array of options to be used by the minimisation algorithm.
        @type min_options:          array of str
        @keyword func_tol:          The function tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
        @type func_tol:             None or float
        @keyword grad_tol:          The gradient tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
        @type grad_tol:             None or float
        @keyword max_iterations:    The maximum number of iterations for the algorithm.
        @type max_iterations:       int
        @keyword constraints:       If True, constraints are used during optimisation.
        @type constraints:          bool
        @keyword scaling_matrix:    The per-model list of diagonal and square scaling matrices.
        @type scaling_matrix:       list of numpy rank-2, float64 array or list of None
        @keyword verbosity:         The amount of information to print.  The higher the value, the greater the verbosity.
        @type verbosity:            int
        @keyword sim_index:         The index of the simulation to optimise.  This should be None if normal optimisation is desired.
        @type sim_index:            None or int
        @keyword lower:             The per-model lower bounds of the grid search.  The number of elements must equal the number of parameters in the model.  This optional argument is only used when doing a grid search.
        @type lower:                list of lists of numbers
        @keyword upper:             The per-model upper bounds of the grid search.  The number of elements must equal the number of parameters in the model.  This optional argument is only used when doing a grid search.
        @type upper:                list of lists of numbers
        @keyword inc:               The per-model increments for each dimension of the space for the grid search.  The number of elements in the array must equal the number of parameters in the model.  This argument is only used when doing a grid search.
        @type inc:                  list of lists of int
        """

        # Checks.
        check_mol_res_spin_data()

        # Loop over the sequence.
        model_index = 0
        for spin, spin_id in self.model_loop():
            # Skip deselected spins.
            if not spin.select:
                continue

            # Skip spins which have no data.
            if not hasattr(spin, 'peak_intensity'):
                continue

            # Create the initial parameter vector.
            param_vector = assemble_param_vector(spin=spin)

            # Diagonal scaling.
            if scaling_matrix[model_index] is not None:
                param_vector = dot(inv(scaling_matrix[model_index]), param_vector)

            # Linear constraints.
            if constraints:
                A, b = linear_constraints(spin=spin, scaling_matrix=scaling_matrix[model_index])
            else:
                A, b = None, None

            # Print out.
            if verbosity >= 1:
                # Individual spin printout.
                if verbosity >= 2:
                    print("\n\n")

                string = "Fitting to spin " + repr(spin_id)
                print("\n\n" + string)
                print(len(string) * '~')


            # Initialise the function to minimise.
            ######################################

            # The peak intensities and times.
            values = []
            errors = []
            times = []
            for key in spin.peak_intensity:
                # The values.
                if sim_index is None:
                    values.append(spin.peak_intensity[key])
                else:
                    values.append(spin.peak_intensity_sim[sim_index][key])

                # The errors.
                errors.append(spin.peak_intensity_err[key])

                # The relaxation times.
                times.append(cdp.relax_times[key])

            # The scaling matrix in a diagonalised list form.
            scaling_list = []
            if scaling_matrix[model_index] is None:
                for i in range(len(param_vector)):
                    scaling_list.append(1.0)
            else:
                for i in range(len(scaling_matrix[model_index])):
                    scaling_list.append(scaling_matrix[model_index][i, i])

            # Set up the target function.
            model = Relax_fit_opt(model=spin.model, num_params=len(spin.params), values=values, errors=errors, relax_times=times, scaling_matrix=scaling_list)


            # Setup the minimisation algorithm when constraints are present.
            ################################################################

            if constraints and not match('^[Gg]rid', min_algor):
                algor = min_options[0]
            else:
                algor = min_algor


            # Levenberg-Marquardt minimisation.
            ###################################

            if match('[Ll][Mm]$', algor) or match('[Ll]evenburg-[Mm]arquardt$', algor):
                # Reconstruct the error data structure as a flat numpy array (the errors list assembled above holds one error per peak intensity key).
                lm_error = zeros(len(errors), float64)
                for k in range(len(errors)):
                    lm_error[k] = errors[k]

                min_options = min_options + (self.relax_fit.lm_dri, lm_error)


            # Minimisation.
            ###############

            # Grid search.
            if search('^[Gg]rid', min_algor):
                results = grid(func=model.func, args=(), num_incs=inc[model_index], lower=lower[model_index], upper=upper[model_index], A=A, b=b, verbosity=verbosity)

                # Unpack the results.
                param_vector, chi2, iter_count, warning = results
                f_count = iter_count
                g_count = 0.0
                h_count = 0.0

            # Minimisation.
            else:
                results = generic_minimise(func=model.func, dfunc=model.dfunc, d2func=model.d2func, args=(), x0=param_vector, min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, maxiter=max_iterations, A=A, b=b, full_output=True, print_flag=verbosity)

                # Unpack the results.
                if results is None:
                    return
                param_vector, chi2, iter_count, f_count, g_count, h_count, warning = results

            # Scaling.
            if scaling_matrix[model_index] is not None:
                param_vector = dot(scaling_matrix[model_index], param_vector)

            # Disassemble the parameter vector.
            disassemble_param_vector(param_vector=param_vector, spin=spin, sim_index=sim_index)

            # Monte Carlo minimisation statistics.
            if sim_index is not None:
                # Chi-squared statistic.
                spin.chi2_sim[sim_index] = chi2

                # Iterations.
                spin.iter_sim[sim_index] = iter_count

                # Function evaluations.
                spin.f_count_sim[sim_index] = f_count

                # Gradient evaluations.
                spin.g_count_sim[sim_index] = g_count

                # Hessian evaluations.
                spin.h_count_sim[sim_index] = h_count

                # Warning.
                spin.warning_sim[sim_index] = warning


            # Normal statistics.
            else:
                # Chi-squared statistic.
                spin.chi2 = chi2

                # Iterations.
                spin.iter = iter_count

                # Function evaluations.
                spin.f_count = f_count

                # Gradient evaluations.
                spin.g_count = g_count

                # Hessian evaluations.
                spin.h_count = h_count

                # Warning.
                spin.warning = warning

            # Increment the model index.
            model_index += 1
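A hedged sketch, where 'api' is a hypothetical instance of the analysis API class and the bounds assume a single two-parameter (rate and initial intensity) exponential model:

    # Run a 21x21 grid search over the two parameters, without diagonal scaling.
    api.minimise(min_algor='grid', lower=[[0.0, 0.0]], upper=[[20.0, 2000000.0]], inc=[[21, 21]], scaling_matrix=[None])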
Example 11
def sn_ratio_deselection(ratio=10.0, operation='<', all_sn=False, select=False, verbose=True):
    """Use user function deselect.spin on spins with signal to noise ratio higher or lower than ratio.  The operation determines the selection operation.

    @keyword ratio:         The ratio to compare to.
    @type ratio:            float
    @keyword operation:     The comparison operation by which to select the spins.  Of the operation(sn_ratio, ratio), where operation can either be:  '<', '<=', '>', '>=', '==', '!='.
    @type operation:        str
    @keyword all_sn:        A flag specifying if all the signal to noise ratios per spin should match the comparison operator, or if just a single comparison match is enough.
    @type all_sn:           bool
    @keyword select:        A flag specifying if the user function select.spin should be used instead.
    @type select:           bool
    @keyword verbose:       A flag which if True will print additional information out.
    @type verbose:          bool
    """

    # Tests.
    check_pipe()
    check_mol_res_spin_data()

    # Test if spectra have been loaded.
    if not hasattr(cdp, 'spectrum_ids'):
        raise RelaxError("No spectra have been loaded.")

    # Assign the comparison operator.
    # "'<' : strictly less than"
    if operation == '<':
        op = operator.lt

    # "'<=' : less than or equal"
    elif operation == '<=':
        op = operator.le

    # "'>' : strictly greater than"
    elif operation == '>':
        op = operator.gt

    # "'>=' : greater than or equal"
    elif operation == '>=':
        op = operator.ge

    # "'==' : equal"
    elif operation == '==':
        op = operator.eq

    # "'!=' : not equal",
    elif operation == '!=':
        op = operator.ne

    # If not assigned, raise error.
    else:
        raise RelaxError("The compare operation does not belong to the allowed list of methods: ['<', '<=', '>', '>=', '==', '!=']")

    # Assign text for print out.
    if all_sn:
        text_all_sn = "all"
    else:
        text_all_sn = "any"

    if select:
        text_sel = "selected"
        sel_func = sel_spin
    else:
        text_sel = "deselected"
        sel_func = desel_spin

    # Printout.
    section(file=sys.stdout, text="Signal to noise ratio comparison selection", prespace=1, postspace=0)
    print("For the comparison test: S/N %s %1.1f" % (operation, ratio))

    # Loop over the spins.
    spin_ids = []
    for spin, spin_id in spin_loop(return_id=True):
        # Skip spins missing sn_ratio.
        if not hasattr(spin, 'sn_ratio'):
            # Skip warning for deselected spins.
            if spin.select:
                warn(RelaxWarning("Spin '%s' does not contain Signal to Noise calculations. Perform the user function 'spectrum.sn_ratio'. This spin is skipped." % spin_id))
            continue

        # Collect the spectrum IDs.
        ids = []
        for id in spin.peak_intensity:
            # Append the ID to the list.
            ids.append(id)

        # Sort the IDs alphanumerically.
        ids = sort_filenames(filenames=ids, rev=False)

        # Loop over the sorted ids.
        sn_val = []
        for id in ids:
            # Append the Signal to Noise in the list.
            sn_val.append(spin.sn_ratio[id])

        # Convert the list to array.
        sn_val = asarray(sn_val)

        # Make the comparison for the whole array.
        test_arr = op(sn_val, ratio)

        # Determine how the test should evaluate.
        if all_sn:
            test = test_arr.all()
        else:
            test = test_arr.any()

        # Make a numpy array of the IDs and extract those which evaluated to True.
        ids_arr = asarray(ids)
        ids_test_arr = ids_arr[test_arr]

        # Invert the boolean array to get the IDs which evaluated to False.
        test_arr_inv = test_arr == False
        ids_test_arr_inv = ids_arr[test_arr_inv]

        # Printout.
        if verbose:
            subsection(file=sys.stdout, text="Signal to noise ratio comparison for spin ID '%s'" % spin_id, prespace=1, postspace=0)
            print("The following spectrum IDs evaluated to True: %s" % ids_test_arr)
            print("The following spectrum IDs evaluated to False: %s" % ids_test_arr_inv)
            print("'%s' comparisons have been used for evaluation, which evaluated to: %s" % (text_all_sn, test))
            if test:
                print("The spin ID '%s' is %s" % (spin_id, text_sel))
            else:
                print("The spin ID '%s' is skipped" % spin_id)

        # If the test evaluates to True, then do selection action.
        if test:
            # Select/Deselect the spin.
            sel_func(spin_id=spin_id)

            # Assign spin_id to list, for printing.
            spin_ids.append(spin_id)

    # Print a summary.
    if verbose:
        if len(spin_ids) > 0:
            subsection(file=sys.stdout, text="For all of the S/N comparison tests, the following spin IDs were %s" % text_sel, prespace=1, postspace=0)
            print(spin_ids)
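A minimal sketch:

    # Deselect every spin whose S/N ratio falls below 10 in any of its spectra.
    sn_ratio_deselection(ratio=10.0, operation='<', all_sn=False, select=False)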
Ejemplo n.º 18
0
def sn_ratio_deselection(ratio=10.0, operation='<', all_sn=False, select=False, verbose=True):
    """Use user function deselect.spin on spins with signal to noise ratio higher or lower than ratio.  The operation determines the selection operation.

    @keyword ratio:         The ratio to compare to.
    @type ratio:            float
    @keyword operation:     The comparison operation by which to select the spins.  Of the operation(sn_ratio, ratio), where operation can either be:  '<', '<=', '>', '>=', '==', '!='.
    @type operation:        str
    @keyword all_sn:        A flag specifying if all the signal to noise ratios per spin should match the comparison operator, of if just a single comparison match is enough.
    @type all_sn:           bool
    @keyword select:        A flag specifying if the user function select.spin should be used instead.
    @type select:           bool
    @keyword verbose:       A flag which if True will print additional information out.
    @type verbose:          bool
    """

    # Tests.
    check_pipe()
    check_mol_res_spin_data()

    # Test if spectra have been loaded.
    if not hasattr(cdp, 'spectrum_ids'):
        raise RelaxError("No spectra have been loaded.")

    # Assign the comparison operator.
    # '<' : strictly less than.
    if operation == '<':
        op = operator.lt

    # '<=' : less than or equal.
    elif operation == '<=':
        op = operator.le

    # '>' : strictly greater than.
    elif operation == '>':
        op = operator.gt

    # '>=' : greater than or equal.
    elif operation == '>=':
        op = operator.ge

    # '==' : equal.
    elif operation == '==':
        op = operator.eq

    # '!=' : not equal.
    elif operation == '!=':
        op = operator.ne

    # If not assigned, raise an error.
    else:
        raise RelaxError("The compare operation does not belong to the allowed list of methods: ['<', '<=', '>', '>=', '==', '!=']")

    # Assign text for print out.
    if all_sn:
        text_all_sn = "all"
    else:
        text_all_sn = "any"

    if select:
        text_sel = "selected"
        sel_func = sel_spin
    else:
        text_sel = "deselected"
        sel_func = desel_spin

    # Printout.
    section(file=sys.stdout, text="Signal to noise ratio comparison selection", prespace=1, postspace=0)
    print("For the comparison test: S/N %s %1.1f" % (operation, ratio))

    # Loop over the spins.
    spin_ids = []
    for spin, spin_id in spin_loop(return_id=True):
        # Skip spins missing sn_ratio.
        if not hasattr(spin, 'sn_ratio'):
            # Skip warning for deselected spins.
            if spin.select:
                warn(RelaxWarning("Spin '%s' does not contain Signal to Noise calculations. Perform the user function 'spectrum.sn_ratio'. This spin is skipped." % spin_id))
            continue

        # Collect the spectrum IDs.
        ids = list(spin.peak_intensity)

        # Sort the IDs alphanumerically.
        ids = sort_filenames(filenames=ids, rev=False)

        # Loop over the sorted IDs.
        sn_val = []
        for id in ids:
            # Append the signal to noise ratio to the list.
            sn_val.append(spin.sn_ratio[id])

        # Convert the list to array.
        sn_val = asarray(sn_val)

        # Make the comparison for the whole array.
        test_arr = op(sn_val, ratio)

        # Determine how the test should evaluate.
        if all_sn:
            test = test_arr.all()
        else:
            test = test_arr.any()

        # Convert the IDs to a numpy array and extract those which passed the test.
        ids_arr = asarray(ids)
        ids_test_arr = ids_arr[test_arr]

        # Invert the boolean array to extract the IDs which failed.
        test_arr_inv = ~test_arr
        ids_test_arr_inv = ids_arr[test_arr_inv]

        # Printout.
        if verbose:
            subsection(file=sys.stdout, text="Signal to noise ratio comparison for spin ID '%s'" % spin_id, prespace=1, postspace=0)
            print("The following spectrum IDs evaluated to True: %s" % ids_test_arr)
            print("The following spectrum IDs evaluated to False: %s" % ids_test_arr_inv)
            print("The '%s' comparison mode was used for the evaluation, giving: %s" % (text_all_sn, test))
            if test:
                print("The spin ID '%s' is %s" % (spin_id, text_sel))
            else:
                print("The spin ID '%s' is skipped" % spin_id)

        # If the test evaluates to True, then do selection action.
        if test:
            # Select/Deselect the spin.
            sel_func(spin_id=spin_id)

            # Assign spin_id to list, for printing.
            spin_ids.append(spin_id)

    # Summary printout.
    if verbose:
        if len(spin_ids) > 0:
            subsection(file=sys.stdout, text="For all of the S/N comparison tests, the following spin IDs were %s" % text_sel, prespace=1, postspace=0)
            print(spin_ids)
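A minimal usage sketch, assuming a relax script with a data pipe and loaded spectra already set up; the ratio thresholds are invented:

# Hypothetical calls to sn_ratio_deselection() from a relax script.
# Deselect every spin whose S/N ratio drops below 10.0 in any spectrum.
sn_ratio_deselection(ratio=10.0, operation='<', all_sn=False, select=False)

# Re-select spins whose S/N ratio is at least 50.0 in all spectra.
sn_ratio_deselection(ratio=50.0, operation='>=', all_sn=True, select=True)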
Ejemplo n.º 19
0
def signal_noise_ratio(verbose=True):
    """Calculate the signal to noise ratio per spin.

    @keyword verbose:       A flag which if True will print additional information out.
    @type verbose:          bool
    """

    # Tests.
    check_pipe()
    check_mol_res_spin_data()

    # Test if spectra have been loaded.
    if not hasattr(cdp, 'spectrum_ids'):
        raise RelaxError("No spectra have been loaded.")

    # Optional printout.
    if verbose:
        print("\nThe following signal to noise ratios have been calculated:\n")

    # Set the spin specific signal to noise ratio.
    for spin, spin_id in spin_loop(return_id=True):
        # Skip deselected spins.
        if not spin.select:
            continue

        # Skip spins missing intensity data.
        if not hasattr(spin, 'peak_intensity'):
            continue

        # Test if error analysis has been performed.
        if not hasattr(spin, 'peak_intensity_err'):
            raise RelaxError("Intensity error analysis has not been performed.  Please see spectrum.error_analysis().")

        # If necessary, create the dictionary.
        if not hasattr(spin, 'sn_ratio'):
            spin.sn_ratio = {}

        # Loop over the spectrum IDs, calculating the ratios.
        ids = []
        for id in spin.peak_intensity:
            # Append the ID to the list.
            ids.append(id)

            # Calculate the sn_ratio.
            pint = float(spin.peak_intensity[id])
            pint_err = float(spin.peak_intensity_err[id])
            sn_ratio = pint / pint_err

            # Assign the sn_ratio.
            spin.sn_ratio[id] = sn_ratio

        # Sort the IDs alphanumerically.
        ids = sort_filenames(filenames=ids, rev=False)

        # Collect the data under the sorted IDs.
        data_i = []
        for id in ids:
            # Get the values.
            pint = spin.peak_intensity[id]
            pint_err = spin.peak_intensity_err[id]
            sn_ratio = spin.sn_ratio[id]

            # Store the data.
            data_i.append([id, repr(pint), repr(pint_err), repr(sn_ratio)])

        if verbose:
            section(file=sys.stdout, text="Signal to noise ratio for spin ID '%s'"%spin_id, prespace=1)
            write_data(out=sys.stdout, headings=["Spectrum ID", "Signal", "Noise", "S/N"], data=data_i)
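The core arithmetic above is simply the peak intensity divided by its error.  A self-contained sketch of just that step, using invented intensities rather than the relax data store:

# Standalone illustration of the S/N calculation, with made-up values.
peak_intensity = {'ncyc_1': 1.2e6, 'ncyc_2': 8.5e5}
peak_intensity_err = {'ncyc_1': 2.0e4, 'ncyc_2': 2.0e4}

sn_ratio = {}
for id in peak_intensity:
    # S/N is the signal height divided by the baseplane noise estimate.
    sn_ratio[id] = peak_intensity[id] / peak_intensity_err[id]

print(sn_ratio)   # {'ncyc_1': 60.0, 'ncyc_2': 42.5}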
Ejemplo n.º 20
0
def read_spins(file=None,
               dir=None,
               dim=1,
               spin_id_col=None,
               mol_name_col=None,
               res_num_col=None,
               res_name_col=None,
               spin_num_col=None,
               spin_name_col=None,
               sep=None,
               spin_id=None,
               verbose=True):
    """Read the peak intensity data.

    @keyword file:          The name of the file containing the peak intensities.
    @type file:             str
    @keyword dir:           The directory where the file is located.
    @type dir:              str
    @keyword dim:           The dimension of the peak list to associate the data with.
    @type dim:              int
    @keyword spin_id_col:   The column containing the spin ID strings (used by the generic intensity file format).  If supplied, the mol_name_col, res_name_col, res_num_col, spin_name_col, and spin_num_col arguments must be none.
    @type spin_id_col:      int or None
    @keyword mol_name_col:  The column containing the molecule name information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type mol_name_col:     int or None
    @keyword res_name_col:  The column containing the residue name information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type res_name_col:     int or None
    @keyword res_num_col:   The column containing the residue number information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type res_num_col:      int or None
    @keyword spin_name_col: The column containing the spin name information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type spin_name_col:    int or None
    @keyword spin_num_col:  The column containing the spin number information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type spin_num_col:     int or None
    @keyword sep:           The column separator which, if None, defaults to whitespace.
    @type sep:              str or None
    @keyword spin_id:       The spin ID string used to restrict data loading to a subset of all spins.  If 'auto' is provided for an NMRPipe seriesTab formatted file, the IDs are auto-generated in the form of Z_Ai.
    @type spin_id:          None or str
    @keyword verbose:       A flag which if True will cause all the spin data loaded to be printed out.
    @type verbose:          bool
    """

    # Data checks.
    check_pipe()

    # Check the file name.
    if file is None:
        raise RelaxError("The file name must be supplied.")

    # Read the peak list data.
    peak_list = read_peak_list(file=file,
                               dir=dir,
                               spin_id_col=spin_id_col,
                               mol_name_col=mol_name_col,
                               res_num_col=res_num_col,
                               res_name_col=res_name_col,
                               spin_num_col=spin_num_col,
                               spin_name_col=spin_name_col,
                               sep=sep,
                               spin_id=spin_id)

    # Loop over the peak list and create any missing spins.
    for assign in peak_list:
        mol_name = assign.mol_names[dim - 1]
        res_num = assign.res_nums[dim - 1]
        res_name = assign.res_names[dim - 1]
        spin_num = assign.spin_nums[dim - 1]
        spin_name = assign.spin_names[dim - 1]

        # Generate the spin_id.
        spin_id = generate_spin_id_unique(mol_name=mol_name,
                                          res_num=res_num,
                                          res_name=res_name,
                                          spin_name=spin_name)

        # Check if the spin already exists.
        if return_spin(spin_id=spin_id) is None:
            # Create the spin if it does not exist.
            create_spin(spin_num=spin_num,
                        spin_name=spin_name,
                        res_num=res_num,
                        res_name=res_name,
                        mol_name=mol_name)

    # Test that data exists.
    check_mol_res_spin_data()
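A hedged usage sketch; the file name and column layout are assumptions about a generic whitespace-separated peak list:

# Hypothetical call: create spins from dimension 1 of a generic peak list,
# with residue numbers in column 1 and spin names in column 2.
read_spins(file='peak_heights.txt', dir='data', dim=1, res_num_col=1, spin_name_col=2)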
Ejemplo n.º 21
0
def write_xy(format='grace', x_data_type='res_num', y_data_type=None, spin_id=None, plot_data='value', norm_type='first', file=None, dir=None, force=False, norm=True):
    """Writing data to a file.

    @keyword format:        The specific backend to use.  The only currently supported backend is 'grace'.
    @type format:           str
    @keyword x_data_type:   The category of the X-axis data.
    @type x_data_type:      str
    @keyword y_data_type:   The category of the Y-axis data.
    @type y_data_type:      str
    @keyword spin_id:       The spin identification string.
    @type spin_id:          str
    @keyword plot_data:     The type of the plotted data, one of 'value', 'error', or 'sim'.
    @type plot_data:        str
    @keyword norm_type:     The point to normalise to 1.  This can be 'first' or 'last'.
    @type norm_type:        str
    @keyword file:          The name of the Grace file to create.
    @type file:             str
    @keyword dir:           The optional directory to place the file into.
    @type dir:              str
    @keyword force:         Boolean argument which if True causes the file to be overwritten if it already exists.
    @type force:            bool
    @keyword norm:          The normalisation flag which if set to True will cause all graphs to be normalised to a starting value of 1.
    @type norm:             bool
    """

    # Checks.
    check_pipe()
    check_mol_res_spin_data()

    # Test if the plot_data argument is one of 'value', 'error', or 'sim'.
    if plot_data not in ['value', 'error', 'sim']:
        raise RelaxError("The plot data argument " + repr(plot_data) + " must be set to one of 'value', 'error' or 'sim'.")

    # Test if the simulations exist.
    if plot_data == 'sim' and not hasattr(cdp, 'sim_number'):
        raise RelaxNoSimError

    # Open the file for writing.
    file_path = get_file_path(file, dir)
    file = open_write_file(file, dir, force)

    # Get the data.
    data, set_names, graph_type = assemble_data(spin_id, x_data_name=x_data_type, y_data_name=y_data_type, plot_data=plot_data)

    # Convert the graph type.
    if graph_type == 'X,Y':
        graph_type = 'xy'
    elif graph_type == 'X,Y,dX':
        graph_type = 'xydx'
    elif graph_type == 'X,Y,dY':
        graph_type = 'xydy'
    elif graph_type == 'X,Y,dX,dY':
        graph_type = 'xydxdy'

    # No data, so close the empty file and exit.
    if not len(data) or not len(data[0]) or not len(data[0][0]):
        warn(RelaxWarning("No data could be found, creating an empty file."))
        file.close()
        return

    # Get the axis information.
    data_type = [x_data_type, y_data_type]
    seq_type, axis_labels = axis_setup(data_type=data_type, norm=norm)

    # Write the header.
    write_xy_header(format=format, file=file, data_type=data_type, seq_type=seq_type, sets=[len(data[0])], set_names=[set_names], axis_labels=[axis_labels], norm=[norm])

    # Write the data.
    write_xy_data(format=format, data=data, file=file, graph_type=graph_type, norm_type=norm_type, norm=[norm])

    # Close the file.
    file.close()

    # Add the file to the results file list.
    label = None
    if format == 'grace':
        label = 'Grace'
    add_result_file(type=format, label=label, file=file_path)
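A minimal usage sketch, assuming peak intensities have been loaded; the y_data_type value and file name are illustrative guesses:

# Hypothetical call: plot peak intensities against residue number as a
# Grace graph, normalising each curve to start at 1.0.
write_xy(format='grace', x_data_type='res_num', y_data_type='peak_intensity', plot_data='value', norm_type='first', file='intensities.agr', dir='grace', force=True, norm=True)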
Ejemplo n.º 22
0
def signal_noise_ratio(verbose=True):
    """Calculate the signal to noise ratio per spin.

    @keyword verbose:       A flag which if True will print additional information out.
    @type verbose:          bool
    """

    # Tests.
    check_pipe()
    check_mol_res_spin_data()

    # Test if spectra have been loaded.
    if not hasattr(cdp, 'spectrum_ids'):
        raise RelaxError("No spectra have been loaded.")

    # Optional printout.
    if verbose:
        print("\nThe following signal to noise ratios have been calculated:\n")

    # Set the spin specific signal to noise ratio.
    for spin, spin_id in spin_loop(return_id=True):
        # Skip deselected spins.
        if not spin.select:
            continue

        # Skip spins missing intensity data.
        if not hasattr(spin, 'peak_intensity'):
            continue

        # Test if error analysis has been performed.
        if not hasattr(spin, 'peak_intensity_err'):
            raise RelaxError(
                "Intensity error analysis has not been performed.  Please see spectrum.error_analysis()."
            )

        # If necessary, create the dictionary.
        if not hasattr(spin, 'sn_ratio'):
            spin.sn_ratio = {}

        # Loop over the spectrum IDs, calculating the ratios.
        ids = []
        for id in spin.peak_intensity:
            # Append the ID to the list.
            ids.append(id)

            # Calculate the sn_ratio.
            pint = float(spin.peak_intensity[id])
            pint_err = float(spin.peak_intensity_err[id])
            sn_ratio = pint / pint_err

            # Assign the sn_ratio.
            spin.sn_ratio[id] = sn_ratio

        # Sort the IDs alphanumerically.
        ids = sort_filenames(filenames=ids, rev=False)

        # Collect the data under the sorted IDs.
        data_i = []
        for id in ids:
            # Get the values.
            pint = spin.peak_intensity[id]
            pint_err = spin.peak_intensity_err[id]
            sn_ratio = spin.sn_ratio[id]

            # Store the data.
            data_i.append([id, repr(pint), repr(pint_err), repr(sn_ratio)])

        if verbose:
            section(file=sys.stdout,
                    text="Signal to noise ratio for spin ID '%s'" % spin_id,
                    prespace=1)
            write_data(out=sys.stdout,
                       headings=["Spectrum ID", "Signal", "Noise", "S/N"],
                       data=data_i)
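The alphanumeric sort matters because plain lexicographic sorting misorders IDs with embedded integers.  A small illustration, assuming sort_filenames() performs a natural sort (an assumption about its behaviour):

ids = ['ncyc_10', 'ncyc_2', 'ncyc_1']
print(sorted(ids))   # Lexicographic order: ['ncyc_1', 'ncyc_10', 'ncyc_2'].
# sort_filenames(filenames=ids, rev=False) is assumed to instead yield the
# natural order: ['ncyc_1', 'ncyc_2', 'ncyc_10'].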
Ejemplo n.º 23
0
def error_analysis(subset=None):
    """Determine the peak intensity standard deviation.

    @keyword subset:    The list of spectrum ID strings to restrict the analysis to.
    @type subset:       list of str
    """

    # Tests.
    check_pipe()
    check_mol_res_spin_data()

    # Test if spectra have been loaded.
    if not hasattr(cdp, 'spectrum_ids'):
        raise RelaxError("Error analysis is not possible, no spectra have been loaded.")

    # Check the IDs.
    if subset:
        for id in subset:
            if id not in cdp.spectrum_ids:
                raise RelaxError("The spectrum ID '%s' has not been loaded into relax." % id)

    # Peak height category.
    if cdp.int_method == 'height':
        # Print out.
        print("Intensity measure:  Peak heights.")

        # Do we have replicated spectra?
        if hasattr(cdp, 'replicates'):
            # Print out.
            print("Replicated spectra:  Yes.")

            # Set the errors.
            __errors_repl(subset=subset)

        # No replicated spectra.
        else:
            # Print out.
            print("Replicated spectra:  No.")
            if subset:
                print("Spectra ID subset ignored.")

            # Set the errors.
            __errors_height_no_repl()

    # Peak volume category.
    if cdp.int_method == 'point sum':
        # Print out.
        print("Intensity measure:  Peak volumes.")

        # Do we have replicated spectra?
        if hasattr(cdp, 'replicates'):
            # Print out.
            print("Replicated spectra:  Yes.")

            # Set the errors.
            __errors_repl(subset=subset)

        # No replicated spectra.
        else:
            # Print out.
            print("Replicated spectra:  No.")

            # Not yet implemented.
            raise RelaxImplementError

            # Set the errors (unreachable until implemented).
            __errors_vol_no_repl()
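For replicated spectra, the error estimate conceptually comes from the spread between duplicate measurements.  A minimal sketch of that idea with invented duplicate peak heights, independent of the private __errors_repl() backend (which is assumed to additionally pool such estimates across spins):

from numpy import std

# Invented duplicate peak heights for one spin in two replicated spectra.
replicates = [1.02e6, 0.98e6]

# The sample standard deviation (ddof=1) of the duplicates gives a
# per-spin intensity error estimate.
error = std(replicates, ddof=1)
print(error)   # ~28284.3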
Ejemplo n.º 24
0
def read(file=None, dir=None, spectrum_id=None, dim=1, int_col=None, int_method=None, spin_id_col=None, mol_name_col=None, res_num_col=None, res_name_col=None, spin_num_col=None, spin_name_col=None, sep=None, spin_id=None, ncproc=None, verbose=True):
    """Read the peak intensity data.

    @keyword file:          The name of the file(s) containing the peak intensities.
    @type file:             str or list of str
    @keyword dir:           The directory where the file is located.
    @type dir:              str
    @keyword spectrum_id:   The spectrum identification string.
    @type spectrum_id:      str or list of str
    @keyword dim:           The dimension of the peak list to associate the data with.
    @type dim:              int
    @keyword int_col:       The column containing the peak intensity data (used by the generic intensity file format).
    @type int_col:          int or list of int
    @keyword int_method:    The integration method, one of 'height', 'point sum' or 'other'.
    @type int_method:       str
    @keyword spin_id_col:   The column containing the spin ID strings (used by the generic intensity file format).  If supplied, the mol_name_col, res_name_col, res_num_col, spin_name_col, and spin_num_col arguments must be none.
    @type spin_id_col:      int or None
    @keyword mol_name_col:  The column containing the molecule name information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type mol_name_col:     int or None
    @keyword res_name_col:  The column containing the residue name information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type res_name_col:     int or None
    @keyword res_num_col:   The column containing the residue number information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type res_num_col:      int or None
    @keyword spin_name_col: The column containing the spin name information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type spin_name_col:    int or None
    @keyword spin_num_col:  The column containing the spin number information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type spin_num_col:     int or None
    @keyword sep:           The column separator which, if None, defaults to whitespace.
    @type sep:              str or None
    @keyword spin_id:       The spin ID string used to restrict data loading to a subset of all spins.  If 'auto' is provided for an NMRPipe seriesTab formatted file, the IDs are auto-generated in the form of Z_Ai.
    @type spin_id:          None or str
    @keyword ncproc:        The Bruker ncproc binary intensity scaling factor.
    @type ncproc:           int or None
    @keyword verbose:       A flag which if True will cause all the peak intensity data loaded to be printed out.
    @type verbose:          bool
    """

    # Data checks.
    check_pipe()
    check_mol_res_spin_data()

    # Check the file name.
    if file is None:
        raise RelaxError("The file name must be supplied.")

    # Test that the intensity measures are identical.
    if hasattr(cdp, 'int_method') and cdp.int_method != int_method:
        raise RelaxError("The '%s' measure of peak intensities does not match '%s' of the previously loaded spectra." % (int_method, cdp.int_method))

    # Multiple ID flags.
    flag_multi = False
    flag_multi_file = False
    flag_multi_col = False
    if isinstance(spectrum_id, list) or spectrum_id == 'auto':
        flag_multi = True
    if isinstance(file, list):
        flag_multi_file = True
    if isinstance(int_col, list) or spectrum_id == 'auto':
        flag_multi_col = True

    # List argument checks.
    if flag_multi:
        # Too many lists.
        if flag_multi_file and flag_multi_col:
            raise RelaxError("If a list of spectrum IDs is supplied, the file names and intensity column arguments cannot both be lists.")

        # Not enough lists.
        if not flag_multi_file and not flag_multi_col:
            raise RelaxError("If a list of spectrum IDs is supplied, either the file name or intensity column arguments must be a list of equal length.")

        # List lengths for multiple files.
        if flag_multi_file and len(spectrum_id) != len(file):
            raise RelaxError("The file list %s and spectrum ID list %s do not have the same number of elements." % (file, spectrum_id))

        # List lengths for multiple intensity columns.
        if flag_multi_col and spectrum_id != 'auto' and len(spectrum_id) != len(int_col):
            raise RelaxError("The spectrum ID list %s and intensity column list %s do not have the same number of elements." % (spectrum_id, int_col))

    # More list argument checks (when only one spectrum ID is supplied).
    else:
        # Multiple files.
        if flag_multi_file:
            raise RelaxError("If multiple files are supplied, then multiple spectrum IDs must also be supplied.")

        # Multiple intensity columns.
        if flag_multi_col:
            raise RelaxError("If multiple intensity columns are supplied, then multiple spectrum IDs must also be supplied.")

    # Intensity column checks.
    if spectrum_id != 'auto' and not flag_multi and flag_multi_col:
        raise RelaxError("If a list of intensity columns is supplied, the spectrum ID argument must also be a list of equal length.")

    # Check the intensity measure.
    if int_method not in ['height', 'point sum', 'other']:
        raise RelaxError("The intensity measure '%s' is not one of 'height', 'point sum', 'other'." % int_method)

    # Set the peak intensity measure.
    cdp.int_method = int_method

    # Convert the file argument to a list if necessary.
    if not isinstance(file, list):
        file = [file]

    # Loop over all files.
    for file_index in range(len(file)):
        # Read the peak list data.
        peak_list = read_peak_list(file=file[file_index], dir=dir, int_col=int_col, spin_id_col=spin_id_col, mol_name_col=mol_name_col, res_num_col=res_num_col, res_name_col=res_name_col, spin_num_col=spin_num_col, spin_name_col=spin_name_col, sep=sep, spin_id=spin_id)

        # Automatic spectrum IDs.
        if spectrum_id == 'auto':
            spectrum_id = peak_list[0].intensity_name

        # Loop over the assignments.
        data = []
        data_flag = False
        for assign in peak_list:
            # Generate the spin_id.
            spin_id = generate_spin_id_unique(res_num=assign.res_nums[dim-1], spin_name=assign.spin_names[dim-1])

            # Convert the intensity data to a list if needed.
            intensity = assign.intensity
            if not isinstance(intensity, list):
                intensity = [intensity]

            # Loop over the intensity data.
            for int_index in range(len(intensity)):
                # Sanity check.
                if intensity[int_index] == 0.0:
                    warn(RelaxWarning("A peak intensity of zero has been encountered for the spin '%s' - this could be fatal later on." % spin_id))

                # Get the spin container.
                spin = return_spin(spin_id)
                if not spin:
                    warn(RelaxNoSpinWarning(spin_id))
                    continue

                # Skip deselected spins.
                if not spin.select:
                    continue

                # Initialise.
                if not hasattr(spin, 'peak_intensity'):
                    spin.peak_intensity = {}

                # Intensity scaling.
                if ncproc is not None:
                    intensity[int_index] = intensity[int_index] / float(2**ncproc)

                # Add the data.
                if flag_multi_file:
                    id = spectrum_id[file_index]
                elif flag_multi_col:
                    id = spectrum_id[int_index]
                else:
                    id = spectrum_id
                spin.peak_intensity[id] = intensity[int_index]

                # Switch the flag.
                data_flag = True

                # Append the data for printing out.
                data.append([spin_id, repr(intensity[int_index])])

        # Add the spectrum id (and ncproc) to the relax data store.
        spectrum_ids = spectrum_id
        if isinstance(spectrum_id, str):
            spectrum_ids = [spectrum_id]
        if ncproc is not None and not hasattr(cdp, 'ncproc'):
            cdp.ncproc = {}
        for i in range(len(spectrum_ids)):
            add_spectrum_id(spectrum_ids[i])
            if ncproc is not None:
                cdp.ncproc[spectrum_ids[i]] = ncproc

        # No data.
        if not data_flag:
            # Delete all the data.
            delete(spectrum_id)

            # Raise the error.
            raise RelaxError("No data could be loaded from the peak list")

        # Printout.
        if verbose:
            print("\nThe following intensities have been loaded into the relax data store:\n")
            write_data(out=sys.stdout, headings=["Spin_ID", "Intensity"], data=data)
        print('')
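A hedged sketch of the two multiple-ID modes validated above; the file names, column numbers and spectrum IDs are invented:

# Mode 1: one peak list file per spectrum ID.
read(file=['ncyc_1.list', 'ncyc_2.list'], spectrum_id=['ncyc_1', 'ncyc_2'], int_method='height', res_num_col=1, spin_name_col=2, int_col=3)

# Mode 2: a single file carrying one intensity column per spectrum ID.
read(file='all_heights.txt', spectrum_id=['ncyc_1', 'ncyc_2'], int_method='height', res_num_col=1, spin_name_col=2, int_col=[3, 4])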
Ejemplo n.º 25
0
def write_xy(format='grace', x_data_type='res_num', y_data_type=None, spin_id=None, plot_data='value', norm_type='first', file=None, dir=None, force=False, norm=True):
    """Writing data to a file.

    @keyword format:        The specific backend to use.  The only currently supported backend is 'grace'.
    @type format:           str
    @keyword x_data_type:   The category of the X-axis data.
    @type x_data_type:      str
    @keyword y_data_type:   The category of the Y-axis data.
    @type y_data_type:      str
    @keyword spin_id:       The spin identification string.
    @type spin_id:          str
    @keyword plot_data:     The type of the plotted data, one of 'value', 'error', or 'sim'.
    @type plot_data:        str
    @keyword norm_type:     The point to normalise to 1.  This can be 'first' or 'last'.
    @type norm_type:        str
    @keyword file:          The name of the Grace file to create.
    @type file:             str
    @keyword dir:           The optional directory to place the file into.
    @type dir:              str
    @keyword force:         Boolean argument which if True causes the file to be overwritten if it already exists.
    @type force:            bool
    @keyword norm:          The normalisation flag which if set to True will cause all graphs to be normalised to a starting value of 1.
    @type norm:             bool
    """

    # Checks.
    check_pipe()
    check_mol_res_spin_data()

    # Test if the plot_data argument is one of 'value', 'error', or 'sim'.
    if plot_data not in ['value', 'error', 'sim']:
        raise RelaxError("The plot data argument " + repr(plot_data) + " must be set to one of 'value', 'error' or 'sim'.")

    # Test if the simulations exist.
    if plot_data == 'sim' and not hasattr(cdp, 'sim_number'):
        raise RelaxNoSimError

    # Open the file for writing.
    file_path = get_file_path(file, dir)
    file = open_write_file(file, dir, force)

    # Get the data.
    data, set_names, graph_type = assemble_data(spin_id, x_data_name=x_data_type, y_data_name=y_data_type, plot_data=plot_data)

    # Convert the graph type.
    if graph_type == 'X,Y':
        graph_type = 'xy'
    elif graph_type == 'X,Y,dX':
        graph_type = 'xydx'
    elif graph_type == 'X,Y,dY':
        graph_type = 'xydy'
    elif graph_type == 'X,Y,dX,dY':
        graph_type = 'xydxdy'

    # No data, so close the empty file and exit.
    if not len(data) or not len(data[0]) or not len(data[0][0]):
        warn(RelaxWarning("No data could be found, creating an empty file."))
        file.close()
        return

    # Get the axis information.
    data_type = [x_data_type, y_data_type]
    seq_type, axis_labels = axis_setup(data_type=data_type, norm=norm)

    # Write the header.
    write_xy_header(format=format, file=file, data_type=data_type, seq_type=seq_type, sets=[len(data[0])], set_names=[set_names], axis_labels=[axis_labels], norm=[norm])

    # Write the data.
    write_xy_data(format=format, data=data, file=file, graph_type=graph_type, norm_type=norm_type, norm=[norm])

    # Close the file.
    file.close()

    # Add the file to the results file list.
    label = None
    if format == 'grace':
        label = 'Grace'
    add_result_file(type=format, label=label, file=file_path)
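As a design note, the chained elif mapping of graph types above could equally be a dictionary lookup; a small behaviour-equivalent sketch:

# Dict-based alternative to the graph type conversion (illustrative only).
GRAPH_TYPES = {'X,Y': 'xy', 'X,Y,dX': 'xydx', 'X,Y,dY': 'xydy', 'X,Y,dX,dY': 'xydxdy'}
graph_type = GRAPH_TYPES['X,Y,dY']   # -> 'xydy'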