Code example #1
def monte_carlo_select_all_sims(number=None, all_select_sim=None):
    """Set the select flag of all simulations of all models to one.

    @keyword number:            The number of Monte Carlo simulations to set up.
    @type number:               int
    @keyword all_select_sim:    The selection status of the Monte Carlo simulations.  The first
                                dimension of this matrix corresponds to the models and the
                                second corresponds to the simulations.
    @type all_select_sim:       list of lists of bool
    """

    # The specific analysis API object.
    api = return_api()

    # Create the selected simulation array with all simulations selected.
    if all_select_sim == None:
        select_sim = [True] * number

    # Loop over the models.
    i = 0
    for model_info in api.model_loop():
        # Skip function.
        if api.skip_function(model_info=model_info):
            continue

        # Set up the selected simulation array.
        if all_select_sim != None:
            select_sim = all_select_sim[i]

        # Set the selected simulation array.
        api.set_selected_sim(select_sim, model_info=model_info)

        # Model index.
        i += 1
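
A usage sketch (the counts and flags here are hypothetical): either pass a simulation count to select everything, or supply the per-model selection matrix directly.

# Select all 500 simulations for every model (hypothetical count).
monte_carlo_select_all_sims(number=500)

# Or give the matrix explicitly:  the first model keeps all of its
# simulations while the second has them all deselected.
monte_carlo_select_all_sims(all_select_sim=[[True] * 500, [False] * 500])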
Code example #2
def create_macro(data_type=None,
                 style="classic",
                 colour_start=None,
                 colour_end=None,
                 colour_list=None):
    """Create an array of Molmol commands.

    @keyword data_type:     The data type to map to the structure.
    @type data_type:        str
    @keyword style:         The style of the macro.
    @type style:            str
    @keyword colour_start:  The starting colour of the linear gradient.
    @type colour_start:     str or RGB colour array (len 3 with vals from 0 to 1)
    @keyword colour_end:    The ending colour of the linear gradient.
    @type colour_end:       str or RGB colour array (len 3 with vals from 0 to 1)
    @keyword colour_list:   The colour list to search for the colour names.  Can be either 'molmol' or 'x11'.
    @type colour_list:      str or None
    @return:                The list of Molmol commands.
    @rtype:                 list of str
    """

    # Get the specific macro.
    api = return_api()
    commands = api.molmol_macro(data_type, style, colour_start, colour_end,
                                colour_list)

    # Return the macro commands.
    return commands
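
A hypothetical call of the function above, mapping the model-free S2 order parameter onto the structure with a white-to-red gradient taken from the X11 colour names:

# Hypothetical arguments:  data type, gradient endpoints, colour list.
commands = create_macro(data_type='s2', colour_start='white',
                        colour_end='red', colour_list='x11')
print('\n'.join(commands))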
Code example #3
File: minimise.py  Project: pombredanne/relax
def calc(verbosity=1):
    """Function for calculating the function value.

    @keyword verbosity: The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:    int
    """

    # Test if the current data pipe exists.
    check_pipe()

    # Reset the minimisation statistics.
    reset_min_stats(verbosity=verbosity)

    # The specific analysis API object.
    api = return_api()

    # Deselect spins lacking data:
    api.overfit_deselect()

    # Create the scaling matrix.
    scaling_matrix = assemble_scaling_matrix()

    # Get the Processor box singleton (it contains the Processor instance) and alias the Processor.
    processor_box = Processor_box() 
    processor = processor_box.processor

    # Monte Carlo simulation calculation.
    if hasattr(cdp, 'sim_state') and cdp.sim_state == 1:
        # Loop over the simulations.
        for i in range(cdp.sim_number):
            # Status.
            if status.current_analysis:
                status.auto_analysis[status.current_analysis].mc_number = i
            else:
                status.mc_number = i

            # Calculation.
            api.calculate(verbosity=verbosity-1, sim_index=i, scaling_matrix=scaling_matrix)

            # Print out.
            if verbosity and not processor.is_queued():
                print("Simulation " + repr(i+1))

        # Unset the status.
        if status.current_analysis:
            status.auto_analysis[status.current_analysis].mc_number = None
        else:
            status.mc_number = None

    # Minimisation.
    else:
        api.calculate(verbosity=verbosity, scaling_matrix=scaling_matrix)

    # Execute any queued commands.
    processor.run_queue()
Code example #4
File: auto_model_free.py  Project: bopopescu/relax
    def value_set_csa(self, event=None):
        """Set the CSA via the value.set uf.

        @keyword event: The wx event.
        @type event:    wx event
        """

        # Get the default value.
        api = return_api()
        val = api.default_value('csa')

        # Call the user function.
        uf_store['value.set'](val=val, param='csa', spin_id='@N*')
Code example #5
def monte_carlo_initial_values():
    """Set the initial simulation parameter values."""

    # Test if the current data pipe exists.
    check_pipe()

    # Test if simulations have been set up.
    if not hasattr(cdp, 'sim_state'):
        raise RelaxError("Monte Carlo simulations have not been set up.")

    # The specific analysis API object.
    api = return_api()

    # Set the initial parameter values.
    api.sim_init_values()
Code example #6
File: plotting.py  Project: pombredanne/relax
def get_data_type(data_name=None):
    """Determine the type for the given data.

    @keyword data_name: The name of the data or variable to plot.
    @type data_name:    str
    @return:            The data type.
    @rtype:             Python type
    """

    # Sequence data.
    if data_name in ['res_num', 'spin_num']:
        return int

    # Analysis specific value returning functions.
    api = return_api()
    return api.data_type(data_name)
Code example #7
File: plotting.py  Project: pombredanne/relax
def get_functions(data_name=None):
    """Determine the specific functions for the given data type.

    @keyword data_name: The name of the data or variable to plot.
    @type data_name:    str
    @return:            The analysis specific return_value and return_conversion_factor methods.
    @rtype:             tuple of two methods, or a tuple of two None values for sequence data
    """

    # Spin data.
    if data_name in ['res_num', 'spin_num']:
        return None, None

    # Analysis specific value returning functions.
    else:
        api = return_api()
        return api.return_value, api.return_conversion_factor
Code example #8
File: minimise.py  Project: pombredanne/relax
def assemble_scaling_matrix(scaling=True):
    """Create and return the per-model scaling matrices.

    @keyword scaling:           If True, diagonal scaling is enabled during optimisation to allow the problem to be better conditioned.
    @type scaling:              bool
    @return:                    The list of diagonal and square scaling matrices.
    @rtype:                     list of numpy rank-2, float64 array or list of None
    """

    # The specific analysis API object and parameter object.
    api = return_api()
    param_object = return_parameter_object()

    # Initialise.
    scaling_matrix = []

    # Loop over the models.
    for model_info in api.model_loop():
        # No diagonal scaling.
        if not scaling:
            scaling_matrix.append(None)
            continue

        # Get the parameter names.
        names = api.get_param_names(model_info)

        # No parameters for this model.
        if names == None or len(names) == 0:
            scaling_matrix.append(None)
            continue

        # The parameter number.
        n = len(names)

        # Initialise.
        scaling_matrix.append(identity(n, float64))
        i = 0

        # Update the diagonal with the parameter specific scaling factor.
        for i in range(n):
            scaling_matrix[-1][i, i] = param_object.scaling(names[i], model_info=model_info)

    # Return the matrix.
    return scaling_matrix
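
To illustrate what the diagonal scaling achieves, a minimal sketch with made-up factors: parameters of wildly different magnitudes are divided by their scaling factors so that the optimiser works on numbers of comparable size.

from numpy import array, float64, identity

# A hypothetical two-parameter model:  a correlation time in seconds
# and a unitless order parameter.
scaling_matrix = identity(2, float64)
scaling_matrix[0, 0] = 1e-9
scaling_matrix[1, 1] = 1.0

# Dividing by the diagonal brings both parameters to order one.
params = array([10e-9, 0.8])
scaled = params / scaling_matrix.diagonal()    # -> [10.0, 0.8]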
Code example #9
File: grace.py  Project: pombredanne/relax
def get_data_types():
    """Get all of the data types to plot for the current data pipe.

    @return:    A list of lists of all the allowable data type descriptions and their values.
    @rtype:     list of list of str
    """

    # The specific analysis API object.
    api = return_api()

    # Return an empty list if the required functions are absent.
    if not hasattr(api, 'data_names') or not hasattr(api, 'return_data_desc'):
        return []

    # The data names, if they exist.
    names = api.data_names(set='params')

    # Initialise the list and then add the sequence data.
    data = []
    data.append(["Spin sequence", 'spin'])

    # Loop over the parameters.
    for name in (api.data_names(set='params') + api.data_names(set='generic') + api.data_names(set='min')):
        # Get the description.
        try:
            desc = api.return_data_desc(name)
        except:
            return []

        # No description.
        if not desc:
            text = name

        # The text.
        else:
            text = "'%s':  %s" % (name, desc)

        # Append the description.
        data.append([text, name])

    # Return the data.
    return data
Code example #10
File: statistics.py  Project: pombredanne/relax
def aic():
    """Calculate and store Akaike's Information Criterion (AIC) for each model."""

    # Checks.
    check_pipe()

    # The specific analysis API object.
    api = return_api()

    # Calculate the chi2.
    print("Calculating the chi-squared value for the current parameter values.")
    api.calculate()

    # Loop over the base models.
    print("\nStoring the model statistics.")
    for model_info in api.model_loop():
        # Title printout.
        api.print_model_title(model_info=model_info)

        # Get the model statistics.
        k, n, chi2 = api.model_statistics(model_info=model_info)

        # Calculate the AIC value.
        aic = chi2 + 2.0*k

        # The model container.
        container = api.get_model_container(model_info=model_info)

        # Store the statistics.
        container.chi2 = chi2
        container.num_params = k
        container.aic = aic

        # Statistics printout.
        data = [
            ["Chi-squared value:", "%20f" % chi2],
            ["Number of parameters (k):", "%20i" % k],
            ["Akaike's Information Criterion (AIC):", "%20f" % aic]
        ]
        write_data(out=sys.stdout, data=data)
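
The AIC value computed above belongs to a family of criteria used by the model selection code later in this collection (code example #23). A sketch of the three formulas, assuming the standard statistical definitions with k parameters and n data points:

from math import log

def aic(chi2, k, n):
    # Akaike's Information Criterion.
    return chi2 + 2.0 * k

def aicc(chi2, k, n):
    # Small sample size corrected AIC.
    return chi2 + 2.0 * k + 2.0 * k * (k + 1.0) / (n - k - 1.0)

def bic(chi2, k, n):
    # Bayesian or Schwarz Information Criterion.
    return chi2 + k * log(n)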
Code example #11
def covariance_matrix(epsrel=0.0, verbosity=2):
    """Estimate model parameter errors via the covariance matrix technique.

    Note that the covariance matrix error estimate is always of lower quality than Monte Carlo simulations.


    @param epsrel:          Any columns of R which satisfy |R_{kk}| <= epsrel |R_{11}| are considered linearly-dependent and are excluded from the covariance matrix, where the corresponding rows and columns of the covariance matrix are set to zero.
    @type epsrel:           float
    @keyword verbosity:     The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:        int
    """

    # Test if the current data pipe exists.
    check_pipe()

    # The specific analysis API object.
    api = return_api()

    # Loop over the models.
    for model_info in api.model_loop():
        # Get the Jacobian and weighting matrix.
        jacobian, weights = api.covariance_matrix(model_info=model_info,
                                                  verbosity=verbosity)

        # Calculate the covariance matrix.
        pcov = statistics.multifit_covar(J=jacobian, weights=weights)

        # To compute one standard deviation errors on the parameters, take the square root of the diagonal covariance.
        sd = sqrt(diag(pcov))

        # Loop over the parameters.
        index = 0
        for name in api.get_param_names(model_info):
            # Set the parameter error.
            api.set_error(index, sd[index], model_info=model_info)

            # Increment the parameter index.
            index = index + 1
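
The error extraction step above is the textbook relation between a covariance matrix and one standard deviation parameter errors. A minimal numerical illustration (the matrix values are made up):

from numpy import array, diag, sqrt

# A hypothetical 2x2 covariance matrix for two fitted parameters.
pcov = array([[4.0e-4, 1.0e-5],
              [1.0e-5, 9.0e-6]])

# One standard deviation errors:  the square roots of the diagonal.
sd = sqrt(diag(pcov))    # -> [0.02, 0.003]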
Code example #12
File: pymol_control.py  Project: pombredanne/relax
def create_macro(data_type=None, style="classic", colour_start=None, colour_end=None, colour_list=None):
    """Create an array of PyMOL commands.

    @keyword data_type:     The data type to map to the structure.
    @type data_type:        str
    @keyword style:         The style of the macro.
    @type style:            str
    @keyword colour_start:  The starting colour of the linear gradient.
    @type colour_start:     str or RGB colour array (len 3 with vals from 0 to 1)
    @keyword colour_end:    The ending colour of the linear gradient.
    @type colour_end:       str or RGB colour array (len 3 with vals from 0 to 1)
    @keyword colour_list:   The colour list to search for the colour names.  Can be either 'molmol' or 'x11'.
    @type colour_list:      str or None
    @return:                The list of PyMOL commands.
    @rtype:                 list of str
    """

    # Get the specific macro.
    api = return_api()
    commands = api.pymol_macro(data_type, style, colour_start, colour_end, colour_list)

    # Return the macro commands.
    return commands
Code example #13
File: value.py  Project: pombredanne/relax
def get_parameters():
    """Return a list of the parameters associated with the current data pipe.

    @return:    The list of parameters.
    @rtype:     list of str
    """

    # No data pipes.
    if cdp == None:
        return []

    # The specific analysis API object.
    api = return_api()

    # Return an empty list if the required functions are absent.
    if not hasattr(api, 'data_names') or not hasattr(api, 'return_data_desc'):
        return []

    # Loop over the parameters.
    params = []
    for name in (api.data_names(set='params') + api.data_names(set='generic') + api.data_names(set='min')):
        # Get the description.
        desc = api.return_data_desc(name)

        # No description.
        if not desc:
            text = name

        # The text.
        else:
            text = "'%s':  %s" % (name, desc)

        # Append the data as a list.
        params.append((text, name))

    # Return the data.
    return params
Code example #14
File: statistics.py  Project: pombredanne/relax
def model_statistics():
    """Calculate and store the model statistics."""

    # Checks.
    check_pipe()

    # The specific analysis API object.
    api = return_api()

    # Calculate the chi2.
    print("Calculating the chi-squared value for the current parameter values.")
    api.calculate()

    # Loop over the base models.
    print("\nStoring the model statistics.")
    for model_info in api.model_loop():
        # Title printout.
        api.print_model_title(model_info=model_info)

        # Get the model statistics.
        k, n, chi2 = api.model_statistics(model_info=model_info)

        # The model container.
        container = api.get_model_container(model_info=model_info)

        # Store the values.
        container.chi2 = chi2
        container.num_params = k
        container.num_data_points = n

        # Statistics printout.
        data = [
            ['Chi-squared value:', "%20f" % chi2],
            ['Number of parameters (k):', "%20i" % k],
            ['Number of data points (n):', "%20i" % n]
        ]
        write_data(out=sys.stdout, data=data)
Code example #15
File: bmrb.py  Project: pombredanne/relax
def read(file=None, dir=None, version=None, sample_conditions=None):
    """Read the contents of a BMRB NMR-STAR formatted file.

    @keyword file:              The name of the BMRB STAR formatted file.
    @type file:                 str
    @keyword dir:               The directory where the file is located.
    @type dir:                  None or str
    @keyword version:           The BMRB version to force the reading.
    @type version:              None or str
    @keyword sample_conditions: The sample condition label to read.  Only one sample condition can be read per data pipe.
    @type sample_conditions:    None or str
    """

    # Test if bmrblib is installed.
    if not dep_check.bmrblib_module:
        raise RelaxNoModuleInstallError('BMRB library', 'bmrblib')

    # Test if the current data pipe exists.
    pipe_name = cdp_name()
    if not pipe_name:
        raise RelaxNoPipeError

    # Make sure that the data pipe is empty.
    if not ds[pipe_name].is_empty():
        raise RelaxError("The current data pipe is not empty.")

    # Get the full file path.
    file_path = get_file_path(file_name=file, dir=dir)

    # Fail if the file does not exist.
    if not access(file_path, F_OK):
        raise RelaxFileError(file_path)

    # Read the results.
    api = return_api(pipe_name=pipe_name)
    api.bmrb_read(file_path, version=version, sample_conditions=sample_conditions)
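
A hypothetical call of the function above (the file name is made up); the current data pipe must already exist and be empty, as enforced by the checks:

read(file='bmr16899.str', dir='data', version='3.1')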
Code example #16
File: minimise.py  Project: pombredanne/relax
def minimise(min_algor=None, line_search=None, hessian_mod=None, hessian_type=None, func_tol=None, grad_tol=None, max_iter=None, constraints=True, scaling=True, verbosity=1, sim_index=None):
    """Minimisation function.

    @keyword min_algor:         The minimisation algorithm to use.
    @type min_algor:            str
    @keyword line_search:       The line search algorithm which will only be used in combination with the line search and conjugate gradient methods.  This will default to the More and Thuente line search.
    @type line_search:          str or None
    @keyword hessian_mod:       The Hessian modification.  This will only be used in the algorithms which use the Hessian, and defaults to Gill, Murray, and Wright modified Cholesky algorithm.
    @type hessian_mod:          str or None
    @keyword hessian_type:      The Hessian type.  This will only be used in a few trust region algorithms, and defaults to BFGS.
    @type hessian_type:         str or None
    @keyword func_tol:          The function tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
    @type func_tol:             None or float
    @keyword grad_tol:          The gradient tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
    @type grad_tol:             None or float
    @keyword max_iter:          The maximum number of iterations for the algorithm.
    @type max_iter:             int
    @keyword constraints:       If True, constraints are used during optimisation.
    @type constraints:          bool
    @keyword scaling:           If True, diagonal scaling is enabled during optimisation to allow the problem to be better conditioned.
    @type scaling:              bool
    @keyword verbosity:         The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:            int
    @keyword sim_index:         The index of the simulation to optimise.  This should be None if normal optimisation is desired.
    @type sim_index:            None or int
    """

    # Test if the current data pipe exists.
    check_pipe()

    # The specific analysis API object.
    api = return_api()

    # Re-package the minimisation algorithm, options, and constraints for the generic_minimise() calls within the specific code.
    if constraints:
        min_options = [min_algor]

        # Determine the constraint algorithm to use.
        min_algor = api.constraint_algorithm()
    else:
        min_options = []
    if line_search != None:
        min_options.append(line_search)
    if hessian_mod != None:
        min_options.append(hessian_mod)
    if hessian_type != None:
        min_options.append(hessian_type)
    min_options = tuple(min_options)

    # Deselect spins lacking data:
    api.overfit_deselect()

    # Create the scaling matrix.
    scaling_matrix = assemble_scaling_matrix(scaling)

    # Get the Processor box singleton (it contains the Processor instance) and alias the Processor.
    processor_box = Processor_box() 
    processor = processor_box.processor

    # Single Monte Carlo simulation.
    if sim_index != None:
        # Reset the minimisation statistics.
        reset_min_stats(sim_index=sim_index, verbosity=verbosity)

        # Optimise.
        api.minimise(min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, max_iterations=max_iter, constraints=constraints, scaling_matrix=scaling_matrix, verbosity=verbosity, sim_index=sim_index)

    # Monte Carlo simulation minimisation.
    elif hasattr(cdp, 'sim_state') and cdp.sim_state == 1:
        for i in range(cdp.sim_number):
            # Reset the minimisation statistics.
            reset_min_stats(sim_index=i, verbosity=verbosity)

            # Status.
            if status.current_analysis:
                status.auto_analysis[status.current_analysis].mc_number = i
            else:
                status.mc_number = i

            # Optimisation.
            api.minimise(min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, max_iterations=max_iter, constraints=constraints, scaling_matrix=scaling_matrix, verbosity=verbosity-1, sim_index=i)

            # Print out.
            if verbosity and not processor.is_queued():
                print("Simulation " + repr(i+1))

        # Unset the status.
        if status.current_analysis:
            status.auto_analysis[status.current_analysis].mc_number = None
        else:
            status.mc_number = None

    # Standard minimisation.
    else:
        # Reset the minimisation statistics.
        reset_min_stats(verbosity=verbosity)

        # Optimise.
        api.minimise(min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, max_iterations=max_iter, constraints=constraints, scaling_matrix=scaling_matrix, verbosity=verbosity)

    # Execute any queued commands.
    processor.run_queue()
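
To make the repackaging at the top of the function concrete, a sketch of the resulting values for a constrained Newton minimisation (the algorithm and modification names are illustrative, not prescribed by this code):

# Hypothetical inputs:  min_algor='newton', constraints=True,
# hessian_mod='GMW', with no line search or Hessian type options.
min_options = ('newton', 'GMW')

# The original algorithm moves into min_options, while min_algor is
# replaced by whatever api.constraint_algorithm() returns, for example
# the Method of Multipliers constraint algorithm.
min_algor = 'Method of Multipliers'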
Code example #17
def monte_carlo_create_data(method=None, distribution=None, fixed_error=None):
    """Function for creating simulation data.

    @keyword method:        The type of Monte Carlo simulation to perform.
    @type method:           str
    @keyword distribution:  The Gaussian distribution to draw errors from.  This can be one of 'measured', 'red_chi2', or 'fixed'.
    @type distribution:     str
    @keyword fixed_error:   If distribution is set to 'fixed', use this value as the standard deviation for the Gaussian distribution.
    @type fixed_error:      float
    """

    # Test if the current data pipe exists.
    check_pipe()

    # Test if simulations have been set up.
    if not hasattr(cdp, 'sim_state'):
        raise RelaxError("Monte Carlo simulations have not been set up.")

    # Test the method argument.
    valid_methods = ['back_calc', 'direct']
    if method not in valid_methods:
        raise RelaxError("The simulation creation method " + repr(method) +
                         " is not valid.")

    # Test the distribution argument.
    valid_distributions = ['measured', 'red_chi2', 'fixed']
    if distribution not in valid_distributions:
        raise RelaxError("The simulation error distribution method " +
                         repr(distribution) +
                         " is not valid.  Try one of the following: " +
                         repr(valid_distributions))

    # Test the fixed_error argument.
    if fixed_error != None and distribution != 'fixed':
        raise RelaxError(
            "The argument 'fixed_error' is set to a value, but the argument 'distribution' is not set to 'fixed'."
        )

    # Test the distribution argument, equal to 'fixed', but no error is set.
    if distribution == 'fixed' and fixed_error == None:
        raise RelaxError(
            "The argument 'distribution' is set to 'fixed', but you have not provided a value to the argument 'fixed_error'."
        )

    # The specific analysis API object.
    api = return_api()

    # Loop over the models.
    for data_index in api.base_data_loop():
        # Create the Monte Carlo data.
        if method == 'back_calc':
            data = api.create_mc_data(data_index)

        # Get the original data.
        else:
            data = api.return_data(data_index)

        # No data, so skip.
        if data == None:
            continue

        # Possibly get the errors from the reduced chi2 distribution.
        if distribution == 'red_chi2':
            error_red_chi2 = api.return_error_red_chi2(data_index)

        # Get the errors.
        error = api.return_error(data_index)

        # List type data.
        if isinstance(data, list) or isinstance(data, ndarray):
            # Loop over the Monte Carlo simulations.
            random = []
            for j in range(cdp.sim_number):
                # Randomise the data.
                random.append([])
                for k in range(len(data)):
                    # No data or errors.
                    if data[k] == None or error[k] == None:
                        random[j].append(None)
                        continue

                    # Gaussian randomisation.
                    if distribution == 'fixed':
                        random[j].append(gauss(data[k], float(fixed_error)))

                    else:
                        random[j].append(gauss(data[k], error[k]))

        # Dictionary type data.
        if isinstance(data, dict):
            # Loop over the Monte Carlo simulations.
            random = []
            for j in range(cdp.sim_number):
                # Randomise the data.
                random.append({})
                for id in data:
                    # No data or errors.
                    if data[id] == None or error[id] == None:
                        random[j][id] = None
                        continue

                    # If errors are drawn from the reduced chi2 distribution.
                    if distribution == 'red_chi2':
                        # Gaussian randomisation, centered at 0, with width of reduced chi2 distribution.
                        g_error = gauss(0.0, error_red_chi2[id])

                        # We need to scale the gauss error, before adding to datapoint.
                        new_point = data[id] + g_error * error[id]

                    # If errors are drawn from fixed distribution.
                    elif distribution == 'fixed':
                        # Gaussian randomisation, centered at data point, with width of fixed error.
                        new_point = gauss(data[id], float(fixed_error))

                    # If errors are drawn from measured values.
                    else:
                        # Gaussian randomisation, centered at data point, with width of measured error.
                        new_point = gauss(data[id], error[id])

                    # Assign datapoint the new value.
                    random[j][id] = new_point

        # Pack the simulation data.
        api.sim_pack_data(data_index, random)
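
The three error distributions reduce to variants of the same Gaussian randomisation. A standalone sketch with made-up numbers:

from random import gauss

# A hypothetical data point, its measured error, and the width of the
# reduced chi2 distribution for that point.
data_point, error, error_red_chi2 = 1.25, 0.05, 1.8

# 'measured':  randomise about the point using the measured error.
new_measured = gauss(data_point, error)

# 'red_chi2':  draw a unitless deviate with the reduced chi2 width,
# then scale it by the measured error before adding it to the point.
new_red_chi2 = data_point + gauss(0.0, error_red_chi2) * error

# 'fixed':  a single user-supplied standard deviation for all points.
new_fixed = gauss(data_point, 0.1)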
Code example #18
File: local_min_search.py  Project: tlinnet/relax
    Erdelyi, M., d'Auvergne E., Navarro-Vazquez, A., Leonov, A., and Griesinger, C. (2011) Dynamics of the Glycosidic Bond: Conformational Space of Lactose. Chemistry-A European Journal, 17(34), 9368-9376 (http://dx.doi.org/10.1002/chem.201100854).
"""


# Python imports.
from numpy import float64, zeros
from numpy.linalg import norm
from random import uniform

# relax imports.
from data_store import Relax_data_store; ds = Relax_data_store()
from specific_analyses.api import return_api


# The specific analysis API object.
api = return_api(analysis_type='N-state')

# Loop over random positions.
for rand_index in range(200):
    # Reset.
    reset()

    # Create the datapipe.
    pipe.create('lactose', 'N-state')

    # Read the results file.
    results.read('results_fixed_rdc+pcs')


    # Random starts.
    ################
Code example #19
File: minimise.py  Project: pombredanne/relax
def grid_setup(lower=None, upper=None, inc=None, verbosity=1, skip_preset=True):
    """Determine the per-model grid bounds, allowing for the zooming grid search.

    @keyword lower:         The user supplied lower bounds of the grid search which must be equal to the number of parameters in the model.
    @type lower:            list of numbers
    @keyword upper:         The user supplied upper bounds of the grid search which must be equal to the number of parameters in the model.
    @type upper:            list of numbers
    @keyword inc:           The user supplied grid search increments.
    @type inc:              int or list of int
    @keyword verbosity:     The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:        int
    @keyword skip_preset:   This argument, when True, allows any parameter which already has a value set to be skipped in the grid search.
    @type skip_preset:      bool
    @return:                The per-model grid lower bounds, upper bounds, and increments.  The first dimension of each structure corresponds to the model, the second the model parameters.
    @rtype:                 tuple of list of lists of float, list of lists of float, and list of lists of int
    """

    # The specific analysis API object and parameter object.
    api = return_api()
    param_object = return_parameter_object()

    # Initialise.
    model_lower = []
    model_upper = []
    model_inc = []

    # Loop over the models.
    for model_info in api.model_loop():
        # Get the parameter names and current values.
        names = api.get_param_names(model_info)
        values = api.get_param_values(model_info)

        # No parameters for this model.
        if names == None or len(names) == 0:
            model_lower.append([])
            model_upper.append([])
            model_inc.append([])
            continue

        # The parameter number.
        n = len(names)

        # Make sure that the length of the parameter array is > 0.
        if n == 0:
            raise RelaxError("Cannot run a grid search on a model with zero parameters.")

        # Check that the user supplied bound lengths are ok.
        if lower != None and len(lower) != n:
            raise RelaxLenError('lower bounds', n)
        if upper != None and len(upper) != n:
            raise RelaxLenError('upper bounds', n)

        # Check the user supplied increments.
        if isinstance(inc, list) and len(inc) != n:
            raise RelaxLenError('increment', n)
        if isinstance(inc, list):
            for i in range(n):
                if not (isinstance(inc[i], int) or inc[i] == None):
                    raise RelaxIntListIntError('increment', inc)
        elif not isinstance(inc, int):
            raise RelaxIntListIntError('increment', inc)

        # Convert to the model increment list.
        if isinstance(inc, int):
            model_inc.append([inc]*n)
        else:
            model_inc.append(inc)

        # Print out the model title.
        api.print_model_title(prefix="Grid search setup:  ", model_info=model_info)

        # The grid zoom level.
        zoom = 0
        if hasattr(cdp, 'grid_zoom_level'):
            zoom = cdp.grid_zoom_level
        zoom_factor = 1.0 / 2.0**zoom
        if zoom > 0:
            print("Zooming grid level of %s, scaling the grid size by a factor of %s.\n" % (zoom, zoom_factor))

        # Append empty lists for the bounds to be built up.
        model_lower.append([])
        model_upper.append([])

        # Loop over the parameters.
        data = []
        for i in range(n):
            # A comment for user feedback.
            comment = 'Default bounds'
            if lower != None and upper != None:
                comment = 'User supplied lower and upper bound'
            elif lower != None:
                comment = 'User supplied lower bound'
            elif upper != None:
                comment = 'User supplied upper bound'

            # Alias the number of increments for this parameter.
            incs = model_inc[-1][i]

            # Error checking for increment values of None.
            if incs == None and values[i] in [None, {}, []]:
                raise RelaxError("The parameter '%s' has no preset value, therefore a grid increment of None is not valid." % names[i])

            # The lower bound for this parameter.
            if lower != None:
                lower_i = lower[i]
            else:
                lower_i = param_object.grid_lower(names[i], incs=incs, model_info=model_info)

            # The upper bound for this parameter.
            if upper != None:
                upper_i = upper[i]
            else:
                upper_i = param_object.grid_upper(names[i], incs=incs, model_info=model_info)

            # The skipping logic.
            skip = False
            if skip_preset:
                # Override the flag if the zoom is on.
                if zoom:
                    skip = False

                # No preset value.
                elif values[i] in [None, {}, []]:
                    skip = False

                # The preset value is a NaN value due to numpy conversions of None.
                elif isNaN(values[i]):
                    skip = False

                # Ok, now the parameter can be skipped.
                else:
                    skip = True

            # Override the skip flag if the incs value is None.
            if incs == None:
                skip = True

            # Skip preset values.
            if skip:
                lower_i = values[i]
                upper_i = values[i]
                model_inc[-1][i] = incs = 1
                comment = 'Preset value'

            # Zooming grid.
            elif zoom:
                # The full size and scaled size.
                size = upper_i - lower_i
                zoom_size = size * zoom_factor
                half_size = zoom_size / 2.0
                comment = 'Zoom grid width of %s %s' % (zoom_size, param_object.units(names[i]))

                # The new size around the current value.
                lower_zoom = values[i] - half_size
                upper_zoom = values[i] + half_size

                # Outside of the original lower bound, so shift the grid to fit.
                if zoom > 0 and lower_zoom < lower_i:
                    # The amount to shift by.
                    shift = lower_i - lower_zoom

                    # Set the new bounds.
                    upper_i = upper_zoom + shift

                # Outside of the original upper bound, so shift the grid to fit.
                elif zoom > 0 and upper_zoom > upper_i:
                    # The amount to shift by.
                    shift = upper_i - upper_zoom

                    # Set the new bounds.
                    lower_i = lower_zoom + shift

                # Inside the original bounds.
                else:
                    lower_i = lower_zoom
                    upper_i = upper_zoom

            # Add to the data list for printing out.
            data.append([names[i], "%15s" % lower_i, "%15s" % upper_i, "%15s" % incs, comment])

            # Scale the bounds.
            scaling = param_object.scaling(names[i], model_info=model_info)
            lower_i /= scaling
            upper_i /= scaling

            # Append.
            model_lower[-1].append(lower_i)
            model_upper[-1].append(upper_i)

        # Printout.
        if verbosity:
            write_data(out=sys.stdout, headings=["Parameter", "Lower bound", "Upper bound", "Increments", "Comment"], data=data)
            sys.stdout.write('\n')

    # Return the bounds.
    return model_lower, model_upper, model_inc
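
The zooming grid arithmetic is easy to check by hand. With made-up bounds and a zoom level of 2, each level halves the grid width, centred on the current parameter value:

# Hypothetical default bounds, current value, and zoom level.
lower_i, upper_i, value = 0.0, 8.0, 1.0
zoom = 2

zoom_factor = 1.0 / 2.0**zoom                    # 0.25
zoom_size = (upper_i - lower_i) * zoom_factor    # 2.0
half_size = zoom_size / 2.0                      # 1.0

# Centre the shrunken grid on the current value, giving [0.0, 2.0].
# This lies inside the original bounds, so no shifting is needed.
lower_zoom = value - half_size
upper_zoom = value + half_size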
Code example #20
File: value.py  Project: pombredanne/relax
def partition_params(val, param):
    """Function for sorting and partitioning the parameters and their values.

    The two major partitions are the tensor parameters and the spin specific parameters.

    @param val:     The parameter values.
    @type val:      None, number, or list of numbers
    @param param:   The parameter names.
    @type param:    None, str, or list of str
    @return:        A tuple, of length 4, of lists.  The first and second elements are the lists of
                    spin specific parameters and values respectively.  The third and fourth elements
                    are the lists of all other parameters and their values.
    @rtype:         tuple of 4 lists
    """

    # The specific analysis API object.
    api = return_api()

    # Initialise.
    spin_params = []
    spin_values = []
    other_params = []
    other_values = []

    # Single parameter.
    if isinstance(param, str):
        # Spin specific parameter.
        if api.is_spin_param(param):
            params = spin_params
            values = spin_values

        # Other parameters.
        else:
            params = other_params
            values = other_values

        # List of values.
        if isinstance(val, list) or isinstance(val, ndarray):
            # Parameter name.
            for i in range(len(val)):
                params.append(param)

            # Parameter values.  Extend the aliased partition list in place,
            # as rebinding the name here would leave the returned lists empty.
            values.extend(val)

        # Single value.
        else:
            # Parameter name.
            params.append(param)

            # Parameter value.
            values.append(val)

    # Multiple parameters.
    elif isinstance(param, list):
        # Loop over all parameters.
        for i in range(len(param)):
            # Spin specific parameter.
            if api.is_spin_param(param[i]):
                params = spin_params
                values = spin_values

            # Other parameters.
            else:
                params = other_params
                values = other_values

            # Parameter name.
            params.append(param[i])

            # Parameter value.
            if isinstance(val, list) or isinstance(val, ndarray):
                values.append(val[i])
            else:
                values.append(val)


    # Return the partitioned parameters and values.
    return spin_params, spin_values, other_params, other_values
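
A usage sketch with hypothetical model-free parameters: 'csa' is spin specific while the global correlation time 'tm' is not, so a mixed call is split cleanly into the two partitions.

spin_p, spin_v, other_p, other_v = partition_params(
    val=[-172e-6, 10e-9], param=['csa', 'tm'])

# spin_p  == ['csa'],  spin_v  == [-172e-6]
# other_p == ['tm'],   other_v == [10e-9]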
Code example #21
File: lactose_n_state.py  Project: pombredanne/relax
# Load the lanthanide atoms.
self._execute_uf(uf_name='structure.load_spins', spin_id='@C1', ave_pos=False)

# Switch back to the main analysis data pipe.
self._execute_uf(uf_name='pipe.switch', pipe_name='lactose')

# Calculate the paramagnetic centre (from the structures in the 'tag' data pipe).
self._execute_uf(uf_name='paramag.centre', atom_id=':4@C1', pipe='tag')

# Set up the model.
self._execute_uf(uf_name='n_state_model.select_model', model=ds.model)

# Set to equal probabilities.
if ds.model == 'population':
    for j in range(NUM_STR):
        self._execute_uf(uf_name='value.set', val=1.0/NUM_STR, param='probs', index=j)

# Minimisation.
self._execute_uf('bfgs', constraints=True, max_iter=5, uf_name='minimise.execute')

# Calculate the AIC value.
api = return_api('N-state')
k, n, chi2 = api.model_statistics()
ds[ds.current_pipe].aic = chi2 + 2.0*k

# Write out a results file.
self._execute_uf(uf_name='results.write', file='devnull', force=True)

# Show the tensors.
self._execute_uf(uf_name='align_tensor.display')
Code example #22

# Module docstring.
"""The pipe user function definitions."""

# relax module imports.
from graphics import WIZARD_IMAGE_PATH
from pipe_control import pipes
from specific_analyses.api import return_api
from user_functions.data import Uf_info
uf_info = Uf_info()
from user_functions.objects import Desc_container

# The hybrid API object.
hybrid_obj = return_api('hybrid')

# The user function class.
uf_class = uf_info.add_class('pipe')
uf_class.title = "Class holding the user functions for manipulating data pipes."
uf_class.menu_text = "&pipe"
uf_class.gui_icon = "relax.pipe"

# The pipe.bundle user function.
uf = uf_info.add_uf('pipe.bundle')
uf.title = "The grouping of data pipes into a bundle."
uf.title_short = "Data pipe bundling."
uf.add_keyarg(
    name="bundle",
    basic_types=["str"],
    desc_short="pipe bundle",
Code example #23
File: model_selection.py  Project: pombredanne/relax
def select(method=None, modsel_pipe=None, bundle=None, pipes=None):
    """Model selection function.

    @keyword method:        The model selection method.  This can currently be one of:
                                - 'AIC', Akaike's Information Criteria.
                                - 'AICc', Small sample size corrected AIC.
                                - 'BIC', Bayesian or Schwarz Information Criteria.
                                - 'CV', Single-item-out cross-validation.
                            None of the other model selection techniques are currently supported.
    @type method:           str
    @keyword modsel_pipe:   The name of the new data pipe to be created by copying of the selected data pipe.
    @type modsel_pipe:      str
    @keyword bundle:        The optional data pipe bundle to associate the newly created pipe with.
    @type bundle:           str or None
    @keyword pipes:         A list of the data pipes to use in the model selection.
    @type pipes:            list of str
    """

    # Test if the pipe already exists.
    if has_pipe(modsel_pipe):
        raise RelaxPipeError(modsel_pipe)

    # Use all pipes.
    if pipes == None:
        # Get all data pipe names from the relax data store.
        pipes = pipe_names()

    # Select the model selection technique.
    if method == 'AIC':
        print("AIC model selection.")
        formula = aic
    elif method == 'AICc':
        print("AICc model selection.")
        formula = aicc
    elif method == 'BIC':
        print("BIC model selection.")
        formula = bic
    elif method == 'CV':
        print("CV model selection.")
    else:
        raise RelaxError("The model selection technique " + repr(method) + " is not currently supported.")

    # No pipes.
    if len(pipes) == 0:
        raise RelaxError("No data pipes are available for use in model selection.")

    # Initialise.
    function_type = {}
    model_loop = {}
    model_type = {}
    duplicate_data = {}
    model_statistics = {}
    skip_function = {}
    modsel_pipe_exists = False

    # Cross validation setup.
    if isinstance(pipes[0], list):
        # No pipes.
        if len(pipes[0]) == 0:
            raise RelaxError("No pipes are available for use in model selection in the array " + repr(pipes[0]) + ".")

        # Loop over the data pipes.
        for i in range(len(pipes)):
            for j in range(len(pipes[i])):
                # The specific analysis API object.
                api = return_api(pipe_name=pipes[i][j])

                # Store the specific functions.
                model_loop[pipes[i][j]] = api.model_loop
                model_type[pipes[i][j]] = api.model_type
                duplicate_data[pipes[i][j]] = api.duplicate_data
                model_statistics[pipes[i][j]] = api.model_statistics
                skip_function[pipes[i][j]] = api.skip_function

        # The model loop should be the same for all data pipes!
        for i in range(len(pipes)):
            for j in range(len(pipes[i])):
                if model_loop[pipes[0][j]] != model_loop[pipes[i][j]]:
                    raise RelaxError("The models for each data pipes should be the same.")

        # Alias some function from the specific API of the first data pipe.
        api = return_api(pipe_name=pipes[0][0])
        model_loop = api.model_loop
        model_desc = api.model_desc

        # Global vs. local models.
        global_flag = False
        for i in range(len(pipes)):
            for j in range(len(pipes[i])):
                if model_type[pipes[i][j]]() == 'global':
                    global_flag = True

    # All other model selection setup.
    else:
        # Loop over the data pipes.
        for i in range(len(pipes)):
            # The specific analysis API object.
            api = return_api(pipe_name=pipes[i])

            # Store the specific functions.
            model_loop[pipes[i]] = api.model_loop
            model_type[pipes[i]] = api.model_type
            duplicate_data[pipes[i]] = api.duplicate_data
            model_statistics[pipes[i]] = api.model_statistics
            skip_function[pipes[i]] = api.skip_function

        # Alias some functions from the specific API of the first data pipe.
        api = return_api(pipe_name=pipes[0])
        model_loop = api.model_loop
        model_desc = api.model_desc

        # Global vs. local models.
        global_flag = False
        for j in range(len(pipes)):
            if model_type[pipes[j]]() == 'global':
                global_flag = True


    # Loop over the base models.
    for model_info in model_loop():
        # Print out.
        print("\n")
        desc = model_desc(model_info)
        if desc:
            print(desc)

        # Initial model.
        best_model = None
        best_crit = 1e300
        data = []

        # Loop over the pipes.
        for j in range(len(pipes)):
            # Single-item-out cross validation.
            if method == 'CV':
                # Sum of chi-squared values.
                sum_crit = 0.0

                # Loop over the validation samples and sum the chi-squared values.
                for k in range(len(pipes[j])):
                    # Alias the data pipe name.
                    pipe = pipes[j][k]

                    # Switch to this pipe.
                    switch(pipe)

                    # Skip function.
                    if skip_function[pipe](model_info):
                        continue

                    # Get the model statistics (unpacked into new names so the loop index k is not clobbered).
                    num_params, num_data, chi2 = model_statistics[pipe](model_info)

                    # Missing data sets.
                    if num_params == None or num_data == None or chi2 == None:
                        continue

                    # Chi2 sum.
                    sum_crit = sum_crit + chi2

                # Cross-validation criterion (average chi-squared value).
                crit = sum_crit / float(len(pipes[j]))

            # Other model selection methods.
            else:
                # Reassign the pipe.
                pipe = pipes[j]

                # Switch to this pipe.
                switch(pipe)

                # Skip function.
                if skip_function[pipe](model_info):
                    continue

                # Get the model statistics.
                k, n, chi2 = model_statistics[pipe](model_info, global_stats=global_flag)

                # Missing data sets.
                if k == None or n == None or chi2 == None:
                    continue

                # Calculate the criterion value.
                crit = formula(chi2, float(k), float(n))

                # Store the values for a later printout.
                data.append([pipe, repr(k), repr(n), "%.5f" % chi2, "%.5f" % crit])

            # Select model.
            if crit < best_crit:
                best_model = pipe
                best_crit = crit

        # Write out the table.
        write_data(out=sys.stdout, headings=["Data pipe", "Num_params_(k)", "Num_data_sets_(n)", "Chi2", "Criterion"], data=data)

        # Duplicate the data from the 'best_model' to the model selection data pipe.
        if best_model != None:
            # Print out of selected model.
            print("The model from the data pipe " + repr(best_model) + " has been selected.")

            # Switch to the selected data pipe.
            switch(best_model)

            # Duplicate.
            duplicate_data[best_model](best_model, modsel_pipe, model_info, global_stats=global_flag, verbose=False)

            # Model selection pipe now exists.
            modsel_pipe_exists = True

        # No model selected.
        else:
            # Print out of selected model.
            print("No model has been selected.")

    # Switch to the model selection pipe.
    if modsel_pipe_exists:
        switch(modsel_pipe)

    # Bundle the data pipe.
    if bundle:
        pipe_control.pipes.bundle(bundle=bundle, pipe=modsel_pipe)
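The formula alias selected above maps onto simple closed-form criteria in the chi-squared statistic. Below is a minimal sketch, not the relax implementations themselves but the standard textbook forms, matching the formula(chi2, float(k), float(n)) call signature used in the loop:

from math import log

def aic(chi2, k, n):
    """Akaike's Information Criterion:  chi2 + 2k."""
    return chi2 + 2.0 * k

def aicc(chi2, k, n):
    """Small sample size corrected AIC:  chi2 + 2k + 2k(k+1)/(n-k-1)."""
    return chi2 + 2.0 * k + 2.0 * k * (k + 1.0) / (n - k - 1.0)

def bic(chi2, k, n):
    """Bayesian (Schwarz) Information Criterion:  chi2 + k*ln(n)."""
    return chi2 + k * log(n)

In all three, a lower criterion value indicates a better model, which is why the loop above tracks best_crit as a running minimum.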
Code example #35
File: plotting.py  Project: pombredanne/relax
def axis_setup(data_type=None, norm=True):
    """Determine the axis information for relax data store specific data.

    @keyword data_type: The axis data category (in the [X, Y] list format).
    @type data_type:    list of str
    @keyword norm:      The normalisation flag which if set to True will cause all graphs to be normalised to a starting value of 1.
    @type norm:         bool
    @return:            The axis information.  This includes the sequence types and the axis labels.
    @rtype:             list of str or None, list of str or None
    """

    # Axis specific settings.
    axes = ['x', 'y']
    seq_type = [None, None]
    axis_labels = [None, None]
    for i in range(2):
        # Determine the sequence data type.
        if data_type[i] == 'res_num':
            seq_type[i] = 'res'

        # Analysis specific methods for making labels.
        analysis_spec = False
        if cdp_name():
            # Flag for making labels.
            analysis_spec = True

            # The specific analysis API object.
            api = return_api()

        # Some axis default values for spin data.
        if data_type[i] == 'res_num':
            # Residue only data.
            if seq_type[i] == 'res':
                # X-axis label.
                if not axis_labels[i]:
                    axis_labels[i] = "Residue number"

            # Spin only data.
            if seq_type[i] == 'spin':
                # X-axis label.
                if not axis_labels[i]:
                    axis_labels[i] = "Spin number"

            # Mixed data.
            if seq_type[i] == 'mixed':
                # X-axis label.
                if not axis_labels[i]:
                    axis_labels[i] = "Spin identification string"

        # Some axis default values for other data types.
        else:
            # Label.
            if analysis_spec and not axis_labels[i]:
                # Get the units.
                units = api.return_units(data_type[i])

                # Set the label.
                axis_labels[i] = api.return_grace_string(data_type[i])

                # Add units.
                if units:
                    axis_labels[i] = axis_labels[i] + "\\N (" + units + ")"

                # Normalised data.
                if norm and axes[i] == 'y':
                    axis_labels[i] = axis_labels[i] + " \\N\\q(normalised)\\Q"

    # Return the data.
    return seq_type, axis_labels
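A usage sketch for the function above, under the assumption that a data pipe is loaded and that 'r2' is a valid parameter name for the current analysis (both hypothetical here):

# Hypothetical call: residue numbers on the X axis, an R2 parameter on the Y axis.
seq_type, axis_labels = axis_setup(data_type=['res_num', 'r2'], norm=False)

# Expected shape of the output: seq_type == ['res', None], axis_labels[0] ==
# 'Residue number', and axis_labels[1] built from the analysis-specific Grace
# string plus units.
print(seq_type, axis_labels)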
        if "round_" in rd:
            dir_model_round = mdir + os.sep + rd + os.sep + 'opt'
            if os.path.isdir(dir_model_round):
                # Create pipe to read data
                pipe_name_rnd = "%s_%s" % (model, rd)
                pipe.create(pipe_name_rnd, 'mf', bundle="temp")
                results.read(file='results', dir=dir_model_round)

                # Get info
                round_i = rd.split("_")[-1]
                cdp_iter = str(cdp.iter)
                chi2 = str(cdp.chi2)
                tm = str(cdp.diff_tensor.tm)

                # Get the api to get number of parameters
                api = return_api(pipe_name=pipe_name_rnd)
                model_loop = api.model_loop
                model_desc = api.model_desc
                model_statistics = api.model_statistics

                for model_info in model_loop():
                    desc = model_desc(model_info)
                    # Num_params_(k)
                    # Num_data_sets_(n)
                    k_glob, n_glob, chi2_glob = model_statistics(
                        model_info, global_stats=True)
                    break

                k_glob = str(k_glob)
                n_glob = str(n_glob)
                chi2_glob = str(chi2_glob)
Code example #37
File: opendx.py  Project: pombredanne/relax
    def __init__(self, params, spin_id, inc, lower, upper, axis_incs, file_prefix, dir, point, point_file, chi_surface, create_par_file):
        """Map the space upon class instantiation."""

        # Initialise.
        #############

        # Function arguments.
        self.params = params
        self.spin_id = spin_id
        self.n = len(params)
        self.inc = inc
        self.axis_incs = axis_incs
        self.file_prefix = file_prefix
        self.dir = dir
        self.point_file = point_file

        # Define the nested list which holds the parameter values and chi2 values.
        self.par_chi2_vals = []

        # The specific analysis API object.
        self.api = return_api()

        # Points.
        if point != None:
            # Check if list is a nested list of lists.
            point_list = []
            if isinstance(point[0], float):
                point_list.append(array(point, float64))
                self.point = point_list
                self.num_points = 1
            else:
                for i in range(len(point)):
                    point_list.append(array(point[i], float64))
                self.point = point_list
                self.num_points = len(point)
        else:
            self.num_points = 0

        # Get the default map bounds.
        self.bounds = zeros((self.n, 2), float64)
        for i in range(self.n):
            # Get the bounds for the parameter i.
            bounds = self.api.map_bounds(self.params[i], self.spin_id)

            # No bounds found.
            if not bounds:
                raise RelaxError("No bounds for the parameter " + repr(self.params[i]) + " could be determined.")

            # Assign the bounds to the global data structure.
            self.bounds[i] = bounds

        # Lower bounds.
        if lower != None:
            self.bounds[:, 0] = array(lower, float64)

        # Upper bounds.
        if upper != None:
            self.bounds[:, 1] = array(upper, float64)

        # Setup the step sizes.
        self.step_size = zeros(self.n, float64)
        self.step_size = (self.bounds[:, 1] - self.bounds[:, 0]) / self.inc


        # Create all the OpenDX data and files.
        #######################################

        # Get the date.
        self.get_date()

        # Create the strings associated with the map axes.
        self.map_axes()

        # Generate the map.
        self.create_map()

        # Generate the file with the parameters and associated chi2 values.
        if create_par_file:
            self.create_par_chi2(file_prefix=self.file_prefix, par_chi2_vals=self.par_chi2_vals)

            # Generate the matplotlib script file for plotting the surfaces.
            self.matplotlib_surface_plot()

        # Generate the file with the parameters and associated chi2 values for the points sent to dx.
        if self.num_points >= 1 and create_par_file:
            # Calculate the parameter and associated chi2 values for the points.
            par_chi2_vals = self.calc_point_par_chi2()

            # Generate the file with the parameters and associated chi2 values.
            self.create_par_chi2(file_prefix=self.point_file, par_chi2_vals=par_chi2_vals)

        # Default the chi2 surface values for the innermost, inner, middle and outer isosurfaces.
        if chi_surface == None:
            all_chi2 = array(self.all_chi, float64)
            innermost = percentile(all_chi2, 10)
            inner = percentile(all_chi2, 20)
            middle = percentile(all_chi2, 50)
            outer = percentile(all_chi2, 90)
            chi_surface = [innermost, inner, middle, outer]

        # Create the OpenDX .net program file.
        write_program(file_prefix=self.file_prefix, point_file=self.point_file, dir=self.dir, inc=self.inc, N=self.n, num_points=self.num_points, labels=self.labels, tick_locations=self.tick_locations, tick_values=self.tick_values, date=self.date, chi_surface=chi_surface)

        # Create the OpenDX .cfg program configuration file.
        write_config(file_prefix=self.file_prefix, dir=self.dir, date=self.date)

        # Create the OpenDX .general file.
        write_general(file_prefix=self.file_prefix, dir=self.dir, inc=self.inc)

        # Create the OpenDX .general and data files for the given point.
        if self.num_points >= 1:
            write_point(file_prefix=self.point_file, dir=self.dir, inc=self.inc, point=self.point, num_points=self.num_points, bounds=self.bounds, N=self.n)
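The chi_surface defaulting above is a plain percentile calculation over all mapped chi-squared values. A self-contained illustration with made-up numbers (only numpy is assumed):

from numpy import array, float64, percentile

# Made-up chi-squared values collected over a mapped grid.
all_chi = array([1.2, 3.4, 0.8, 10.5, 2.2, 7.7, 5.1, 0.9], float64)

# Innermost, inner, middle and outer isosurface levels at the 10th, 20th,
# 50th and 90th percentiles, as in the constructor above.
chi_surface = [percentile(all_chi, q) for q in (10, 20, 50, 90)]
print(chi_surface)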
Code example #38
def write(file=None, dir=None, version='3.1', force=False):
    """Create a BMRB NMR-STAR formatted file.

    @keyword file:      The name of the file to create or a file object.
    @type file:         str or file object
    @keyword dir:       The optional directory to place the file into.  If set to 'pipe_name', then it will be placed in a directory with the same name as the current data pipe.
    @type dir:          str or None
    @keyword version:   The NMR-STAR version to create.  This can be either '2.1', '3.0', or '3.1'.
    @type version:      str
    @keyword force:     A flag which if True will allow a currently existing file to be overwritten.
    @type force:        bool
    """

    # Test if bmrblib is installed.
    if not dep_check.bmrblib_module:
        raise RelaxNoModuleInstallError('BMRB library', 'bmrblib')

    # Test if the current data pipe exists.
    pipe_name = cdp_name()
    if not pipe_name:
        raise RelaxNoPipeError

    # Check the file name.
    if file == None:
        raise RelaxError("The file name must be specified.")

    # A file object.
    if isinstance(file, str):
        # The special data pipe name directory.
        if dir == 'pipe_name':
            dir = pipe_name

        # Get the full file path.
        file = get_file_path(file, dir)

        # Fail if the file already exists and the force flag is False.
        if access(file, F_OK) and not force:
            raise RelaxFileOverwriteError(file, 'force flag')

        # Print out.
        print("Opening the file '%s' for writing." % file)

        # Create the directories.
        mkdir_nofail(dir, verbosity=0)

    # Get the info box.
    info = Info_box()

    # Add the relax citations.
    for id, key in zip(['relax_ref1', 'relax_ref2'],
                       ['dAuvergneGooley08a', 'dAuvergneGooley08b']):
        # Alias the bib entry.
        bib = info.bib[key]

        # Add.
        exp_info.citation(cite_id=id,
                          authors=bib.author2,
                          doi=bib.doi,
                          pubmed_id=bib.pubmed_id,
                          full_citation=bib.cite_short(doi=False, url=False),
                          title=bib.title,
                          status=bib.status,
                          type=bib.type,
                          journal_abbrev=bib.journal,
                          journal_full=bib.journal_full,
                          volume=bib.volume,
                          issue=bib.number,
                          page_first=bib.page_first,
                          page_last=bib.page_last,
                          year=bib.year)

    # Add the relax software package.
    exp_info.software(name=exp_info.SOFTWARE['relax'].name,
                      version=version_full(),
                      vendor_name=exp_info.SOFTWARE['relax'].authors,
                      url=exp_info.SOFTWARE['relax'].url,
                      cite_ids=['relax_ref1', 'relax_ref2'],
                      tasks=exp_info.SOFTWARE['relax'].tasks)

    # Execute the specific BMRB writing code.
    api = return_api(pipe_name=pipe_name)
    api.bmrb_write(file, version=version)

    # Add the file to the results file list.
    if isinstance(file, str):
        add_result_file(type='text', label='BMRB', file=file)
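From a relax script this code is normally reached through the bmrb.write user function; a minimal, hypothetical call (the file name is made up) might be:

# Export the current data pipe to NMR-STAR 3.1, placing the file in a
# directory named after the pipe and overwriting any existing copy.
bmrb.write(file='final_results.str', dir='pipe_name', version='3.1', force=True)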
Code example #39
File: value.py  Project: pombredanne/relax
def read(param=None, scaling=1.0, file=None, dir=None, file_data=None, spin_id_col=None, mol_name_col=None, res_num_col=None, res_name_col=None, spin_num_col=None, spin_name_col=None, data_col=None, error_col=None, sep=None, spin_id=None):
    """Read spin specific data values from a file.

    @keyword param:         The name of the parameter to read.
    @type param:            str
    @keyword scaling:       A scaling factor by which all read values are multiplied.
    @type scaling:          float
    @keyword file:          The name of the file to open.
    @type file:             str
    @keyword dir:           The directory containing the file (defaults to the current directory if None).
    @type dir:              str or None
    @keyword file_data:     An alternative to opening a file, if the data already exists in the correct format.  The format is a list of lists where the first index corresponds to the row and the second the column.
    @type file_data:        list of lists
    @keyword spin_id_col:   The column containing the spin ID strings.  If supplied, the mol_name_col, res_name_col, res_num_col, spin_name_col, and spin_num_col arguments must be none.
    @type spin_id_col:      int or None
    @keyword mol_name_col:  The column containing the molecule name information.  If supplied, spin_id_col must be None.
    @type mol_name_col:     int or None
    @keyword res_name_col:  The column containing the residue name information.  If supplied, spin_id_col must be None.
    @type res_name_col:     int or None
    @keyword res_num_col:   The column containing the residue number information.  If supplied, spin_id_col must be None.
    @type res_num_col:      int or None
    @keyword spin_name_col: The column containing the spin name information.  If supplied, spin_id_col must be None.
    @type spin_name_col:    int or None
    @keyword spin_num_col:  The column containing the spin number information.  If supplied, spin_id_col must be None.
    @type spin_num_col:     int or None
    @keyword data_col:      The column containing the data values.
    @type data_col:         int or None
    @keyword error_col:     The column containing the errors.
    @type error_col:        int or None
    @keyword sep:           The column separator which, if None, defaults to whitespace.
    @type sep:              str or None
    @keyword spin_id:       The spin ID string.
    @type spin_id:          None or str
    """

    # Test if the current pipe exists.
    check_pipe()

    # Test if sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Minimisation statistic flag.
    min_stat = False

    # Alias the specific analysis API object method.
    api = return_api()
    return_value = api.return_value

    # The specific set function.
    set_fn = set

    # Test if data corresponding to param already exists.
    for spin in spin_loop():
        # Skip deselected spins.
        if not spin.select:
            continue

        # Get the value and error.
        value, error = return_value(spin, param)

        # Data exists.
        if value != None or error != None:
            raise RelaxValueError(param)

    # Loop over the data.
    mol_names = []
    res_nums = []
    res_names = []
    spin_nums = []
    spin_names = []
    values = []
    errors = []
    for data in read_spin_data(file=file, dir=dir, file_data=file_data, spin_id_col=spin_id_col, mol_name_col=mol_name_col, res_num_col=res_num_col, res_name_col=res_name_col, spin_num_col=spin_num_col, spin_name_col=spin_name_col, data_col=data_col, error_col=error_col, sep=sep, spin_id=spin_id):
        # Unpack.
        if data_col and error_col:
            mol_name, res_num, res_name, spin_num, spin_name, value, error = data
        elif data_col:
            mol_name, res_num, res_name, spin_num, spin_name, value = data
            error = None
        else:
            mol_name, res_num, res_name, spin_num, spin_name, error = data
            value = None

        # Set the value.
        id = generate_spin_id_unique(mol_name=mol_name, res_num=res_num, res_name=res_name, spin_num=spin_num, spin_name=spin_name)
        set_fn(val=value, error=error, param=param, spin_id=id)

        # Append the data for printout.
        mol_names.append(mol_name)
        res_nums.append(res_num)
        res_names.append(res_name)
        spin_nums.append(spin_num)
        spin_names.append(spin_name)
        values.append(value)
        errors.append(error)

    # Print out.
    write_spin_data(file=sys.stdout, mol_names=mol_names, res_nums=res_nums, res_names=res_names, spin_nums=spin_nums, spin_names=spin_names, data=values, data_name=param, error=errors, error_name='%s_error'%param)

    # Reset the minimisation statistics.
    if api.set(param) == 'min':
        minimise.reset_min_stats()
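As a usage sketch (hypothetical file name and column layout), reading CSA values and errors through the corresponding value.read user function could look like:

# Column 1 holds residue numbers, column 2 the values, column 3 the errors.
value.read(param='csa', file='csa_values.txt', res_num_col=1, data_col=2, error_col=3)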
Code example #40
def set(val=None, param=None, index=None, pipe=None, spin_id=None, verbosity=1, error=False, force=True, reset=True):
    """Set global or spin specific data values.

    @keyword val:       The parameter values.
    @type val:          None or list
    @keyword param:     The parameter names.
    @type param:        None, str, or list of str
    @keyword index:     The index for parameters which are of the list-type.  This is ignored for all other types.
    @type index:        None or int
    @keyword pipe:      The data pipe the values should be placed in.
    @type pipe:         None or str
    @keyword spin_id:   The spin identification string.
    @type spin_id:      str
    @keyword verbosity: The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:    int
    @keyword error:     A flag which if True will allow the parameter errors to be set instead of the values.
    @type error:        bool
    @keyword force:     A flag forcing the overwriting of current values.
    @type force:        bool
    @keyword reset:     A flag which if True will cause all minimisation statistics to be reset.
    @type reset:        bool
    """

    # Switch to the data pipe, storing the original.
    if pipe:
        orig_pipe = pipes.cdp_name()
        pipes.switch(pipe)

    # Test if the current data pipe exists.
    check_pipe()

    # The specific analysis API object.
    api = return_api()

    # Convert numpy arrays to lists, if necessary.
    if isinstance(val, ndarray):
        val = val.tolist()

    # Invalid combinations.
    if (isinstance(val, float) or isinstance(val, int)) and param == None:
        raise RelaxError("The combination of a single value '%s' without specifying the parameter name is invalid." % val)
    if isinstance(val, list) and isinstance(param, str):
        raise RelaxError("Invalid combination:  When multiple values '%s' are specified, either no parameters or a list of parameters must by supplied rather than the single parameter '%s'." % (val, param))

    # Value array and parameter array of equal length.
    if isinstance(val, list) and isinstance(param, list) and len(val) != len(param):
        raise RelaxError("Both the value array and parameter array must be of equal length.")

    # Get the parameter list if needed.
    if param == None:
        param = api.get_param_names()

    # Convert the param to a list if needed.
    if not isinstance(param, list):
        param = [param]

    # Convert the value to a list if needed.
    if val != None and not isinstance(val, list):
        val = [val] * len(param)

    # Default values.
    if val == None:
        # Loop over the parameters, getting the default values.
        val = []
        for i in range(len(param)):
            val.append(api.default_value(param[i]))

            # Check that there is a default.
            if val[-1] == None:
                raise RelaxParamSetError(param[i])

    # Set the parameter values.
    api.set_param_values(param=param, value=val, index=index, spin_id=spin_id, error=error, force=force)

    # Reset all minimisation statistics.
    if reset:
        minimise.reset_min_stats(verbosity=verbosity)

    # Switch back.
    if pipe:
        pipes.switch(orig_pipe)
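A usage sketch for the matching value.set user function, assuming nitrogen spins are loaded (the value and spin ID here are illustrative):

# Set a single CSA value for all nitrogen spins.
value.set(val=-172e-6, param='csa', spin_id='@N')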
Code example #41
def write_data(param=None, file=None, scaling=1.0, bc=False, return_value=None, return_data_desc=None, comment=None):
    """The function which actually writes the data.

    @keyword param:             The parameter to write.
    @type param:                str
    @keyword file:              The file to write the data to.
    @type file:                 str
    @keyword scaling:           The value to scale the parameter by.
    @type scaling:              float
    @keyword bc:                A flag which if True will cause the back calculated values to be written.
    @type bc:                   bool
    @keyword return_value:      An optional function which if supplied will override the default value returning function.
    @type return_value:         None or func
    @keyword return_data_desc:  An optional function which if supplied will override the default parameter description returning function.
    @type return_data_desc:     None or func
    @keyword comment:           Text which will be added to the start of the file as comments.  All lines will be prefixed by '# '.
    @type comment:              str
    """

    # The specific analysis API object.
    api = return_api()

    # Get the value/error returning function and the parameter description function, if not supplied.
    if not return_value:
        return_value = api.return_value
    if not return_data_desc:
        return_data_desc = api.return_data_desc

    # Format string.
    format = "%-30s%-30s"

    # Init the data.
    mol_names = []
    res_nums = []
    res_names = []
    spin_nums = []
    spin_names = []
    values = []
    errors = []

    # Get the parameter description and add it to the file.
    desc = return_data_desc(param)
    if desc:
        file.write("# Parameter description:  %s.\n" % desc)
        file.write("#\n")

    # The comments.
    if comment:
        # Split up the lines.
        lines = comment.splitlines()

        # Write out.
        for line in lines:
            file.write("# %s\n" % line)
        file.write("#\n")

    # Determine the data type, check the data, and set up the dictionary type data keys.
    data_names = 'value'
    error_names = 'error'
    data_type = None
    for spin, mol_name, res_num, res_name in spin_loop(full_info=True):
        # Get the value and error.
        value, error = return_value(spin, param, bc=bc)

        # Dictionary type data.
        if isinstance(value, dict):
            # Sanity check.
            if not data_type in [None, 'dict']:
                raise RelaxError("Mixed data types.")
            data_type = 'dict'

            # Initialise the structures.
            if not isinstance(data_names, list):
                data_names = []
                error_names = []

            # Sort the keys.
            keys = sorted(value.keys())

            # Loop over the keys.
            for key in keys:
                # Add the data and error names if new.
                if key not in data_names:
                    data_names.append(key)
                    error_names.append('sd(%s)' % key)

        # List type data.
        elif isinstance(value, list):
            # Sanity check.
            if not data_type in [None, 'list']:
                raise RelaxError("Mixed data types.")
            data_type = 'list'

            # Initialise the structures, creating the data and error names once only.
            if not isinstance(data_names, list):
                data_names = []
                error_names = []

                # Loop over the data.
                for i in range(len(value)):
                    # The data and error names.
                    data_names.append('value_%s' % i)
                    error_names.append('error_%s' % i)

            # Check the length.
            elif len(data_names) != len(value):
                raise RelaxError("The list type data has an inconsistent number of elements between different spin systems.")

        # None.
        elif value == None:
            pass

        # Simple values.
        else:
            # Sanity check.
            if not data_type in [None, 'value']:
                raise RelaxError("Mixed data types.")
            data_type = 'value'

    # Pack the data.
    for spin, mol_name, res_num, res_name in spin_loop(full_info=True):
        # Get the value and error.
        value, error = return_value(spin, param, bc=bc)

        # Append the spin data (scaled).
        mol_names.append(mol_name)
        res_nums.append(res_num)
        res_names.append(res_name)
        spin_nums.append(spin.num)
        spin_names.append(spin.name)

        # Dictionary type data.
        if data_type == 'dict':
            # Initialise the lists.
            values.append([])
            errors.append([])

            # Loop over the keys.
            for key in data_names:
                # Append the scaled values and errors.
                if value == None or key not in value:
                    values[-1].append(None)
                else:
                    values[-1].append(scale(value[key], scaling))
                if error == None or key not in error:
                    errors[-1].append(None)
                else:
                    errors[-1].append(scale(error[key], scaling))

        # List type data.
        elif data_type == 'list':
            # Initialise the lists.
            values.append([])
            errors.append([])

            # Loop over the data.
            for i in range(len(data_names)):
                # Append the scaled values and errors.
                if value == None:
                    values[-1].append(None)
                else:
                    values[-1].append(scale(value[i], scaling))
                if error == None:
                    errors[-1].append(None)
                else:
                    errors[-1].append(scale(error[i], scaling))

        # Simple values.
        else:
            # Append the scaled values and errors.
            values.append(scale(value, scaling))
            errors.append(scale(error, scaling))

    # Replace all spaces " " with "_" in data_names list.
    if isinstance(data_names, list):
        for i, data_name in enumerate(data_names):
            data_str = data_name.replace(" ", "_")
            # Replace string.
            data_names[i] = data_str

    # Replace all spaces " " with "_" in error_names list.
    if isinstance(error_names, list):
        for i, error_name in enumerate(error_names):
            error_str = error_name.replace(" ", "_")
            # Replace string.
            error_names[i] = error_str

    # Write the data.
    write_spin_data(file, mol_names=mol_names, res_nums=res_nums, res_names=res_names, spin_nums=spin_nums, spin_names=spin_names, data=values, data_name=data_names, error=errors, error_name=error_names)
Code example #42
File: relax_data.py  Project: pombredanne/relax
def back_calc(ri_id=None, ri_type=None, frq=None):
    """Back calculate the relaxation data.

    If no relaxation data currently exists, then the ri_id, ri_type, and frq args are required.


    @keyword ri_id:     The relaxation data ID string.  If not given, all relaxation data will be back calculated.
    @type ri_id:        None or str
    @keyword ri_type:   The relaxation data type.  This should be one of 'R1', 'R2', or 'NOE'.
    @type ri_type:      None or str
    @keyword frq:       The spectrometer proton frequency in Hz.
    @type frq:          None or float
    """

    # Test if the current pipe exists.
    check_pipe()

    # Test if sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Check that ri_type and frq are supplied if no relaxation data exists.
    if ri_id and (not hasattr(cdp, 'ri_ids') or ri_id not in cdp.ri_ids) and (ri_type == None or frq == None):
        raise RelaxError("The 'ri_type' and 'frq' arguments must be supplied as no relaxation data corresponding to '%s' exists." % ri_id)

    # Check if the type is valid.
    if ri_type and ri_type not in VALID_TYPES:
        raise RelaxError("The relaxation data type '%s' must be one of %s." % (ri_type, VALID_TYPES))

    # Frequency checks.
    frequency_checks(frq)

    # Initialise the global data for the current pipe if necessary.
    if not hasattr(cdp, 'ri_type'):
        cdp.ri_type = {}
    if not hasattr(cdp, 'ri_ids'):
        cdp.ri_ids = []

    # Update the global data if needed.
    if ri_id and ri_id not in cdp.ri_ids:
        cdp.ri_ids.append(ri_id)
        cdp.ri_type[ri_id] = ri_type
        set_frequency(id=ri_id, frq=frq)

    # The specific analysis API object.
    api = return_api()

    # The IDs to loop over.
    if ri_id == None:
        ri_ids = cdp.ri_ids
    else:
        ri_ids = [ri_id]

    # The data types.
    if ri_type == None:
        ri_types = cdp.ri_type
    else:
        ri_types = {ri_id: ri_type}

    # The frequencies.
    if frq == None:
        frqs = cdp.spectrometer_frq
    else:
        frqs = {ri_id: frq}

    # Loop over the spins.
    for spin, spin_id in spin_loop(return_id=True):
        # Skip deselected spins.
        if not spin.select:
            continue

        # The global index.
        spin_index = find_index(spin_id)

        # Initialise the spin data if necessary.
        if not hasattr(spin, 'ri_data_bc'):
            spin.ri_data_bc = {}

        # Back-calculate the relaxation value.
        for ri_id in ri_ids:
            spin.ri_data_bc[ri_id] = api.back_calc_ri(spin_index=spin_index, ri_id=ri_id, ri_type=ri_types[ri_id], frq=frqs[ri_id])
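A usage sketch via the relax_data.back_calc user function, with a hypothetical ID and a 600 MHz proton frequency:

# Back calculate R1 data for the 'R1_600' ID at 600 MHz.
relax_data.back_calc(ri_id='R1_600', ri_type='R1', frq=600e6)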
Code example #43
File: pipe.py  Project: pombredanne/relax
# Module docstring.
"""The pipe user function definitions."""

# relax module imports.
from graphics import WIZARD_IMAGE_PATH
from pipe_control import pipes
from specific_analyses.api import return_api
from user_functions.data import Uf_info

uf_info = Uf_info()
from user_functions.objects import Desc_container


# The hybrid API object.
hybrid_obj = return_api("hybrid")


# The user function class.
uf_class = uf_info.add_class("pipe")
uf_class.title = "Class holding the user functions for manipulating data pipes."
uf_class.menu_text = "&pipe"
uf_class.gui_icon = "relax.pipe"


# The pipe.bundle user function.
uf = uf_info.add_uf("pipe.bundle")
uf.title = "The grouping of data pipes into a bundle."
uf.title_short = "Data pipe bundling."
uf.add_keyarg(
    name="bundle",
Code example #45
def minimise(min_algor=None,
             line_search=None,
             hessian_mod=None,
             hessian_type=None,
             func_tol=None,
             grad_tol=None,
             max_iter=None,
             constraints=True,
             scaling=True,
             verbosity=1,
             sim_index=None):
    """Minimisation function.

    @keyword min_algor:         The minimisation algorithm to use.
    @type min_algor:            str
    @keyword line_search:       The line search algorithm which will only be used in combination with the line search and conjugate gradient methods.  This will default to the More and Thuente line search.
    @type line_search:          str or None
    @keyword hessian_mod:       The Hessian modification.  This will only be used in the algorithms which use the Hessian, and defaults to Gill, Murray, and Wright modified Cholesky algorithm.
    @type hessian_mod:          str or None
    @keyword hessian_type:      The Hessian type.  This will only be used in a few trust region algorithms, and defaults to BFGS.
    @type hessian_type:         str or None
    @keyword func_tol:          The function tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
    @type func_tol:             None or float
    @keyword grad_tol:          The gradient tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
    @type grad_tol:             None or float
    @keyword max_iter:          The maximum number of iterations for the algorithm.
    @type max_iter:             int
    @keyword constraints:       If True, constraints are used during optimisation.
    @type constraints:          bool
    @keyword scaling:           If True, diagonal scaling is enabled during optimisation to allow the problem to be better conditioned.
    @type scaling:              bool
    @keyword verbosity:         The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:            int
    @keyword sim_index:         The index of the simulation to optimise.  This should be None if normal optimisation is desired.
    @type sim_index:            None or int
    """

    # Test if the current data pipe exists.
    check_pipe()

    # The specific analysis API object.
    api = return_api()

    # Re-package the minimisation algorithm, options, and constraints for the generic_minimise() calls within the specific code.
    if constraints:
        min_options = [min_algor]

        # Determine the constraint algorithm to use.
        min_algor = api.constraint_algorithm()
    else:
        min_options = []
    if line_search != None:
        min_options.append(line_search)
    if hessian_mod != None:
        min_options.append(hessian_mod)
    if hessian_type != None:
        min_options.append(hessian_type)
    min_options = tuple(min_options)

    # Deselect spins lacking data:
    api.overfit_deselect()

    # Create the scaling matrix.
    scaling_matrix = assemble_scaling_matrix(scaling)

    # Get the Processor box singleton (it contains the Processor instance) and alias the Processor.
    processor_box = Processor_box()
    processor = processor_box.processor

    # Single Monte Carlo simulation.
    if sim_index != None:
        # Reset the minimisation statistics.
        reset_min_stats(sim_index=sim_index, verbosity=verbosity)

        # Optimise.
        api.minimise(min_algor=min_algor,
                     min_options=min_options,
                     func_tol=func_tol,
                     grad_tol=grad_tol,
                     max_iterations=max_iter,
                     constraints=constraints,
                     scaling_matrix=scaling_matrix,
                     verbosity=verbosity,
                     sim_index=sim_index)

    # Monte Carlo simulation minimisation.
    elif hasattr(cdp, 'sim_state') and cdp.sim_state == 1:
        for i in range(cdp.sim_number):
            # Reset the minimisation statistics.
            reset_min_stats(sim_index=i, verbosity=verbosity)

            # Status.
            if status.current_analysis:
                status.auto_analysis[status.current_analysis].mc_number = i
            else:
                status.mc_number = i

            # Optimisation.
            api.minimise(min_algor=min_algor,
                         min_options=min_options,
                         func_tol=func_tol,
                         grad_tol=grad_tol,
                         max_iterations=max_iter,
                         constraints=constraints,
                         scaling_matrix=scaling_matrix,
                         verbosity=verbosity - 1,
                         sim_index=i)

            # Print out.
            if verbosity and not processor.is_queued():
                print("Simulation " + repr(i + 1))

        # Unset the status.
        if status.current_analysis:
            status.auto_analysis[status.current_analysis].mc_number = None
        else:
            status.mc_number = None

    # Standard minimisation.
    else:
        # Reset the minimisation statistics.
        reset_min_stats(verbosity=verbosity)

        # Optimise.
        api.minimise(min_algor=min_algor,
                     min_options=min_options,
                     func_tol=func_tol,
                     grad_tol=grad_tol,
                     max_iterations=max_iter,
                     constraints=constraints,
                     scaling_matrix=scaling_matrix,
                     verbosity=verbosity)

    # Execute any queued commands.
    processor.run_queue()
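The repackaging of the algorithm and options at the top of this function can be shown in isolation. This sketch assumes the constraint algorithm returned by the API is 'Method of Multipliers' (a hypothetical stand-in for api.constraint_algorithm()):

def repackage(min_algor, line_search=None, hessian_mod=None, hessian_type=None, constraints=True):
    """Sketch of the min_algor/min_options rewrapping performed above."""
    if constraints:
        # The real algorithm becomes the first option of the constraint handler.
        min_options = [min_algor]
        min_algor = 'Method of Multipliers'    # Assumed api.constraint_algorithm() value.
    else:
        min_options = []
    if line_search != None:
        min_options.append(line_search)
    if hessian_mod != None:
        min_options.append(hessian_mod)
    if hessian_type != None:
        min_options.append(hessian_type)
    return min_algor, tuple(min_options)

print(repackage('newton', hessian_mod='GMW'))
# -> ('Method of Multipliers', ('newton', 'GMW'))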
Code example #46
def copy(pipe_from=None, pipe_to=None, param=None, force=False):
    """Copy spin specific data values from pipe_from to pipe_to.

    @param pipe_from:   The data pipe to copy the value from.  This defaults to the current data
                        pipe.
    @type pipe_from:    str
    @param pipe_to:     The data pipe to copy the value to.  This defaults to the current data pipe.
    @type pipe_to:      str
    @param param:       The name of the parameter to copy the values of.
    @type param:        str
    @keyword force:     A flag forcing the overwriting of current values.
    @type force:        bool
    """

    # The current data pipe.
    if pipe_from == None:
        pipe_from = pipes.cdp_name()
    if pipe_to == None:
        pipe_to = pipes.cdp_name()
    pipe_orig = pipes.cdp_name()

    # Test that the second data pipe exists.
    check_pipe(pipe_to)

    # Test if the sequence data for pipe_from is loaded.
    if not exists_mol_res_spin_data(pipe_from):
        raise RelaxNoSequenceError(pipe_from)

    # Test if the sequence data for pipe_to is loaded.
    if not exists_mol_res_spin_data(pipe_to):
        raise RelaxNoSequenceError(pipe_to)

    # The specific analysis API object.
    api = return_api(pipe_name=pipe_from)

    # Test if the data exists for pipe_to.
    if force == False:
        for spin in spin_loop(pipe=pipe_to):
            # Get the value and error for pipe_to.
            value, error = api.return_value(spin, param)

            # Data exists.
            if value != None or error != None:
                raise RelaxValueError(param, pipe_to)

    # Switch to the data pipe to copy values to.
    pipes.switch(pipe_to)

    # Copy the values.
    for spin, spin_id in spin_loop(pipe=pipe_from, return_id=True):
        # Get the value and error from pipe_from.
        value, error = api.return_value(spin, param)

        # Set the values of pipe_to.
        if value != None:
            set(spin_id=spin_id, val=value, param=param, pipe=pipe_to, force=force)
        if error != None:
            set(spin_id=spin_id, val=error, param=param, pipe=pipe_to, error=True, force=force)

    # Reset all minimisation statistics.
    minimise.reset_min_stats(pipe_to)

    # Switch back to the original current data pipe.
    pipes.switch(pipe_orig)
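A usage sketch for the corresponding value.copy user function (the pipe names here are hypothetical):

# Copy CSA values from the 'm1' pipe to the 'm2' pipe, refusing to overwrite.
value.copy(pipe_from='m1', pipe_to='m2', param='csa', force=False)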
Code example #47
File: local_min_search.py  Project: pombredanne/relax
    Erdelyi, M., d'Auvergne E., Navarro-Vazquez, A., Leonov, A., and Griesinger, C. (2011) Dynamics of the Glycosidic Bond: Conformational Space of Lactose. Chemistry-A European Journal, 17(34), 9368-9376 (http://dx.doi.org/10.1002/chem.201100854).
"""


# Python imports.
from numpy import float64, zeros
from numpy.linalg import norm
from random import uniform

# relax imports.
from data_store import Relax_data_store; ds = Relax_data_store()
from specific_analyses.api import return_api


# The specific analysis API object.
api = return_api(analysis_type='N-state')

# Loop over random positions.
for rand_index in range(200):
    # Reset.
    reset()

    # Create the datapipe.
    pipe.create('lactose', 'N-state')

    # Read the results file.
    results.read('results_fixed_rdc+pcs')


    # Random starts.
    ################
Code example #48
# Python imports
from os import getcwd, sep
from numpy import array, float64, zeros

# relax module imports.
from lib.io import open_write_file, write_data
from pipe_control.mol_res_spin import return_spin
from pipe_control import value
from specific_analyses.api import return_api

# Create pipe
pipe.create('relax_disp', 'relax_disp')

# The specific analysis API object.
api = return_api()

# Variables
prev_data_path = getcwd()
result_filename = 'FT_-_TSMFK01_-_min_-_128_-_free_spins.bz2'

# Read data in
results.read(prev_data_path + sep + result_filename)

# Get the residue of interest, S65.
cur_spin_id = ":%i@%s" % (65, 'N')

# Get the spin container.
cur_spin = return_spin(spin_id=cur_spin_id)

# Get the chi2 value
Code example #49
def monte_carlo_error_analysis():
    """Function for calculating errors from the Monte Carlo simulations.

    The standard deviation formula used to calculate the errors is the square root of the
    bias-corrected variance, given by the formula::

                   __________________________
                  /   1
        sd  =    /  ----- * sum({Xi - Xav}^2)
               \/   n - 1

    where
        - n is the total number of simulations.
        - Xi is the parameter value for simulation i.
        - Xav is the mean parameter value for all simulations.
    """

    # Test if the current data pipe exists.
    check_pipe()

    # Test if simulations have been set up.
    if not hasattr(cdp, 'sim_state'):
        raise RelaxError("Monte Carlo simulations have not been set up.")

    # The specific analysis API object.
    api = return_api()

    # Loop over the models.
    for model_info in api.model_loop():
        # Get the selected simulation array.
        select_sim = api.sim_return_selected(model_info=model_info)

        # Loop over the parameters.
        index = 0
        while True:
            # Get the array of simulation parameters for the index.
            param_array = api.sim_return_param(index, model_info=model_info)

            # Break (no more parameters).
            if param_array == None:
                break

            # Handle dictionary type parameters.
            if isinstance(param_array[0], dict):
                # Initialise the standard deviation structure as a dictionary.
                sd = {}

                # Loop over each key.
                for key in param_array[0]:
                    # Create a list of the values for the current key.
                    data = []
                    for i in range(len(param_array)):
                        data.append(param_array[i][key])

                    # Calculate and store the SD.
                    sd[key] = statistics.std(values=data, skip=select_sim)

            # Handle list type parameters.
            elif isinstance(param_array[0], list):
                # Initialise the standard deviation structure as a list.
                sd = []

                # Loop over each element.
                for j in range(len(param_array[0])):
                    # Create a list of the values for the current element.
                    data = []
                    for i in range(len(param_array)):
                        data.append(param_array[i][j])

                    # Calculate and store the SD.
                    sd.append(statistics.std(values=data, skip=select_sim))

            # SD of simulation parameters with values (ie not None).
            elif param_array[0] != None:
                sd = statistics.std(values=param_array, skip=select_sim)

            # Simulation parameters with the value None.
            else:
                sd = None

            # Set the parameter error.
            api.set_error(index, sd, model_info=model_info)

            # Increment the parameter index.
            index = index + 1

    # Turn off the Monte Carlo simulation state, as the MC analysis is now finished.
    cdp.sim_state = False
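The docstring formula above is the n-1 (bias-corrected) sample standard deviation. A quick self-contained cross-check against numpy, using made-up simulation values:

from math import sqrt
from numpy import std

# Made-up parameter values from five Monte Carlo simulations.
values = [1.0, 1.2, 0.9, 1.1, 1.05]

# The docstring formula:  sd = sqrt(sum((Xi - Xav)^2) / (n - 1)).
mean = sum(values) / len(values)
sd_manual = sqrt(sum((x - mean)**2 for x in values) / (len(values) - 1))

# numpy's equivalent, with the bias correction selected via ddof=1.
print(sd_manual, std(values, ddof=1))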
Code example #50
File: relax_data.py  Project: tlinnet/relax
def back_calc(ri_id=None, ri_type=None, frq=None):
    """Back calculate the relaxation data.

    If no relaxation data currently exists, then the ri_id, ri_type, and frq args are required.


    @keyword ri_id:     The relaxation data ID string.  If not given, all relaxation data will be back calculated.
    @type ri_id:        None or str
    @keyword ri_type:   The relaxation data type.  This should be one of 'R1', 'R2', or 'NOE'.
    @type ri_type:      None or str
    @keyword frq:       The spectrometer proton frequency in Hz.
    @type frq:          None or float
    """

    # Test if the current pipe exists.
    check_pipe()

    # Test if sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Check that ri_type and frq are supplied if no relaxation data exists.
    if ri_id and (not hasattr(cdp, 'ri_ids') or ri_id
                  not in cdp.ri_ids) and (ri_type == None or frq == None):
        raise RelaxError(
            "The 'ri_type' and 'frq' arguments must be supplied as no relaxation data corresponding to '%s' exists."
            % ri_id)

    # Check if the type is valid.
    if ri_type and ri_type not in VALID_TYPES:
        raise RelaxError("The relaxation data type '%s' must be one of %s." %
                         (ri_type, VALID_TYPES))

    # Frequency checks.
    frequency_checks(frq)

    # Initialise the global data for the current pipe if necessary.
    if not hasattr(cdp, 'ri_type'):
        cdp.ri_type = {}
    if not hasattr(cdp, 'ri_ids'):
        cdp.ri_ids = []

    # Update the global data if needed.
    if ri_id and ri_id not in cdp.ri_ids:
        cdp.ri_ids.append(ri_id)
        cdp.ri_type[ri_id] = ri_type
        set_frequency(id=ri_id, frq=frq)

    # The specific analysis API object.
    api = return_api()

    # The IDs to loop over.
    if ri_id == None:
        ri_ids = cdp.ri_ids
    else:
        ri_ids = [ri_id]

    # The data types.
    if ri_type == None:
        ri_types = cdp.ri_type
    else:
        ri_types = {ri_id: ri_type}

    # The frequencies.
    if frq == None:
        frqs = cdp.spectrometer_frq
    else:
        frqs = {ri_id: frq}

    # Loop over the spins.
    for spin, spin_id in spin_loop(return_id=True):
        # Skip deselected spins.
        if not spin.select:
            continue

        # The global index.
        spin_index = find_index(spin_id)

        # Initialise the spin data if necessary.
        if not hasattr(spin, 'ri_data_bc'):
            spin.ri_data_bc = {}

        # Back-calculate the relaxation value.
        for ri_id in ri_ids:
            spin.ri_data_bc[ri_id] = api.back_calc_ri(spin_index=spin_index,
                                                      ri_id=ri_id,
                                                      ri_type=ri_types[ri_id],
                                                      frq=frqs[ri_id])
Code example #51
File: bmrb.py  Project: pombredanne/relax
def write(file=None, dir=None, version='3.1', force=False):
    """Create a BMRB NMR-STAR formatted file.

    @keyword file:      The name of the file to create or a file object.
    @type file:         str or file object
    @keyword dir:       The optional directory to place the file into.  If set to 'pipe_name', then it will be placed in a directory with the same name as the current data pipe.
    @type dir:          str or None
    @keyword version:   The NMR-STAR version to create.  This can be either '2.1', '3.0', or '3.1'.
    @type version:      str
    @keyword force:     A flag which if True will allow a currently existing file to be overwritten.
    @type force:        bool
    """

    # Test if bmrblib is installed.
    if not dep_check.bmrblib_module:
        raise RelaxNoModuleInstallError('BMRB library', 'bmrblib')

    # Test if the current data pipe exists.
    pipe_name = cdp_name()
    if not pipe_name:
        raise RelaxNoPipeError

    # Check the file name.
    if file == None:
        raise RelaxError("The file name must be specified.")

    # A file object.
    if isinstance(file, str):
        # The special data pipe name directory.
        if dir == 'pipe_name':
            dir = pipe_name

        # Get the full file path.
        file = get_file_path(file, dir)

        # Fail if the file already exists and the force flag is False.
        if access(file, F_OK) and not force:
            raise RelaxFileOverwriteError(file, 'force flag')

        # Print out.
        print("Opening the file '%s' for writing." % file)

        # Create the directories.
        mkdir_nofail(dir, verbosity=0)

    # Get the info box.
    info = Info_box()

    # Add the relax citations.
    for id, key in zip(['relax_ref1', 'relax_ref2'], ['dAuvergneGooley08a', 'dAuvergneGooley08b']):
        # Alias the bib entry.
        bib = info.bib[key]

        # Add.
        exp_info.citation(cite_id=id, authors=bib.author2, doi=bib.doi, pubmed_id=bib.pubmed_id, full_citation=bib.cite_short(doi=False, url=False), title=bib.title, status=bib.status, type=bib.type, journal_abbrev=bib.journal, journal_full=bib.journal_full, volume=bib.volume, issue=bib.number, page_first=bib.page_first, page_last=bib.page_last, year=bib.year)

    # Add the relax software package.
    exp_info.software(name=exp_info.SOFTWARE['relax'].name, version=version_full(), vendor_name=exp_info.SOFTWARE['relax'].authors, url=exp_info.SOFTWARE['relax'].url, cite_ids=['relax_ref1', 'relax_ref2'], tasks=exp_info.SOFTWARE['relax'].tasks)

    # Execute the specific BMRB writing code.
    api = return_api(pipe_name=pipe_name)
    api.bmrb_write(file, version=version)

    # Add the file to the results file list.
    if isinstance(file, str):
        add_result_file(type='text', label='BMRB', file=file)
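A usage sketch for the function above (the file name is illustrative), writing an NMR-STAR 3.1 file into a directory named after the current data pipe and overwriting any previous copy:

write(file='results.bmrb', dir='pipe_name', version='3.1', force=True)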
Code example #52
File: value.py  Project: pombredanne/relax
def set(val=None, param=None, index=None, pipe=None, spin_id=None, verbosity=1, error=False, force=True, reset=True):
    """Set global or spin specific data values.

    @keyword val:       The parameter values.
    @type val:          None or list
    @keyword param:     The parameter names.
    @type param:        None, str, or list of str
    @keyword index:     The index for parameters which are of the list-type.  This is ignored for all other types.
    @type index:        None or int
    @keyword pipe:      The data pipe the values should be placed in.
    @type pipe:         None or str
    @keyword spin_id:   The spin identification string.
    @type spin_id:      str
    @keyword verbosity: The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:    int
    @keyword error:     A flag which if True will allow the parameter errors to be set instead of the values.
    @type error:        bool
    @keyword force:     A flag forcing the overwriting of current values.
    @type force:        bool
    @keyword reset:     A flag which if True will cause all minimisation statistics to be reset.
    @type reset:        bool
    """

    # Switch to the data pipe, storing the original.
    if pipe:
        orig_pipe = pipes.cdp_name()
        pipes.switch(pipe)

    # Test if the current data pipe exists.
    check_pipe()

    # The specific analysis API object.
    api = return_api()

    # Convert numpy arrays to lists, if necessary.
    if isinstance(val, ndarray):
        val = val.tolist()

    # Invalid combinations.
    if (isinstance(val, float) or isinstance(val, int)) and param == None:
        raise RelaxError("The combination of a single value '%s' without specifying the parameter name is invalid." % val)
    if isinstance(val, list) and isinstance(param, str):
        raise RelaxError("Invalid combination:  When multiple values '%s' are specified, either no parameters or a list of parameters must by supplied rather than the single parameter '%s'." % (val, param))

    # Value array and parameter array of equal length.
    if isinstance(val, list) and isinstance(param, list) and len(val) != len(param):
        raise RelaxError("Both the value array and parameter array must be of equal length.")

    # Get the parameter list if needed.
    if param == None:
        param = api.get_param_names()

    # Convert the param to a list if needed.
    if not isinstance(param, list):
        param = [param]

    # Convert the value to a list if needed.
    if val != None and not isinstance(val, list):
        val = [val] * len(param)

    # Default values.
    if val == None:
        # Loop over the parameters, getting the default values.
        val = []
        for i in range(len(param)):
            val.append(api.default_value(param[i]))

            # Check that there is a default.
            if val[-1] == None:
                raise RelaxParamSetError(param[i])

    # Set the parameter values.
    api.set_param_values(param=param, value=val, index=index, spin_id=spin_id, error=error, force=force)

    # Reset all minimisation statistics.
    if reset:
        minimise.reset_min_stats(verbosity=verbosity)

    # Switch back.
    if pipe:
        pipes.switch(orig_pipe)
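Two hedged usage sketches for set(); the parameter names and values are illustrative and depend on the active analysis type:

# Set a single parameter for all nitrogen spins, overwriting existing values.
set(val=-172e-6, param='csa', spin_id='@N', force=True)

# Reset parameters to their defaults (val=None triggers the default value lookup,
# assuming defaults exist for both names in the current analysis).
set(param=['csa', 'r'], val=None)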
Code example #53
File: minimise.py  Project: pombredanne/relax
def grid_search(lower=None, upper=None, inc=None, verbosity=1, constraints=True, skip_preset=True):
    """The grid search function.

    @keyword lower:         The lower bounds of the grid search.  The length of this array must equal the number of parameters in the model.
    @type lower:            array of numbers
    @keyword upper:         The upper bounds of the grid search.  The length of this array must equal the number of parameters in the model.
    @type upper:            array of numbers
    @keyword inc:           The increments for each dimension of the space for the grid search.  The number of elements in the array must equal the number of parameters in the model.
    @type inc:              int or list of int
    @keyword verbosity:     The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:        int
    @keyword constraints:   If True, constraints are applied during the grid search (eliminating parts of the grid).  If False, no constraints are used.
    @type constraints:      bool
    @keyword skip_preset:   This argument, when True, allows any parameter which already has a value set to be skipped in the grid search.
    @type skip_preset:      bool
    """

    # Test if the current data pipe exists.
    check_pipe()

    # The specific analysis API object.
    api = return_api()

    # Deselect models lacking data:
    api.overfit_deselect()

    # Determine the model-specific grid bounds, allowing for the zooming grid search, and check the inc argument.
    model_lower, model_upper, model_inc = grid_setup(lower, upper, inc, verbosity=verbosity, skip_preset=skip_preset)

    # Create the scaling matrix.
    scaling_matrix = assemble_scaling_matrix()

    # Get the Processor box singleton (it contains the Processor instance) and alias the Processor.
    processor_box = Processor_box() 
    processor = processor_box.processor

    # Monte Carlo simulation grid search.
    if hasattr(cdp, 'sim_state') and cdp.sim_state == 1:
        # Loop over the simulations.
        for i in range(cdp.sim_number):
            # Reset the minimisation statistics.
            reset_min_stats(sim_index=i, verbosity=verbosity)

            # Status.
            if status.current_analysis:
                status.auto_analysis[status.current_analysis].mc_number = i
            else:
                status.mc_number = i

            # Optimisation.
            api.grid_search(lower=model_lower, upper=model_upper, inc=model_inc, scaling_matrix=scaling_matrix, constraints=constraints, verbosity=verbosity-1, sim_index=i)

            # Print out.
            if verbosity and not processor.is_queued():
                print("Simulation " + repr(i+1))

        # Unset the status.
        if status.current_analysis:
            status.auto_analysis[status.current_analysis].mc_number = None
        else:
            status.mc_number = None

    # Grid search.
    else:
        # Reset the minimisation statistics.
        reset_min_stats(verbosity=verbosity)

        # Optimise.
        api.grid_search(lower=model_lower, upper=model_upper, inc=model_inc, scaling_matrix=scaling_matrix, constraints=constraints, verbosity=verbosity)

    # Execute any queued commands.
    processor.run_queue()
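Two hedged usage sketches for grid_search(); the bounds and increments are illustrative and their lengths must match the parameter count of the active model:

# An 11-increment grid over every model parameter, with constraints applied.
grid_search(inc=11)

# Per-parameter increments with user supplied bounds and no constraints.
grid_search(lower=[0.0, 0.0], upper=[1.0, 100.0], inc=[21, 11], constraints=False)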
Code example #54
File: value.py  Project: pombredanne/relax
def copy(pipe_from=None, pipe_to=None, param=None, force=False):
    """Copy spin specific data values from pipe_from to pipe_to.

    @keyword pipe_from: The data pipe to copy the value from.  This defaults to the current data pipe.
    @type pipe_from:    str
    @keyword pipe_to:   The data pipe to copy the value to.  This defaults to the current data pipe.
    @type pipe_to:      str
    @keyword param:     The name of the parameter to copy the values of.
    @type param:        str
    @keyword force:     A flag forcing the overwriting of current values.
    @type force:        bool
    """

    # The current data pipe.
    if pipe_from == None:
        pipe_from = pipes.cdp_name()
    if pipe_to == None:
        pipe_to = pipes.cdp_name()
    pipe_orig = pipes.cdp_name()

    # Test that the destination data pipe exists.
    check_pipe(pipe_to)

    # Test if the sequence data for pipe_from is loaded.
    if not exists_mol_res_spin_data(pipe_from):
        raise RelaxNoSequenceError(pipe_from)

    # Test if the sequence data for pipe_to is loaded.
    if not exists_mol_res_spin_data(pipe_to):
        raise RelaxNoSequenceError(pipe_to)

    # The specific analysis API object.
    api = return_api(pipe_name=pipe_from)

    # Test if the data exists for pipe_to.
    if force == False:
        for spin in spin_loop(pipe=pipe_to):
            # Get the value and error for pipe_to.
            value, error = api.return_value(spin, param)

            # Data exists.
            if value != None or error != None:
                raise RelaxValueError(param, pipe_to)

    # Switch to the data pipe to copy values to.
    pipes.switch(pipe_to)

    # Copy the values.
    for spin, spin_id in spin_loop(pipe=pipe_from, return_id=True):
        # Get the value and error from pipe_from.
        value, error = api.return_value(spin, param)

        # Set the values of pipe_to.
        if value != None:
            set(spin_id=spin_id, val=value, param=param, pipe=pipe_to, force=force)
        if error != None:
            set(spin_id=spin_id, val=error, param=param, pipe=pipe_to, error=True, force=force)

    # Reset all minimisation statistics.
    minimise.reset_min_stats(pipe_to)

    # Switch back to the original current data pipe.
    pipes.switch(pipe_orig)
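A hedged usage sketch for copy(); the pipe and parameter names are illustrative:

# Copy the 's2' values from the 'final' pipe into the current data pipe,
# overwriting anything already set there.
copy(pipe_from='final', param='s2', force=True)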
Code example #55
File: value.py  Project: pombredanne/relax
def write_data(param=None, file=None, scaling=1.0, bc=False, return_value=None, return_data_desc=None, comment=None):
    """The function which actually writes the data.

    @keyword param:             The parameter to write.
    @type param:                str
    @keyword file:              The open, writable file object to write the data to.
    @type file:                 file object
    @keyword scaling:           The value to scale the parameter by.
    @type scaling:              float
    @keyword bc:                A flag which if True will cause the back calculated values to be written.
    @type bc:                   bool
    @keyword return_value:      An optional function which if supplied will override the default value returning function.
    @type return_value:         None or func
    @keyword return_data_desc:  An optional function which if supplied will override the default parameter description returning function.
    @type return_data_desc:     None or func
    @keyword comment:           Text which will be added to the start of the file as comments.  All lines will be prefixed by '# '.
    @type comment:              str
    """

    # The specific analysis API object.
    api = return_api()

    # Get the value and error returning function, and the parameter description function, if required.
    if not return_value:
        return_value = api.return_value
    if not return_data_desc:
        return_data_desc = api.return_data_desc

    # Format string.
    format = "%-30s%-30s"

    # Init the data.
    mol_names = []
    res_nums = []
    res_names = []
    spin_nums = []
    spin_names = []
    values = []
    errors = []

    # Get the parameter description and add it to the file.
    desc = return_data_desc(param)
    if desc:
        file.write("# Parameter description:  %s.\n" % desc)
        file.write("#\n")

    # The comments.
    if comment:
        # Split up the lines.
        lines = comment.splitlines()

        # Write out.
        for line in lines:
            file.write("# %s\n" % line)
        file.write("#\n")

    # Determine the data type, check the data, and set up the dictionary type data keys.
    data_names = 'value'
    error_names = 'error'
    data_type = None
    for spin, mol_name, res_num, res_name in spin_loop(full_info=True):
        # Get the value and error.
        value, error = return_value(spin, param, bc=bc)

        # Dictionary type data.
        if isinstance(value, dict):
            # Sanity check.
            if not data_type in [None, 'dict']:
                raise RelaxError("Mixed data types.")
            data_type = 'dict'

            # Initialise the structures.
            if not isinstance(data_names, list):
                data_names = []
                error_names = []

            # Sort the keys.
            keys = sorted(value.keys())

            # Loop over the keys.
            for key in keys:
                # Add the data and error names if new.
                if key not in data_names:
                    data_names.append(key)
                    error_names.append('sd(%s)' % key)

        # List type data.
        elif isinstance(value, list):
            # Sanity check.
            if data_type not in [None, 'list']:
                raise RelaxError("Mixed data types.")
            data_type = 'list'

            # Initialise the structures on the first list encountered, setting up the data and error names.
            if not isinstance(data_names, list):
                data_names = []
                error_names = []
                for i in range(len(value)):
                    data_names.append('value_%s' % i)
                    error_names.append('error_%s' % i)

            # Check that the length is consistent across all subsequent spins.
            elif len(data_names) != len(value):
                raise RelaxError("The list type data has an inconsistent number of elements between different spin systems.")

        # None.
        elif value == None:
            pass

        # Simple values.
        else:
            # Sanity check.
            if not data_type in [None, 'value']:
                raise RelaxError("Mixed data types.")
            data_type = 'value'

    # Pack the data.
    for spin, mol_name, res_num, res_name in spin_loop(full_info=True):
        # Get the value and error.
        value, error = return_value(spin, param, bc=bc)

        # Append the spin data (scaled).
        mol_names.append(mol_name)
        res_nums.append(res_num)
        res_names.append(res_name)
        spin_nums.append(spin.num)
        spin_names.append(spin.name)

        # Dictionary type data.
        if data_type == 'dict':
            # Initialise the lists.
            values.append([])
            errors.append([])

            # Loop over the keys.
            for key in data_names:
                # Append the scaled values and errors.
                if value == None or key not in value:
                    values[-1].append(None)
                else:
                    values[-1].append(scale(value[key], scaling))
                if error == None or key not in error:
                    errors[-1].append(None)
                else:
                    errors[-1].append(scale(error[key], scaling))

        # List type data.
        elif data_type == 'list':
            # Initialise the lists.
            values.append([])
            errors.append([])

            # Loop over the data.
            for i in range(len(data_names)):
                # Append the scaled values and errors.
                if value == None:
                    values[-1].append(None)
                else:
                    values[-1].append(scale(value[i], scaling))
                if error == None:
                    errors[-1].append(None)
                else:
                    errors[-1].append(scale(error[i], scaling))

        # Simple values.
        else:
            # Append the scaled values and errors.
            values.append(scale(value, scaling))
            errors.append(scale(error, scaling))

    # Replace all spaces " " with "_" in the data and error names.
    if isinstance(data_names, list):
        data_names = [name.replace(" ", "_") for name in data_names]
    if isinstance(error_names, list):
        error_names = [name.replace(" ", "_") for name in error_names]

    # Write the data.
    write_spin_data(file, mol_names=mol_names, res_nums=res_nums, res_names=res_names, spin_nums=spin_nums, spin_names=spin_names, data=values, data_name=data_names, error=errors, error_name=error_names)
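Since the body above calls file.write(), a usage sketch needs an open, writable file object; the parameter name 's2' and the file name are illustrative:

# Write the order parameter values, unscaled, with a header comment
# (assumes an active data pipe containing spin data for this parameter).
with open('s2_values.txt', 'w') as out:
    write_data(param='s2', file=out, scaling=1.0, comment="Model-free order parameters.")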
Code example #56

# Python imports.
from os import getcwd, listdir, pardir, sep
from re import search

# relax imports.
from data_store import Relax_data_store; ds = Relax_data_store()
from specific_analyses.api import return_api


# Create the data pipe.
pipe.create('lactose', 'N-state')

# The specific analysis API object.
api = return_api()

# Load all PDB structures from the current directory.
files = listdir(getcwd())
num = 1
for file in files:
    print(file)
    if search(r'\.pdb$', file):
        structure.read_pdb(file=file, set_model_num=num, set_mol_name='conf')
        num += 1
NUM_STR = num - 1

# Set up the 13C and 1H spins information.
structure.load_spins(spin_id=':900@C*', ave_pos=False)
structure.load_spins(spin_id=':900@H*', ave_pos=False)
spin.isotope(isotope='13C', spin_id='@C*')
Code example #57
def grid_setup(lower=None,
               upper=None,
               inc=None,
               verbosity=1,
               skip_preset=True):
    """Determine the per-model grid bounds, allowing for the zooming grid search.

    @keyword lower:         The user supplied lower bounds of the grid search.  The length of this list must equal the number of parameters in the model.
    @type lower:            list of numbers
    @keyword upper:         The user supplied upper bounds of the grid search.  The length of this list must equal the number of parameters in the model.
    @type upper:            list of numbers
    @keyword inc:           The user supplied grid search increments.
    @type inc:              int or list of int
    @keyword verbosity:     The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:        int
    @keyword skip_preset:   This argument, when True, allows any parameter which already has a value set to be skipped in the grid search.
    @type skip_preset:      bool
    @return:                The per-model grid lower bounds, upper bounds, and increments.  The first dimension of each structure corresponds to the model, the second to the model parameters.
    @rtype:                 tuple of (list of lists of float, list of lists of float, list of lists of int)
    """

    # The specific analysis API object and parameter object.
    api = return_api()
    param_object = return_parameter_object()

    # Initialise.
    model_lower = []
    model_upper = []
    model_inc = []

    # Loop over the models.
    for model_info in api.model_loop():
        # Get the parameter names and current values.
        names = api.get_param_names(model_info)
        values = api.get_param_values(model_info)

        # No parameters for this model.
        if names == None or len(names) == 0:
            model_lower.append([])
            model_upper.append([])
            model_inc.append([])
            continue

        # The parameter number.
        n = len(names)

        # Check that the user supplied bound lengths are ok.
        if lower != None and len(lower) != n:
            raise RelaxLenError('lower bounds', n)
        if upper != None and len(upper) != n:
            raise RelaxLenError('upper bounds', n)

        # Check the user supplied increments.
        if isinstance(inc, list) and len(inc) != n:
            raise RelaxLenError('increment', n)
        if isinstance(inc, list):
            for i in range(n):
                if not (isinstance(inc[i], int) or inc[i] == None):
                    raise RelaxIntListIntError('increment', inc)
        elif not isinstance(inc, int):
            raise RelaxIntListIntError('increment', inc)

        # Convert to the model increment list.
        if isinstance(inc, int):
            model_inc.append([inc] * n)
        else:
            model_inc.append(inc)

        # Print out the model title.
        api.print_model_title(prefix="Grid search setup:  ",
                              model_info=model_info)

        # The grid zoom level.
        zoom = 0
        if hasattr(cdp, 'grid_zoom_level'):
            zoom = cdp.grid_zoom_level
        zoom_factor = 1.0 / 2.0**zoom
        if zoom > 0:
            print(
                "Zooming grid level of %s, scaling the grid size by a factor of %s.\n"
                % (zoom, zoom_factor))

        # Append empty lists for the bounds to be built up.
        model_lower.append([])
        model_upper.append([])

        # Loop over the parameters.
        data = []
        for i in range(n):
            # A comment for user feedback.
            comment = 'Default bounds'
            if lower != None and upper != None:
                comment = 'User supplied lower and upper bound'
            elif lower != None:
                comment = 'User supplied lower bound'
            elif upper != None:
                comment = 'User supplied upper bound'

            # Alias the number of increments for this parameter.
            incs = model_inc[-1][i]

            # Error checking for increment values of None.
            if incs == None and values[i] in [None, {}, []]:
                raise RelaxError(
                    "The parameter '%s' has no preset value, therefore a grid increment of None is not valid."
                    % names[i])

            # The lower bound for this parameter.
            if lower != None:
                lower_i = lower[i]
            else:
                lower_i = param_object.grid_lower(names[i],
                                                  incs=incs,
                                                  model_info=model_info)

            # The upper bound for this parameter.
            if upper != None:
                upper_i = upper[i]
            else:
                upper_i = param_object.grid_upper(names[i],
                                                  incs=incs,
                                                  model_info=model_info)

            # The skipping logic.
            skip = False
            if skip_preset:
                # Override the flag if the zoom is on.
                if zoom:
                    skip = False

                # No preset value.
                elif values[i] in [None, {}, []]:
                    skip = False

                # The preset value is a NaN value due to numpy conversions of None.
                elif isNaN(values[i]):
                    skip = False

                # Ok, now the parameter can be skipped.
                else:
                    skip = True

            # Override the skip flag if the incs value is None.
            if incs == None:
                skip = True

            # Skip preset values.
            if skip:
                lower_i = values[i]
                upper_i = values[i]
                model_inc[-1][i] = incs = 1
                comment = 'Preset value'

            # Zooming grid.
            elif zoom:
                # The full size and scaled size.
                size = upper_i - lower_i
                zoom_size = size * zoom_factor
                half_size = zoom_size / 2.0
                comment = 'Zoom grid width of %s %s' % (
                    zoom_size, param_object.units(names[i]))

                # The new size around the current value.
                lower_zoom = values[i] - half_size
                upper_zoom = values[i] + half_size

                # Outside of the original lower bound, so shift the grid to fit.
                if zoom > 0 and lower_zoom < lower_i:
                    # The amount to shift by.
                    shift = lower_i - lower_zoom

                    # Set the new bounds.
                    upper_i = upper_zoom + shift

                # Outside of the original upper bound, so shift the grid to fit.
                elif zoom > 0 and upper_zoom > upper_i:
                    # The amount to shift by.
                    shift = upper_i - upper_zoom

                    # Set the new bounds.
                    lower_i = lower_zoom + shift

                # Inside the original bounds.
                else:
                    lower_i = lower_zoom
                    upper_i = upper_zoom

            # Add to the data list for printing out.
            data.append([
                names[i],
                "%15s" % lower_i,
                "%15s" % upper_i,
                "%15s" % incs, comment
            ])

            # Scale the bounds.
            scaling = param_object.scaling(names[i], model_info=model_info)
            lower_i /= scaling
            upper_i /= scaling

            # Append.
            model_lower[-1].append(lower_i)
            model_upper[-1].append(upper_i)

        # Printout.
        if verbosity:
            write_data(out=sys.stdout,
                       headings=[
                           "Parameter", "Lower bound", "Upper bound",
                           "Increments", "Comment"
                       ],
                       data=data)
            sys.stdout.write('\n')

    # Return the bounds.
    return model_lower, model_upper, model_inc
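The zoom branch above halves the grid width once per zoom level, re-centres the window on the current parameter value, and shifts it back inside the original bounds on overflow.  A self-contained sketch of that arithmetic (the function name is illustrative, not relax API):

def zoom_bounds(lower, upper, value, zoom):
    """Return the zoomed (lower, upper) window centred on value, for zoom >= 1."""
    half = (upper - lower) / 2.0**zoom / 2.0
    lo, hi = value - half, value + half
    if lo < lower:
        # Below the original lower bound, so shift the window up to fit.
        lo, hi = lower, hi + (lower - lo)
    elif hi > upper:
        # Above the original upper bound, so shift the window down to fit.
        lo, hi = lo + (upper - hi), upper
    return lo, hi

print(zoom_bounds(0.0, 1.0, 0.9, zoom=2))    # (0.75, 1.0), shifted to fit.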
Code example #58
    def __init__(self, params, spin_id, inc, lower, upper, axis_incs,
                 file_prefix, dir, point, point_file, chi_surface,
                 create_par_file):
        """Map the space upon class instantiation."""

        # Initialise.
        #############

        # Function arguments.
        self.params = params
        self.spin_id = spin_id
        self.n = len(params)
        self.inc = inc
        self.axis_incs = axis_incs
        self.file_prefix = file_prefix
        self.dir = dir
        self.point_file = point_file

        # Define the nested list which holds the parameter values and chi2 values.
        self.par_chi2_vals = []

        # The specific analysis API object.
        self.api = return_api()

        # Points.
        if point != None:
            # Convert to a list of numpy arrays, handling both a single point and a list of points.
            point_list = []
            if isinstance(point[0], float):
                point_list.append(array(point, float64))
                self.point = point_list
                self.num_points = 1
            else:
                for i in range(len(point)):
                    point_list.append(array(point[i], float64))
                self.point = point_list
                self.num_points = len(point)
        else:
            self.num_points = 0

        # Get the default map bounds.
        self.bounds = zeros((self.n, 2), float64)
        for i in range(self.n):
            # Get the bounds for the parameter i.
            bounds = self.api.map_bounds(self.params[i], self.spin_id)

            # No bounds found.
            if not bounds:
                raise RelaxError("No bounds for the parameter " +
                                 repr(self.params[i]) +
                                 " could be determined.")

            # Assign the bounds to the global data structure.
            self.bounds[i] = bounds

        # Lower bounds.
        if lower != None:
            self.bounds[:, 0] = array(lower, float64)

        # Upper bounds.
        if upper != None:
            self.bounds[:, 1] = array(upper, float64)

        # Setup the step sizes.
        self.step_size = zeros(self.n, float64)
        self.step_size = (self.bounds[:, 1] - self.bounds[:, 0]) / self.inc

        # Create all the OpenDX data and files.
        #######################################

        # Get the date.
        self.get_date()

        # Create the strings associated with the map axes.
        self.map_axes()

        # Generate the map.
        self.create_map()

        # Generate the file with parameters and associated chi2 values.
        if create_par_file:
            self.create_par_chi2(file_prefix=self.file_prefix,
                                 par_chi2_vals=self.par_chi2_vals)

            # Generate the matplotlib script file for plotting the surfaces.
            self.matplotlib_surface_plot()

        # Generate the file with parameters and associated chi2 values for the points sent to OpenDX.
        if self.num_points >= 1 and create_par_file:
            # Calculate the parameter and associated chi2 values for the points.
            par_chi2_vals = self.calc_point_par_chi2()

            # Generate the file.
            self.create_par_chi2(file_prefix=self.point_file,
                                 par_chi2_vals=par_chi2_vals)

        # Set the default chi2 isosurface values (innermost, inner, middle, and outer).
        if chi_surface == None:
            all_chi2 = array(self.all_chi, float64)
            innermost = percentile(all_chi2, 10)
            inner = percentile(all_chi2, 20)
            middle = percentile(all_chi2, 50)
            outer = percentile(all_chi2, 90)
            chi_surface = [innermost, inner, middle, outer]

        # Create the OpenDX .net program file.
        write_program(file_prefix=self.file_prefix,
                      point_file=self.point_file,
                      dir=self.dir,
                      inc=self.inc,
                      N=self.n,
                      num_points=self.num_points,
                      labels=self.labels,
                      tick_locations=self.tick_locations,
                      tick_values=self.tick_values,
                      date=self.date,
                      chi_surface=chi_surface)

        # Create the OpenDX .cfg program configuration file.
        write_config(file_prefix=self.file_prefix,
                     dir=self.dir,
                     date=self.date)

        # Create the OpenDX .general file.
        write_general(file_prefix=self.file_prefix, dir=self.dir, inc=self.inc)

        # Create the OpenDX .general and data files for the given point.
        if self.num_points >= 1:
            write_point(file_prefix=self.point_file,
                        dir=self.dir,
                        inc=self.inc,
                        point=self.point,
                        num_points=self.num_points,
                        bounds=self.bounds,
                        N=self.n)
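The default isosurface levels above are simply the 10th, 20th, 50th, and 90th percentiles of all mapped chi2 values.  As a standalone illustration of that numpy call (the chi2 values are made up):

from numpy import array, float64, percentile

all_chi2 = array([0.8, 1.2, 2.2, 3.4, 5.5, 9.1], float64)
chi_surface = [percentile(all_chi2, p) for p in (10, 20, 50, 90)]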
Code example #59
File: eliminate.py  Project: pombredanne/relax
def eliminate(function=None, args=None):
    """Model elimination.

    @keyword function:  A user supplied function for model elimination.  This function should accept
                        four arguments:  a string defining a certain parameter, the value of the
                        parameter, the minimisation instance (i.e. the residue index if the model
                        is residue specific), and the function arguments.  If the model is rejected,
                        the function should return True, otherwise it should return False.  The
                        function will be executed multiple times, once for each parameter of the model.
    @type function:     function
    @keyword args:      The arguments to be passed to the user supplied function.
    @type args:         tuple
    """

    # Test if the current data pipe exists.
    check_pipe()

    # The specific analysis API object.
    api = return_api()

    # Determine if simulations are active.
    if hasattr(cdp, 'sim_state') and cdp.sim_state == True:
        sim_state = True
    else:
        sim_state = False


    # Loop over the models.
    for model_info in api.model_loop():
        # Model elimination.
        ####################

        if not sim_state:
            # Get the parameter names and values.
            names = api.get_param_names(model_info=model_info)
            values = api.get_param_values(model_info=model_info)

            # No data.
            if names == None or values is None:
                continue

            # Test that the names and values vectors are of equal length.
            if len(names) != len(values):
                raise RelaxError("The names vector " + repr(names) + " is of a different length to the values vector " + repr(values) + ".")

            # Loop over the parameters.
            flag = False
            for j in range(len(names)):
                # Eliminate function.
                if api.eliminate(names[j], values[j], args, model_info=model_info):
                    flag = True

            # Deselect.
            if flag:
                api.deselect(model_info=model_info)


        # Simulation elimination.
        #########################

        else:
            # Loop over the simulations.
            for j in range(cdp.sim_number):
                # Get the parameter names and values.
                names = api.get_param_names(model_info=model_info)
                values = api.get_param_values(sim_index=j, model_info=model_info)

                # No data.
                if names == None or values is None:
                    continue

                # Test that the names and values vectors are of equal length.
                if len(names) != len(values):
                    raise RelaxError("The names vector " + repr(names) + " is of a different length to the values vector " + repr(values) + ".")

                # Loop over the parameters.
                flag = False
                for k in range(len(names)):
                    # Eliminate function.
                    if api.eliminate(names[k], values[k], args, sim=j, model_info=model_info):
                        flag = True

                # Deselect.
                if flag:
                    api.deselect(sim_index=j, model_info=model_info)
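A hedged sketch of the user-function interface described in the docstring above; the parameter name 'te' and the cutoff are illustrative, and the wiring of the function into the analysis API is not shown in this excerpt:

def reject_long_te(name, value, model_info, args):
    """Reject any model whose 'te' parameter exceeds the supplied cutoff."""
    cutoff, = args
    return name == 'te' and value is not None and value > cutoff

# Deselect all models with a te value above 10 ns (illustrative cutoff).
eliminate(function=reject_long_te, args=(10e-9,))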