Example #1
0
    def set_error(self, index, error, model_info=None):
        """Set the parameter errors.

        @param index:           The index of the parameter to set the errors for.
        @type index:            int
        @param error:           The error value.
        @type error:            float
        @keyword model_info:    The model information from model_loop().  This is unused.
        @type model_info:       None
        @return:                The parameter error that was set, or None if the index does not match an alignment tensor parameter.
        @rtype:                 float or None
        """

        # Align parameters.
        names = ['Axx', 'Ayy', 'Axy', 'Axz', 'Ayz']

        # Alignment tensor parameters.
        if index < len(cdp.align_ids)*5:
            # The tensor and parameter index.
            param_index = index % 5
            # Floor division - under Python 3, '/' is true division and would produce a float index.
            tensor_index = (index - index % 5) // 5

            # Set the error.
            tensor = align_tensor.return_tensor(index=tensor_index, skip_fixed=True)
            tensor.set(param=names[param_index], value=error, category='err')

            # Return the object.
            return getattr(tensor, names[param_index]+'_err')
Example #2
0
    def set_error(self, index, error, model_info=None):
        """Set the parameter errors.

        @param index:           The index of the parameter to set the errors for.
        @type index:            int
        @param error:           The error value.
        @type error:            float
        @keyword model_info:    The model information from model_loop().  This is unused.
        @type model_info:       None
        @return:                The parameter error that was set, or None if the index does not match an alignment tensor parameter.
        @rtype:                 float or None
        """

        # Align parameters.
        names = ['Axx', 'Ayy', 'Axy', 'Axz', 'Ayz']

        # Alignment tensor parameters.
        if index < len(cdp.align_ids) * 5:
            # The tensor and parameter index.
            param_index = index % 5
            # Floor division - under Python 3, '/' is true division and would produce a float index.
            tensor_index = (index - index % 5) // 5

            # Set the error.
            tensor = align_tensor.return_tensor(index=tensor_index,
                                                skip_fixed=True)
            tensor.set(param=names[param_index], value=error, category='err')

            # Return the object.
            return getattr(tensor, names[param_index] + '_err')
Example #3
0
    def sim_return_param(self, index, model_info=None):
        """Return the array of simulation parameter values.

        @param index:           The index of the parameter to return the array of values for.
        @type index:            int
        @keyword model_info:    The model information from model_loop().  This is unused.
        @type model_info:       None
        @return:                The array of simulation parameter values, or None if the index does not match an alignment tensor parameter.
        @rtype:                 list of float or None
        """

        # Align parameters.
        names = ['Axx', 'Ayy', 'Axy', 'Axz', 'Ayz']

        # Alignment tensor parameters.
        if index < align_tensor.num_tensors(skip_fixed=True)*5:
            # The tensor and parameter index.
            param_index = index % 5
            # Floor division - under Python 3, '/' is true division and would produce a float index.
            tensor_index = (index - index % 5) // 5

            # Return the simulation parameter array.
            tensor = align_tensor.return_tensor(index=tensor_index, skip_fixed=True)
            return getattr(tensor, names[param_index]+'_sim')
Example #4
0
    def sim_return_param(self, index, model_info=None):
        """Return the array of simulation parameter values.

        @param index:           The index of the parameter to return the array of values for.
        @type index:            int
        @keyword model_info:    The model information from model_loop().  This is unused.
        @type model_info:       None
        @return:                The array of simulation parameter values, or None if the index does not match an alignment tensor parameter.
        @rtype:                 list of float or None
        """

        # Align parameters.
        names = ['Axx', 'Ayy', 'Axy', 'Axz', 'Ayz']

        # Alignment tensor parameters.
        if index < align_tensor.num_tensors(skip_fixed=True) * 5:
            # The tensor and parameter index.
            param_index = index % 5
            # Floor division - under Python 3, '/' is true division and would produce a float index.
            tensor_index = (index - index % 5) // 5

            # Return the simulation parameter array.
            tensor = align_tensor.return_tensor(index=tensor_index,
                                                skip_fixed=True)
            return getattr(tensor, names[param_index] + '_sim')
Example #5
0
    def grid_search(self, lower=None, upper=None, inc=None, scaling_matrix=None, constraints=False, verbosity=0, sim_index=None):
        """The grid search function.

        @param lower:           The lower bounds of the grid search which must be equal to the number of parameters in the model.
        @type lower:            list of lists of floats
        @param upper:           The upper bounds of the grid search which must be equal to the number of parameters in the model.
        @type upper:            list of lists of floats
        @param inc:             The increments for each dimension of the space for the grid search.  The number of elements in the array must equal to the number of parameters in the model.
        @type inc:              list of lists of int
        @keyword scaling_matrix:    The per-model list of diagonal and square scaling matrices.
        @type scaling_matrix:       list of numpy rank-2, float64 array or list of None
        @param constraints:     If True, constraints are applied during the grid search (eliminating parts of the grid).  If False, no constraints are used.
        @type constraints:      bool
        @param verbosity:       A flag specifying the amount of information to print.  The higher the value, the greater the verbosity.
        @type verbosity:        int
        @keyword sim_index:     The index of the Monte Carlo simulation to optimise.  This is passed through to minimise() unmodified.
        @type sim_index:        None or int
        """

        # Test if the N-state model has been set up.
        if not hasattr(cdp, 'model'):
            raise RelaxNoModelError('N-state')

        # The number of parameters.
        # NOTE(review): 'n' is not used below - presumably param_num() is called for its validation side effects; confirm before removing.
        n = param_num()

        # Determine the data type.
        data_types = base_data_types()

        # The number of tensors to optimise.
        tensor_num = align_tensor.num_tensors(skip_fixed=True)

        # Custom sub-grid search for when only tensors are optimised (as each tensor is independent, the number of points collapses from inc**(5*N) to N*inc**5).
        if cdp.model == 'fixed' and tensor_num > 1 and ('rdc' in data_types or 'pcs' in data_types) and not align_tensor.all_tensors_fixed() and hasattr(cdp, 'paramag_centre_fixed') and cdp.paramag_centre_fixed:
            # Print out.
            print("Optimising each alignment tensor separately.")

            # Store the alignment tensor fixed flags.
            fixed_flags = []
            for i in range(len(cdp.align_ids)):
                # Get the tensor object.
                tensor = align_tensor.return_tensor(index=i, skip_fixed=False)

                # Store the flag.
                fixed_flags.append(tensor.fixed)

                # Fix the tensor.
                tensor.set('fixed', True)

            # Loop over each sub-grid.
            for i in range(len(cdp.align_ids)):
                # Skip the tensor if originally fixed.
                if fixed_flags[i]:
                    continue

                # Get the tensor object.
                tensor = align_tensor.return_tensor(index=i, skip_fixed=False)

                # Unfix the current tensor.
                tensor.set('fixed', False)

                # Grid search parameter subsets.
                lower_sub = lower[0][i*5:i*5+5]
                upper_sub = upper[0][i*5:i*5+5]
                inc_sub = inc[0][i*5:i*5+5]

                # Minimisation of the sub-grid.
                self.minimise(min_algor='grid', lower=[lower_sub], upper=[upper_sub], inc=[inc_sub], scaling_matrix=[None], constraints=constraints, verbosity=verbosity, sim_index=sim_index)

                # Fix the tensor again.
                tensor.set('fixed', True)

            # Reset the state of the tensors.
            for i in range(len(cdp.align_ids)):
                # Get the tensor object.
                tensor = align_tensor.return_tensor(index=i, skip_fixed=False)

                # Fix the tensor.
                tensor.set('fixed', fixed_flags[i])

        # All other minimisation.
        else:
            self.minimise(min_algor='grid', lower=lower, upper=upper, inc=inc, scaling_matrix=scaling_matrix, constraints=constraints, verbosity=verbosity, sim_index=sim_index)
Example #6
0
    def grid_search(self,
                    lower=None,
                    upper=None,
                    inc=None,
                    scaling_matrix=None,
                    constraints=False,
                    verbosity=0,
                    sim_index=None):
        """The grid search function.

        @param lower:           The lower bounds of the grid search which must be equal to the number of parameters in the model.
        @type lower:            list of lists of floats
        @param upper:           The upper bounds of the grid search which must be equal to the number of parameters in the model.
        @type upper:            list of lists of floats
        @param inc:             The increments for each dimension of the space for the grid search.  The number of elements in the array must equal to the number of parameters in the model.
        @type inc:              list of lists of int
        @keyword scaling_matrix:    The per-model list of diagonal and square scaling matrices.
        @type scaling_matrix:       list of numpy rank-2, float64 array or list of None
        @param constraints:     If True, constraints are applied during the grid search (eliminating parts of the grid).  If False, no constraints are used.
        @type constraints:      bool
        @param verbosity:       A flag specifying the amount of information to print.  The higher the value, the greater the verbosity.
        @type verbosity:        int
        @keyword sim_index:     The index of the Monte Carlo simulation to optimise.  This is passed through to minimise() unmodified.
        @type sim_index:        None or int
        """

        # Test if the N-state model has been set up.
        if not hasattr(cdp, 'model'):
            raise RelaxNoModelError('N-state')

        # The number of parameters.
        # NOTE(review): 'n' is not used below - presumably param_num() is called for its validation side effects; confirm before removing.
        n = param_num()

        # Determine the data type.
        data_types = base_data_types()

        # The number of tensors to optimise.
        tensor_num = align_tensor.num_tensors(skip_fixed=True)

        # Custom sub-grid search for when only tensors are optimised (as each tensor is independent, the number of points collapses from inc**(5*N) to N*inc**5).
        if cdp.model == 'fixed' and tensor_num > 1 and (
                'rdc' in data_types or 'pcs' in data_types
        ) and not align_tensor.all_tensors_fixed() and hasattr(
                cdp, 'paramag_centre_fixed') and cdp.paramag_centre_fixed:
            # Print out.
            print("Optimising each alignment tensor separately.")

            # Store the alignment tensor fixed flags.
            fixed_flags = []
            for i in range(len(cdp.align_ids)):
                # Get the tensor object.
                tensor = align_tensor.return_tensor(index=i, skip_fixed=False)

                # Store the flag.
                fixed_flags.append(tensor.fixed)

                # Fix the tensor.
                tensor.set('fixed', True)

            # Loop over each sub-grid.
            for i in range(len(cdp.align_ids)):
                # Skip the tensor if originally fixed.
                if fixed_flags[i]:
                    continue

                # Get the tensor object.
                tensor = align_tensor.return_tensor(index=i, skip_fixed=False)

                # Unfix the current tensor.
                tensor.set('fixed', False)

                # Grid search parameter subsets.
                lower_sub = lower[0][i * 5:i * 5 + 5]
                upper_sub = upper[0][i * 5:i * 5 + 5]
                inc_sub = inc[0][i * 5:i * 5 + 5]

                # Minimisation of the sub-grid.
                self.minimise(min_algor='grid',
                              lower=[lower_sub],
                              upper=[upper_sub],
                              inc=[inc_sub],
                              scaling_matrix=[None],
                              constraints=constraints,
                              verbosity=verbosity,
                              sim_index=sim_index)

                # Fix the tensor again.
                tensor.set('fixed', True)

            # Reset the state of the tensors.
            for i in range(len(cdp.align_ids)):
                # Get the tensor object.
                tensor = align_tensor.return_tensor(index=i, skip_fixed=False)

                # Fix the tensor.
                tensor.set('fixed', fixed_flags[i])

        # All other minimisation.
        else:
            self.minimise(min_algor='grid',
                          lower=lower,
                          upper=upper,
                          inc=inc,
                          scaling_matrix=scaling_matrix,
                          constraints=constraints,
                          verbosity=verbosity,
                          sim_index=sim_index)