Example 1
    def error_analysis(self):
        """Perform an error analysis of the peak intensities for each field strength separately."""

        # Printout.
        section(file=sys.stdout, text="Error analysis", prespace=2)

        # Check if intensity errors have already been calculated by the user.
        precalc = True
        for spin in spin_loop(skip_desel=True):
            # The spin is missing the peak intensity error data structure.
            if not hasattr(spin, 'peak_intensity_err'):
                precalc = False
                break

            # Determine if a spectrum ID is missing from the list.
            for id in cdp.spectrum_ids:
                if id not in spin.peak_intensity_err:
                    precalc = False
                    break

        # Skip.
        if precalc:
            print("Skipping the error analysis as it has already been performed.")
            return

        # Perform the error analysis.
        self.interpreter.spectrum.error_analysis_per_field()
Example 2
    def error_analysis(self):
        """Perform an error analysis of the peak intensities for each field strength separately."""

        # Printout.
        section(file=sys.stdout, text="Error analysis", prespace=2)

        # Check if intensity errors have already been calculated by the user.
        precalc = True
        for spin in spin_loop(skip_desel=True):
            # The spin is missing the peak intensity error data structure.
            if not hasattr(spin, 'peak_intensity_err'):
                precalc = False
                break

            # Determine if a spectrum ID is missing from the list.
            for id in cdp.spectrum_ids:
                if id not in spin.peak_intensity_err:
                    precalc = False
                    break

        # Skip.
        if precalc:
            print(
                "Skipping the error analysis as it has already been performed."
            )
            return

        # Perform the error analysis.
        self.interpreter.spectrum.error_analysis_per_field()
Example 3
def error_analysis_per_field():
    """Perform an error analysis of the peak intensities for each field strength separately."""

    # Printout.
    section(file=sys.stdout, text="Automatic Error analysis per field strength", prespace=2)

    # Handle missing frequency data.
    frqs = [None]
    if hasattr(cdp, 'spectrometer_frq_list'):
        frqs = cdp.spectrometer_frq_list

    # Loop over the spectrometer frequencies.
    for frq in frqs:
        # Generate a list of spectrum IDs matching the frequency.
        ids = []
        for id in cdp.spectrum_ids:
            # Check that the spectrometer frequency matches.
            match_frq = True
            if frq is not None and cdp.spectrometer_frq[id] != frq:
                match_frq = False

            # Add the ID.
            if match_frq:
                ids.append(id)

        # Run the error analysis on the subset.
        if frq is None:
            print("No field strength data, subset IDs for spectrum.error_analysis are: %s" % ids)
        else:
            print("For field strength %.8f MHz, subset IDs for spectrum.error_analysis are: %s" % (frq/1e6, ids))
        error_analysis(subset=ids)
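
The ID-matching loop above can also be expressed as a single grouping pass over the spectrum IDs. Below is a minimal, self-contained sketch of that idea using a plain dict in place of relax's cdp.spectrometer_frq structure; the spectrum IDs and frequencies are invented for illustration.

# A minimal sketch of grouping spectrum IDs by field strength.  The dict
# stands in for relax's cdp.spectrometer_frq; the values are invented.
spectrometer_frq = {
    'ncyc_1_500': 500.0e6,
    'ncyc_2_500': 500.0e6,
    'ncyc_1_800': 800.0e6,
}

# Group the spectrum IDs by their frequency in a single pass.
groups = {}
for spectrum_id, frq in spectrometer_frq.items():
    groups.setdefault(frq, []).append(spectrum_id)

for frq, ids in groups.items():
    print("For field strength %.8f MHz, subset IDs are: %s" % (frq / 1e6, ids))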
Example 4
    def check_numpy_less_1_8_and_numerical_model(self):
        """Check for numerical model using numpy version under 1.8.  This will result in slow "for loop" calculation through data, making the analysis 5-6 times slower."""

        # Warn the user if numerical models are used with an old numpy version.
        if tuple(int(i) for i in version.version.split('.')[:2]) < (1, 8):
            # Store which models are in numeric.
            models = []

            # Loop through models.
            for model in self.models:
                if model in MODEL_LIST_NUMERIC:
                    models.append(model)

            # Write a system message if numerical models are present and the numpy version is below 1.8.
            if len(models) > 0:
                # Printout.
                section(file=sys.stdout,
                        text="Numpy version checking for numerical models.",
                        prespace=2)
                warn(
                    RelaxWarning(
                        "Your version of numpy is %s, and below the recommended version of 1.8 for numerical models."
                        % (version.version)))
                warn(
                    RelaxWarning(
                        "Please consider upgrading your numpy version to 1.8.")
                )

                # Loop over models.
                for model in models:
                    warn(
                        RelaxWarning(
                            "This could make the numerical analysis with model '%s', 5 to 6 times slower."
                            % (model)))
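
The tuple comparison above is used because converting a truncated version string to a float misorders releases such as "1.10". A tiny illustration with an invented version string:

# Why float-based version parsing is fragile (illustrative version string).
version_str = "1.10.1"
print(float(version_str[:3]))                              # 1.1, wrongly "below 1.8"
print(tuple(int(i) for i in version_str.split('.')[:2]))   # (1, 10), correctly >= (1, 8)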
Example 5
    def check_vars(self):
        """Check that the user has set the variables correctly."""

        # Printout.
        section(file=sys.stdout, text="Variable checking", prespace=2)

        # The pipe name.
        if not has_pipe(self.pipe_name):
            raise RelaxNoPipeError(self.pipe_name)

        # Check the model selection.
        allowed = ['AIC', 'AICc', 'BIC']
        if self.modsel not in allowed:
            raise RelaxError("The model selection technique '%s' is not in the allowed list of %s." % (self.modsel, allowed))

        # Some warning for the user if the pure numeric solution is selected.
        if self.numeric_only:
            # Loop over all models.
            for model in self.models:
                # Skip the models used for nesting.
                if model in [MODEL_CR72, MODEL_MMQ_CR72, MODEL_MP05]:
                    continue

                # Warnings for all other analytic models.
                if model in MODEL_LIST_ANALYTIC:
                    warn(RelaxWarning("The analytic model '%s' will be optimised but will not be used in any way in this numeric model only auto-analysis." % model))

        # Printout.
        print("The dispersion auto-analysis variables are OK.")
Example 6
    def optimise_rigid(self):
        """Optimise the rigid frame order model.

        The Sobol' integration is not used here, so the algorithm differs from that of the other frame order models.
        """

        # The model.
        model = 'rigid'
        title = model[0].upper() + model[1:]

        # Print out.
        section(file=sys.stdout, text="%s frame order model"%title, prespace=5)

        # The data pipe name.
        self.pipe_name_dict[model] = '%s - %s' % (title, self.pipe_bundle)
        self.pipe_name_list.append(self.pipe_name_dict[model])

        # The results file already exists, so read its contents instead.
        if self.read_results(model=model, pipe_name=self.pipe_name_dict[model]):
            # The PDB representation of the model (in case this was not completed correctly).
            self.interpreter.frame_order.pdb_model(dir=self.results_dir+model, force=True)

            # Nothing more to do.
            return

        # Create the data pipe using the full data set, and switch to it.
        self.interpreter.pipe.copy(self.data_pipe_full, self.pipe_name_dict[model], bundle_to=self.pipe_bundle)
        self.interpreter.pipe.switch(self.pipe_name_dict[model])

        # Select the Frame Order model.
        self.interpreter.frame_order.select_model(model=model)

        # Split grid search if translation is active.
        if cdp.ave_pos_translation:
            # Printout.
            print("\n\nTranslation active - splitting the grid search and iterating.")

            # Loop twice.
            for i in range(2):
                # First optimise the rotation.
                self.interpreter.grid_search(inc=[None, None, None, self.grid_inc_rigid, self.grid_inc_rigid, self.grid_inc_rigid], constraints=False)

                # Then the translation.
                self.interpreter.grid_search(inc=[self.grid_inc_rigid, self.grid_inc_rigid, self.grid_inc_rigid, None, None, None], constraints=False)

        # Standard grid search.
        else:
            self.interpreter.grid_search(inc=self.grid_inc_rigid, constraints=False)

        # Minimise.
        self.interpreter.minimise(self.min_algor, constraints=False)

        # Results printout.
        self.print_results()

        # Save the results.
        self.interpreter.results.write(dir=self.results_dir+model, force=True)

        # The PDB representation of the model.
        self.interpreter.frame_order.pdb_model(dir=self.results_dir+model, force=True)
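
In the split grid search above, an increment entry of None appears to hold that parameter fixed while the remaining dimensions are gridded (judging from the comments in the example), so the two calls alternate between the three rotation and the three translation parameters. A small sketch of how those increment lists are built; the increment count is an assumed example value:

# Build the two increment lists for a six-parameter model (three translation
# parameters followed by three rotation parameters).  The count is illustrative.
grid_inc_rigid = 11

rot_only = [None] * 3 + [grid_inc_rigid] * 3    # grid the rotation, hold the translation
trans_only = [grid_inc_rigid] * 3 + [None] * 3  # grid the translation, hold the rotation

print(rot_only)    # [None, None, None, 11, 11, 11]
print(trans_only)  # [11, 11, 11, None, None, None]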
Example 7
def error_analysis_per_field():
    """Perform an error analysis of the peak intensities for each field strength separately."""

    # Printout.
    section(file=sys.stdout,
            text="Automatic Error analysis per field strength",
            prespace=2)

    # Handle missing frequency data.
    frqs = [None]
    if hasattr(cdp, 'spectrometer_frq_list'):
        frqs = cdp.spectrometer_frq_list

    # Loop over the spectrometer frequencies.
    for frq in frqs:
        # Generate a list of spectrum IDs matching the frequency.
        ids = []
        for id in cdp.spectrum_ids:
            # Check that the spectrometer frequency matches.
            match_frq = True
            if frq is not None and cdp.spectrometer_frq[id] != frq:
                match_frq = False

            # Add the ID.
            if match_frq:
                ids.append(id)

        # Run the error analysis on the subset.
        if frq is None:
            print("No field strength data, subset IDs for spectrum.error_analysis are: %s" % ids)
        else:
            print("For field strength %.8f MHz, subset IDs for spectrum.error_analysis are: %s" % (frq / 1e6, ids))
        error_analysis(subset=ids)
Example 8
    def summary(self):
        """Print out a summary of the relax test suite."""

        # Title.
        title(file=sys.stdout, text="Summary of the relax test suite")

        # The skipped tests.
        self.summary_skipped()

        # Subtitle.
        section(file=sys.stdout, text="Synopsis")

        # System/functional test summary.
        if hasattr(self, 'system_result'):
            summary_line("System/functional tests", self.system_result)

        # Unit test summary.
        if hasattr(self, 'unit_result'):
            summary_line("Unit tests", self.unit_result)

        # GUI test summary.
        if hasattr(self, 'gui_result'):
            summary_line("GUI tests", self.gui_result)

        # Synopsis.
        if hasattr(self, 'system_result') and hasattr(self, 'unit_result') and hasattr(self, 'gui_result'):
            if self.gui_result == "skip":
                status = self.system_result and self.unit_result
            else:
                status = self.system_result and self.unit_result and self.gui_result
            summary_line("Synopsis", status)

        # End.
        print('\n\n')
Example 9
    def check_vars(self):
        """Check that the user has set the variables correctly."""

        # Printout.
        section(file=sys.stdout, text="Variable checking", prespace=2)

        # The pipe name.
        if not has_pipe(self.pipe_name):
            raise RelaxNoPipeError(self.pipe_name)

        # Check the model selection.
        allowed = ['AIC', 'AICc', 'BIC']
        if self.modsel not in allowed:
            raise RelaxError(
                "The model selection technique '%s' is not in the allowed list of %s."
                % (self.modsel, allowed))

        # Some warning for the user if the pure numeric solution is selected.
        if self.numeric_only:
            # Loop over all models.
            for model in self.models:
                # Skip the models used for nesting.
                if model in MODEL_LIST_NEST:
                    continue

                # Warnings for all other analytic models.
                if model in MODEL_LIST_ANALYTIC:
                    warn(
                        RelaxWarning(
                            "The analytic model '%s' will be optimised but will not be used in any way in this numeric model only auto-analysis."
                            % model))

        # Printout.
        print("The dispersion auto-analysis variables are OK.")
Example 10
    def summary(self):
        """Print out a summary of the relax test suite."""

        # Title.
        title(file=sys.stdout, text="Summary of the relax test suite")

        # The skipped tests.
        if status.skip_blacklisted_tests:
            self.summary_skipped()

        # Subtitle.
        section(file=sys.stdout, text="Synopsis")

        # System/functional test summary.
        if hasattr(self, 'system_result'):
            summary_line("System/functional tests",
                         self.system_result,
                         width=status.text_width)

        # Unit test summary.
        if hasattr(self, 'unit_result'):
            summary_line("Unit tests",
                         self.unit_result,
                         width=status.text_width)

        # GUI test summary.
        if hasattr(self, 'gui_result'):
            summary_line("GUI tests", self.gui_result, width=status.text_width)

        # Verification test summary.
        if hasattr(self, 'verification_result'):
            summary_line("Software verification tests",
                         self.verification_result,
                         width=status.text_width)

        # Synopsis.
        if hasattr(self, 'system_result') and hasattr(self, 'unit_result') and hasattr(self, 'gui_result') and hasattr(self, 'verification_result'):
            if self.gui_result == "skip":
                test_status = self.system_result and self.unit_result and self.verification_result
            else:
                test_status = self.system_result and self.unit_result and self.gui_result and self.verification_result
            summary_line("Synopsis", test_status, width=status.text_width)

        # End.
        print('\n\n')
Example 11
    def optimise(self, model=None):
        """Optimise the model, taking model nesting into account.

        @keyword model: The model to be optimised.
        @type model:    str
        """

        # Printout. 
        section(file=sys.stdout, text="Optimisation", prespace=2)

        # Deselect insignificant spins.
        if model not in ['R2eff', 'No Rex']:
            self.interpreter.relax_disp.insignificance(level=self.insignificance)

        # Use pre-run results as the optimisation starting point.
        if self.pre_run_dir:
            self.pre_run_parameters(model=model)

        # Otherwise use the normal nesting check and grid search if not nested.
        else:
            # Nested model simplification.
            nested = self.nesting(model=model)

            # Grid search.
            if not nested:
                self.interpreter.grid_search(inc=self.grid_inc)

        # Minimise.
        self.interpreter.minimise('simplex', func_tol=self.opt_func_tol, max_iter=self.opt_max_iterations, constraints=True)

        # Model elimination.
        if self.eliminate:
            self.interpreter.eliminate()

        # Monte Carlo simulations.
        if self.mc_sim_all_models or len(self.models) < 2 or model == 'R2eff':
            self.interpreter.monte_carlo.setup(number=self.mc_sim_num)
            self.interpreter.monte_carlo.create_data()
            self.interpreter.monte_carlo.initial_values()
            self.interpreter.minimise('simplex', func_tol=self.opt_func_tol, max_iter=self.opt_max_iterations, constraints=True)
            if self.eliminate:
                self.interpreter.eliminate()
            self.interpreter.monte_carlo.error_analysis()
Example 12
    def error_analysis(self):
        """Perform an error analysis of the peak intensities for each field strength separately."""

        # Printout.
        section(file=sys.stdout, text="Error analysis", prespace=2)

        # Check if intensity errors have already been calculated by the user.
        precalc = True
        for spin in spin_loop(skip_desel=True):
            # The spin is missing the intensity error data structure.
            if not hasattr(spin, 'intensity_err'):
                precalc = False
                break

            # Determine if a spectrum ID is missing from the list.
            for id in cdp.spectrum_ids:
                if id not in spin.intensity_err:
                    precalc = False
                    break

        # Skip.
        if precalc:
            print("Skipping the error analysis as it has already been performed.")
            return

        # Loop over the spectrometer frequencies.
        for frq in loop_frq():
            # Generate a list of spectrum IDs matching the frequency.
            ids = []
            for id in cdp.spectrum_ids:
                # Check that the spectrometer frequency matches.
                match_frq = True
                if frq is not None and cdp.spectrometer_frq[id] != frq:
                    match_frq = False

                # Add the ID.
                if match_frq:
                    ids.append(id)

            # Run the error analysis on the subset.
            self.interpreter.spectrum.error_analysis(subset=ids)
Example 13
    def test_section(self):
        """Test of the lib.text.sectioning.section() function."""

        # Write out the section.
        file = DummyFileObject()
        section(file=file, text='Test section')

        # Read the results.
        lines = file.readlines()
        print("Formatted section lines:  %s" % lines)

        # Check the title.
        real_lines = [
            '\n',
            '\n',
            'Test section\n',
            '============\n',
            '\n',
        ]
        self.assertEqual(len(lines), len(real_lines))
        for i in range(len(lines)):
            self.assertEqual(lines[i], real_lines[i])
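
The unit test above fully pins down the output format of section(): two blank lines, the title, an '=' underline of the same length, and one trailing blank line. A minimal sketch consistent with that test, not the real lib.text.sectioning implementation (whose signature, as the other examples show, also accepts prespace and postspace):

import sys

def section(file=sys.stdout, text='', prespace=2, postspace=1):
    """A sketch of a section title writer matching the unit test above."""
    # The leading blank lines, the title, its '=' underline, then trailing blanks.
    file.write('\n' * prespace)
    file.write(text + '\n')
    file.write('=' * len(text) + '\n')
    file.write('\n' * postspace)

section(text='Test section')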
Example 14
    def check_numpy_less_1_8_and_numerical_model(self):
        """Check for numerical model using numpy version under 1.8.  This will result in slow "for loop" calculation through data, making the analysis 5-6 times slower."""

        # Warn the user if numerical models are used with an old numpy version.
        if tuple(int(i) for i in version.version.split('.')[:2]) < (1, 8):
            # Store which models are in numeric.
            models = []

            # Loop through models.
            for model in self.models:
                if model in MODEL_LIST_NUMERIC:
                    models.append(model)

            # Write a system message if numerical models are present and the numpy version is below 1.8.
            if len(models) > 0:
                # Printout.
                section(file=sys.stdout, text="Numpy version checking for numerical models.", prespace=2)
                warn(RelaxWarning("Your version of numpy is %s, and below the recommended version of 1.8 for numerical models." % (version.version)))
                warn(RelaxWarning("Please consider upgrading your numpy version to 1.8."))

                # Loop over models.
                for model in models:
                    warn(RelaxWarning("This could make the numerical analysis with model '%s', 5 to 6 times slower." % (model)))
Example 15
def signal_noise_ratio(verbose=True):
    """Calculate the signal to noise ratio per spin.

    @keyword verbose:       A flag which if True will print additional information out.
    @type verbose:          bool
    """

    # Tests.
    check_pipe()
    check_mol_res_spin_data()

    # Test if spectra have been loaded.
    if not hasattr(cdp, 'spectrum_ids'):
        raise RelaxError("No spectra have been loaded.")

    # Possible print.
    if verbose:
        print("\nThe following signal to noise ratios has been calculated:\n")

    # Set the spin specific signal to noise ratio.
    for spin, spin_id in spin_loop(return_id=True):
        # Skip deselected spins.
        if not spin.select:
            continue

        # Skip spins missing intensity data.
        if not hasattr(spin, 'peak_intensity'):
            continue

        # Test if error analysis has been performed.
        if not hasattr(spin, 'peak_intensity_err'):
            raise RelaxError(
                "Intensity error analysis has not been performed.  Please see spectrum.error_analysis()."
            )

        # If necessary, create the dictionary.
        if not hasattr(spin, 'sn_ratio'):
            spin.sn_ratio = {}

        # Loop over the ID.
        ids = []
        for id in spin.peak_intensity:
            # Append the ID to the list.
            ids.append(id)

            # Calculate the sn_ratio.
            pint = float(spin.peak_intensity[id])
            pint_err = float(spin.peak_intensity_err[id])
            sn_ratio = pint / pint_err

            # Assign the sn_ratio.
            spin.sn_ratio[id] = sn_ratio

        # Sort the IDs alphanumerically.
        ids = sort_filenames(filenames=ids, rev=False)

        # Collect the data under sorted ids.
        data_i = []
        for id in ids:
            # Get the values.
            pint = spin.peak_intensity[id]
            pint_err = spin.peak_intensity_err[id]
            sn_ratio = spin.sn_ratio[id]

            # Store the data.
            data_i.append([id, repr(pint), repr(pint_err), repr(sn_ratio)])

        if verbose:
            section(file=sys.stdout,
                    text="Signal to noise ratio for spin ID '%s'" % spin_id,
                    prespace=1)
            write_data(out=sys.stdout,
                       headings=["Spectrum ID", "Signal", "Noise", "S/N"],
                       data=data_i)
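
The per-spectrum arithmetic above reduces to dividing each peak intensity by its error. A self-contained sketch with invented numbers, mirroring the peak_intensity and peak_intensity_err dictionaries the function reads from each spin:

# Invented peak intensities and errors keyed by spectrum ID.
peak_intensity = {'ref': 1.2e6, 'sat': 4.0e5}
peak_intensity_err = {'ref': 2.0e3, 'sat': 2.0e3}

# The signal to noise ratio is simply intensity / error.
sn_ratio = {}
for spectrum_id in peak_intensity:
    sn_ratio[spectrum_id] = float(peak_intensity[spectrum_id]) / float(peak_intensity_err[spectrum_id])

print(sn_ratio)  # {'ref': 600.0, 'sat': 200.0}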
Example 16
    def nested_models(self):
        """Protocol for the nested optimisation of the frame order models."""

        # First optimise the rigid model using all data.
        self.optimise_rigid()

        # Iteratively optimise the frame order models.
        for model in self.models:
            # Skip the already optimised rigid model.
            if model == 'rigid':
                continue

            # The model title.
            title = model[0].upper() + model[1:]

            # Printout.
            section(file=sys.stdout, text="%s frame order model"%title, prespace=5)

            # The data pipe name.
            self.pipe_name_dict[model] = '%s - %s' % (title, self.pipe_bundle)
            self.pipe_name_list.append(self.pipe_name_dict[model])

            # The results file already exists, so read its contents instead.
            if self.read_results(model=model, pipe_name=self.pipe_name_dict[model]):
                # Re-perform model elimination just in case.
                self.interpreter.eliminate()

                # The PDB representation of the model and visualisation script (in case this was not completed correctly).
                self.visualisation(model=model)

                # Skip to the next model.
                continue

            # Create the data pipe using the full data set, and switch to it.
            self.interpreter.pipe.copy(self.data_pipe_subset, self.pipe_name_dict[model], bundle_to=self.pipe_bundle)
            self.interpreter.pipe.switch(self.pipe_name_dict[model])

            # Select the Frame Order model.
            self.interpreter.frame_order.select_model(model=model)

            # Copy nested parameters.
            self.nested_params(model)

            # The optimisation settings.
            self.interpreter.frame_order.num_int_pts(num=self.num_int_pts_grid)
            self.interpreter.frame_order.quad_int(flag=False)

            # Grid search.
            incs = self.custom_grid_incs(model)
            self.interpreter.grid_search(inc=incs, constraints=False)

            # Minimise (for the PCS data subset and full RDC set).
            for i in range(len(self.num_int_pts_subset)):
                self.interpreter.frame_order.num_int_pts(num=self.num_int_pts_subset[i])
                self.interpreter.minimise(self.min_algor, func_tol=self.func_tol_subset[i], constraints=False)

            # Copy the PCS data.
            self.interpreter.pcs.copy(pipe_from=self.data_pipe_full, pipe_to=self.pipe_name_dict[model])

            # Minimise (for the full data set).
            for i in range(len(self.num_int_pts_full)):
                self.interpreter.frame_order.num_int_pts(num=self.num_int_pts_full[i])
                self.interpreter.minimise(self.min_algor, func_tol=self.func_tol_full[i], constraints=False)

            # Results printout.
            self.print_results()

            # Model elimination.
            self.interpreter.eliminate()

            # Save the results.
            self.interpreter.results.write(dir=self.results_dir+model, force=True)

            # The PDB representation of the model and visualisation script.
            self.visualisation(model=model)
Example 17
def sn_ratio_deselection(ratio=10.0,
                         operation='<',
                         all_sn=False,
                         select=False,
                         verbose=True):
    """Use user function deselect.spin on spins with signal to noise ratio higher or lower than ratio.  The operation determines the selection operation.

    @keyword ratio:         The ratio to compare to.
    @type ratio:            float
    @keyword operation:     The comparison operation by which to select the spins.  Of the operation(sn_ratio, ratio), where operation can either be:  '<', '<=', '>', '>=', '==', '!='.
    @type operation:        str
    @keyword all_sn:        A flag specifying if all the signal to noise ratios per spin should match the comparison operator, or if just a single comparison match is enough.
    @type all_sn:           bool
    @keyword select:        A flag specifying if the user function select.spin should be used instead.
    @type select:           bool
    @keyword verbose:       A flag which if True will print additional information out.
    @type verbose:          bool
    """

    # Tests.
    check_pipe()
    check_mol_res_spin_data()

    # Test if spectra have been loaded.
    if not hasattr(cdp, 'spectrum_ids'):
        raise RelaxError("No spectra have been loaded.")

    # Assign the comparison operator.
    # "'<' : strictly less than"
    if operation == '<':
        op = operator.lt

    # "'<=' : less than or equal"
    elif operation == '<=':
        op = operator.le

    # "'>' : strictly greater than"
    elif operation == '>':
        op = operator.gt

    # "'>=' : greater than or equal"
    elif operation == '>=':
        op = operator.ge

    # "'==' : equal"
    elif operation == '==':
        op = operator.eq

    # "'!=' : not equal",
    elif operation == '!=':
        op = operator.ne

    # If not assigned, raise error.
    else:
        raise RelaxError(
            "The compare operation does not belong to the allowed list of methods: ['<', '<=', '>', '>=', '==', '!=']"
        )

    # Assign text for print out.
    if all_sn:
        text_all_sn = "all"
    else:
        text_all_sn = "any"

    if select:
        text_sel = "selected"
        sel_func = sel_spin
    else:
        text_sel = "deselected"
        sel_func = desel_spin

    # Print
    section(file=sys.stdout,
            text="Signal to noise ratio comparison selection",
            prespace=1,
            postspace=0)
    print("For the comparion test: S/N %s %1.1f" % (operation, ratio))

    # Loop over the spins.
    spin_ids = []
    for spin, spin_id in spin_loop(return_id=True):
        # Skip spins missing sn_ratio.
        if not hasattr(spin, 'sn_ratio'):
            # Skip warning for deselected spins.
            if spin.select:
                warn(
                    RelaxWarning(
                        "Spin '%s' does not contain Signal to Noise calculations. Perform the user function 'spectrum.sn_ratio'. This spin is skipped."
                        % spin_id))
            continue

        # Loop over the ID, collect and sort.
        ids = []
        for id in spin.peak_intensity:
            # Append the ID to the list.
            ids.append(id)

        # Sort the IDs alphanumerically.
        ids = sort_filenames(filenames=ids, rev=False)

        # Loop over the sorted ids.
        sn_val = []
        for id in ids:
            # Append the Signal to Noise in the list.
            sn_val.append(spin.sn_ratio[id])

        # Convert the list to array.
        sn_val = asarray(sn_val)

        # Make the comparison for the whole array.
        test_arr = op(sn_val, ratio)

        # Determine how the test should evaluate.
        if all_sn:
            test = test_arr.all()
        else:
            test = test_arr.any()

        # Make a numpy array of the IDs, and extract the IDs which passed the test.
        ids_arr = asarray(ids)
        ids_test_arr = ids_arr[test_arr]

        # Invert the boolean array.
        test_arr_inv = ~test_arr
        ids_test_arr_inv = ids_arr[test_arr_inv]

        # print
        if verbose:
            subsection(
                file=sys.stdout,
                text="Signal to noise ratio comparison for spin ID '%s'" %
                spin_id,
                prespace=1,
                postspace=0)
            print("Following spectra ID evaluated to True: %s" % ids_test_arr)
            print("Following spectra ID evaluated to False: %s" %
                  ids_test_arr_inv)
            print(
                "'%s' comparisons have been used for evaluation, which evaluated to: %s"
                % (text_all_sn, test))
            if test:
                print("The spin ID '%s' is %s" % (spin_id, text_sel))
            else:
                print("The spin ID '%s' is skipped" % spin_id)

        # If the test evaluates to True, then do selection action.
        if test:
            # Select/Deselect the spin.
            sel_func(spin_id=spin_id)

            # Assign spin_id to list, for printing.
            spin_ids.append(spin_id)

    # Make summary
    if verbose:
        if len(spin_ids) > 0:
            subsection(
                file=sys.stdout,
                text=
                "For all of the S/N comparion test, the following spin ID's was %s"
                % text_sel,
                prespace=1,
                postspace=0)
            print(spin_ids)
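
The if/elif chain mapping the operation string to an operator function can be collapsed into a dictionary lookup. This is a sketch of an equivalent alternative rather than how relax itself is written; a plain ValueError stands in for RelaxError:

import operator

# Map each comparison string to its operator function.
OPERATIONS = {
    '<': operator.lt,
    '<=': operator.le,
    '>': operator.gt,
    '>=': operator.ge,
    '==': operator.eq,
    '!=': operator.ne,
}

def get_op(operation):
    """Return the comparison function for the given operation string."""
    try:
        return OPERATIONS[operation]
    except KeyError:
        raise ValueError("The compare operation does not belong to the allowed list of methods: %s" % sorted(OPERATIONS))

print(get_op('>=')(10.0, 10.0))  # True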
Example 18
    def summary_skipped(self):
        """Print out information about skipped tests.""" 

        # Counts.
        system_count = {}
        unit_count = {}
        gui_count = {}
        verification_count = {}
        for i in range(len(status.skipped_tests)):
            # Alias.
            test = status.skipped_tests[i]

            # Skip all skipped tests whereby the module is set to None to indicate that the test skipping should not be reported.
            if test[1] is None:
                continue

            # Initialise if needed.
            if not test[1] in system_count:
                system_count[test[1]] = 0
                unit_count[test[1]] = 0
                gui_count[test[1]] = 0
                verification_count[test[1]] = 0

            # A system test.
            if test[2] == 'system':
                system_count[test[1]] += 1

            # A unit test.
            if test[2] == 'unit':
                unit_count[test[1]] += 1

            # A GUI test.
            if test[2] == 'gui':
                gui_count[test[1]] += 1

            # A verification test.
            if test[2] == 'verification':
                verification_count[test[1]] += 1

        # The missing modules.
        missing_modules = sorted(system_count.keys())
        section(file=sys.stdout, text="Optional packages/modules")

        # Nothing missing.
        if not missing_modules:
            # Except for the wx module!
            if not dep_check.wx_module and hasattr(self, 'gui_result'):
                print("All GUI tests skipped due to the missing wxPython module, no other tests skipped due to missing modules.\n")

            # Normal printout.
            else:
                print("No tests skipped due to missing modules.\n")

            # Skip the table.
            return

        # Header.
        print("Tests skipped due to missing optional packages/modules/software:\n")
        header = "%-33s" % "Module/package/software" 
        if len(system_count):
            header = "%s %20s" % (header, "System test count")
        if len(unit_count):
            header = "%s %20s" % (header, "Unit test count")
        if len(gui_count):
            header = "%s %20s" % (header, "GUI test count")
        if len(verification_count):
            header = "%s %20s" % (header, "Verification test count")
        print('-'*len(header))
        print(header)
        print('-'*len(header))

        # The table.
        for module in missing_modules:
            text = "%-33s" % module
            if len(system_count):
                text = "%s %20s" % (text, system_count[module])
            if len(unit_count):
                text = "%s %20s" % (text, unit_count[module])
            if len(gui_count):
                text = "%s %20s" % (text, gui_count[module])
            if len(verification_count):
                text = "%s %20s" % (text, verification_count[module])
            print(text)

        # End the table.
        print('-'*len(header))
        print("\n")
Example 19
def sn_ratio_deselection(ratio=10.0, operation='<', all_sn=False, select=False, verbose=True):
    """Use user function deselect.spin on spins with signal to noise ratio higher or lower than ratio.  The operation determines the selection operation.

    @keyword ratio:         The ratio to compare to.
    @type ratio:            float
    @keyword operation:     The comparison operation by which to select the spins.  Of the operation(sn_ratio, ratio), where operation can either be:  '<', '<=', '>', '>=', '==', '!='.
    @type operation:        str
    @keyword all_sn:        A flag specifying if all the signal to noise ratios per spin should match the comparison operator, or if just a single comparison match is enough.
    @type all_sn:           bool
    @keyword select:        A flag specifying if the user function select.spin should be used instead.
    @type select:           bool
    @keyword verbose:       A flag which if True will print additional information out.
    @type verbose:          bool
    """

    # Tests.
    check_pipe()
    check_mol_res_spin_data()

    # Test if spectra have been loaded.
    if not hasattr(cdp, 'spectrum_ids'):
        raise RelaxError("No spectra have been loaded.")

    # Assign the comparison operator.
    # "'<' : strictly less than"
    if operation == '<':
        op = operator.lt

    # "'<=' : less than or equal"
    elif operation == '<=':
        op = operator.le

    # "'>' : strictly greater than"
    elif operation == '>':
        op = operator.gt

    # "'>=' : greater than or equal"
    elif operation == '>=':
        op = operator.ge

    # "'==' : equal"
    elif operation == '==':
        op = operator.eq

    # "'!=' : not equal",
    elif operation == '!=':
        op = operator.ne

    # If not assigned, raise error.
    else:
        raise RelaxError("The compare operation does not belong to the allowed list of methods: ['<', '<=', '>', '>=', '==', '!=']")

    # Assign text for print out.
    if all_sn:
        text_all_sn = "all"
    else:
        text_all_sn = "any"

    if select:
        text_sel = "selected"
        sel_func = sel_spin
    else:
        text_sel = "deselected"
        sel_func = desel_spin

    # Print
    section(file=sys.stdout, text="Signal to noise ratio comparison selection", prespace=1, postspace=0)
    print("For the comparion test: S/N %s %1.1f"%(operation, ratio))

    # Loop over the spins.
    spin_ids = []
    for spin, spin_id in spin_loop(return_id=True):
        # Skip spins missing sn_ratio.
        if not hasattr(spin, 'sn_ratio'):
            # Skip warning for deselected spins.
            if spin.select:
                warn(RelaxWarning("Spin '%s' does not contain Signal to Noise calculations. Perform the user function 'spectrum.sn_ratio'. This spin is skipped." % spin_id))
            continue

        # Loop over the ID, collect and sort.
        ids = []
        for id in spin.peak_intensity:
            # Append the ID to the list.
            ids.append(id)

        # Sort the IDs alphanumerically.
        ids = sort_filenames(filenames=ids, rev=False)

        # Loop over the sorted ids.
        sn_val = []
        for id in ids:
            # Append the Signal to Noise in the list.
            sn_val.append(spin.sn_ratio[id])

        # Convert the list to array.
        sn_val = asarray(sn_val)

        # Make the comparison for the whole array.
        test_arr = op(sn_val, ratio)

        # Determine how the test should evaluate.
        if all_sn:
            test = test_arr.all()
        else:
            test = test_arr.any()

        # Make a numpy array of the IDs, and extract the IDs which passed the test.
        ids_arr = asarray(ids)
        ids_test_arr = ids_arr[test_arr]

        # Invert the boolean array.
        test_arr_inv = ~test_arr
        ids_test_arr_inv = ids_arr[test_arr_inv]

        # print
        if verbose:
            subsection(file=sys.stdout, text="Signal to noise ratio comparison for spin ID '%s'"%spin_id, prespace=1, postspace=0)
            print("Following spectra ID evaluated to True: %s"%ids_test_arr)
            print("Following spectra ID evaluated to False: %s"%ids_test_arr_inv)
            print("'%s' comparisons have been used for evaluation, which evaluated to: %s"%(text_all_sn, test))
            if test:
                print("The spin ID '%s' is %s"%(spin_id, text_sel))
            else:
                print("The spin ID '%s' is skipped"%spin_id)

        # If the test evaluates to True, then do selection action.
        if test:
            # Select/Deselect the spin.
            sel_func(spin_id=spin_id)

            # Assign spin_id to list, for printing.
            spin_ids.append(spin_id)

    # Make summary
    if verbose:
        if len(spin_ids) > 0:
            subsection(file=sys.stdout, text="For all of the S/N comparion test, the following spin ID's was %s"%text_sel, prespace=1, postspace=0)
            print(spin_ids)
Example 20
def signal_noise_ratio(verbose=True):
    """Calculate the signal to noise ratio per spin.

    @keyword verbose:       A flag which if True will print additional information out.
    @type verbose:          bool
    """

    # Tests.
    check_pipe()
    check_mol_res_spin_data()

    # Test if spectra have been loaded.
    if not hasattr(cdp, 'spectrum_ids'):
        raise RelaxError("No spectra have been loaded.")

    # Possible print.
    if verbose:
        print("\nThe following signal to noise ratios has been calculated:\n")

    # Set the spin specific signal to noise ratio.
    for spin, spin_id in spin_loop(return_id=True):
        # Skip deselected spins.
        if not spin.select:
            continue

        # Skip spins missing intensity data.
        if not hasattr(spin, 'peak_intensity'):
            continue

        # Test if error analysis has been performed.
        if not hasattr(spin, 'peak_intensity_err'):
            raise RelaxError("Intensity error analysis has not been performed.  Please see spectrum.error_analysis().")

        # If necessary, create the dictionary.
        if not hasattr(spin, 'sn_ratio'):
            spin.sn_ratio = {}

        # Loop over the ID.
        ids = []
        for id in spin.peak_intensity:
            # Append the ID to the list.
            ids.append(id)

            # Calculate the sn_ratio.
            pint = float(spin.peak_intensity[id])
            pint_err = float(spin.peak_intensity_err[id])
            sn_ratio = pint / pint_err

            # Assign the sn_ratio.
            spin.sn_ratio[id] = sn_ratio

        # Sort the IDs alphanumerically.
        ids = sort_filenames(filenames=ids, rev=False)

        # Collect the data under sorted ids.
        data_i = []
        for id in ids:
            # Get the values.
            pint = spin.peak_intensity[id]
            pint_err = spin.peak_intensity_err[id]
            sn_ratio = spin.sn_ratio[id]

            # Store the data.
            data_i.append([id, repr(pint), repr(pint_err), repr(sn_ratio)])

        if verbose:
            section(file=sys.stdout, text="Signal to noise ratio for spin ID '%s'"%spin_id, prespace=1)
            write_data(out=sys.stdout, headings=["Spectrum ID", "Signal", "Noise", "S/N"], data=data_i)
Example 21
    def write_results(self, path=None, model=None):
        """Create a set of results, text and Grace files for the current data pipe.

        @keyword path:  The directory to place the files into.
        @type path:     str
        @keyword model: The model whose results are being written, or None for the final model selection round.
        @type model:    str
        """

        # Printout.
        section(file=sys.stdout, text="Results writing", prespace=2)

        # If this is the final model selection round, check which models have been tested.
        if model is None:
            models_tested = []
            for spin, spin_id in spin_loop(return_id=True, skip_desel=True):
                spin_model = spin.model

                # Add to list, if not in already.
                if spin_model not in models_tested:
                    models_tested.append(spin_model)
        else:
            models_tested = None

        # Special for R2eff model.
        if model == MODEL_R2EFF:
            # The R2eff parameter.
            self.interpreter.value.write(param='r2eff', file='r2eff.out', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='r2eff', file='r2eff.agr', dir=path, force=True)

            # Exponential curves.
            if has_exponential_exp_type():
                self.interpreter.relax_disp.plot_exp_curves(file='intensities.agr', dir=path, force=True)    # Average peak intensities.
                self.interpreter.relax_disp.plot_exp_curves(file='intensities_norm.agr', dir=path, force=True, norm=True)    # Average peak intensities (normalised).

                # The I0 parameter.
                self.interpreter.value.write(param='i0', file='i0.out', dir=path, force=True)
                self.interpreter.grace.write(x_data_type='res_num', y_data_type='i0', file='i0.agr', dir=path, force=True)

        # Dispersion curves.
        self.interpreter.relax_disp.plot_disp_curves(dir=path, force=True)
        self.interpreter.relax_disp.write_disp_curves(dir=path, force=True)

        # The selected models for the final run.
        if model is None:
            self.interpreter.value.write(param='model', file='model.out', dir=path, force=True)

        # For CPMG models.
        if has_cpmg_exp_type():
            # The R20 parameter.
            self.write_results_test(path=path, model=model, models_tested=models_tested, param='r2', file_name_ini='r20')

            # The R20A and R20B parameters.
            self.write_results_test(path=path, model=model, models_tested=models_tested, param='r2a', file_name_ini='r20a')
            self.write_results_test(path=path, model=model, models_tested=models_tested, param='r2b', file_name_ini='r20b')

        # For R1rho models.
        if has_r1rho_exp_type():
            # The R1 parameter.
            self.write_results_test(path=path, model=model, models_tested=models_tested, param='r1')

            # The R1rho prime parameter.
            self.write_results_test(path=path, model=model, models_tested=models_tested, param='r2', file_name_ini='r1rho_prime')

            # Plot specific R1rho graphs.
            if model in [None] + MODEL_LIST_R1RHO:
                self.interpreter.relax_disp.plot_disp_curves(dir=path, x_axis=X_AXIS_THETA, force=True)
                self.interpreter.relax_disp.plot_disp_curves(dir=path, y_axis=Y_AXIS_R2_R1RHO, x_axis=X_AXIS_W_EFF, force=True)
                self.interpreter.relax_disp.plot_disp_curves(dir=path, y_axis=Y_AXIS_R2_EFF, x_axis=X_AXIS_THETA, interpolate=INTERPOLATE_OFFSET, force=True)

            # The calculation of theta and w_eff parameter in R1rho experiments.
            if model in MODEL_LIST_R1RHO_FULL:
                self.interpreter.value.write(param='theta', file='theta.out', dir=path, force=True)
                self.interpreter.value.write(param='w_eff', file='w_eff.out', dir=path, force=True)

        # The pA and pB parameters.
        self.write_results_test(path=path, model=model, models_tested=models_tested, param='pA')
        self.write_results_test(path=path, model=model, models_tested=models_tested, param='pB')

        # The pC parameter.
        self.write_results_test(path=path, model=model, models_tested=models_tested, param='pC')

        # The phi_ex parameter.
        self.write_results_test(path=path, model=model, models_tested=models_tested, param='phi_ex')

        # The phi_ex_B and phi_ex_C parameters.
        self.write_results_test(path=path, model=model, models_tested=models_tested, param='phi_ex_B')
        self.write_results_test(path=path, model=model, models_tested=models_tested, param='phi_ex_C')

        # The dw parameter.
        self.write_results_test(path=path, model=model, models_tested=models_tested, param='dw')

        # The dw_AB, dw_BC and dw_AC parameters.
        self.write_results_test(path=path, model=model, models_tested=models_tested, param='dw_AB')
        self.write_results_test(path=path, model=model, models_tested=models_tested, param='dw_BC')
        self.write_results_test(path=path, model=model, models_tested=models_tested, param='dw_AC')

        # The dwH parameter.
        self.write_results_test(path=path, model=model, models_tested=models_tested, param='dwH')

        # The dwH_AB, dwH_BC and dwH_AC parameters.
        self.write_results_test(path=path, model=model, models_tested=models_tested, param='dwH_AB')
        self.write_results_test(path=path, model=model, models_tested=models_tested, param='dwH_BC')
        self.write_results_test(path=path, model=model, models_tested=models_tested, param='dwH_AC')

        # The k_AB, kex and tex parameters.
        self.write_results_test(path=path, model=model, models_tested=models_tested, param='k_AB')
        self.write_results_test(path=path, model=model, models_tested=models_tested, param='kex')
        self.write_results_test(path=path, model=model, models_tested=models_tested, param='tex')

        # The kex_AB, kex_BC, kex_AC parameters.
        self.write_results_test(path=path, model=model, models_tested=models_tested, param='kex_AB')
        self.write_results_test(path=path, model=model, models_tested=models_tested, param='kex_BC')
        self.write_results_test(path=path, model=model, models_tested=models_tested, param='kex_AC')

        # The kB and kC parameters.
        self.write_results_test(path=path, model=model, models_tested=models_tested, param='kB')
        self.write_results_test(path=path, model=model, models_tested=models_tested, param='kC')

        # Minimisation statistics.
        if not (model == MODEL_R2EFF and has_fixed_time_exp_type()):
            self.interpreter.value.write(param='chi2', file='chi2.out', dir=path, force=True)
            self.interpreter.grace.write(y_data_type='chi2', file='chi2.agr', dir=path, force=True)

        # Finally save the results.  This is last to allow the continuation of an interrupted analysis while ensuring that all results files have been created.
        self.interpreter.results.write(file='results', dir=path, force=True)
Example 22
value.set(param='cone_theta', val=0.8)

# Fix the true pivot point.
frame_order.pivot([ 37.254, 0.5, 16.7465], fix=True)

# Change the model.
frame_order.select_model('iso cone')

# Loop over the 2 permutations.
pipe_name = 'iso cone'
tag = ''
for perm in [None, 'A']:
    # The original permutation.
    if perm is None:
        # Title printout.
        section(file=sys.stdout, text="Isotropic cone original permutation")

        # Create a new base data pipe for the iso cone.
        pipe.copy(pipe_from='frame order', pipe_to='iso cone')
        pipe.switch(pipe_name='iso cone')

    # Operations for the 'A' and 'B' permutations.
    else:
        # Title printout.
        section(file=sys.stdout, text="Isotropic cone permutation %s" % perm)

        # The pipe name and tag.
        pipe_name = 'iso cone perm %s' % perm
        tag = '_perm_%s' % perm

        # Create a new data pipe.
Example 23
value.set(param='cone_theta_y', val=0.6)

# Fix the true pivot point.
frame_order.pivot([ 37.254, 0.5, 16.7465], fix=True)

# Change the model.
frame_order.select_model('pseudo-ellipse')

# Loop over the 3 permutations.
pipe_name = 'pseudo-ellipse'
tag = ''
for perm in [None, 'A', 'B']:
    # The original permutation.
    if perm is None:
        # Title printout.
        section(file=sys.stdout, text="Pseudo-ellipse original permutation")

        # Create a new base data pipe for the pseudo-ellipse.
        pipe.copy(pipe_from='frame order', pipe_to='pseudo-ellipse')
        pipe.switch(pipe_name='pseudo-ellipse')

    # Operations for the 'A' and 'B' permutations.
    else:
        # Title printout.
        section(file=sys.stdout, text="Pseudo-ellipse permutation %s" % perm)

        # The pipe name and tag.
        pipe_name = 'pseudo-ellipse perm %s' % perm
        tag = '_perm_%s' % perm

        # Create a new data pipe.
Example 24
    def error_analysis(self):
        """Perform an error analysis of the peak intensities."""

        # Printout.
        section(file=sys.stdout, text="Error analysis", prespace=2)

        # Check if intensity errors have already been calculated by the user.
        precalc = True
        for spin in spin_loop(skip_desel=True):
            # The spin is missing the peak intensity error data structure.
            if not hasattr(spin, 'peak_intensity_err'):
                precalc = False
                break

            # Determine if a spectrum ID is missing from the list.
            for id in cdp.spectrum_ids:
                if id not in spin.peak_intensity_err:
                    precalc = False
                    break

        # Skip.
        if precalc:
            print(
                "Skipping the error analysis as it has already been performed."
            )
            return

        # Check if there are replicates which the user has not specified.

        # Set the flag for duplicates.
        has_dup = False

        if not hasattr(cdp, 'replicates'):
            # Collect all times, and the matching spectrum IDs.
            all_times = []
            all_id = []
            for spectrum_id in cdp.relax_times:
                all_times.append(cdp.relax_times[spectrum_id])
                all_id.append(spectrum_id)

            # Get the duplicates.
            duplicates = [(val, [i for i in range(len(all_times)) if all_times[i] == val]) for val in all_times]

            # Loop over the list of the mapping of times and duplications.
            list_dup_mapping = []
            for i, dup in enumerate(duplicates):
                # Get the current spectrum ID.
                cur_spectrum_id = all_id[i]

                # Get the tuple of the time and the indices of its duplications.
                time, list_index_occur = dup

                # Collect the mapping of indices to IDs.
                id_list = []
                if len(list_index_occur) > 1:
                    # Duplications exist.
                    has_dup = True

                    for list_index in list_index_occur:
                        id_list.append(all_id[list_index])

                # Store to the list.
                list_dup_mapping.append((cur_spectrum_id, id_list))

        # If there is duplication, then assign the replicates.
        if has_dup:
            # Assign the duplicates.
            for spectrum_id, dup_pair in list_dup_mapping:
                if len(dup_pair) > 0:
                    self.interpreter.spectrum.replicated(spectrum_ids=dup_pair)

        # Run the error analysis.
        self.interpreter.spectrum.error_analysis()
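
The duplicate detection above compares every relaxation time against every other; grouping the spectrum IDs by time in a single pass yields the same replicate lists directly. A minimal sketch with an invented relax_times dict:

from collections import defaultdict

# Invented relaxation times keyed by spectrum ID.
relax_times = {'id_a': 0.1, 'id_b': 0.1, 'id_c': 0.2}

# Group the spectrum IDs by their relaxation time.
by_time = defaultdict(list)
for spectrum_id, time in relax_times.items():
    by_time[time].append(spectrum_id)

# Any group with more than one ID is a set of replicated spectra.
replicates = [ids for ids in by_time.values() if len(ids) > 1]
print(replicates)  # [['id_a', 'id_b']]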
Example 25
value.set(param='cone_theta', val=0.8)

# Fix the true pivot point.
frame_order.pivot([37.254, 0.5, 16.7465], fix=True)

# Change the model.
frame_order.select_model('iso cone')

# Loop over the 2 permutations.
pipe_name = 'iso cone'
tag = ''
for perm in [None, 'A']:
    # The original permutation.
    if perm is None:
        # Title printout.
        section(file=sys.stdout, text="Isotropic cone original permutation")

        # Create a new base data pipe for the iso cone.
        pipe.copy(pipe_from='frame order', pipe_to='iso cone')
        pipe.switch(pipe_name='iso cone')

    # Operations for the 'A' and 'B' permutations.
    else:
        # Title printout.
        section(file=sys.stdout, text="Isotropic cone permutation %s" % perm)

        # The pipe name and tag.
        pipe_name = 'iso cone perm %s' % perm
        tag = '_perm_%s' % perm

        # Create a new data pipe.
Example 26
    def run(self):
        """Execute the auto-analysis."""

        # Peak intensity error analysis.
        if MODEL_R2EFF in self.models:
            self.error_analysis()

        # Loop over the models.
        self.model_pipes = []
        for model in self.models:
            # Printout.
            subtitle(file=sys.stdout, text="The '%s' model" % model, prespace=3)

            # The results directory path.
            path = self.results_dir+sep+model

            # The name of the data pipe for the model.
            model_pipe = model
            if self.is_model_for_selection(model):
                self.model_pipes.append(model_pipe)

            # Check that results do not already exist - i.e. a previous run was interrupted.
            path1 = path + sep + 'results'
            path2 = path1 + '.bz2'
            path3 = path1 + '.gz'
            if access(path1, F_OK) or access(path2, F_OK) or access(path3, F_OK):
                # Printout.
                print("Detected the presence of results files for the '%s' model - loading these instead of performing optimisation for a second time." % model)

                # Create a data pipe and switch to it.
                self.interpreter.pipe.create(pipe_name=model_pipe, pipe_type='relax_disp', bundle=self.pipe_bundle)
                self.interpreter.pipe.switch(model_pipe)

                # Load the results.
                self.interpreter.results.read(file='results', dir=path)

                # Jump to the next model.
                continue

            # Create the data pipe by copying the base pipe, then switching to it.
            self.interpreter.pipe.copy(pipe_from=self.pipe_name, pipe_to=model_pipe, bundle_to=self.pipe_bundle)
            self.interpreter.pipe.switch(model_pipe)

            # Select the model.
            self.interpreter.relax_disp.select_model(model)

            # Copy the R2eff values from the R2eff model data pipe.
            if model != MODEL_R2EFF and MODEL_R2EFF in self.models:
                self.interpreter.value.copy(pipe_from=MODEL_R2EFF, pipe_to=model, param='r2eff')

            # Calculate the R2eff values for the fixed relaxation time period data types.
            if model == MODEL_R2EFF and not has_exponential_exp_type():
                self.interpreter.calc()

            # Optimise the model.
            else:
                self.optimise(model=model)

            # Write out the results.
            self.write_results(path=path, model=model)

        # The final model selection data pipe.
        if len(self.models) >= 2:
            # Printout.
            section(file=sys.stdout, text="Final results", prespace=2)

            # Perform model selection.
            self.interpreter.model_selection(method=self.modsel, modsel_pipe='final', bundle=self.pipe_bundle, pipes=self.model_pipes)

            # Final Monte Carlo simulations only.
            if not self.mc_sim_all_models:
                self.interpreter.monte_carlo.setup(number=self.mc_sim_num)
                self.interpreter.monte_carlo.create_data()
                self.interpreter.monte_carlo.initial_values()
                self.interpreter.minimise('simplex', func_tol=self.opt_func_tol, max_iter=self.opt_max_iterations, constraints=True)
                if self.eliminate:
                    self.interpreter.eliminate()
                self.interpreter.monte_carlo.error_analysis()

            # Writing out the final results.
            self.write_results(path=self.results_dir+sep+'final')

        # No model selection.
        else:
            warn(RelaxWarning("Model selection in the dispersion auto-analysis has been skipped as only %s models have been optimised." % len(self.model_pipes)))

        # Finally save the program state.
        self.interpreter.state.save(state='final_state', dir=self.results_dir, force=True)
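
The three access() calls above, which check for the plain, bzip2, and gzip compressed results files, can be folded into one loop over the possible extensions. A small sketch reusing the same os.access and F_OK pattern:

from os import F_OK, access, sep

def results_exist(path):
    """Return True if a 'results' file exists in any supported compression format (sketch)."""
    base = path + sep + 'results'
    for ext in ('', '.bz2', '.gz'):
        if access(base + ext, F_OK):
            return True
    return False

print(results_exist('.'))  # False unless a results file is present.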
Example 27
    def optimise(self, model=None, model_path=None):
        """Optimise the model, taking model nesting into account.

        @keyword model:         The model to be optimised.
        @type model:            str
        @keyword model_path:    The folder name for the model, where possible spaces have been replaced with underscores.
        @type model_path:       str
        """

        # Printout.
        section(file=sys.stdout, text="Optimisation", prespace=2)

        # Deselect insignificant spins.
        if model not in [MODEL_R2EFF, MODEL_NOREX]:
            self.interpreter.relax_disp.insignificance(
                level=self.insignificance)

        # Speed up the grid search by using the minimum R2eff value.
        if self.set_grid_r20 and model != MODEL_R2EFF:
            self.interpreter.relax_disp.r20_from_min_r2eff(force=True)

        # Use pre-run results as the optimisation starting point.
        # Test if file exists.
        if self.pre_run_dir:
            path = self.pre_run_dir + sep + model_path
            # File path.
            file_path = get_file_path('results', path)

            # Test if the file exists and determine the compression type.
            try:
                compress_type, file_path = determine_compression(file_path)
                res_file_exists = True

            except RelaxFileError:
                res_file_exists = False

        if self.pre_run_dir and res_file_exists:
            self.pre_run_parameters(model=model, model_path=model_path)

        # Otherwise use the normal nesting check and grid search if not nested.
        else:
            # Nested model simplification.
            nested = self.nesting(model=model)

            # Otherwise use a grid search of default values to start optimisation with.
            if not nested:
                # Grid search.
                if self.grid_inc:
                    self.interpreter.minimise.grid_search(inc=self.grid_inc)

                # Default values.
                else:
                    # The standard parameters.
                    for param in MODEL_PARAMS[model]:
                        self.interpreter.value.set(param=param, index=None)

                    # The optional R1 parameter.
                    if is_r1_optimised(model=model):
                        self.interpreter.value.set(param='r1', index=None)

        # 'R2eff' model minimisation flags.
        do_minimise = False
        if model == MODEL_R2EFF:
            # The constraints flag.
            constraints = False

            # The minimisation algorithm to use.
            # Both the Jacobian and Hessian matrices have been specified for exponential curve-fitting, allowing the much faster algorithms to be used.
            min_algor = 'Newton'

            # Check if all spins contain 'r2eff' and its associated error.
            has_r2eff = False

            # Loop over all spins.
            for cur_spin, spin_id in spin_loop(return_id=True,
                                               skip_desel=True):
                # Check for 'r2eff' and its associated error.
                if hasattr(cur_spin, 'r2eff') and hasattr(
                        cur_spin, 'r2eff_err'):
                    has_r2eff = True
                else:
                    has_r2eff = False
                    break

            # Skip the optimisation if 'r2eff' and 'r2eff_err' are present and the flag forcing optimisation is not set.
            if has_r2eff and not self.optimise_r2eff:
                pass

            # Do the optimisation if 'r2eff' and 'r2eff_err' are present and the flag forcing optimisation is set.
            elif has_r2eff and self.optimise_r2eff:
                do_minimise = True

            # Optimise if no R2eff values and errors are present.
            elif not has_r2eff:
                do_minimise = True

        # Dispersion model minimisation flags.
        else:
            do_minimise = True
            constraints = True
            # The minimisation algorithm to use. If the Jacobian and Hessian matrices have not been specified for fitting, 'simplex' should be used.
            min_algor = 'simplex'

        # Do the minimisation.
        if do_minimise:
            self.interpreter.minimise.execute(min_algor=min_algor,
                                              func_tol=self.opt_func_tol,
                                              max_iter=self.opt_max_iterations,
                                              constraints=constraints)

        # Model elimination.
        if self.eliminate:
            self.interpreter.eliminate()

        # Monte Carlo simulations.
        do_monte_carlo = False
        if model == MODEL_R2EFF:
            # The constraints flag.
            constraints = False

            # Both the Jacobian and Hessian matrices have been specified for exponential curve-fitting, allowing the much faster algorithms to be used.
            min_algor = 'Newton'

            # Skip the optimisation if 'r2eff' and 'r2eff_err' are present and the flag forcing optimisation is not set.
            if has_r2eff and not self.optimise_r2eff:
                pass

            # Do the optimisation if 'r2eff' and 'r2eff_err' are present and the flag forcing optimisation is set.
            elif has_r2eff and self.optimise_r2eff:
                do_monte_carlo = True

            # Optimise if no R2eff values and errors are present.
            elif not has_r2eff:
                do_monte_carlo = True

        elif self.mc_sim_all_models or len(self.models) < 2:
            do_monte_carlo = True
            # The constraints flag.
            constraints = True
            # The minimisation algorithm to use. If the Jacobian and Hessian matrices have not been specified for fitting, 'simplex' should be used.
            min_algor = 'simplex'

        # Error estimation by Monte Carlo simulations.
        if do_monte_carlo:
            # Set the number of Monte-Carlo simulations.
            monte_carlo_sim = self.mc_sim_num

            # If the number for exponential curve fitting has been set.
            if model == MODEL_R2EFF and self.exp_mc_sim_num != None:
                monte_carlo_sim = self.exp_mc_sim_num

            # When set to -1, the errors will instead be estimated from the covariance matrix.
            # This is HIGHLY likely to be wrong, but can be used in an initial test phase.
            if model == MODEL_R2EFF and self.exp_mc_sim_num == -1:
                # Printout.
                subsection(file=sys.stdout,
                           text="Estimating errors from Covariance matrix",
                           prespace=1)

                # Raise warning.
                text = 'Estimating errors from the Covariance matrix is highly likely to be "quite" wrong.  Use only with extreme care, and for initial rapid testing of your data.'
                warn(RelaxWarning(text))

                # Estimate errors
                self.interpreter.relax_disp.r2eff_err_estimate()
            else:
                self.interpreter.monte_carlo.setup(number=monte_carlo_sim)
                self.interpreter.monte_carlo.create_data()
                self.interpreter.monte_carlo.initial_values()
                self.interpreter.minimise.execute(
                    min_algor=min_algor,
                    func_tol=self.opt_func_tol,
                    max_iter=self.opt_max_iterations,
                    constraints=constraints)
                if self.eliminate:
                    self.interpreter.eliminate()
                self.interpreter.monte_carlo.error_analysis()
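The three-way branching on has_r2eff and self.optimise_r2eff above reduces to a single condition: re-fit when the data is missing or when re-optimisation is forced. A sketch of that decision as a pure function, which is easier to test in isolation; the function name and the bare spin objects are hypothetical stand-ins, not relax API:

def needs_r2eff_optimisation(spins, force=False):
    """Return True if the exponential curve-fitting should be (re)run."""
    spins = list(spins)

    # Every selected spin must already carry both the value and its error
    # (an empty spin list counts as missing data here).
    has_r2eff = bool(spins) and all(
        hasattr(spin, 'r2eff') and hasattr(spin, 'r2eff_err') for spin in spins)

    # Re-fit when data is missing, or when the user forces it.
    return (not has_r2eff) or force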
Example n. 28
    def write_results(self, path=None, model=None):
        """Create a set of results, text and Grace files for the current data pipe.

        @keyword path:   The directory to place the files into.
        @type path:      str
        @keyword model:  The model that was optimised, or None for the final model selection round.
        @type model:     str or None
        """

        # Printout.
        section(file=sys.stdout, text="Results writing", prespace=2)

        # If this is the final model selection round, check which models have been tested.
        if model == None:
            models_tested = []
            for spin, spin_id in spin_loop(return_id=True, skip_desel=True):
                spin_model = spin.model

                # Add to list, if not in already.
                if spin_model not in models_tested:
                    models_tested.append(spin_model)
        else:
            models_tested = None

        # Special handling for the R2eff model.
        if model == MODEL_R2EFF:
            # The R2eff parameter.
            self.interpreter.value.write(param='r2eff',
                                         file='r2eff.out',
                                         dir=path,
                                         force=True)
            self.interpreter.grace.write(x_data_type='res_num',
                                         y_data_type='r2eff',
                                         file='r2eff.agr',
                                         dir=path,
                                         force=True)

            # Exponential curves.
            if has_exponential_exp_type():
                self.interpreter.relax_disp.plot_exp_curves(
                    file='intensities.agr', dir=path,
                    force=True)  # Average peak intensities.
                self.interpreter.relax_disp.plot_exp_curves(
                    file='intensities_norm.agr',
                    dir=path,
                    force=True,
                    norm=True)  # Average peak intensities (normalised).

                # The I0 parameter.
                self.interpreter.value.write(param='i0',
                                             file='i0.out',
                                             dir=path,
                                             force=True)
                self.interpreter.grace.write(x_data_type='res_num',
                                             y_data_type='i0',
                                             file='i0.agr',
                                             dir=path,
                                             force=True)

        # Dispersion curves.
        self.interpreter.relax_disp.plot_disp_curves(dir=path, force=True)
        self.interpreter.relax_disp.write_disp_curves(dir=path, force=True)

        # The selected models for the final run.
        if model == None:
            self.interpreter.value.write(param='model',
                                         file='model.out',
                                         dir=path,
                                         force=True)

        # For CPMG models.
        if has_cpmg_exp_type():
            # The R20 parameter.
            self.write_results_test(path=path,
                                    model=model,
                                    models_tested=models_tested,
                                    param='r2',
                                    file_name_ini='r20')

            # The R20A and R20B parameters.
            self.write_results_test(path=path,
                                    model=model,
                                    models_tested=models_tested,
                                    param='r2a',
                                    file_name_ini='r20a')
            self.write_results_test(path=path,
                                    model=model,
                                    models_tested=models_tested,
                                    param='r2b',
                                    file_name_ini='r20b')

        # For R1rho models.
        if has_r1rho_exp_type():
            # The R1 parameter.
            self.write_results_test(path=path,
                                    model=model,
                                    models_tested=models_tested,
                                    param='r1')

            # The R1rho prime parameter.
            self.write_results_test(path=path,
                                    model=model,
                                    models_tested=models_tested,
                                    param='r2',
                                    file_name_ini='r1rho_prime')

            # Plot specific R1rho graphs.
            if model in [None] + MODEL_LIST_R1RHO:
                self.interpreter.relax_disp.plot_disp_curves(
                    dir=path, x_axis=X_AXIS_THETA, force=True)
                self.interpreter.relax_disp.plot_disp_curves(
                    dir=path,
                    y_axis=Y_AXIS_R2_R1RHO,
                    x_axis=X_AXIS_W_EFF,
                    force=True)
                self.interpreter.relax_disp.plot_disp_curves(
                    dir=path,
                    y_axis=Y_AXIS_R2_EFF,
                    x_axis=X_AXIS_THETA,
                    interpolate=INTERPOLATE_OFFSET,
                    force=True)

            # The calculation of the theta and w_eff parameters in R1rho experiments.
            if model in MODEL_LIST_R1RHO_FULL:
                self.interpreter.value.write(param='theta',
                                             file='theta.out',
                                             dir=path,
                                             force=True)
                self.interpreter.value.write(param='w_eff',
                                             file='w_eff.out',
                                             dir=path,
                                             force=True)

        # The pA and pB parameters.
        self.write_results_test(path=path,
                                model=model,
                                models_tested=models_tested,
                                param='pA')
        self.write_results_test(path=path,
                                model=model,
                                models_tested=models_tested,
                                param='pB')

        # The pC parameter.
        self.write_results_test(path=path,
                                model=model,
                                models_tested=models_tested,
                                param='pC')

        # The phi_ex parameter.
        self.write_results_test(path=path,
                                model=model,
                                models_tested=models_tested,
                                param='phi_ex')

        # The phi_ex_B and phi_ex_C parameters.
        self.write_results_test(path=path,
                                model=model,
                                models_tested=models_tested,
                                param='phi_ex_B')
        self.write_results_test(path=path,
                                model=model,
                                models_tested=models_tested,
                                param='phi_ex_C')

        # The dw parameter.
        self.write_results_test(path=path,
                                model=model,
                                models_tested=models_tested,
                                param='dw')

        # The dw_AB, dw_BC and dw_AC parameters.
        self.write_results_test(path=path,
                                model=model,
                                models_tested=models_tested,
                                param='dw_AB')
        self.write_results_test(path=path,
                                model=model,
                                models_tested=models_tested,
                                param='dw_BC')
        self.write_results_test(path=path,
                                model=model,
                                models_tested=models_tested,
                                param='dw_AC')

        # The dwH parameter.
        self.write_results_test(path=path,
                                model=model,
                                models_tested=models_tested,
                                param='dwH')

        # The dwH_AB, dwH_BC and dwH_AC parameters.
        self.write_results_test(path=path,
                                model=model,
                                models_tested=models_tested,
                                param='dwH_AB')
        self.write_results_test(path=path,
                                model=model,
                                models_tested=models_tested,
                                param='dwH_BC')
        self.write_results_test(path=path,
                                model=model,
                                models_tested=models_tested,
                                param='dwH_AC')

        # The k_AB, kex and tex parameters.
        self.write_results_test(path=path,
                                model=model,
                                models_tested=models_tested,
                                param='k_AB')
        self.write_results_test(path=path,
                                model=model,
                                models_tested=models_tested,
                                param='kex')
        self.write_results_test(path=path,
                                model=model,
                                models_tested=models_tested,
                                param='tex')

        # The kex_AB, kex_BC, kex_AC parameters.
        self.write_results_test(path=path,
                                model=model,
                                models_tested=models_tested,
                                param='kex_AB')
        self.write_results_test(path=path,
                                model=model,
                                models_tested=models_tested,
                                param='kex_BC')
        self.write_results_test(path=path,
                                model=model,
                                models_tested=models_tested,
                                param='kex_AC')

        # The kB and kC parameters.
        self.write_results_test(path=path,
                                model=model,
                                models_tested=models_tested,
                                param='kB')
        self.write_results_test(path=path,
                                model=model,
                                models_tested=models_tested,
                                param='kC')

        # Minimisation statistics.
        if not (model == MODEL_R2EFF and has_fixed_time_exp_type()):
            self.interpreter.value.write(param='chi2',
                                         file='chi2.out',
                                         dir=path,
                                         force=True)
            self.interpreter.grace.write(y_data_type='chi2',
                                         file='chi2.agr',
                                         dir=path,
                                         force=True)

        # Finally save the results.  This is last to allow the continuation of an interrupted analysis while ensuring that all results files have been created.
        self.interpreter.results.write(file='results', dir=path, force=True)
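The write_results_test() calls above funnel every parameter through one dispatcher, but the dispatcher itself is not shown in this listing. A plausible sketch of what such a method could look like, under the assumption that MODEL_PARAMS maps each model name to its parameter list and that the class context (self.interpreter) is as above; this is an illustration, not the relax source:

    def write_results_test(self, path=None, model=None, models_tested=None, param=None, file_name_ini=None):
        """Write the value and Grace files for a parameter, if a relevant model optimises it."""
        # Default the file name to the parameter name.
        if file_name_ini is None:
            file_name_ini = param

        # The models to check: the current one, or all tested models in the final round.
        models = [model] if model is not None else (models_tested or [])

        # Skip parameters that none of the models optimise.
        if not any(param in MODEL_PARAMS.get(m, []) for m in models):
            return

        # Write the text file and the Grace plot.
        self.interpreter.value.write(param=param, file='%s.out' % file_name_ini, dir=path, force=True)
        self.interpreter.grace.write(x_data_type='res_num', y_data_type=param, file='%s.agr' % file_name_ini, dir=path, force=True)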
Example n. 29
    def write_results(self, path=None, model=None):
        """Create a set of results, text and Grace files for the current data pipe.

        @keyword path:   The directory to place the files into.
        @type path:      str
        @keyword model:  The model that was optimised, or None for the final model selection round.
        @type model:     str or None
        """

        # Printout.
        section(file=sys.stdout, text="Results writing", prespace=2)

        # Exponential curves.
        if model == 'R2eff' and has_exponential_exp_type():
            self.interpreter.relax_disp.plot_exp_curves(file='intensities.agr', dir=path, force=True)    # Average peak intensities.
            self.interpreter.relax_disp.plot_exp_curves(file='intensities_norm.agr', dir=path, force=True, norm=True)    # Average peak intensities (normalised).

        # Dispersion curves.
        self.interpreter.relax_disp.plot_disp_curves(dir=path, force=True)
        self.interpreter.relax_disp.write_disp_curves(dir=path, force=True)

        # The selected models for the final run.
        if model == None:
            self.interpreter.value.write(param='model', file='model.out', dir=path, force=True)

        # The R2eff parameter.
        if model == 'R2eff':
            self.interpreter.value.write(param='r2eff', file='r2eff.out', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='r2eff', file='r2eff.agr', dir=path, force=True)

        # The I0 parameter.
        if model == 'R2eff' and has_exponential_exp_type():
            self.interpreter.value.write(param='i0', file='i0.out', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='i0', file='i0.agr', dir=path, force=True)

        ## The R20 parameter.
        #if has_cpmg_exp_type() and model in [None, MODEL_LM63, MODEL_CR72, MODEL_IT99, MODEL_M61, MODEL_DPL94, MODEL_M61B, MODEL_MMQ_CR72, MODEL_NS_CPMG_2SITE_3D, MODEL_NS_CPMG_2SITE_STAR, MODEL_NS_CPMG_2SITE_EXPANDED, MODEL_NS_MMQ_2SITE, MODEL_NS_MMQ_3SITE, MODEL_NS_MMQ_3SITE_LINEAR]:
        #    self.interpreter.value.write(param='r2', file='r20.out', dir=path, force=True)
        #    self.interpreter.grace.write(x_data_type='res_num', y_data_type='r2', file='r20.agr', dir=path, force=True)

        ## The R20A and R20B parameters.
        #if has_cpmg_exp_type() and model in [None, MODEL_CR72_FULL, MODEL_NS_CPMG_2SITE_3D_FULL, MODEL_NS_CPMG_2SITE_STAR_FULL]:
        #    self.interpreter.value.write(param='r2a', file='r20a.out', dir=path, force=True)
        #    self.interpreter.value.write(param='r2b', file='r20b.out', dir=path, force=True)
        #    self.interpreter.grace.write(x_data_type='res_num', y_data_type='r2a', file='r20a.agr', dir=path, force=True)
        #    self.interpreter.grace.write(x_data_type='res_num', y_data_type='r2b', file='r20b.agr', dir=path, force=True)

        ## The R1rho parameter.
        #if has_r1rho_exp_type() and model in [None] + MODEL_LIST_R1RHO:
        #    self.interpreter.value.write(param='r2', file='r1rho0.out', dir=path, force=True)
        #    self.interpreter.grace.write(x_data_type='res_num', y_data_type='r2', file='r1rho0.agr', dir=path, force=True)

        # The pA, pB, and pC parameters.
        if model in [None, MODEL_CR72, MODEL_CR72_FULL, MODEL_IT99, MODEL_M61B, MODEL_MMQ_CR72, MODEL_NS_CPMG_2SITE_3D, MODEL_NS_CPMG_2SITE_3D_FULL, MODEL_NS_CPMG_2SITE_STAR, MODEL_NS_CPMG_2SITE_STAR_FULL, MODEL_NS_CPMG_2SITE_EXPANDED, MODEL_NS_MMQ_2SITE, MODEL_NS_R1RHO_2SITE, MODEL_NS_R1RHO_3SITE, MODEL_NS_R1RHO_3SITE_LINEAR, MODEL_TP02, MODEL_TAP03, MODEL_MP05, MODEL_NS_MMQ_3SITE, MODEL_NS_MMQ_3SITE_LINEAR]:
            self.interpreter.value.write(param='pA', file='pA.out', dir=path, force=True)
            self.interpreter.value.write(param='pB', file='pB.out', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='pA', file='pA.agr', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='pB', file='pB.agr', dir=path, force=True)
        if model in [MODEL_NS_MMQ_3SITE, MODEL_NS_MMQ_3SITE_LINEAR, MODEL_NS_R1RHO_3SITE, MODEL_NS_R1RHO_3SITE_LINEAR]:
            self.interpreter.value.write(param='pC', file='pC.out', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='pC', file='pC.agr', dir=path, force=True)

        # The Phi_ex parameter.
        if model in [None, MODEL_LM63, MODEL_M61, MODEL_DPL94]:
            self.interpreter.value.write(param='phi_ex', file='phi_ex.out', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='phi_ex', file='phi_ex.agr', dir=path, force=True)

        # The Phi_ex_B and Phi_ex_C parameters.
        if model in [None, MODEL_LM63_3SITE]:
            self.interpreter.value.write(param='phi_ex_B', file='phi_ex_B.out', dir=path, force=True)
            self.interpreter.value.write(param='phi_ex_C', file='phi_ex_C.out', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='phi_ex_B', file='phi_ex_B.agr', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='phi_ex_C', file='phi_ex_C.agr', dir=path, force=True)

        # The dw parameter.
        if model in [None, MODEL_CR72, MODEL_CR72_FULL, MODEL_IT99, MODEL_M61B, MODEL_MMQ_CR72, MODEL_NS_CPMG_2SITE_3D, MODEL_NS_CPMG_2SITE_3D_FULL, MODEL_NS_CPMG_2SITE_STAR, MODEL_NS_CPMG_2SITE_STAR_FULL, MODEL_NS_CPMG_2SITE_EXPANDED, MODEL_NS_MMQ_2SITE, MODEL_NS_R1RHO_2SITE, MODEL_TP02, MODEL_TAP03, MODEL_MP05, MODEL_TSMFK01]:
            self.interpreter.value.write(param='dw', file='dw.out', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='dw', file='dw.agr', dir=path, force=True)
        if model in [MODEL_NS_MMQ_3SITE, MODEL_NS_MMQ_3SITE_LINEAR, MODEL_NS_R1RHO_3SITE, MODEL_NS_R1RHO_3SITE_LINEAR]:
            self.interpreter.value.write(param='dw_AB', file='dw_AB.out', dir=path, force=True)
            self.interpreter.value.write(param='dw_BC', file='dw_BC.out', dir=path, force=True)
            self.interpreter.value.write(param='dw_AC', file='dw_AC.out', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='dw_AB', file='dw_AB.agr', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='dw_BC', file='dw_BC.agr', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='dw_AC', file='dw_AC.agr', dir=path, force=True)

        # The dwH parameter.
        if model in [None, MODEL_MMQ_CR72, MODEL_NS_MMQ_2SITE]:
            self.interpreter.value.write(param='dwH', file='dwH.out', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='dwH', file='dwH.agr', dir=path, force=True)
        if model in [MODEL_NS_MMQ_3SITE, MODEL_NS_MMQ_3SITE_LINEAR]:
            self.interpreter.value.write(param='dwH_AB', file='dwH_AB.out', dir=path, force=True)
            self.interpreter.value.write(param='dwH_BC', file='dwH_BC.out', dir=path, force=True)
            self.interpreter.value.write(param='dwH_AC', file='dwH_AC.out', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='dwH_AB', file='dwH_AB.agr', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='dwH_BC', file='dwH_BC.agr', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='dwH_AC', file='dwH_AC.agr', dir=path, force=True)

        # The k_AB, kex and tex parameters.
        if model in [None, MODEL_LM63, MODEL_CR72, MODEL_CR72_FULL, MODEL_IT99, MODEL_M61, MODEL_DPL94, MODEL_M61B, MODEL_MMQ_CR72, MODEL_NS_CPMG_2SITE_3D, MODEL_NS_CPMG_2SITE_3D_FULL, MODEL_NS_CPMG_2SITE_STAR, MODEL_NS_CPMG_2SITE_STAR_FULL, MODEL_NS_CPMG_2SITE_EXPANDED, MODEL_NS_MMQ_2SITE, MODEL_NS_R1RHO_2SITE, MODEL_TP02, MODEL_TAP03, MODEL_MP05]:
            self.interpreter.value.write(param='k_AB', file='k_AB.out', dir=path, force=True)
            self.interpreter.value.write(param='kex', file='kex.out', dir=path, force=True)
            self.interpreter.value.write(param='tex', file='tex.out', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='k_AB', file='k_AB.agr', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='kex', file='kex.agr', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='tex', file='tex.agr', dir=path, force=True)
        if model in [MODEL_NS_MMQ_3SITE, MODEL_NS_MMQ_3SITE_LINEAR, MODEL_NS_R1RHO_3SITE, MODEL_NS_R1RHO_3SITE_LINEAR]:
            self.interpreter.value.write(param='kex_AB', file='kex_AB.out', dir=path, force=True)
            self.interpreter.value.write(param='kex_BC', file='kex_BC.out', dir=path, force=True)
            self.interpreter.value.write(param='kex_AC', file='kex_AC.out', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='kex_AB', file='kex_AB.agr', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='kex_BC', file='kex_BC.agr', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='kex_AC', file='kex_AC.agr', dir=path, force=True)

        # The k_AB parameter.
        if model in [None, MODEL_TSMFK01]:
            self.interpreter.value.write(param='k_AB', file='k_AB.out', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='k_AB', file='k_AB.agr', dir=path, force=True)

        # The kB and kC parameters.
        if model in [None, MODEL_LM63_3SITE]:
            self.interpreter.value.write(param='kB', file='kB.out', dir=path, force=True)
            self.interpreter.value.write(param='kC', file='kC.out', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='kB', file='kB.agr', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='kC', file='kC.agr', dir=path, force=True)

        # Minimisation statistics.
        if not (model == 'R2eff' and has_fixed_time_exp_type()):
            self.interpreter.grace.write(y_data_type='chi2', file='chi2.agr', dir=path, force=True)

        # Finally save the results.  This is last to allow the continuation of an interrupted analysis while ensuring that all results files have been created.
        self.interpreter.results.write(file='results', dir=path, force=True)
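The long per-parameter 'if model in [...]' blocks above repeat the same pair of write calls for each parameter. The same logic can be driven from a table mapping each parameter to the models that optimise it. A sketch of that design with an abbreviated, placeholder mapping; the full lists are the MODEL_* constants used above, and None stands for the final model selection round:

# Abbreviated parameter-to-models table with placeholder model names.
PARAM_MODELS = {
    'phi_ex': [None, 'LM63', 'M61', 'DPL94'],
    'kB':     [None, 'LM63 3-site'],
    'kC':     [None, 'LM63 3-site'],
}

def write_param_files(interpreter, model, path):
    """Write the value and Grace files for every parameter the model optimises."""
    for param, models in PARAM_MODELS.items():
        if model not in models:
            continue
        interpreter.value.write(param=param, file='%s.out' % param, dir=path, force=True)
        interpreter.grace.write(x_data_type='res_num', y_data_type=param, file='%s.agr' % param, dir=path, force=True)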
Example n. 30
    def optimise(self, model=None, model_path=None):
        """Optimise the model, taking model nesting into account.

        @keyword model:         The model to be optimised.
        @type model:            str
        @keyword model_path:    The folder name for the model, where possible spaces have been replaced with underscores.
        @type model_path:       str
        """

        # Printout. 
        section(file=sys.stdout, text="Optimisation", prespace=2)

        # Deselect insignificant spins.
        if model not in [MODEL_R2EFF, MODEL_NOREX]:
            self.interpreter.relax_disp.insignificance(level=self.insignificance)

        # Speed up the grid search by using the minimum R2eff value.
        if self.set_grid_r20 and model != MODEL_R2EFF:
            self.interpreter.relax_disp.r20_from_min_r2eff(force=True)

        # Use pre-run results as the optimisation starting point.
        # Test if file exists.
        if self.pre_run_dir:
            path = self.pre_run_dir + sep + model_path
            # File path.
            file_path = get_file_path('results', path)

            # Test if the file exists and determine the compression type.
            try:
                compress_type, file_path = determine_compression(file_path)
                res_file_exists = True

            except RelaxFileError:
                res_file_exists = False

        if self.pre_run_dir and res_file_exists:
            self.pre_run_parameters(model=model, model_path=model_path)

        # Otherwise use the normal nesting check and grid search if not nested.
        else:
            # Nested model simplification.
            nested = self.nesting(model=model)

            # Otherwise use a grid search of default values to start optimisation with.
            if not nested:
                # Grid search.
                if self.grid_inc:
                    self.interpreter.minimise.grid_search(inc=self.grid_inc)

                # Default values.
                else:
                    # The standard parameters.
                    for param in MODEL_PARAMS[model]:
                        self.interpreter.value.set(param=param, index=None)

                    # The optional R1 parameter.
                    if is_r1_optimised(model=model):
                        self.interpreter.value.set(param='r1', index=None)

        # 'R2eff' model minimisation flags.
        do_minimise = False
        if model == MODEL_R2EFF:
            # The constraints flag.
            constraints = False

            # The minimisation algorithm to use.
            # Both the Jacobian and Hessian matrices have been specified for exponential curve-fitting, allowing the much faster algorithms to be used.
            min_algor = 'Newton'

            # Check if all spins contain 'r2eff' and its associated error.
            has_r2eff = False

            # Loop over all spins.
            for cur_spin, spin_id in spin_loop(return_id=True, skip_desel=True):
                # Check for 'r2eff' and its associated error.
                if hasattr(cur_spin, 'r2eff') and hasattr(cur_spin, 'r2eff_err'):
                    has_r2eff = True
                else:
                    has_r2eff = False
                    break

            # Skip the optimisation if 'r2eff' and 'r2eff_err' are present and the flag forcing optimisation is not set.
            if has_r2eff and not self.optimise_r2eff:
                pass

            # Do the optimisation if 'r2eff' and 'r2eff_err' are present and the flag forcing optimisation is set.
            elif has_r2eff and self.optimise_r2eff:
                do_minimise = True

            # Optimise if no R2eff values and errors are present.
            elif not has_r2eff:
                do_minimise = True

        # Dispersion model minimisation flags.
        else:
            do_minimise = True
            constraints = True
            # The minimisation algorithm to use. If the Jacobian and Hessian matrices have not been specified for fitting, 'simplex' should be used.
            min_algor = 'simplex'

        # Do the minimisation.
        if do_minimise:
            self.interpreter.minimise.execute(min_algor=min_algor, func_tol=self.opt_func_tol, max_iter=self.opt_max_iterations, constraints=constraints)

        # Model elimination.
        if self.eliminate:
            self.interpreter.eliminate()

        # Monte Carlo simulations.
        do_monte_carlo = False
        if model == MODEL_R2EFF:
            # The constraints flag.
            constraints = False

            # Both the Jacobian and Hessian matrices have been specified for exponential curve-fitting, allowing the much faster algorithms to be used.
            min_algor = 'Newton'

            # Skip the optimisation if 'r2eff' and 'r2eff_err' are present and the flag forcing optimisation is not set.
            if has_r2eff and not self.optimise_r2eff:
                pass

            # Do the optimisation if 'r2eff' and 'r2eff_err' are present and the flag forcing optimisation is set.
            elif has_r2eff and self.optimise_r2eff:
                do_monte_carlo = True

            # Optimise if no R2eff values and errors are present.
            elif not has_r2eff:
                do_monte_carlo = True

        elif self.mc_sim_all_models or len(self.models) < 2:
            do_monte_carlo = True
            # The constraints flag.
            constraints = True
            # The minimisation algorithm to use. If the Jacobian and Hessian matrices have not been specified for fitting, 'simplex' should be used.
            min_algor = 'simplex'

        # Error estimation by Monte Carlo simulations.
        if do_monte_carlo:
            # Set the number of Monte-Carlo simulations.
            monte_carlo_sim = self.mc_sim_num

            # If the number for exponential curve fitting has been set.
            if model == MODEL_R2EFF and self.exp_mc_sim_num != None:
                monte_carlo_sim = self.exp_mc_sim_num

            # When set to -1, the errors will instead be estimated from the covariance matrix.
            # This is HIGHLY likely to be wrong, but can be used in an initial test phase.
            if model == MODEL_R2EFF and self.exp_mc_sim_num == -1:
                # Printout.
                subsection(file=sys.stdout, text="Estimating errors from Covariance matrix", prespace=1)

                # Raise warning.
                text = 'Estimating errors from the Covariance matrix is highly likely to be "quite" wrong.  Use only with extreme care, and for initial rapid testing of your data.'
                warn(RelaxWarning(text))

                # Estimate errors
                self.interpreter.relax_disp.r2eff_err_estimate()
            else:
                self.interpreter.monte_carlo.setup(number=monte_carlo_sim)
                self.interpreter.monte_carlo.create_data()
                self.interpreter.monte_carlo.initial_values()
                self.interpreter.minimise.execute(min_algor=min_algor, func_tol=self.opt_func_tol, max_iter=self.opt_max_iterations, constraints=constraints)
                if self.eliminate:
                    self.interpreter.eliminate()
                self.interpreter.monte_carlo.error_analysis()
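The monte_carlo.* sequence above implements a parametric bootstrap: synthetic data sets are generated from the back-calculated values plus Gaussian noise at the measured error level, each set is re-minimised starting from the fitted values, and the parameter errors are taken as the spread over all simulations. A self-contained numpy illustration of the principle (not relax code; fit() is any refitting routine you supply):

import numpy as np

def monte_carlo_errors(fit, back_calc, errors, n_sim=500, rng=None):
    """Estimate parameter errors by refitting noise-perturbed synthetic data.

    fit:       callable mapping a data vector to a parameter vector.
    back_calc: data back-calculated from the optimised parameters.
    errors:    one standard deviation per data point.
    """
    rng = rng or np.random.default_rng()
    sims = []
    for _ in range(n_sim):
        # Perturb the back-calculated data by the measured errors.
        synthetic = back_calc + rng.normal(scale=errors)
        sims.append(fit(synthetic))

    # The parameter errors are the standard deviations over all simulations.
    return np.std(sims, axis=0)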
Example n. 31
    def run(self):
        """Execute the auto-analysis."""

        # Peak intensity error analysis.
        if MODEL_R2EFF in self.models:
            self.error_analysis()

        # R1 parameter fitting.
        if self.r1_fit:
            subtitle(file=sys.stdout,
                     text="R1 parameter optimisation activation",
                     prespace=3)
            self.interpreter.relax_disp.r1_fit(fit=self.r1_fit)
        else:
            # No print out.
            self.interpreter.relax_disp.r1_fit(fit=self.r1_fit)

        # Loop over the models.
        self.model_pipes = []
        for model in self.models:
            # Printout.
            subtitle(file=sys.stdout,
                     text="The '%s' model" % model,
                     prespace=3)

            # The results directory path.
            model_path = model.replace(" ", "_")
            path = self.results_dir + sep + model_path

            # The name of the data pipe for the model.
            model_pipe = self.name_pipe(model)
            if self.is_model_for_selection(model):
                self.model_pipes.append(model_pipe)

            # Check that results do not already exist - i.e. a previous run was interrupted.
            path1 = path + sep + 'results'
            path2 = path1 + '.bz2'
            path3 = path1 + '.gz'
            if access(path1, F_OK) or access(path2, F_OK) or access(
                    path3, F_OK):
                # Printout.
                print(
                    "Detected the presence of results files for the '%s' model - loading these instead of performing optimisation for a second time."
                    % model)

                # Create a data pipe and switch to it.
                self.interpreter.pipe.create(pipe_name=model_pipe,
                                             pipe_type='relax_disp',
                                             bundle=self.pipe_bundle)
                self.interpreter.pipe.switch(model_pipe)

                # Load the results.
                self.interpreter.results.read(file='results', dir=path)

                # Jump to the next model.
                continue

            # Create the data pipe by copying the base pipe, then switching to it.
            self.interpreter.pipe.copy(pipe_from=self.pipe_name,
                                       pipe_to=model_pipe,
                                       bundle_to=self.pipe_bundle)
            self.interpreter.pipe.switch(model_pipe)

            # Select the model.
            self.interpreter.relax_disp.select_model(model)

            # Copy the R2eff values from the R2eff model data pipe.
            if model != MODEL_R2EFF and MODEL_R2EFF in self.models:
                self.interpreter.value.copy(
                    pipe_from=self.name_pipe(MODEL_R2EFF),
                    pipe_to=model_pipe,
                    param='r2eff')

            # Calculate the R2eff values for the fixed relaxation time period data types.
            if model == MODEL_R2EFF and not has_exponential_exp_type():
                self.interpreter.minimise.calculate()

            # Optimise the model.
            else:
                self.optimise(model=model, model_path=model_path)

            # Write out the results.
            self.write_results(path=path, model=model)

        # The final model selection data pipe.
        if len(self.models) >= 2:
            # Printout.
            section(file=sys.stdout, text="Final results", prespace=2)

            # Perform model selection.
            self.interpreter.model_selection(
                method=self.modsel,
                modsel_pipe=self.name_pipe('final'),
                bundle=self.pipe_bundle,
                pipes=self.model_pipes)

            # Final Monte Carlo simulations only.
            if not self.mc_sim_all_models:
                self.interpreter.monte_carlo.setup(number=self.mc_sim_num)
                self.interpreter.monte_carlo.create_data()
                self.interpreter.monte_carlo.initial_values()
                self.interpreter.minimise.execute(
                    'simplex',
                    func_tol=self.opt_func_tol,
                    max_iter=self.opt_max_iterations,
                    constraints=True)
                if self.eliminate:
                    self.interpreter.eliminate()
                self.interpreter.monte_carlo.error_analysis()

            # Writing out the final results.
            self.write_results(path=self.results_dir + sep + 'final')

        # No model selection.
        else:
            warn(
                RelaxWarning(
                    "Model selection in the dispersion auto-analysis has been skipped as only %s models have been optimised."
                    % len(self.model_pipes)))

        # Finally save the program state.
        self.interpreter.state.save(state='final_state',
                                    dir=self.results_dir,
                                    force=True)
Example n. 32
    def error_analysis(self):
        """Perform an error analysis of the peak intensities."""

        # Printout.
        section(file=sys.stdout, text="Error analysis", prespace=2)

        # Check if intensity errors have already been calculated by the user.
        precalc = True
        for spin in spin_loop(skip_desel=True):
            # No structure.
            if not hasattr(spin, 'peak_intensity_err'):
                precalc = False
                break

            # Determine if a spectrum ID is missing from the list.
            for id in cdp.spectrum_ids:
                if id not in spin.peak_intensity_err:
                    precalc = False
                    break

        # Skip.
        if precalc:
            print("Skipping the error analysis as it has already been performed.")
            return

        # Check whether there are replicates that the user has not specified.

        # Set the flag for duplicates.
        has_dub = False

        if not hasattr(cdp, 'replicates'):
            # Collect all times and the matching spectrum IDs.
            all_times = []
            all_id = []
            for spectrum_id in cdp.relax_times:
                all_times.append(cdp.relax_times[spectrum_id])
                all_id.append(spectrum_id)

            # Get the duplicates.
            dublicates = [(val, [i for i in range(len(all_times)) if all_times[i] == val]) for val in all_times]

            # Loop over the mapping of times to duplicates.
            list_dub_mapping = []
            for i, dub in enumerate(dublicates):
                # Get the current spectrum ID.
                cur_spectrum_id = all_id[i]

                # Unpack the tuple of the time and the indices of its duplicates.
                time, list_index_occur = dub

                # Collect the mapping of index to ID.
                id_list = []
                if len(list_index_occur) > 1:
                    # Duplicates exist.
                    has_dub = True

                    for list_index in list_index_occur:
                        id_list.append(all_id[list_index])

                # Store in the list.
                list_dub_mapping.append((cur_spectrum_id, id_list))

        # If duplicates exist, assign them.
        if has_dub:
            # Assign the duplicates.
            for spectrum_id, dub_pair in list_dub_mapping:
                if len(dub_pair) > 0:
                    self.interpreter.spectrum.replicated(spectrum_ids=dub_pair)

        # Run the error analysis.
        self.interpreter.spectrum.error_analysis()
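The replicate search above pairs every spectrum ID with every matching relaxation time, which both scans quadratically and registers each group once per member. The same grouping can be done in a single pass; a compact sketch, assuming (as above) that relax_times maps spectrum IDs to times, with exact float equality used for matching just as in the original loop:

from collections import defaultdict

def find_replicates(relax_times):
    """Group spectrum IDs recorded at identical relaxation times."""
    groups = defaultdict(list)
    for spectrum_id, time in relax_times.items():
        groups[time].append(spectrum_id)

    # Only groups with more than one spectrum are true replicates.
    return [ids for ids in groups.values() if len(ids) > 1]

Each returned group could then be passed once to spectrum.replicated(spectrum_ids=...), avoiding repeated registration of the same pair.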
Example n. 33
    def summary_skipped(self):
        """Print out information about skipped tests."""

        # Counts.
        system_count = {}
        unit_count = {}
        gui_count = {}
        verification_count = {}
        for i in range(len(status.skipped_tests)):
            # Alias.
            test = status.skipped_tests[i]

            # Skip tests where the module is set to None, indicating that the skip should not be reported.
            if test[1] == None:
                continue

            # Initialise if needed.
            if not test[1] in system_count:
                system_count[test[1]] = 0
                unit_count[test[1]] = 0
                gui_count[test[1]] = 0
                verification_count[test[1]] = 0

            # A system test.
            if test[2] == 'system':
                system_count[test[1]] += 1

            # A unit test.
            if test[2] == 'unit':
                unit_count[test[1]] += 1

            # A GUI test.
            if test[2] == 'gui':
                gui_count[test[1]] += 1

            # A verification test.
            if test[2] == 'verification':
                verification_count[test[1]] += 1

        # The missing modules.
        missing_modules = sorted(system_count.keys())
        section(file=sys.stdout, text="Optional packages/modules")

        # Nothing missing.
        if not missing_modules:
            # Except for the wx module!
            if not dep_check.wx_module and hasattr(self, 'gui_result'):
                print(
                    "All GUI tests skipped due to the missing wxPython module, no other tests skipped due to missing modules.\n"
                )

            # Normal printout.
            else:
                print("No tests skipped due to missing modules.\n")

            # Skip the table.
            return

        # Header.
        print(
            "Tests skipped due to missing optional packages/modules/software:\n"
        )
        header = "%-33s" % "Module/package/software"
        if len(system_count):
            header = "%s %20s" % (header, "System test count")
        if len(unit_count):
            header = "%s %20s" % (header, "Unit test count")
        if len(gui_count):
            header = "%s %20s" % (header, "GUI test count")
        if len(verification_count):
            header = "%s %20s" % (header, "Verification test count")
        print('-' * len(header))
        print(header)
        print('-' * len(header))

        # The table.
        for module in missing_modules:
            text = "%-33s" % module
            if len(system_count):
                text = "%s %20s" % (text, system_count[module])
            if len(unit_count):
                text = "%s %20s" % (text, unit_count[module])
            if len(gui_count):
                text = "%s %20s" % (text, gui_count[module])
            if len(verification_count):
                text = "%s %20s" % (text, verification_count[module])
            print(text)

        # End the table.
        print('-' * len(header))
        print("\n")
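The four parallel count dictionaries above can also be kept as one Counter per test type. A brief sketch, assuming skipped_tests holds (test, module, type) tuples as indexed in the loop above:

from collections import Counter

def count_skips(skipped_tests):
    """Tally skipped tests per missing module, one Counter per test type."""
    counts = {'system': Counter(), 'unit': Counter(),
              'gui': Counter(), 'verification': Counter()}
    for _test, module, test_type in skipped_tests:
        # A module of None flags a skip that should not be reported.
        if module is None:
            continue
        counts[test_type][module] += 1
    return counts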
Example n. 34
value.set(param='cone_theta_y', val=0.2)

# Fix the true pivot point.
frame_order.pivot([ 37.254, 0.5, 16.7465], fix=True)

# Change the model.
frame_order.select_model('pseudo-ellipse')

# Loop over the 3 permutations.
pipe_name = 'pseudo-ellipse'
tag = ''
for perm in [None, 'A', 'B']:
    # The original permutation.
    if perm == None:
        # Title printout.
        section(file=sys.stdout, text="Pseudo-ellipse original permutation")

        # Create a new data pipe for the pseudo-ellipse.
        pipe.copy(pipe_from='frame order', pipe_to='pseudo-ellipse')
        pipe.switch(pipe_name='pseudo-ellipse')

    # Operations for the 'A' and 'B' permutations.
    else:
        # Title printout.
        section(file=sys.stdout, text="Pseudo-ellipse permutation %s" % perm)

        # The pipe name and tag.
        pipe_name = 'pseudo-ellipse perm %s' % perm
        tag = '_perm_%s' % perm

        # Create a new data pipe.