Code example #1
File: relax_disp.py Project: tlinnet/relax
    def check_numpy_less_1_8_and_numerical_model(self):
        """Check for numerical model using numpy version under 1.8.  This will result in slow "for loop" calculation through data, making the analysis 5-6 times slower."""

        # Warn the user if a numerical model is selected with an old numpy version.
        # Compare the version components numerically, as float parsing of the string breaks for versions such as 1.10.
        if tuple(int(i) for i in version.version.split('.')[:2]) < (1, 8):
            # Store which models are in numeric.
            models = []

            # Loop through models.
            for model in self.models:
                if model in MODEL_LIST_NUMERIC:
                    models.append(model)

            # Write a system message if numerical models are present and the numpy version is below 1.8.
            if len(models) > 0:
                # Printout.
                section(file=sys.stdout,
                        text="Numpy version checking for numerical models.",
                        prespace=2)
                warn(
                    RelaxWarning(
                        "Your version of numpy is %s, and below the recommended version of 1.8 for numerical models."
                        % (version.version)))
                warn(
                    RelaxWarning(
                        "Please consider upgrading your numpy version to 1.8.")
                )

                # Loop over models.
                for model in models:
                    warn(
                        RelaxWarning(
                            "This could make the numerical analysis with model '%s', 5 to 6 times slower."
                            % (model)))
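The float-based version test fixed above is a common pitfall.  A minimal sketch of a reusable comparison helper, assuming a plain numeric numpy version string (the helper name numpy_at_least is made up for illustration):

from numpy import version

def numpy_at_least(major, minor):
    """Return True if the installed numpy is at least the given major.minor version."""
    # Compare integer components, so that '1.10.4' correctly sorts above '1.8.2'.
    parts = version.version.split('.')
    return (int(parts[0]), int(parts[1])) >= (major, minor)

# Usage sketch: warn only for numpy versions below 1.8.
if not numpy_at_least(1, 8):
    print("numpy %s detected, numerical models will run 5-6 times slower." % version.version)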
Code example #2
File: spectrum.py Project: tlinnet/relax
def replicated(spectrum_ids=None):
    """Set which spectra are replicates.

    @keyword spectrum_ids:  A list of spectrum ids corresponding to replicated spectra.
    @type spectrum_ids:     list of str
    """

    # Test if the current pipe exists.
    check_pipe()

    # Test for None.
    if spectrum_ids == None:
        warn(RelaxWarning("The spectrum ID list cannot be None."))
        return

    # Test if spectra have been loaded.
    if not hasattr(cdp, 'spectrum_ids'):
        raise RelaxError(
            "No spectra have been loaded therefore replicates cannot be specified."
        )

    # Test the spectrum id strings.
    for spectrum_id in spectrum_ids:
        check_spectrum_id(spectrum_id)

    # Test for more than one element.
    if len(spectrum_ids) == 1:
        warn(
            RelaxWarning(
                "The number of spectrum IDs in the list %s must be greater than one."
                % spectrum_ids))
        return

    # Initialise.
    if not hasattr(cdp, 'replicates'):
        cdp.replicates = []

    # Check if the spectrum IDs are already in the list.
    found = False
    for i in range(len(cdp.replicates)):
        # Loop over all the supplied spectrum IDs.
        for j in range(len(spectrum_ids)):
            if spectrum_ids[j] in cdp.replicates[i]:
                found = True

        # One of the spectrum IDs already has a replicate specified.
        if found:
            # Add the remaining replicates to the list and quit this function.
            for j in range(len(spectrum_ids)):
                if spectrum_ids[j] not in cdp.replicates[i]:
                    cdp.replicates[i].append(spectrum_ids[j])

            # Nothing more to do.
            return

    # A new set of replicates.
    cdp.replicates.append(spectrum_ids)
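A hypothetical call, assuming a current data pipe into which the two spectrum IDs 'ref_1' and 'ref_2' have already been loaded:

# Mark two repeat measurements of the same spectrum as replicates.
replicated(spectrum_ids=['ref_1', 'ref_2'])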
Code example #3
File: noesy.py Project: tlinnet/relax
def parse_noe_restraints(lines, proton1_col=None, proton2_col=None, lower_col=None, upper_col=None, sep=None):
    """Parse and return the NOE restraints from the generic formatted file.

    @param lines:           The file, or file fragment, split into lines.
    @type lines:            list of str
    @keyword proton1_col:   The column containing the first proton of the NOE or ROE cross peak.
    @type proton1_col:      None or int
    @keyword proton2_col:   The column containing the second proton of the NOE or ROE cross peak.
    @type proton2_col:      None or int
    @keyword lower_col:     The column containing the lower NOE bound.
    @type lower_col:        None or int
    @keyword upper_col:     The column containing the upper NOE bound.
    @type upper_col:        None or int
    @keyword sep:           The column separator (the default is white space).
    @type sep:              None or str
    @return:                The NOE restraint list in the format of two atom identification strings
                            and the lower and upper restraints.
    @rtype:                 list of lists of [str, str, float, float]
    """

    # Default column numbers.
    if proton1_col == None:
        warn(RelaxWarning("The proton1_col argument has not been supplied, defaulting to column 1."))
        proton1_col = 1
    if proton2_col == None:
        warn(RelaxWarning("The proton2_col argument has not been supplied, defaulting to column 2."))
        proton2_col = 2
    if lower_col == None:
        warn(RelaxWarning("The lower_col argument has not been supplied, defaulting to column 3."))
        lower_col = 3
    if upper_col == None:
        warn(RelaxWarning("The upper_col argument has not been supplied, defaulting to column 4."))
        upper_col = 4

    # Loop over the lines.
    data = []
    for line in lines:
        # Split the line.
        row = line.split(sep)

        # Header lines:
        if len(row) < 4:
            continue
        try:
            tokenise(row[proton1_col-1])
        except RelaxError:
            continue

        # Pack the data.
        data.append([row[proton1_col-1], row[proton2_col-1], float(row[lower_col-1]), float(row[upper_col-1])])

    # Return the data.
    return data
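A usage sketch, assuming a generic four-column restraint file (the file name is made up):

# Read the restraint file and split it into lines.
with open('noe_restraints.txt') as restraint_file:
    lines = restraint_file.readlines()

# Parse using the default column layout (proton 1, proton 2, lower bound, upper bound).
restraints = parse_noe_restraints(lines)
for atom1, atom2, lower, upper in restraints:
    print("%s - %s: %s to %s Angstrom" % (atom1, atom2, lower, upper))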
Code example #4
def sobol_setup(max_num=200, oversample=100):
    """Oversampling setup for the quasi-random Sobol' sequence used for numerical PCS integration.

    @keyword max_num:       The maximum number of integration points N.
    @type max_num:          int
    @keyword oversample:    The oversampling factor Ov used in the total point count N * Ov * 10**M, where M is the number of dimensions or torsion-tilt angles of the system.
    @type oversample:       int
    """

    # Test if the current data pipe exists.
    check_pipe()

    # Throw a warning to the user if not enough points are being used.
    if max_num < 200:
        warn(
            RelaxWarning(
                "To obtain reliable results in a frame order analysis, the maximum number of integration points should be greater than 200."
            ))

    # Store the values.
    cdp.sobol_max_points = max_num
    cdp.sobol_oversample = oversample

    # Count the number of Sobol' points for the current model.
    count_sobol_points()
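To make the oversampling formula concrete, a small sketch of the total point count N * Ov * 10**M for a hypothetical system with M = 2 torsion-tilt angles:

max_num = 200      # N, the maximum number of integration points.
oversample = 100   # Ov, the oversampling factor.
M = 2              # Hypothetical number of torsion-tilt angles.

# The total number of Sobol' points generated for the numerical PCS integration.
total = max_num * oversample * 10**M
print(total)    # 2000000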
Code example #5
File: checks.py Project: tlinnet/relax
    def __call__(self, *args, **kargs):
        """Make the object callable, and perform the checks.

        This will call the checking function supplied when the class was initialised, and then process the result according to the escalate keyword argument.


        @keyword escalate:      The feedback to give if the check fails.  This can be 0 for no printouts, 1 to throw a RelaxWarning, or 2 to raise a RelaxError.
        @type escalate:         int
        @raises RelaxError:     If escalate is set to 2 and the check fails.
        @return:                True if the check passes, False otherwise.
        @rtype:                 bool
        """

        # Remove the escalate keyword argument.
        if 'escalate' not in kargs:
            escalate = 2
        else:
            escalate = kargs['escalate']
            del kargs['escalate']

        # Perform the check.
        error = self.checks(*args, **kargs)

        # No errors.
        if error == None:
            return True

        # Send the text of the RelaxError object into the RelaxWarning system.
        if escalate == 1:
            warn(RelaxWarning(error.text))
            return False

        # The error system.
        if escalate == 2:
            raise error

        # No feedback requested (escalate of 0), but the check has still failed.
        return False
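A hypothetical usage sketch, assuming check_pipe is an instance of this callable class; the escalate keyword selects between silence, a RelaxWarning, and a RelaxError:

# Silently test the check (escalate of 0 gives no feedback on failure).
if not check_pipe(escalate=0):
    print("No data pipe is present.")

# Convert a failed check into a RelaxWarning rather than an error.
check_pipe(escalate=1)

# The default escalate value of 2 raises the stored RelaxError on failure.
check_pipe()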
Code example #6
File: selection.py Project: tlinnet/relax
    def contains_spin_id(self, spin_id):
        """Is the molecule, residue, and/or spin of the spin_id string located in the selection.

        Only the simple selections allowed by the tokenise function are currently supported.


        @param spin_id: The spin identification string.
        @type spin_id:  str
        @return:        The answer of whether the molecule, residue, and/or spin corresponding to the spin_id string is found within the selection object.
        @rtype:         bool
        """

        # No ID string.
        if spin_id == '':
            warn(RelaxWarning("The spin ID string '' is empty."))
            return False

        # Parse the spin_id string.
        mol_name, res_num, res_name, spin_num, spin_name = spin_id_to_data_list(
            spin_id)

        # Check if the spin is in the selection object.
        return self.contains_spin(spin_num=spin_num,
                                  spin_name=spin_name,
                                  res_num=res_num,
                                  res_name=res_name,
                                  mol=mol_name)
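A sketch under the assumption that a Selection object can be constructed from a simple residue range string (the construction shown is hypothetical):

# Build a selection covering residues 2 to 10.
sel = Selection(":2-10")

# Test individual spin IDs against the selection.
print(sel.contains_spin_id(':5@N'))     # Expected True, residue 5 is in the range.
print(sel.contains_spin_id(':50@N'))    # Expected False.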
Code example #7
File: relax_disp.py Project: tlinnet/relax
    def check_vars(self):
        """Check that the user has set the variables correctly."""

        # Printout.
        section(file=sys.stdout, text="Variable checking", prespace=2)

        # The pipe name.
        if not has_pipe(self.pipe_name):
            raise RelaxNoPipeError(self.pipe_name)

        # Check the model selection.
        allowed = ['AIC', 'AICc', 'BIC']
        if self.modsel not in allowed:
            raise RelaxError(
                "The model selection technique '%s' is not in the allowed list of %s."
                % (self.modsel, allowed))

        # Some warning for the user if the pure numeric solution is selected.
        if self.numeric_only:
            # Loop over all models.
            for model in self.models:
                # Skip the models used for nesting.
                if model in MODEL_LIST_NEST:
                    continue

                # Warnings for all other analytic models.
                if model in MODEL_LIST_ANALYTIC:
                    warn(
                        RelaxWarning(
                            "The analytic model '%s' will be optimised but will not be used in any way in this numeric model only auto-analysis."
                            % model))

        # Printout.
        print("The dispersion auto-analysis variables are OK.")
Code example #8
def select_model(model=None):
    """Select the N-state model type.

    @param model:   The N-state model type.  Can be one of '2-domain', 'population', or 'fixed'.
    @type model:    str
    """

    # Test if the current data pipe exists.
    check_pipe()

    # Test if the model name exists.
    if model not in ['2-domain', 'population', 'fixed']:
        raise RelaxError("The model name " + repr(model) + " is invalid.")

    # Test if the model is setup.
    if hasattr(cdp, 'model'):
        warn(
            RelaxWarning(
                "The N-state model has already been set up.  Switching from model '%s' to '%s'."
                % (cdp.model, model)))

    # Set the model
    cdp.model = model

    # Initialise the list of model parameters.
    cdp.params = []

    # Update the model.
    update_model()
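A hypothetical call selecting the population model type; repeating the call on the same data pipe would trigger the model-switching RelaxWarning above:

# Select the N-state population model for the current data pipe.
select_model(model='population')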
Code example #9
File: relax_data.py Project: tlinnet/relax
def temp_calibration(ri_id=None, method=None):
    """Set the temperature calibration method.

    @keyword ri_id:     The relaxation data ID string, i.e. 'R1', 'R2', or 'NOE'.
    @type ri_id:        str
    @keyword method:    The temperature calibration method.
    @type method:       str
    """

    # Test if the current pipe exists.
    check_pipe()

    # Test if sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Test if data exists.
    if not hasattr(cdp, 'ri_ids') or ri_id not in cdp.ri_ids:
        raise RelaxNoRiError(ri_id)

    # Check the values, and warn if not in the list.
    valid = ['methanol', 'monoethylene glycol', 'no calibration applied']
    if method not in valid:
        warn(
            RelaxWarning(
                "The '%s' method is unknown.  Please try to use one of %s." %
                (method, valid)))

    # Set up the experimental info data container, if needed.
    if not hasattr(cdp, 'exp_info'):
        cdp.exp_info = ExpInfo()

    # Store the method.
    cdp.exp_info.temp_calibration_setup(ri_id, method)
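A hypothetical call, assuming R1 relaxation data has already been loaded into the current pipe:

# Record that the sample temperature was calibrated with the methanol method.
temp_calibration(ri_id='R1', method='methanol')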
Code example #10
    def open_pdb(self):
        """Open the PDB file in PyMOL."""

        # Test if PyMOL is running.
        if not self.running():
            return

        # Reinitialise PyMOL.
        self.exec_cmd("reinitialize")

        # Open the PDB files.
        open_files = []
        for model in cdp.structure.structural_data:
            for mol in model.mol:
                # No file path.
                if not hasattr(mol, 'file_name'):
                    warn(RelaxWarning("Cannot display the current molecular data in PyMOL as it has not been exported as a PDB file."))
                    continue

                # The file path as the current directory.
                file_path = None
                if access(mol.file_name, F_OK):
                    file_path = mol.file_name

                # The file path using the relative path.
                if file_path == None and hasattr(mol, 'file_path') and mol.file_path != None:
                    file_path = mol.file_path + sep + mol.file_name
                    if not access(file_path, F_OK):
                        file_path = None

                # The file path using the absolute path.
                if file_path == None and hasattr(mol, 'file_path_abs') and mol.file_path_abs != None:
                    file_path = mol.file_path_abs + sep + mol.file_name
                    if not access(file_path, F_OK):
                        file_path = None

                # Hmmm, maybe the absolute path no longer exists and we are in a results subdirectory?
                if file_path == None and hasattr(mol, 'file_path') and mol.file_path != None:
                    file_path = pardir + sep + mol.file_path + sep + mol.file_name
                    if not access(file_path, F_OK):
                        file_path = None

                # Fall back to the current directory.
                if file_path == None:
                    file_path = mol.file_name

                # Already loaded.
                if file_path in open_files:
                    continue


                # Open the file in PyMOL.
                self.exec_cmd("load " + file_path)

                # Add to the open file list.
                open_files.append(file_path)
Code example #11
def frequency_checks(frq):
    """Perform a number of checks on the given proton frequency.

    @param frq:     The proton frequency value in Hertz.
    @type frq:      float or None
    """

    # No frequency given.
    if frq == None:
        return

    # Make sure the precise value has been supplied.
    frac, integer = modf(frq / 1e6)
    if frac == 0.0 or frac > 0.99999:
        warn(RelaxWarning("The precise spectrometer frequency should be supplied, a value such as 500000000 or 5e8 for a 500 MHz machine is not acceptable.  Please see the 'sfrq' parameter in the Varian procpar file or the 'SFO1' parameter in the Bruker acqus file."))

    # Check that the frequency value is reasonable.
    if frq < 1e8:
        warn(RelaxWarning("The proton frequency of %s Hz appears to be too low." % frq))
    if frq > 2e9:
        warn(RelaxWarning("The proton frequency of %s Hz appears to be too high." % frq))
Code example #12
def centre_of_mass(pos=None, elements=None, verbosity=1):
    """Calculate and return the centre of mass for the given atomic coordinates.

    @keyword pos:           The list of atomic coordinates.
    @type pos:              list of lists of float
    @keyword elements:      The list of elements corresponding to the atoms.
    @type elements:         list of str
    @keyword verbosity:     The amount of text to print out.  0 results in no printouts, 1 the full amount.
    @type verbosity:        int
    @return:                The centre of mass vector and the mass.
    @rtype:                 3D list of floats, float
    """

    # Print out.
    if verbosity:
        print("Calculating the centre of mass.")

    # Initialise the centre of mass.
    R = zeros(3, float64)

    # Initialise the total mass.
    M = 0.0

    # Loop over all atoms.
    for i in range(len(pos)):
        # Atomic mass.
        try:
            mass = periodic_table.atomic_mass(elements[i])
        except RelaxError:
            warn(
                RelaxWarning(
                    "Skipping the atom index %s as the element '%s' is unknown."
                    % (i, elements[i])))

            # Skip this atom, otherwise a stale or undefined mass would be summed below.
            continue

        # Total mass.
        M = M + mass

        # Sum of mass * position.
        R = R + mass * pos[i]

    # Normalise.
    R = R / M

    # Final printout.
    if verbosity:
        print("    Total mass:      M = " + repr(M))
        print("    Centre of mass:  R = " + repr(R))

    # Return the centre of mass and total mass
    return R, M
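A minimal usage sketch with made-up coordinates, assuming the module context of the snippet (periodic_table, zeros, float64); numpy arrays are used for the positions so that the mass * pos[i] vector arithmetic is valid:

from numpy import array

# Two atoms of a hypothetical N-H pair, 1.02 Angstrom apart along x.
pos = [array([0.0, 0.0, 0.0]), array([1.02, 0.0, 0.0])]
elements = ['N', 'H']

R, M = centre_of_mass(pos=pos, elements=elements, verbosity=0)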
Code example #13
File: api.py Project: tlinnet/relax
    def overfit_deselect(self, data_check=True, verbose=True):
        """Deselect spins which have insufficient data to support minimisation.

        @keyword data_check:    A flag to signal if the presence of base data is to be checked for.
        @type data_check:       bool
        @keyword verbose:       A flag which if True will allow printouts.
        @type verbose:          bool
        """

        # Nothing to do.
        if not data_check:
            return

        # Loop over spin data, checking for PCS data.
        ids = []
        for spin, spin_id in spin_loop(return_id=True, skip_desel=True):
            if not hasattr(spin, 'pcs'):
                spin.select = False
                ids.append(spin_id)
        if verbose and len(ids):
            warn(
                RelaxWarning(
                    "No PCS data is present, deselecting the spins %s." % ids))

        # Loop over the interatomic data containers, checking for RDC data.
        ids = []
        for interatom in interatomic_loop(selection1=domain_moving()):
            if not hasattr(interatom, 'rdc'):
                interatom.select = False
                ids.append("%s - %s" %
                           (interatom.spin_id1, interatom.spin_id2))
        if verbose and len(ids):
            warn(
                RelaxWarning(
                    "No RDC data is present, deselecting the interatomic data containers between spin pairs %s."
                    % ids))
Code example #14
File: molecules.py Project: tlinnet/relax
    def _det_pdb_element(self, atom_name):
        """Try to determine the element from the PDB atom name.

        @param atom_name:   The PDB atom name.
        @type atom_name:    str
        @return:            The element name, or None if unsuccessful.
        @rtype:             str or None
        """

        # Strip away the "'" character (for RNA, etc.).
        element = atom_name.strip("'")

        # Strip away atom numbering, from the front and end.
        element = element.strip(digits)

        # Amino acid atom translation table (note, numbers have been stripped already!).
        table = {
            'C': ['CA', 'CB', 'CG', 'CD', 'CE', 'CH', 'CZ'],
            'N': ['ND', 'NE', 'NH', 'NZ'],
            'H': ['HA', 'HB', 'HG', 'HD', 'HE', 'HH', 'HT', 'HZ'],
            'O': ['OG', 'OD', 'OE', 'OH', 'OT'],
            'S': ['SD', 'SG']
        }

        # NMR Pseudo-atoms (with trailing numbers stripped).
        table['H'] += [
            'QA', 'QB', 'QD', 'QE', 'QG', 'QH', 'QQD', 'QQG', 'QR', 'QZ'
        ]

        # Translate amino acids.
        for key in table:
            if element in table[key]:
                element = key
                break

        # Return the element if it is in the periodic table.
        if periodic_table.has_element(symbol=element):
            return element

        # Else, throw a warning.
        warn(
            RelaxWarning(
                "Cannot determine the element associated with atom '%s'." %
                atom_name))
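Hypothetical calls on a molecule container instance, showing the stripping and translation steps:

print(mol._det_pdb_element('CA'))     # 'C', via the amino acid table.
print(mol._det_pdb_element('HB2'))    # 'H', the trailing digit is stripped first.
print(mol._det_pdb_element("O5'"))    # 'O', the "'" and digits are stripped (RNA).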
Code example #15
File: optimisation.py Project: tlinnet/relax
def back_calc_peak_intensities(spin=None, spin_id=None, exp_type=None, frq=None, offset=None, point=None):
    """Back-calculation of peak intensity for the given relaxation time.

    @keyword spin:      The specific spin data container.
    @type spin:         SpinContainer instance
    @keyword spin_id:   The optional spin ID string for use in warning messages.
    @type spin_id:      str or None
    @keyword exp_type:  The experiment type.
    @type exp_type:     str
    @keyword frq:       The spectrometer frequency.
    @type frq:          float
    @keyword offset:    For R1rho-type data, the spin-lock offset value in ppm.
    @type offset:       None or float
    @keyword point:     The dispersion point data (either the spin-lock field strength in Hz or the nu_CPMG frequency in Hz).
    @type point:        float
    @return:            The back-calculated peak intensities for the given exponential curve.
    @rtype:             numpy rank-1 float array
    """

    # Check that back-calculation is allowed for this experiment type.
    if not has_exponential_exp_type():
        raise RelaxError("Back-calculation is not allowed for the fixed time experiment types.")

    # The key.
    param_key = return_param_key_from_data(exp_type=exp_type, frq=frq, offset=offset, point=point)

    # Create the initial parameter vector.
    param_vector = assemble_param_vector(spins=[spin], key=param_key)

    # The peak intensities and times.
    values = []
    errors = []
    times = []
    for time in loop_time(exp_type=exp_type, frq=frq, offset=offset, point=point):
        # Check the peak intensity keys.
        int_keys = find_intensity_keys(exp_type=exp_type, frq=frq, offset=offset, point=point, time=time)
        for i in range(len(int_keys)):
            if int_keys[i] not in spin.peak_intensity:
                if spin_id:
                    warn(RelaxWarning("The spin %s peak intensity key '%s' is not present, skipping the back-calculation." % (spin_id, int_keys[i])))
                else:
                    warn(RelaxWarning("The peak intensity key '%s' is not present, skipping the back-calculation." % int_keys[i]))
                return

        # The data.
        values.append(average_intensity(spin=spin, exp_type=exp_type, frq=frq, offset=offset, point=point, time=time))
        errors.append(average_intensity(spin=spin, exp_type=exp_type, frq=frq, offset=offset, point=point, time=time, error=True))
        times.append(time)

    # The scaling matrix in a diagonalised list form.
    scaling_list = []
    for i in range(len(param_vector)):
        scaling_list.append(1.0)

    # Initialise the relaxation fit functions.
    model = Relax_fit_opt(model='exp', num_params=len(spin.params), values=values, errors=errors, relax_times=times, scaling_matrix=scaling_list)

    # Make a single function call.  This will cause back calculation and the data will be stored in the C module.
    model.func(param_vector)

    # Get the data back.
    results = model.back_calc_data()

    # Return the back-calculated peak intensities.
    return results
Code example #16
File: relax_disp.py Project: tlinnet/relax
    def run(self):
        """Execute the auto-analysis."""

        # Peak intensity error analysis.
        if MODEL_R2EFF in self.models:
            self.error_analysis()

        # R1 parameter fitting.
        if self.r1_fit:
            subtitle(file=sys.stdout,
                     text="R1 parameter optimisation activation",
                     prespace=3)
            self.interpreter.relax_disp.r1_fit(fit=self.r1_fit)
        else:
            # No print out.
            self.interpreter.relax_disp.r1_fit(fit=self.r1_fit)

        # Loop over the models.
        self.model_pipes = []
        for model in self.models:
            # Printout.
            subtitle(file=sys.stdout,
                     text="The '%s' model" % model,
                     prespace=3)

            # The results directory path.
            model_path = model.replace(" ", "_")
            path = self.results_dir + sep + model_path

            # The name of the data pipe for the model.
            model_pipe = self.name_pipe(model)
            if self.is_model_for_selection(model):
                self.model_pipes.append(model_pipe)

            # Check that results do not already exist - i.e. a previous run was interrupted.
            path1 = path + sep + 'results'
            path2 = path1 + '.bz2'
            path3 = path1 + '.gz'
            if access(path1, F_OK) or access(path2, F_OK) or access(path3, F_OK):
                # Printout.
                print(
                    "Detected the presence of results files for the '%s' model - loading these instead of performing optimisation for a second time."
                    % model)

                # Create a data pipe and switch to it.
                self.interpreter.pipe.create(pipe_name=model_pipe,
                                             pipe_type='relax_disp',
                                             bundle=self.pipe_bundle)
                self.interpreter.pipe.switch(model_pipe)

                # Load the results.
                self.interpreter.results.read(file='results', dir=path)

                # Jump to the next model.
                continue

            # Create the data pipe by copying the base pipe, then switching to it.
            self.interpreter.pipe.copy(pipe_from=self.pipe_name,
                                       pipe_to=model_pipe,
                                       bundle_to=self.pipe_bundle)
            self.interpreter.pipe.switch(model_pipe)

            # Select the model.
            self.interpreter.relax_disp.select_model(model)

            # Copy the R2eff values from the R2eff model data pipe.
            if model != MODEL_R2EFF and MODEL_R2EFF in self.models:
                self.interpreter.value.copy(
                    pipe_from=self.name_pipe(MODEL_R2EFF),
                    pipe_to=model_pipe,
                    param='r2eff')

            # Calculate the R2eff values for the fixed relaxation time period data types.
            if model == MODEL_R2EFF and not has_exponential_exp_type():
                self.interpreter.minimise.calculate()

            # Optimise the model.
            else:
                self.optimise(model=model, model_path=model_path)

            # Write out the results.
            self.write_results(path=path, model=model)

        # The final model selection data pipe.
        if len(self.models) >= 2:
            # Printout.
            section(file=sys.stdout, text="Final results", prespace=2)

            # Perform model selection.
            self.interpreter.model_selection(
                method=self.modsel,
                modsel_pipe=self.name_pipe('final'),
                bundle=self.pipe_bundle,
                pipes=self.model_pipes)

            # Final Monte Carlo simulations only.
            if not self.mc_sim_all_models:
                self.interpreter.monte_carlo.setup(number=self.mc_sim_num)
                self.interpreter.monte_carlo.create_data()
                self.interpreter.monte_carlo.initial_values()
                self.interpreter.minimise.execute(
                    'simplex',
                    func_tol=self.opt_func_tol,
                    max_iter=self.opt_max_iterations,
                    constraints=True)
                if self.eliminate:
                    self.interpreter.eliminate()
                self.interpreter.monte_carlo.error_analysis()

            # Writing out the final results.
            self.write_results(path=self.results_dir + sep + 'final')

        # No model selection.
        else:
            warn(
                RelaxWarning(
                    "Model selection in the dispersion auto-analysis has been skipped as only %s models have been optimised."
                    % len(self.model_pipes)))

        # Finally save the program state.
        self.interpreter.state.save(state='final_state',
                                    dir=self.results_dir,
                                    force=True)
Code example #17
File: relax_disp.py Project: tlinnet/relax
    def optimise(self, model=None, model_path=None):
        """Optimise the model, taking model nesting into account.

        @keyword model:         The model to be optimised.
        @type model:            str
        @keyword model_path:    The folder name for the model, where possible spaces have been replaced with underscores.
        @type model_path:       str
        """

        # Printout.
        section(file=sys.stdout, text="Optimisation", prespace=2)

        # Deselect insignificant spins.
        if model not in [MODEL_R2EFF, MODEL_NOREX]:
            self.interpreter.relax_disp.insignificance(
                level=self.insignificance)

        # Speed up the grid search by using the minimum R2eff value.
        if self.set_grid_r20 and model != MODEL_R2EFF:
            self.interpreter.relax_disp.r20_from_min_r2eff(force=True)

        # Use pre-run results as the optimisation starting point.
        # Test if file exists.
        if self.pre_run_dir:
            path = self.pre_run_dir + sep + model_path
            # File path.
            file_path = get_file_path('results', path)

            # Test if the file exists and determine the compression type.
            try:
                compress_type, file_path = determine_compression(file_path)
                res_file_exists = True

            except RelaxFileError:
                res_file_exists = False

        if self.pre_run_dir and res_file_exists:
            self.pre_run_parameters(model=model, model_path=model_path)

        # Otherwise use the normal nesting check and grid search if not nested.
        else:
            # Nested model simplification.
            nested = self.nesting(model=model)

            # Otherwise use a grid search of default values to start optimisation with.
            if not nested:
                # Grid search.
                if self.grid_inc:
                    self.interpreter.minimise.grid_search(inc=self.grid_inc)

                # Default values.
                else:
                    # The standard parameters.
                    for param in MODEL_PARAMS[model]:
                        self.interpreter.value.set(param=param, index=None)

                    # The optional R1 parameter.
                    if is_r1_optimised(model=model):
                        self.interpreter.value.set(param='r1', index=None)

        # 'R2eff' model minimisation flags.
        do_minimise = False
        if model == MODEL_R2EFF:
            # The constraints flag.
            constraints = False

            # The minimisation algorithm to use.
            # Both the Jacobian and Hessian matrices have been specified for exponential curve-fitting, allowing the much faster algorithms to be used.
            min_algor = 'Newton'

            # Check if all spins contain 'r2eff' and its associated error.
            has_r2eff = False

            # Loop over all spins.
            for cur_spin, spin_id in spin_loop(return_id=True,
                                               skip_desel=True):
                # Check 'r2eff'
                if hasattr(cur_spin, 'r2eff') and hasattr(
                        cur_spin, 'r2eff_err'):
                    has_r2eff = True
                else:
                    has_r2eff = False
                    break

            # Skip optimisation, if 'r2eff' + 'r2eff_err' is present and flag for forcing optimisation is not raised.
            if has_r2eff and not self.optimise_r2eff:
                pass

            # Do optimisation, if 'r2eff' + 'r2eff_err' is present and flag for forcing optimisation is raised.
            elif has_r2eff and self.optimise_r2eff:
                do_minimise = True

            # Optimise, if no R2eff and error is present.
            elif not has_r2eff:
                do_minimise = True

        # Dispersion model minimisation flags.
        else:
            do_minimise = True
            constraints = True
            # The minimisation algorithm to use.  If the Jacobian and Hessian matrices have not been specified for fitting, 'simplex' should be used.
            min_algor = 'simplex'

        # Do the minimisation.
        if do_minimise:
            self.interpreter.minimise.execute(min_algor=min_algor,
                                              func_tol=self.opt_func_tol,
                                              max_iter=self.opt_max_iterations,
                                              constraints=constraints)

        # Model elimination.
        if self.eliminate:
            self.interpreter.eliminate()

        # Monte Carlo simulations.
        do_monte_carlo = False
        if model == MODEL_R2EFF:
            # The constraints flag.
            constraints = False

            # Both the Jacobian and Hessian matrices have been specified for exponential curve-fitting, allowing the much faster algorithms to be used.
            min_algor = 'Newton'

            # Skip optimisation, if 'r2eff' + 'r2eff_err' is present and flag for forcing optimisation is not raised.
            if has_r2eff and not self.optimise_r2eff:
                pass

            # Do optimisation, if 'r2eff' + 'r2eff_err' is present and flag for forcing optimisation is raised.
            elif has_r2eff and self.optimise_r2eff:
                do_monte_carlo = True

            # Optimise, if no R2eff and error is present.
            elif not has_r2eff:
                do_monte_carlo = True

        elif self.mc_sim_all_models or len(self.models) < 2:
            do_monte_carlo = True
            # The constraints flag.
            constraints = True
            # The minimisation algorithm to use.  If the Jacobian and Hessian matrices have not been specified for fitting, 'simplex' should be used.
            min_algor = 'simplex'

        # Error estimation by Monte Carlo simulations.
        if do_monte_carlo:
            # Set the number of Monte-Carlo simulations.
            monte_carlo_sim = self.mc_sim_num

            # If the number for exponential curve fitting has been set.
            if model == MODEL_R2EFF and self.exp_mc_sim_num != None:
                monte_carlo_sim = self.exp_mc_sim_num

            # When set to minus 1, the errors will instead be estimated from the covariance matrix.
            # This is HIGHLY likely to be wrong, but can be used in an initial testing phase.
            if model == MODEL_R2EFF and self.exp_mc_sim_num == -1:
                # Print
                subsection(file=sys.stdout,
                           text="Estimating errors from Covariance matrix",
                           prespace=1)

                # Raise warning.
                text = 'Estimating errors from the Covariance matrix is highly likely to be "quite" wrong.  Use only with extreme care, and for initial rapid testing of your data.'
                warn(RelaxWarning(text))

                # Estimate errors
                self.interpreter.relax_disp.r2eff_err_estimate()
            else:
                self.interpreter.monte_carlo.setup(number=monte_carlo_sim)
                self.interpreter.monte_carlo.create_data()
                self.interpreter.monte_carlo.initial_values()
                self.interpreter.minimise.execute(
                    min_algor=min_algor,
                    func_tol=self.opt_func_tol,
                    max_iter=self.opt_max_iterations,
                    constraints=constraints)
                if self.eliminate:
                    self.interpreter.eliminate()
                self.interpreter.monte_carlo.error_analysis()
Code example #18
def read_spin_data(file=None, dir=None, file_data=None, spin_id_col=None, mol_name_col=None, res_num_col=None, res_name_col=None, spin_num_col=None, spin_name_col=None, data_col=None, error_col=None, sep=None, spin_id=None, raise_flag=True):
    """Generator function for reading the spin specific data from file.

    Description
    ===========

    This function reads a columnar formatted file where each line corresponds to a spin system. Spin identification is either through a spin ID string or through columns containing the molecule name, residue name and number, and/or spin name and number.


    @keyword file:          The name of the file to open.
    @type file:             str
    @keyword dir:           The directory containing the file (defaults to the current directory if None).
    @type dir:              str or None
    @keyword file_data:     An alternative to opening a file, if the data already exists in the correct format.  The format is a list of lists where the first index corresponds to the row and the second the column.
    @type file_data:        list of lists
    @keyword spin_id_col:   The column containing the spin ID strings.  If supplied, the mol_name_col, res_name_col, res_num_col, spin_name_col, and spin_num_col arguments must be none.
    @type spin_id_col:      int or None
    @keyword mol_name_col:  The column containing the molecule name information.  If supplied, spin_id_col must be None.
    @type mol_name_col:     int or None
    @keyword res_name_col:  The column containing the residue name information.  If supplied, spin_id_col must be None.
    @type res_name_col:     int or None
    @keyword res_num_col:   The column containing the residue number information.  If supplied, spin_id_col must be None.
    @type res_num_col:      int or None
    @keyword spin_name_col: The column containing the spin name information.  If supplied, spin_id_col must be None.
    @type spin_name_col:    int or None
    @keyword spin_num_col:  The column containing the spin number information.  If supplied, spin_id_col must be None.
    @type spin_num_col:     int or None
    @keyword data_col:      The column containing the data.
    @type data_col:         int or None
    @keyword error_col:     The column containing the errors.
    @type error_col:        int or None
    @keyword sep:           The column separator which, if None, defaults to whitespace.
    @type sep:              str or None
    @keyword spin_id:       The spin ID string used to restrict data loading to a subset of all spins.
    @type spin_id:          None or str
    @keyword raise_flag:    A flag which if True will cause a RelaxError to be raised if no data can be found.
    @type raise_flag:       bool
    @return:                The spin specific data, yielded spin by spin as the molecule name, residue number, residue name, spin number, and spin name, followed by the data value (if data_col is given) and the error value (if error_col is given).  If both data_col and error_col are None, only the identification data is yielded.
    @rtype:                 tuple of (str, int, str, int, str), optionally with one or two trailing floats
    """

    # Argument tests.
    col_args = [spin_id_col, mol_name_col, res_name_col, res_num_col, spin_name_col, spin_num_col, data_col, error_col]
    col_arg_names = ['spin_id_col', 'mol_name_col', 'res_name_col', 'res_num_col', 'spin_name_col', 'spin_num_col', 'data_col', 'error_col']
    for i in range(len(col_args)):
        if col_args[i] == 0:
            raise RelaxError("The '%s' argument cannot be zero, column numbering starts at one." % col_arg_names[i])
    if spin_id_col and (mol_name_col or res_name_col or res_num_col or spin_name_col or spin_num_col):
        raise RelaxError("If the 'spin_id_col' argument has been supplied, then the mol_name_col, res_name_col, res_num_col, spin_name_col, and spin_num_col must all be set to None.")

    # Minimum number of columns.
    min_col_num = max([_f for _f in [spin_id_col, mol_name_col, res_num_col, res_name_col, spin_num_col, spin_name_col, data_col, error_col] if _f])

    # Extract the data from the file.
    if not file_data:
        # Extract.
        file_data = extract_data(file, dir, sep=sep)

        # Strip the data of all comments and empty lines.
        if spin_id_col != None:
            file_data = strip(file_data, comments=False)
        else:
            file_data = strip(file_data)

    # No data!
    if not file_data:
        warn(RelaxFileEmptyWarning(file))
        return

    # Yield the data, spin by spin.
    missing_data = True
    for line in file_data:
        # Convert the spin IDs.
        if spin_id_col != None and line[spin_id_col-1][0] in ["\"", "\'"]:
            line[spin_id_col-1] = eval(line[spin_id_col-1])

        # Validate the sequence.
        if not check_sequence(line, spin_id_col=spin_id_col, mol_name_col=mol_name_col, res_num_col=res_num_col, res_name_col=res_name_col, spin_num_col=spin_num_col, spin_name_col=spin_name_col, data_col=data_col, error_col=error_col, escalate=1):
            continue

        # Get the spin data from the ID.
        if spin_id_col:
            # Invalid spin ID.
            if line[spin_id_col-1] == '#':
                warn(RelaxWarning("Invalid spin ID, skipping the line %s" % line))
                continue

            mol_name, res_num, res_name, spin_num, spin_name = spin_id_to_data_list(line[spin_id_col-1])

        # Convert the spin data.
        else:
            # The molecule.
            mol_name = None
            if mol_name_col != None and line[mol_name_col-1] != 'None':
                mol_name = line[mol_name_col-1]

            # The residue number, catching bad values.
            res_num = None
            if res_num_col != None:
                try:
                    if line[res_num_col-1] == 'None':
                        res_num = None
                    else:
                        res_num = int(line[res_num_col-1])
                except ValueError:
                    warn(RelaxWarning("Invalid residue number, skipping the line %s" % line))
                    continue

            # The residue name.
            res_name = None
            if res_name_col != None and line[res_name_col-1] != 'None':
                res_name = line[res_name_col-1]

            # The spin number, catching bad values.
            spin_num = None
            if spin_num_col != None:
                try:
                    if line[spin_num_col-1] == 'None':
                        spin_num = None
                    else:
                        spin_num = int(line[spin_num_col-1])
                except ValueError:
                    warn(RelaxWarning("Invalid spin number, skipping the line %s" % line))
                    continue

            # The spin name.
            spin_name = None
            if spin_name_col != None and line[spin_name_col-1] != 'None':
                spin_name = line[spin_name_col-1]

        # Convert the data.
        value = None
        if data_col != None:
            try:
                # None.
                if line[data_col-1] == 'None':
                    value = None

                # A float.
                else:
                    value = float(line[data_col-1])

                    # If it is a float, test whether it is finite.
                    if not isFinite(value):
                        warn(RelaxWarning("The value is not finite, skipping the line %s" % line))
                        continue

            # Bad data.
            except ValueError:
                warn(RelaxWarning("Invalid data, skipping the line %s" % line))
                continue

        # Convert the errors.
        error = None
        if error_col != None:
            try:
                # None.
                if line[error_col-1] == 'None':
                    error = None

                # A float.
                else:
                    error = float(line[error_col-1])

                    # If it is a float, test whether it is finite.
                    if not isFinite(error):
                        warn(RelaxWarning("The error is not finite, skipping the line %s" % line))
                        continue

            # Bad data.
            except ValueError:
                warn(RelaxWarning("Invalid errors, skipping the line %s" % line))
                continue

        # Right, data is OK and exists.
        missing_data = False

        # Yield the data.
        if data_col and error_col:
            yield mol_name, res_num, res_name, spin_num, spin_name, value, error
        elif data_col:
            yield mol_name, res_num, res_name, spin_num, spin_name, value
        elif error_col:
            yield mol_name, res_num, res_name, spin_num, spin_name, error
        else:
            yield mol_name, res_num, res_name, spin_num, spin_name

    # Hmmm, no data!
    if raise_flag and missing_data:
        raise RelaxError("No corresponding data could be found within the file.")
Code example #19
File: spectrum.py Project: tlinnet/relax
def read(file=None,
         dir=None,
         spectrum_id=None,
         dim=1,
         int_col=None,
         int_method=None,
         spin_id_col=None,
         mol_name_col=None,
         res_num_col=None,
         res_name_col=None,
         spin_num_col=None,
         spin_name_col=None,
         sep=None,
         spin_id=None,
         ncproc=None,
         verbose=True):
    """Read the peak intensity data.

    @keyword file:          The name of the file(s) containing the peak intensities.
    @type file:             str or list of str
    @keyword dir:           The directory where the file is located.
    @type dir:              str
    @keyword spectrum_id:   The spectrum identification string.
    @type spectrum_id:      str or list of str
    @keyword dim:           The dimension of the peak list to associate the data with.
    @type dim:              int
    @keyword int_col:       The column containing the peak intensity data (used by the generic intensity file format).
    @type int_col:          int or list of int
    @keyword int_method:    The integration method, one of 'height', 'point sum' or 'other'.
    @type int_method:       str
    @keyword spin_id_col:   The column containing the spin ID strings (used by the generic intensity file format).  If supplied, the mol_name_col, res_name_col, res_num_col, spin_name_col, and spin_num_col arguments must be none.
    @type spin_id_col:      int or None
    @keyword mol_name_col:  The column containing the molecule name information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type mol_name_col:     int or None
    @keyword res_name_col:  The column containing the residue name information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type res_name_col:     int or None
    @keyword res_num_col:   The column containing the residue number information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type res_num_col:      int or None
    @keyword spin_name_col: The column containing the spin name information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type spin_name_col:    int or None
    @keyword spin_num_col:  The column containing the spin number information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type spin_num_col:     int or None
    @keyword sep:           The column separator which, if None, defaults to whitespace.
    @type sep:              str or None
    @keyword spin_id:       The spin ID string used to restrict data loading to a subset of all spins.  If 'auto' is provided for an NMRPipe seriesTab formatted file, the IDs are auto-generated in the form Z_Ai.
    @type spin_id:          None or str
    @keyword ncproc:        The Bruker ncproc binary intensity scaling factor.
    @type ncproc:           int or None
    @keyword verbose:       A flag which if True will cause all relaxation data loaded to be printed out.
    @type verbose:          bool
    """

    # Data checks.
    check_pipe()
    check_mol_res_spin_data()

    # Check the file name.
    if file == None:
        raise RelaxError("The file name must be supplied.")

    # Test that the intensity measures are identical.
    if hasattr(cdp, 'int_method') and cdp.int_method != int_method:
        raise RelaxError(
            "The '%s' measure of peak intensities does not match '%s' of the previously loaded spectra."
            % (int_method, cdp.int_method))

    # Multiple ID flags.
    flag_multi = False
    flag_multi_file = False
    flag_multi_col = False
    if isinstance(spectrum_id, list) or spectrum_id == 'auto':
        flag_multi = True
    if isinstance(file, list):
        flag_multi_file = True
    if isinstance(int_col, list) or spectrum_id == 'auto':
        flag_multi_col = True

    # List argument checks.
    if flag_multi:
        # Too many lists.
        if flag_multi_file and flag_multi_col:
            raise RelaxError(
                "If a list of spectrum IDs is supplied, the file names and intensity column arguments cannot both be lists."
            )

        # Not enough lists.
        if not flag_multi_file and not flag_multi_col:
            raise RelaxError(
                "If a list of spectrum IDs is supplied, either the file name or intensity column arguments must be a list of equal length."
            )

        # List lengths for multiple files.
        if flag_multi_file and len(spectrum_id) != len(file):
            raise RelaxError(
                "The file list %s and spectrum ID list %s do not have the same number of elements."
                % (file, spectrum_id))

        # List lengths for multiple intensity columns.
        if flag_multi_col and spectrum_id != 'auto' and len(spectrum_id) != len(int_col):
            raise RelaxError(
                "The spectrum ID list %s and intensity column list %s do not have the same number of elements."
                % (spectrum_id, int_col))

    # More list argument checks (when only one spectrum ID is supplied).
    else:
        # Multiple files.
        if flag_multi_file:
            raise RelaxError(
                "If multiple files are supplied, then multiple spectrum IDs must also be supplied."
            )

        # Multiple intensity columns.
        if flag_multi_col:
            raise RelaxError(
                "If multiple intensity columns are supplied, then multiple spectrum IDs must also be supplied."
            )

    # Intensity column checks.
    if spectrum_id != 'auto' and not flag_multi and flag_multi_col:
        raise RelaxError(
            "If a list of intensity columns is supplied, the spectrum ID argument must also be a list of equal length."
        )

    # Check the intensity measure.
    if int_method not in ['height', 'point sum', 'other']:
        raise RelaxError(
            "The intensity measure '%s' is not one of 'height', 'point sum', 'other'."
            % int_method)

    # Set the peak intensity measure.
    cdp.int_method = int_method

    # Convert the file argument to a list if necessary.
    if not isinstance(file, list):
        file = [file]

    # Loop over all files.
    for file_index in range(len(file)):
        # Read the peak list data.
        peak_list = read_peak_list(file=file[file_index],
                                   dir=dir,
                                   int_col=int_col,
                                   spin_id_col=spin_id_col,
                                   mol_name_col=mol_name_col,
                                   res_num_col=res_num_col,
                                   res_name_col=res_name_col,
                                   spin_num_col=spin_num_col,
                                   spin_name_col=spin_name_col,
                                   sep=sep,
                                   spin_id=spin_id)

        # Automatic spectrum IDs.
        if spectrum_id == 'auto':
            spectrum_id = peak_list[0].intensity_name

        # Loop over the assignments.
        data = []
        data_flag = False
        for assign in peak_list:
            # Generate the spin_id.
            spin_id = generate_spin_id_unique(res_num=assign.res_nums[dim - 1], spin_name=assign.spin_names[dim - 1])

            # Convert the intensity data to a list if needed.
            intensity = assign.intensity
            if not isinstance(intensity, list):
                intensity = [intensity]

            # Loop over the intensity data.
            for int_index in range(len(intensity)):
                # Sanity check.
                if intensity[int_index] == 0.0:
                    warn(
                        RelaxWarning(
                            "A peak intensity of zero has been encountered for the spin '%s' - this could be fatal later on."
                            % spin_id))

                # Get the spin container.
                spin = return_spin(spin_id=spin_id)
                if not spin:
                    warn(RelaxNoSpinWarning(spin_id))
                    continue

                # Skip deselected spins.
                if not spin.select:
                    continue

                # Initialise.
                if not hasattr(spin, 'peak_intensity'):
                    spin.peak_intensity = {}

                # Intensity scaling.
                if ncproc != None:
                    intensity[int_index] = intensity[int_index] / float(2**ncproc)

                # Add the data.
                if flag_multi_file:
                    id = spectrum_id[file_index]
                elif flag_multi_col:
                    id = spectrum_id[int_index]
                else:
                    id = spectrum_id
                spin.peak_intensity[id] = intensity[int_index]

                # Switch the flag.
                data_flag = True

                # Append the data for printing out.
                data.append([spin_id, repr(intensity[int_index])])

        # Add the spectrum id (and ncproc) to the relax data store.
        spectrum_ids = spectrum_id
        if isinstance(spectrum_id, str):
            spectrum_ids = [spectrum_id]
        if ncproc != None and not hasattr(cdp, 'ncproc'):
            cdp.ncproc = {}
        for i in range(len(spectrum_ids)):
            add_spectrum_id(spectrum_ids[i])
            if ncproc != None:
                cdp.ncproc[spectrum_ids[i]] = ncproc

        # No data.
        if not data_flag:
            # Delete all the data.
            delete(spectrum_id)

            # Raise the error.
            raise RelaxError("No data could be loaded from the peak list")

        # Printout.
        if verbose:
            print(
                "\nThe following intensities have been loaded into the relax data store:\n"
            )
            write_data(out=sys.stdout,
                       headings=["Spin_ID", "Intensity"],
                       data=data)
            print('')
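A hypothetical call loading peak heights from a single peak list file (the file name and spectrum ID are made up):

# Load the peak heights for one spectrum into the current data pipe.
read(file='peaks_ref.list', spectrum_id='ref', int_method='height')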
Code example #20
def read(file=None, dir=None, file_data=None, spin_id1_col=None, spin_id2_col=None, data_col=None, error_col=None, sign_col=None, sep=None):
    """Read the J coupling data from file.

    @keyword file:          The name of the file to open.
    @type file:             str
    @keyword dir:           The directory containing the file (defaults to the current directory if None).
    @type dir:              str or None
    @keyword file_data:     An alternative to opening a file, if the data already exists in the correct format.  The format is a list of lists where the first index corresponds to the row and the second the column.
    @type file_data:        list of lists
    @keyword spin_id1_col:  The column containing the spin ID strings of the first spin.
    @type spin_id1_col:     int
    @keyword spin_id2_col:  The column containing the spin ID strings of the second spin.
    @type spin_id2_col:     int
    @keyword data_col:      The column containing the J coupling data in Hz.
    @type data_col:         int or None
    @keyword error_col:     The column containing the J coupling errors.
    @type error_col:        int or None
    @keyword sign_col:      The optional column containing the sign of the J coupling.
    @type sign_col:         int or None
    @keyword sep:           The column separator which, if None, defaults to whitespace.
    @type sep:              str or None
    """

    # Check the pipe setup.
    check_pipe_setup(sequence=True)

    # Either the data or error column must be supplied.
    if data_col == None and error_col == None:
        raise RelaxError("One of either the data or error column must be supplied.")

    # Extract the data from the file, and remove comments and blank lines.
    file_data = extract_data(file, dir, sep=sep)
    file_data = strip(file_data, comments=True)

    # Loop over the J coupling data.
    data = []
    for line in file_data:
        # Invalid columns.
        if spin_id1_col > len(line):
            warn(RelaxWarning("The data %s is invalid, no first spin ID column can be found." % line))
            continue
        if spin_id2_col > len(line):
            warn(RelaxWarning("The data %s is invalid, no second spin ID column can be found." % line))
            continue
        if data_col and data_col > len(line):
            warn(RelaxWarning("The data %s is invalid, no data column can be found." % line))
            continue
        if error_col and error_col > len(line):
            warn(RelaxWarning("The data %s is invalid, no error column can be found." % line))
            continue
        if sign_col and sign_col > len(line):
            warn(RelaxWarning("The data %s is invalid, no sign column can be found." % line))
            continue

        # Unpack.
        spin_id1 = line[spin_id1_col-1]
        spin_id2 = line[spin_id2_col-1]
        value = None
        if data_col:
            value = line[data_col-1]
        error = None
        if error_col:
            error = line[error_col-1]
        sign = None
        if sign_col:
            sign = line[sign_col-1]

        # Convert the spin IDs.
        if spin_id1[0] in ["\"", "\'"]:
            spin_id1 = eval(spin_id1)
        if spin_id2[0] in ["\"", "\'"]:
            spin_id2 = eval(spin_id2)

        # Convert and check the value.
        if value == 'None':
            value = None
        if value != None:
            try:
                value = float(value)
            except ValueError:
                warn(RelaxWarning("The J coupling value of the line %s is invalid." % line))
                continue

        # The sign data.
        if sign == 'None':
            sign = None
        if sign != None:
            try:
                sign = float(sign)
            except ValueError:
                warn(RelaxWarning("The J coupling sign of the line %s is invalid." % line))
                continue
            if sign not in [1.0, -1.0]:
                warn(RelaxWarning("The J coupling sign of the line %s is invalid." % line))
                continue

        # Convert and check the error.
        if error == 'None':
            error = None
        if error != None:
            try:
                error = float(error)
            except ValueError:
                warn(RelaxWarning("The error value of the line %s is invalid." % line))
                continue

        # Get the spins.
        spin1 = return_spin(spin_id=spin_id1)
        spin2 = return_spin(spin_id=spin_id2)

        # Check the spin IDs.
        if not spin1:
            warn(RelaxWarning("The spin ID '%s' cannot be found in the current data pipe, skipping the data %s." % (spin_id1, line)))
            continue
        if not spin2:
            warn(RelaxWarning("The spin ID '%s' cannot be found in the current data pipe, skipping the data %s." % (spin_id2, line)))
            continue

        # Test the error value (cannot be 0.0).
        if error == 0.0:
            raise RelaxError("An invalid error value of zero has been encountered.")

        # Get the interatomic data container.
        interatom = return_interatom(spin_hash1=spin1._hash, spin_hash2=spin2._hash)

        # Create the container if needed.
        if interatom == None:
            interatom = create_interatom(spin_id1=spin_id1, spin_id2=spin_id2)

        # Add the data.
        if data_col:
            # Sign conversion.
            if sign != None:
                value = value * sign

            # Add the value.
            interatom.j_coupling = value

        # Add the error.
        if error_col:
            interatom.j_coupling_err = error

        # Append the data for printout.
        data.append([spin_id1, spin_id2])
        if is_float(value):
            data[-1].append("%20.15f" % value)
        else:
            data[-1].append("%20s" % value)
        if is_float(error):
            data[-1].append("%20.15f" % error)
        else:
            data[-1].append("%20s" % error)

    # No data, so fail hard!
    if not len(data):
        raise RelaxError("No J coupling data could be extracted.")

    # Print out.
    print("The following J coupling have been loaded into the relax data store:\n")
    write_data(out=sys.stdout, headings=["Spin_ID1", "Spin_ID2", "Value", "Error"], data=data)
Code example #21
File: io.py  Project: bopopescu/relax
def open_write_file(file_name=None,
                    dir=None,
                    force=False,
                    compress_type=0,
                    verbosity=1,
                    return_path=False):
    """Function for opening a file for writing and creating directories if necessary.

    @keyword file_name:     The name of the file to open for writing.
    @type file_name:        str
    @keyword dir:           The path where the file is located.  If None, then the current directory is assumed.
    @type dir:              str
    @keyword force:         Boolean argument which if True causes the file to be overwritten if it already exists.
    @type force:            bool
    @keyword compress_type: The compression type.  The integer values correspond to the compression type: 0, no compression; 1, Bzip2 compression; 2, Gzip compression.  If no compression is given but the file name ends in '.gz' or '.bz2', then the compression will be automatically set.
    @type compress_type:    int
    @keyword verbosity:     The verbosity level.
    @type verbosity:        int
    @keyword return_path:   If True, the function will return a tuple of the file object and the full file path.
    @type return_path:      bool
    @return:                The open, writable file object and, if the return_path is True, then the full file path is returned as well.
    @rtype:                 writable file object (if return_path, then a tuple of the writable file and the full file path)
    """

    # No file name?
    if file_name == None:
        raise RelaxError("The name of the file must be supplied.")

    # A file descriptor object.
    if is_filetype(file_name):
        # Nothing to do here!
        return file_name

    # Something pretending to be a file object.
    if hasattr(file_name, 'write'):
        # Nothing to do here!
        return file_name

    # The null device.
    if search('devnull', file_name):
        # Print out.
        if verbosity:
            print("Opening the null device file for writing.")

        # Open the null device.
        file_obj = open(devnull, 'w')

        # Return the file.
        if return_path:
            return file_obj, None
        else:
            return file_obj

    # Create the directories.
    mkdir_nofail(dir, verbosity=0)

    # File path.
    file_path = get_file_path(file_name, dir)

    # If no compression is supplied, determine the compression to be used from the file extension.
    if compress_type == 0:
        if search('.bz2$', file_path):
            compress_type = 1
        elif search('.gz$', file_path):
            compress_type = 2

    # Bzip2 compression.
    if compress_type == 1 and not search('.bz2$', file_path):
        # Bz2 module exists.
        if bz2:
            file_path = file_path + '.bz2'

        # Switch to gzip compression.
        else:
            warn(
                RelaxWarning(
                    "Cannot use Bzip2 compression, using gzip compression instead.  "
                    + bz2_module_message + "."))
            compress_type = 2

    # Gzip compression.
    if compress_type == 2 and not search('.gz$', file_path):
        file_path = file_path + '.gz'

    # Fail if the file already exists and the force flag is not set.
    if access(file_path, F_OK) and not force:
        raise RelaxFileOverwriteError(file_path, 'force flag')

    # Open the file for writing.
    try:
        # Print out.
        if verbosity:
            print("Opening the file " + repr(file_path) + " for writing.")

        # Uncompressed text.
        if compress_type == 0:
            file_obj = open(file_path, 'w')

        # Bzip2 compressed text.
        elif compress_type == 1:
            file_obj = bz2_open(file=file_path, mode='w')

        # Gzipped compressed text.
        elif compress_type == 2:
            file_obj = gz_open(file=file_path, mode='w')

    # Cannot open.
    except IOError:
        message = sys.exc_info()[1]
        raise RelaxError("Cannot open the file " + repr(file_path) + ".  " +
                         message.args[1] + ".")

    # Return the opened file.
    if return_path:
        return file_obj, file_path
    else:
        return file_obj
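A hedged usage sketch for open_write_file() above: the '.gz' extension triggers automatic gzip compression and force=True permits overwriting. The file and directory names are hypothetical.

# Open 'results/data.txt.gz' for writing, with the directory created
# and the gzip compression selected from the file extension.
file_obj, path = open_write_file(file_name='data.txt.gz', dir='results',
                                 force=True, return_path=True)
file_obj.write("Hello, relax!\n")
file_obj.close()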
Code example #22
File: sparky.py  Project: tlinnet/relax
def read_list(peak_list=None, file_data=None):
    """Extract the peak intensity information from the Sparky peak intensity file.

    @keyword peak_list: The peak list object to place all data into.
    @type peak_list:    lib.spectrum.objects.Peak_list instance
    @keyword file_data: The data extracted from the file converted into a list of lists.
    @type file_data:    list of lists of str
    @raises RelaxError: When the expected peak intensity is not a float.
    """

    # The number of header lines.
    num = 0
    if file_data[0][0] == 'Assignment':
        num = num + 1
    if file_data[1] == '':
        num = num + 1
    print("Number of header lines found: %s" % num)

    # The columns according to the file.
    w1_col = None
    w2_col = None
    w3_col = None
    w4_col = None
    int_col = None
    for i in range(len(file_data[0])):
        # The chemical shifts.
        if file_data[0][i] == 'w1':
            w1_col = i
        elif file_data[0][i] == 'w2':
            w2_col = i
        elif file_data[0][i] == 'w3':
            w3_col = i
        elif file_data[0][i] == 'w4':
            w4_col = i

        # The peak height.
        elif file_data[0][i] == 'Height':
            # The peak height column when exported from CcpNmr Analysis without the 'Data' prefix.
            int_col = i

            # The peak height when exported from Sparky, where the header contains 'Data Height'.
            if file_data[0][i - 1] == 'Data':
                int_col = i - 1

        # The peak volume.
        elif file_data[0][i] == 'Intensity':
            int_col = i

    # Remove the header.
    file_data = file_data[num:]

    # Strip the data.
    file_data = strip(file_data)

    # The dimensionality.
    if w4_col != None:
        dim = 4
    elif w3_col != None:
        dim = 3
    elif w2_col != None:
        dim = 2
    elif w1_col != None:
        dim = 1
    else:
        raise RelaxError(
            "The dimensionality of the peak list cannot be determined.")
    print("%sD peak list detected." % dim)

    # Loop over the file data.
    for line in file_data:
        # Skip non-assigned peaks.
        if line[0] == '?-?':
            continue

        # Split up the assignments.
        if dim == 1:
            assign1 = line[0]
        elif dim == 2:
            assign1, assign2 = split('-', line[0])
        elif dim == 3:
            assign1, assign2, assign3 = split('-', line[0])
        elif dim == 4:
            assign1, assign2, assign3, assign4 = split('-', line[0])

        # Process the assignment for each dimension.
        if dim >= 1:
            row1 = split('([a-zA-Z]+)', assign1)
            name1 = row1[-2] + row1[-1]
        if dim >= 2:
            row2 = split('([a-zA-Z]+)', assign2)
            name2 = row2[-2] + row2[-1]
        if dim >= 3:
            row3 = split('([a-zA-Z]+)', assign3)
            name3 = row3[-2] + row3[-1]
        if dim >= 4:
            row4 = split('([a-zA-Z]+)', assign4)
            name4 = row4[-2] + row4[-1]

        # Get the residue number for dimension 1.
        got_res_num1 = True
        try:
            res_num1 = int(row1[-3])
        except:
            got_res_num1 = False
            raise RelaxError(
                "Improperly formatted Sparky file, cannot process the residue number for dimension 1 in assignment: %s."
                % line[0])

        # Get the residue number for dimension 2.
        try:
            res_num2 = int(row2[-3])
        except:
            # We cannot always expect dimension 2 to have residue number.
            if got_res_num1:
                res_num2 = res_num1
            else:
                res_num2 = None
                warn(
                    RelaxWarning(
                        "Improperly formatted Sparky file, cannot process the residue number for dimension 2 in assignment: %s. Setting residue number to %s."
                        % (line[0], res_num2)))

        # The residue name for dimension 1.
        got_res_name1 = True
        try:
            res_name1 = row1[-4]
        except:
            got_res_name1 = False
            res_name1 = None
            warn(
                RelaxWarning(
                    "Improperly formatted Sparky file, cannot process the residue name for dimension 1 in assignment: %s. Setting residue name to %s."
                    % (line[0], res_name1)))

        # The residue name for dimension 2.
        try:
            res_name2 = row2[-4]
        except:
            # We cannot always expect dimension 2 to have residue name.
            if got_res_name1:
                res_name2 = res_name1
            else:
                res_name2 = None
                warn(
                    RelaxWarning(
                        "Improperly formatted Sparky file, cannot process the residue name for dimension 2 in assignment: %s. Setting residue name to %s."
                        % (line[0], res_name2)))

        # Chemical shifts.
        w1 = None
        w2 = None
        w3 = None
        w4 = None
        if w1_col != None:
            try:
                w1 = float(line[w1_col])
            except ValueError:
                raise RelaxError(
                    "The chemical shift from the line %s is invalid." % line)
        if w2_col != None:
            try:
                w2 = float(line[w2_col])
            except ValueError:
                raise RelaxError(
                    "The chemical shift from the line %s is invalid." % line)
        if w3_col != None:
            try:
                w3 = float(line[w3_col])
            except ValueError:
                raise RelaxError(
                    "The chemical shift from the line %s is invalid." % line)
        if w4_col != None:
            try:
                w4 = float(line[w4_col])
            except ValueError:
                raise RelaxError(
                    "The chemical shift from the line %s is invalid." % line)

        # Intensity.
        if int_col != None:
            try:
                intensity = float(line[int_col])
            except ValueError:
                raise RelaxError(
                    "The peak intensity value from the line %s is invalid." %
                    line)

            # Add the assignment to the peak list object.
            if dim == 1:
                peak_list.add(res_nums=[res_num1],
                              res_names=[res_name1],
                              spin_names=[name1],
                              shifts=[w1],
                              intensity=intensity)
            elif dim == 2:
                peak_list.add(res_nums=[res_num1, res_num2],
                              res_names=[res_name1, res_name2],
                              spin_names=[name1, name2],
                              shifts=[w1, w2],
                              intensity=intensity)
            elif dim == 3:
                peak_list.add(res_nums=[res_num1, res_num2, res_num1],
                              res_names=[res_name1, res_name2, res_name1],
                              spin_names=[name1, name2, name3],
                              shifts=[w1, w2, w3],
                              intensity=intensity)
            elif dim == 4:
                peak_list.add(
                    res_nums=[res_num1, res_num2, res_num1, res_num1],
                    res_names=[res_name1, res_name2, res_name1, res_name1],
                    spin_names=[name1, name2, name3, name4],
                    shifts=[w1, w2, w3, w4],
                    intensity=intensity)

        # If no intensity column, for example when reading spins from a spectrum list.
        elif int_col == None:
            warn(
                RelaxWarning(
                    "No peak intensity column was found, so the peak from the line %s will be added without an intensity value."
                    % line))

            # Add the assignment to the peak list object.
            if dim == 1:
                peak_list.add(res_nums=[res_num1],
                              res_names=[res_name1],
                              spin_names=[name1],
                              shifts=[w1])
            elif dim == 2:
                peak_list.add(res_nums=[res_num1, res_num2],
                              res_names=[res_name1, res_name2],
                              spin_names=[name1, name2],
                              shifts=[w1, w2])
            elif dim == 3:
                peak_list.add(res_nums=[res_num1, res_num2, res_num1],
                              res_names=[res_name1, res_name2, res_name1],
                              spin_names=[name1, name2, name3],
                              shifts=[w1, w2, w3])
            elif dim == 4:
                peak_list.add(
                    res_nums=[res_num1, res_num2, res_num1, res_num1],
                    res_names=[res_name1, res_name2, res_name1, res_name1],
                    spin_names=[name1, name2, name3, name4],
                    shifts=[w1, w2, w3, w4])
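A minimal, self-contained sketch of the Sparky assignment parsing above, using the same re.split() pattern. The label 'G23N-HN' is a hypothetical 2D assignment.

from re import split

# Split the 2D assignment into its dimensions, then split each
# dimension into alternating non-letter/letter groups.
assign1, assign2 = split('-', 'G23N-HN')
row1 = split('([a-zA-Z]+)', assign1)    # ['', 'G', '23', 'N', '']
name1 = row1[-2] + row1[-1]             # Spin name:      'N'
res_num1 = int(row1[-3])                # Residue number:  23
res_name1 = row1[-4]                    # Residue name:   'G'
print(res_name1, res_num1, name1)       # G 23 N

For the second dimension 'HN' there is no residue number, so the int() conversion raises an exception and the reader above falls back to the value from dimension 1.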
Code example #23
def unit_vectors(ave=True):
    """Extract the bond vectors from the loaded structures and store them in the spin container.

    @keyword ave:           A flag which if True will cause the average of all vectors to be calculated.
    @type ave:              bool
    """

    # Test if the current data pipe exists.
    check_pipe()

    # Test if interatomic data exists.
    if not exists_data():
        raise RelaxNoInteratomError

    # Print out.
    if ave:
        print("Averaging all vectors.")
    else:
        print("No averaging of the vectors.")

    # Loop over the interatomic data containers.
    no_vectors = True
    pos_info = False
    for interatom in interatomic_loop(skip_desel=False):
        # Get the spin info.
        spin1 = return_spin(spin_hash=interatom._spin_hash1)
        spin2 = return_spin(spin_hash=interatom._spin_hash2)

        # No positional information.
        if not hasattr(spin1, 'pos'):
            continue
        if not hasattr(spin2, 'pos'):
            continue

        # Positional information flag.
        pos_info = True

        # Both single positions.
        if is_float(spin1.pos[0], raise_error=False) and is_float(spin2.pos[0], raise_error=False):
            # The vector.
            vector_list = [spin2.pos - spin1.pos]

        # A single and multiple position pair.
        elif is_float(spin1.pos[0], raise_error=False) or is_float(spin2.pos[0], raise_error=False):
            # The first spin has multiple positions.
            if is_float(spin2.pos[0], raise_error=False):
                vector_list = []
                for i in range(len(spin1.pos)):
                    vector_list.append(spin2.pos - spin1.pos[i])

            # The second spin has multiple positions.
            else:
                vector_list = []
                for i in range(len(spin2.pos)):
                    vector_list.append(spin2.pos[i] - spin1.pos)

        # Both spins have multiple positions.
        else:
            # Non-matching number of positions.
            if len(spin1.pos) != len(spin2.pos):
                raise RelaxError("The spin '%s' consists of %s positions whereas the spin '%s' consists of %s - these numbers must match." % (interatom.spin_id1, len(spin1.pos), interatom.spin_id1, len(spin1.pos)))

            # Calculate all vectors.
            vector_list = []
            for i in range(len(spin1.pos)):
                # No structural information.
                if spin1.pos[i] is None or spin2.pos[i] is None:
                    warn(RelaxWarning("No structural information for state %i can be found between spins '%s' and '%s'." % (i, interatom.spin_id1, interatom.spin_id2)))
                    vector_list.append(None)

                # All data is present.
                else:
                    vector_list.append(spin2.pos[i] - spin1.pos[i])

        # Unit vectors.
        for i in range(len(vector_list)):
            # No vector.
            if vector_list[i] is None:
                continue

            # Normalisation factor.
            norm_factor = norm(vector_list[i])

            # Test for zero length.
            if norm_factor == 0.0:
                warn(RelaxZeroVectorWarning(spin_id1=interatom.spin_id1, spin_id2=interatom.spin_id2))

            # Calculate the normalised vector.
            else:
                vector_list[i] = vector_list[i] / norm_factor

        # Average.
        if ave:
            ave_vector = zeros(3, float64)
            count = 0
            for i in range(len(vector_list)):
                if vector_list[i] is not None:
                    ave_vector = ave_vector + vector_list[i]
                    count += 1
            vector_list = [ave_vector / count]

        # Convert to a single vector if needed.
        if len(vector_list) == 1:
            vector_list = vector_list[0]

        # Store the unit vector(s).
        setattr(interatom, 'vector', vector_list)

        # We have a vector!
        no_vectors = False

        # Printout.
        num = 1
        if not is_float(vector_list[0], raise_error=False):
            num = len(vector_list)
        plural = 's'
        if num == 1:
            plural = ''
        if spin1.name:
            spin1_str = spin1.name
        else:
            spin1_str = spin1.num
        if spin2.name:
            spin2_str = spin2.name
        else:
            spin2_str = spin2.num
        print("Calculated %s %s-%s unit vector%s between the spins '%s' and '%s'." % (num, spin1_str, spin2_str, plural, interatom.spin_id1, interatom.spin_id2))

    # Catch the problem of no positional information being present.
    if not pos_info:
        raise RelaxError("Positional information could not be found for any spins.")

    # Right, catch the problem of missing vectors to prevent massive user confusion!
    if no_vectors:
        raise RelaxError("No vectors could be extracted.")
Code example #24
def read_dist(file=None, dir=None, unit='meter', spin_id1_col=None, spin_id2_col=None, data_col=None, sep=None):
    """Set up the magnetic dipole-dipole interaction.

    @keyword file:          The name of the file to open.
    @type file:             str
    @keyword dir:           The directory containing the file (defaults to the current directory if None).
    @type dir:              str or None
    @keyword unit:          The measurement unit.  This can be either 'meter' or 'Angstrom'.
    @type unit:             str
    @keyword spin_id1_col:  The column containing the spin ID strings of the first spin.
    @type spin_id1_col:     int
    @keyword spin_id2_col:  The column containing the spin ID strings of the second spin.
    @type spin_id2_col:     int
    @keyword data_col:      The column containing the averaged distances, in the units given by the unit argument.
    @type data_col:         int or None
    @keyword sep:           The column separator which, if None, defaults to whitespace.
    @type sep:              str or None
    """

    # Check the units.
    if unit not in ['meter', 'Angstrom']:
        raise RelaxError("The measurement unit of '%s' must be one of 'meter' or 'Angstrom'." % unit)

    # Test if the current data pipe exists.
    check_pipe()

    # Test if sequence data exists.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Extract the data from the file, and clean it up.
    file_data = extract_data(file, dir, sep=sep)
    file_data = strip(file_data, comments=True)

    # Loop over the distance data.
    data = []
    for line in file_data:
        # Invalid columns.
        if spin_id1_col > len(line):
            warn(RelaxWarning("The data %s is invalid, no first spin ID column can be found." % line))
            continue
        if spin_id2_col > len(line):
            warn(RelaxWarning("The data %s is invalid, no second spin ID column can be found." % line))
            continue
        if data_col and data_col > len(line):
            warn(RelaxWarning("The data %s is invalid, no data column can be found." % line))
            continue

        # Unpack.
        spin_id1 = line[spin_id1_col-1]
        spin_id2 = line[spin_id2_col-1]
        ave_dist = None
        if data_col:
            ave_dist = line[data_col-1]

        # Convert and check the value.
        if ave_dist != None:
            try:
                ave_dist = float(ave_dist)
            except ValueError:
                warn(RelaxWarning("The averaged distance of '%s' from the line %s is invalid." % (ave_dist, line)))
                continue

        # Unit conversion.
        if unit == 'Angstrom' and ave_dist != None:
            ave_dist = ave_dist * 1e-10

        # Get the interatomic data container.
        spin1 = return_spin(spin_id=spin_id1)
        spin2 = return_spin(spin_id=spin_id2)
        interatom = return_interatom(spin_hash1=spin1._hash, spin_hash2=spin2._hash)

        # No container found, so create it.
        if interatom == None:
            interatom = create_interatom(spin_id1=spin_id1, spin_id2=spin_id2, verbose=True)

        # Store the averaged distance.
        interatom.r = ave_dist

        # Store the data for the printout.
        data.append([repr(interatom.spin_id1), repr(interatom.spin_id2), repr(ave_dist)])

    # No data, so fail!
    if not len(data):
        raise RelaxError("No data could be extracted from the file.")

    # Print out.
    print("The following averaged distances have been read:\n")
    write_data(out=sys.stdout, headings=["Spin_ID_1", "Spin_ID_2", "Ave_distance(meters)"], data=data)
Code example #25
File: mass.py  Project: tlinnet/relax
def pipe_centre_of_mass(atom_id=None,
                        model=None,
                        return_mass=False,
                        verbosity=1,
                        missing_error=True):
    """Calculate and return the centre of mass of the structures in the current data pipe.

    @keyword atom_id:       The molecule, residue, and atom identifier string.  Only atoms matching this selection will be used.
    @type atom_id:          str or None
    @keyword model:         Only use a specific model.
    @type model:            int or None
    @keyword return_mass:   A flag which if False will cause only the centre of mass to be returned, but if True will cause the centre of mass and the mass itself to be returned as a tuple.
    @type return_mass:      bool
    @keyword verbosity:     The amount of text to print out.  0 results in no printouts, 1 the full amount.
    @type verbosity:        int
    @keyword missing_error: A flag which if True will cause an error to be raised if structural data is absent.  Otherwise if False, a warning will be given and the CoM of [0, 0, 0] will be returned.
    @type missing_error:    bool
    @return:                The centre of mass vector, and additionally the mass.
    @rtype:                 list of 3 floats (or tuple of a list of 3 floats and one float)
    """

    # Test if a structure has been loaded.
    if missing_error:
        check_structure()
    else:
        if not check_structure(escalate=1):
            return [0.0, 0.0, 0.0]

    # The selection object.
    selection = cdp.structure.selection(atom_id=atom_id)

    # Loop over all atoms.
    coord = []
    element_list = []
    for mol_name, res_num, res_name, atom_num, atom_name, element, pos in cdp.structure.atom_loop(
            selection=selection,
            model_num=model,
            mol_name_flag=True,
            res_num_flag=True,
            res_name_flag=True,
            atom_num_flag=True,
            atom_name_flag=True,
            element_flag=True,
            pos_flag=True,
            ave=True):
        # Initialise the atom ID string.
        id = ''

        # Get the corresponding molecule container.
        if mol_name != None:
            id = id + '#' + mol_name

        # Get the corresponding residue container.
        if res_num != None:
            id = id + ':' + repr(res_num)

        # Get the corresponding spin container.
        if atom_num != None:
            id = id + '@' + repr(atom_num)

        # No element?
        if element == None:
            warn(
                RelaxWarning(
                    "Skipping the atom '%s' as the element type cannot be determined."
                    % id))
            continue

        # Store the position and element.
        coord.append(pos)
        element_list.append(element)

    # Calculate the CoM.
    com, mass = centre_of_mass(pos=coord,
                               elements=element_list,
                               verbosity=verbosity)

    # Return the centre of mass.
    if return_mass:
        return com, mass
    else:
        return com
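A minimal, self-contained sketch of the mass-weighted average that the centre_of_mass() call above computes from the collected positions and elements. The coordinates and the truncated mass table are hypothetical.

from numpy import array

masses = {'N': 14.003, 'C': 12.000}    # Hypothetical mass table.
coord = [array([0.0, 0.0, 0.0]), array([1.5, 0.0, 0.0])]
elements = ['N', 'C']

# CoM = sum(m_i * r_i) / sum(m_i).
total_mass = sum(masses[e] for e in elements)
com = sum(masses[e] * r for e, r in zip(elements, coord)) / total_mass
print(com)    # Approximately [0.692 0. 0.]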
Code example #26
File: api.py  Project: tlinnet/relax
    def grid_search(self,
                    lower=None,
                    upper=None,
                    inc=None,
                    scaling_matrix=None,
                    constraints=False,
                    verbosity=0,
                    sim_index=None):
        """Perform a grid search.

        @keyword lower:             The per-model lower bounds of the grid search, the length of which must equal the number of parameters in the model.
        @type lower:                list of lists of numbers
        @keyword upper:             The per-model upper bounds of the grid search, the length of which must equal the number of parameters in the model.
        @type upper:                list of lists of numbers
        @keyword inc:               The per-model increments for each dimension of the grid search space.  The number of elements in the array must equal the number of parameters in the model.
        @type inc:                  list of lists of int
        @keyword scaling_matrix:    The per-model list of diagonal and square scaling matrices.
        @type scaling_matrix:       list of numpy rank-2, float64 array or list of None
        @keyword constraints:       If True, constraints are applied during the grid search (eliminating parts of the grid).  If False, no constraints are used.
        @type constraints:          bool
        @keyword verbosity:         A flag specifying the amount of information to print.  The higher the value, the greater the verbosity.
        @type verbosity:            int
        @keyword sim_index:         The Monte Carlo simulation index.
        @type sim_index:            None or int
        """

        # Test if the Frame Order model has been set up.
        if not hasattr(cdp, 'model'):
            raise RelaxNoModelError('Frame Order')

        # Test if the pivot has been set.
        check_pivot()

        # The number of parameters.
        n = param_num()

        # Alias the single model grid bounds and increments.
        lower = lower[0]
        upper = upper[0]
        inc = inc[0]

        # Initialise the grid increments structures.
        grid = []
        """This structure is a list of lists.  The first dimension corresponds to the model
        parameter.  The second dimension are the grid node positions."""

        # Generate the grid.
        for i in range(n):
            # Fixed parameter.
            if inc[i] == None:
                grid.append(None)
                continue

            # Reset.
            dist_type = None
            end_point = True

            # Arccos grid from 0 to pi.
            if cdp.params[i] in ['ave_pos_beta', 'eigen_beta', 'axis_theta']:
                # Change the default increment numbers.
                if not isinstance(inc, list):
                    inc[i] = int(inc[i] / 2) + 1

                # The distribution type and end point.
                dist_type = 'acos'
                end_point = False

            # Append the grid row.
            row = grid_row(inc[i],
                           lower[i],
                           upper[i],
                           dist_type=dist_type,
                           end_point=end_point)
            grid.append(row)

            # Remove an inc if the end point has been removed.
            if not end_point:
                inc[i] -= 1

        # Total number of points.
        total_pts = 1
        for i in range(n):
            # Fixed parameter.
            if grid[i] == None:
                continue

            total_pts = total_pts * len(grid[i])

        # Check the number.
        max_pts = 50e6
        if total_pts > max_pts:
            raise RelaxError(
                "The total number of grid points '%s' exceeds the maximum of '%s'."
                % (total_pts, int(max_pts)))

        # Build the points array.
        pts = zeros((total_pts, n), float64)
        indices = zeros(n, int)
        for i in range(total_pts):
            # Loop over the dimensions.
            for j in range(n):
                # Fixed parameter.
                if grid[j] == None:
                    # Get the current parameter value.
                    pts[i, j] = getattr(
                        cdp, cdp.params[j]) / scaling_matrix[0][j, j]

                # Add the point coordinate.
                else:
                    pts[i, j] = grid[j][indices[j]] / scaling_matrix[0][j, j]

            # Increment the step positions.
            for j in range(n):
                if inc[j] != None and indices[j] < inc[j] - 1:
                    indices[j] += 1
                    break  # Exit so that the other step numbers are not incremented.
                else:
                    indices[j] = 0

        # Linear constraints.
        A, b = None, None
        if constraints:
            # Obtain the constraints.
            A, b = linear_constraints(scaling_matrix=scaling_matrix[0])

            # Constraint flag set but no constraints present.
            if A is None:
                if verbosity:
                    warn(
                        RelaxWarning(
                            "The '%s' model parameters are not constrained, turning the linear constraint algorithm off."
                            % cdp.model))
                constraints = False

        # The numeric integration information.
        if not hasattr(cdp, 'quad_int'):
            cdp.quad_int = False
        sobol_max_points, sobol_oversample = None, None
        if hasattr(cdp, 'sobol_max_points'):
            sobol_max_points = cdp.sobol_max_points
            sobol_oversample = cdp.sobol_oversample

        # Set up the data structures for the target function.
        param_vector, full_tensors, full_in_ref_frame, rdcs, rdc_err, rdc_weight, rdc_vect, rdc_const, pcs, pcs_err, pcs_weight, atomic_pos, temp, frq, paramag_centre, com, ave_pos_pivot, pivot, pivot_opt = target_fn_data_setup(
            sim_index=sim_index, verbosity=verbosity)

        # Get the Processor box singleton (it contains the Processor instance) and alias the Processor.
        processor_box = Processor_box()
        processor = processor_box.processor

        # Set up for multi-processor execution.
        if processor.processor_size() > 1:
            # Printout.
            print("Parallelised grid search.")
            print(
                "Randomising the grid points to equalise the time required for each grid subdivision.\n"
            )

            # Randomise the points.
            shuffle(pts)

        # Loop over each grid subdivision, with all points violating constraints being eliminated.
        for subdivision in grid_split_array(
                divisions=processor.processor_size(),
                points=pts,
                A=A,
                b=b,
                verbosity=verbosity):
            # Set up the memo for storage on the master.
            memo = Frame_order_memo(sim_index=sim_index,
                                    scaling_matrix=scaling_matrix[0])

            # Set up the command object to send to the slave and execute.
            command = Frame_order_grid_command(
                points=subdivision,
                scaling_matrix=scaling_matrix[0],
                sim_index=sim_index,
                model=cdp.model,
                param_vector=param_vector,
                full_tensors=full_tensors,
                full_in_ref_frame=full_in_ref_frame,
                rdcs=rdcs,
                rdc_err=rdc_err,
                rdc_weight=rdc_weight,
                rdc_vect=rdc_vect,
                rdc_const=rdc_const,
                pcs=pcs,
                pcs_err=pcs_err,
                pcs_weight=pcs_weight,
                atomic_pos=atomic_pos,
                temp=temp,
                frq=frq,
                paramag_centre=paramag_centre,
                com=com,
                ave_pos_pivot=ave_pos_pivot,
                pivot=pivot,
                pivot_opt=pivot_opt,
                sobol_max_points=sobol_max_points,
                sobol_oversample=sobol_oversample,
                verbosity=verbosity,
                quad_int=cdp.quad_int)

            # Add the slave command and memo to the processor queue.
            processor.add_to_queue(command, memo)

        # Execute the queued elements.
        processor.run_queue()
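A minimal, self-contained sketch of the odometer-style index increment used above to enumerate every grid point: the first non-exhausted dimension is incremented and all earlier dimensions are reset to zero.

# A hypothetical grid with 2 and 3 increments in its two dimensions.
inc = [2, 3]
indices = [0, 0]
for step in range(inc[0] * inc[1]):
    print(indices)    # [0, 0], [1, 0], [0, 1], [1, 1], [0, 2], [1, 2]
    for j in range(len(indices)):
        if indices[j] < inc[j] - 1:
            indices[j] += 1
            break    # Exit so the other step numbers are not incremented.
        else:
            indices[j] = 0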
Code example #27
def read_list(peak_list=None, file_data=None, int_col=None):
    """Extract the peak intensity information from the NMRView peak intensity file.

    @keyword peak_list: The peak list object to place all data into.
    @type peak_list:    lib.spectrum.objects.Peak_list instance
    @keyword file_data: The data extracted from the file converted into a list of lists.
    @type file_data:    list of lists of str
    @keyword int_col:   The column containing the peak intensity data. The default is 16 for intensities. Setting the int_col argument to 15 will use the volumes (or evolumes). For a non-standard formatted file, use a different value.
    @type int_col:      int
    @raises RelaxError: When the expected peak intensity is not a float.
    """

    # Assume the NMRView file has six header lines!
    num = 6
    print("Number of header lines: %s" % num)

    # Remove the header.
    file_data = file_data[num:]

    # Strip the data.
    file_data = strip(file_data)

    # The chemical shift columns.
    w2_col = 2
    w1_col = 9

    # The peak intensity column.
    if int_col == None:
        int_col = 16
    if int_col == 16:
        print('Using peak heights.')
    if int_col == 15:
        print('Using peak volumes (or evolumes).')

    # Loop over the file data.
    for line in file_data:
        # Unknown assignment.
        if line[1] == '{}':
            warn(
                RelaxWarning(
                    "The assignment '%s' is unknown, skipping this peak." %
                    line[1]))
            continue

        # The residue number
        res_num = ''
        try:
            res_num = line[1].strip('{')
            res_num = res_num.strip('}')
            res_num = res_num.split('.')
            res_num = int(res_num[0])
        except ValueError:
            raise RelaxError(
                "Improperly formatted NMRView file, cannot process the assignment '%s'."
                % line[1])

        # Nuclei names.
        name2 = ''
        if line[1] != '{}':
            name2 = line[1].strip('{')
            name2 = name2.strip('}')
            name2 = name2.split('.')
            name2 = name2[1]
        name1 = ''
        if line[8] != '{}':
            name1 = line[8].strip('{')
            name1 = name1.strip('}')
            name1 = name1.split('.')
            name1 = name1[1]

        # Chemical shifts.
        w1 = None
        w2 = None
        if w1_col != None:
            try:
                w1 = float(line[w1_col])
            except ValueError:
                raise RelaxError(
                    "The chemical shift from the line %s is invalid." % line)
        if w2_col != None:
            try:
                w2 = float(line[w2_col])
            except ValueError:
                raise RelaxError(
                    "The chemical shift from the line %s is invalid." % line)

        # Intensity.
        try:
            intensity = float(line[int_col])
        except ValueError:
            raise RelaxError("The peak intensity value " + repr(intensity) +
                             " from the line " + repr(line) + " is invalid.")

        # Add the assignment to the peak list object.
        peak_list.add(res_nums=[res_num, res_num],
                      spin_names=[name1, name2],
                      shifts=[w1, w2],
                      intensity=intensity)
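A minimal, self-contained sketch of the NMRView assignment parsing above. The cell '{70.N}' is a hypothetical assignment string.

# Strip the braces, then split into residue number and nucleus name.
cell = '{70.N}'
text = cell.strip('{').strip('}')    # '70.N'
parts = text.split('.')              # ['70', 'N']
res_num = int(parts[0])
name = parts[1]
print(res_num, name)                 # 70 N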
Code example #28
def read_seriestab(peak_list=None, file_data=None, int_col=None):
    """Extract the intensity information from the NMRPipe SeriesTab peak intensity file.

    @keyword peak_list: The peak list object to place all data into.
    @type peak_list:    lib.spectrum.objects.Peak_list instance
    @keyword file_data: The data extracted from the file converted into a list of lists.
    @type file_data:    list of lists of str
    @keyword int_col:   The column by which to multiply the peak intensity data (used by the SeriesTab intensity file format).
    @type int_col:      int
    @raises RelaxError: When the expected peak intensity is not a float.
    """

    # Set start variables.
    modeline = False
    mode = False
    varsline = False
    header = False

    # Loop over lines, to extract variables and find header size.
    line_nr = 0
    for line in file_data:
        if len(line) > 0:
            if line[0] == 'REMARK' and line[1] == 'Mode:':
                modeline = line[2:]
                mode = modeline[0]
            elif line[0] == 'VARS':
                varsline = line[1:]
            elif line[0] == '1':
                header = line_nr
                break
        line_nr += 1

    # Raise a RelaxError if the MODE line is not found.
    if not (modeline and mode):
        raise RelaxError("MODE not detected. Expecting line 2:\nREMARK Mode: Summation")

    # Raise a RelaxError if the VARS line is not found.
    if not (varsline):
        raise RelaxError("VARS not detected. Expecting line 8:\nVARS INDEX X_AXIS Y_AXIS X_PPM Y_PPM VOL ASS Z_A0")

    # Raise a RelaxError if the header size is not found.
    if not header:
        raise RelaxError("'1' not detected in start of line. Cannot determine header size.")

    # Find index of assignment ASS.
    ass_i = varsline.index('ASS')

    # If a list is given for int_col, make sure it contains only one unique value.
    if type(int_col) == list:
        # Make a set of all the intensity columns.
        set_int_col = list(set(int_col))

        # If there is only one intensity column.
        if len(set_int_col) == 1:
            int_col = set_int_col[0]
        else:
            warn(RelaxWarning("Multiple int_col values were supplied: '%s'.  Only a list of identical values is accepted."%(int_col)))

    # Find index of assignment HEIGHT or VOL.
    if int_col == None or type(int_col) == list:
        if "VOL" in varsline:
            int_type = "VOL"
        elif "HEIGHT" in varsline:
            int_type = "HEIGHT"
        else:
            raise RelaxError("The int_col is set to '%s'. Cannot determine which column to multiply with."%(int_col))
        warn(RelaxWarning("The int_col is set to '%s'. Looking for the '%s' index."%(int_col, int_type) ))
        int_col = varsline.index('%s'%int_type) + 1
        warn(RelaxWarning("The int_col is set to '%i' from the '%s' index."%(int_col, int_type) ))

    # Chemical shifts preparation.
    w1_col = None
    w2_col = None

    # Find the index of the chemical shift Y_PPM, which in Sparky is w1.
    w1_col = varsline.index('Y_PPM')

    # Find the index of the chemical shift X_PPM, which in Sparky is w2.
    w2_col = varsline.index('X_PPM')

    # Make a regular expression search for Z_A entries.
    Z_A = re.compile("Z_A*")
    spectra = list(filter(Z_A.search, varsline))

    # Find index of Z_A entries.
    spectra_i = []
    for y in spectra:
        spectra_i.append(varsline.index(y))

    # Remove the header.
    file_data = file_data[header:]

    # Loop over the file data.
    for line in file_data:
        # Skip non-assigned peaks.
        if line[ass_i] == '?-?':
            continue

        # Use a standard assignment if the column is 'None'.
        if line[ass_i] == 'None':
            new_line_ass_i = "A%sN-HN"%line[0]
            warn(RelaxWarning("Improperly formatted NMRPipe SeriesTab file. The spin assignment column 'ASS' is set to '%s'. Setting to %s." % (line[ass_i], new_line_ass_i)))
            line[ass_i] = new_line_ass_i

        # First split by the 2D separator.
        assign1, assign2 = re.split('-', line[ass_i])

        # The assignment of the first dimension.
        row1 = re.split('([a-zA-Z]+)', assign1)
        name1 = row1[-2] + row1[-1]

        # The assignment of the second dimension.
        row2 = re.split('([a-zA-Z]+)', assign2)
        name2 = row2[-2] + row2[-1]

        # Get the residue number for dimension 1.
        got_res_num1 = True
        try:
            res_num1 = int(row1[-3])
        except:
            got_res_num1 = False
            raise RelaxError("Improperly formatted NMRPipe SeriesTab file, cannot process the residue number for dimension 1 in assignment: %s." % line[0])

        # Get the residue number for dimension 2.
        try:
            res_num2 = int(row2[-3])
        except:
            # We cannot always expect dimension 2 to have residue number.
            if got_res_num1:
                res_num2 = res_num1
            else:
                res_num2 = None
                warn(RelaxWarning("Improperly formatted NMRPipe SeriesTab file, cannot process the residue number for dimension 2 in assignment: %s. Setting residue number to %s." % (line[0], res_num2)))

        # The residue name for dimension 1.
        got_res_name1 = True
        try:
            res_name1 = row1[-4]
        except:
            got_res_name1 = False
            res_name1 = None
            warn(RelaxWarning("Improperly formatted NMRPipe SeriesTab file, cannot process the residue name for dimension 1 in assignment: %s. Setting residue name to %s." % (line[0], res_name1)))

        # The residue name for dimension 2.
        try:
            res_name2 = row2[-4]
        except:
            # We cannot always expect dimension 2 to have residue name.
            if got_res_name1:
                res_name2 = res_name1
            else:
                res_name2 = None
                warn(RelaxWarning("Improperly formatted NMRPipe SeriesTab file, cannot process the residue name for dimension 2 in assignment: %s. Setting residue name to %s." % (line[0], res_name2)))

        # Get the intensities.
        try:
            # Loop over the spectra.
            intensities = []
            for i in range(len(spectra)):
                # The intensity is given by column multiplication.
                intensities.append( float(line[spectra_i[i]]) * float(line[int_col-1]) )

        # Bad data.
        except ValueError:
            raise RelaxError("The peak intensity value %s from the line %s is invalid." % (intensity, line))

        # Chemical shifts.
        w1 = None
        w2 = None
        if w1_col != None:
            try:
                w1 = float(line[w1_col])
            except ValueError:
                raise RelaxError("The chemical shift from the line %s is invalid." % line)
        if w2_col != None:
            try:
                w2 = float(line[w2_col])
            except ValueError:
                raise RelaxError("The chemical shift from the line %s is invalid." % line)

        # Add the assignment to the peak list object.
        peak_list.add(res_nums=[res_num1, res_num2], res_names=[res_name1, res_name2], spin_names=[name1, name2], shifts=[w1, w2], intensity=intensities, intensity_name=spectra)
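A minimal, self-contained sketch of the Z_A* regular expression filtering used above to locate the per-spectrum columns. The VARS line content is hypothetical.

import re

varsline = ['INDEX', 'X_PPM', 'Y_PPM', 'VOL', 'ASS', 'Z_A0', 'Z_A1']
Z_A = re.compile("Z_A*")
spectra = list(filter(Z_A.search, varsline))
spectra_i = [varsline.index(y) for y in spectra]
print(spectra, spectra_i)    # ['Z_A0', 'Z_A1'] [5, 6]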
Code example #29
File: simulation.py  Project: bopopescu/relax
def uniform_distribution(file=None,
                         model=None,
                         structure=None,
                         parameters={},
                         eigenframe=None,
                         pivot=None,
                         atom_id=None,
                         total=1000,
                         max_rotations=100000):
    """Uniform distribution of the frame order motions.

    @keyword file:          The opened and writable file object to place the PDB models of the distribution into.
    @type file:             writable file object
    @keyword structure:     The internal structural object containing the domain to distribute as a single model.
    @type structure:        lib.structure.internal.object.Internal instance
    @keyword model:         The frame order model to distribute.
    @type model:            str
    @keyword parameters:    The dictionary of model parameter values.  The key is the parameter name and the value is the value.
    @type parameters:       dict of float
    @keyword eigenframe:    The full 3D eigenframe of the frame order motions.
    @type eigenframe:       numpy rank-2, 3D float64 array
    @keyword pivot:         The list of pivot points of the frame order motions.
    @type pivot:            numpy rank-2 (N, 3) float64 array
    @keyword atom_id:       The atom ID string for the atoms in the structure to rotate - i.e. the moving domain.
    @type atom_id:          None or str
    @keyword total:         The total number of states in the distribution.
    @type total:            int
    @keyword max_rotations: The maximum number of rotations used to generate the distribution.  This prevents the function from running indefinitely when a frame order amplitude parameter is close to zero, as the fraction of random rotations falling within the distribution then approaches zero.
    @type max_rotations:    int
    """

    # Check the structural object.
    if structure.num_models() > 1:
        raise RelaxError("Only a single model is supported.")

    # Set the model number.
    structure.set_model(model_orig=None, model_new=1)

    # Generate the internal structural selection object.
    selection = structure.selection(atom_id)

    # The initial states and motional limits.
    num_states = len(pivot)
    states = zeros((num_states, 3, 3), float64)
    theta_max = []
    sigma_max = []
    for i in range(num_states):
        states[i] = eye(3)
        theta_max.append(None)
        sigma_max.append(None)

    # Initialise the rotation matrix data structures.
    R = eye(3, dtype=float64)

    # Axis permutations.
    perm = [None]
    if model == MODEL_DOUBLE_ROTOR:
        perm = [[2, 0, 1], [1, 2, 0]]
        perm_rev = [[1, 2, 0], [2, 0, 1]]

    # The maximum cone opening angles (isotropic cones).
    if 'cone_theta' in parameters:
        theta_max[0] = parameters['cone_theta']

    # The maximum cone opening angles (pseudo-ellipse).
    theta_x = None
    theta_y = None
    if 'cone_theta_x' in parameters:
        theta_x = parameters['cone_theta_x']
        theta_y = parameters['cone_theta_y']

    # The maximum torsion angle.
    if 'cone_sigma_max' in parameters:
        sigma_max[0] = parameters['cone_sigma_max']
    elif 'free rotor' in model:
        sigma_max[0] = pi

    # The second torsion angle.
    if 'cone_sigma_max_2' in parameters:
        sigma_max[1] = parameters['cone_sigma_max_2']

    # Printout.
    print("\nGenerating the distribution:")

    # Distribution.
    current_state = 1
    num = -1
    while True:
        # The total number of rotations.
        num += 1

        # End.
        if current_state == total:
            break
        if num >= max_rotations:
            sys.stdout.write('\n')
            warn(
                RelaxWarning(
                    "Maximum number of rotations encountered - the distribution only contains %i states."
                    % current_state))
            break

        # Loop over each state, or motional mode.
        inside = True
        for i in range(num_states):
            # The random rotation matrix.
            R_random_hypersphere(R)

            # Shift the current state.
            states[i] = dot(R, states[i])

            # Rotation in the eigenframe.
            R_eigen = dot(transpose(eigenframe), dot(states[i], eigenframe))

            # Axis permutation to shift each rotation axis to Z.
            if perm[i] != None:
                for j in range(3):
                    R_eigen[:, j] = R_eigen[perm[i], j]
                for j in range(3):
                    R_eigen[j, :] = R_eigen[j, perm[i]]

            # The angles.
            phi, theta, sigma = R_to_tilt_torsion(R_eigen)
            sigma = wrap_angles(sigma, -pi, pi)

            # Determine theta_max for the pseudo-ellipse models.
            if theta_x != None:
                theta_max[i] = 1.0 / sqrt((cos(phi) / theta_x)**2 +
                                          (sin(phi) / theta_y)**2)

            # The cone opening angle is outside of the limit.
            if theta_max[i] != None:
                if theta > theta_max[i]:
                    inside = False

            # No tilt component.
            else:
                theta = 0.0
                phi = 0.0

            # The torsion angle is outside of the limits.
            if sigma_max[i] != None:
                if sigma > sigma_max[i]:
                    inside = False
                elif sigma < -sigma_max[i]:
                    inside = False
            else:
                sigma = 0.0

            # Reconstruct the rotation matrix, in the eigenframe, without sigma.
            tilt_torsion_to_R(phi, theta, sigma, R_eigen)

            # Reverse axis permutation to shift each rotation z-axis back.
            if perm[i] != None:
                for j in range(3):
                    R_eigen[:, j] = R_eigen[perm_rev[i], j]
                for j in range(3):
                    R_eigen[j, :] = R_eigen[j, perm_rev[i]]

            # Rotate back out of the eigenframe.
            states[i] = dot(eigenframe, dot(R_eigen, transpose(eigenframe)))

        # The state is outside of the distribution.
        if not inside:
            continue

        # Progress.
        sys.stdout.write('.')
        sys.stdout.flush()

        # Increment the snapshot number.
        current_state += 1

        # Copy the original structural data.
        structure.add_model(model=current_state, coords_from=1)

        # Rotate the model.
        for i in range(num_states):
            structure.rotate(R=states[i],
                             origin=pivot[i],
                             model=current_state,
                             selection=selection)

    # Save the result.
    structure.write_pdb(file=file)
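A minimal, self-contained sketch of the rejection sampling performed above: random states are drawn and only those whose tilt angle lies inside the cone are kept. The uniform tilt draw is a simplified stand-in for R_random_hypersphere() and the tilt-torsion extraction.

from math import pi
from random import uniform

theta_max = 0.5    # Hypothetical cone opening half-angle (rad).
accepted, num, total, max_rotations = 0, 0, 100, 100000
while accepted < total and num < max_rotations:
    num += 1
    theta = uniform(0.0, pi)    # Simplified random tilt angle.
    if theta > theta_max:
        continue                # Outside the cone - reject this state.
    accepted += 1
print("Accepted %i of %i random states." % (accepted, num))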
Code example #30
File: spectrum.py  Project: tlinnet/relax
def sn_ratio_deselection(ratio=10.0,
                         operation='<',
                         all_sn=False,
                         select=False,
                         verbose=True):
    """Use user function deselect.spin on spins with signal to noise ratio higher or lower than ratio.  The operation determines the selection operation.

    @keyword ratio:         The ratio to compare to.
    @type ratio:            float
    @keyword operation:     The comparison operation by which to select the spins, evaluated as operation(sn_ratio, ratio), where operation can be one of:  '<', '<=', '>', '>=', '==', '!='.
    @type operation:        str
    @keyword all_sn:        A flag specifying if all the signal to noise ratios per spin should match the comparison operator, or if just a single comparison match is enough.
    @type all_sn:           bool
    @keyword select:        A flag specifying if the user function select.spin should be used instead.
    @type select:           bool
    @keyword verbose:       A flag which if True will print additional information out.
    @type verbose:          bool
    """

    # Tests.
    check_pipe()
    check_mol_res_spin_data()

    # Test if spectra have been loaded.
    if not hasattr(cdp, 'spectrum_ids'):
        raise RelaxError("No spectra have been loaded.")

    # Map the comparison operation string onto the corresponding operator function.
    operators = {
        '<': operator.lt,
        '<=': operator.le,
        '>': operator.gt,
        '>=': operator.ge,
        '==': operator.eq,
        '!=': operator.ne,
    }

    # Reject operations outside of the allowed list.
    if operation not in operators:
        raise RelaxError(
            "The comparison operation '%s' does not belong to the allowed list: ['<', '<=', '>', '>=', '==', '!=']."
            % operation)

    # Assign the comparison operator.
    op = operators[operation]

    # Assign the text for the printout.
    if all_sn:
        text_all_sn = "all"
    else:
        text_all_sn = "any"

    if select:
        text_sel = "selected"
        sel_func = sel_spin
    else:
        text_sel = "deselected"
        sel_func = desel_spin

    # Printout.
    section(file=sys.stdout,
            text="Signal to noise ratio comparison selection",
            prespace=1,
            postspace=0)
    print("For the comparison test: S/N %s %1.1f" % (operation, ratio))

    # Loop over the spins.
    spin_ids = []
    for spin, spin_id in spin_loop(return_id=True):
        # Skip spins missing the sn_ratio structure.
        if not hasattr(spin, 'sn_ratio'):
            # Skip the warning for deselected spins.
            if spin.select:
                warn(
                    RelaxWarning(
                        "Spin '%s' does not contain signal-to-noise calculations - first execute the spectrum.sn_ratio user function. This spin is skipped."
                        % spin_id))
            continue

        # Collect the spectrum IDs and sort them alphanumerically.
        ids = sort_filenames(filenames=list(spin.peak_intensity), rev=False)

        # Gather the signal-to-noise values in sorted ID order, as a numpy array.
        sn_val = asarray([spin.sn_ratio[spectrum_id] for spectrum_id in ids])

        # Make the comparison for the whole array.
        test_arr = op(sn_val, ratio)

        # Determine how the test should evaluate.
        if all_sn:
            test = test_arr.all()
        else:
            test = test_arr.any()

        # Make a numpy array of the IDs and extract those which passed the test.
        ids_arr = asarray(ids)
        ids_test_arr = ids_arr[test_arr]

        # Invert the boolean array to extract the IDs which failed the test.
        test_arr_inv = ~test_arr
        ids_test_arr_inv = ids_arr[test_arr_inv]

        # Printout.
        if verbose:
            subsection(
                file=sys.stdout,
                text="Signal to noise ratio comparison for spin ID '%s'" %
                spin_id,
                prespace=1,
                postspace=0)
            print("The following spectrum IDs evaluated to True: %s" % ids_test_arr)
            print("The following spectrum IDs evaluated to False: %s" %
                  ids_test_arr_inv)
            print(
                "The '%s' comparison mode was used for the evaluation, which evaluated to: %s"
                % (text_all_sn, test))
            if test:
                print("The spin ID '%s' is %s." % (spin_id, text_sel))
            else:
                print("The spin ID '%s' is skipped." % spin_id)

        # If the test evaluates to True, perform the selection action.
        if test:
            # Select or deselect the spin.
            sel_func(spin_id=spin_id)

            # Store the spin ID for the final printout.
            spin_ids.append(spin_id)

    # Summary printout.
    if verbose:
        if len(spin_ids) > 0:
            subsection(
                file=sys.stdout,
                text=
                "For all of the S/N comparison tests, the following spin IDs were %s"
                % text_sel,
                prespace=1,
                postspace=0)
            print(spin_ids)
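A minimal usage sketch follows.  It assumes a relax data pipe with peak intensities loaded and the spectrum.sn_ratio user function already executed, so that each spin carries an sn_ratio dictionary:

# Deselect every spin for which any single S/N value falls below 10.
sn_ratio_deselection(ratio=10.0, operation='<', all_sn=False, select=False)

# Or select the spins where all S/N values exceed 50.
sn_ratio_deselection(ratio=50.0, operation='>', all_sn=True, select=True)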