Example #1
def software(name=None, version=None, url=None, vendor_name=None, cite_ids=None, tasks=None):
    """Select by name the software used in the analysis.

    @param name:            The name of the software program.
    @type name:             str
    @keyword version:       The program version.
    @type version:          None or str
    @keyword url:           The program's URL.
    @type url:              None or str
    @keyword vendor_name:   The name of the company or person behind the program.
    @type vendor_name:      str
    @keyword cite_ids:      The citation ID numbers.
    @type cite_ids:         None or str
    @keyword tasks:         The tasks performed by the program.
    @type tasks:            list of str
    """

    # Test if the current pipe exists.
    check_pipe()

    # Set up the experimental info data container, if needed.
    if not hasattr(cdp, 'exp_info'):
        cdp.exp_info = ExpInfo()

    # Place the data in the container.
    cdp.exp_info.software_setup(name=name, version=version, url=url, vendor_name=vendor_name, cite_ids=cite_ids, tasks=tasks)
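
The function above creates the cdp.exp_info container lazily, only when the first piece of experimental metadata is stored. Below is a minimal standalone sketch of that idiom; the FakePipe and FakeExpInfo classes and all argument values are hypothetical stand-ins, not relax code.

# Hypothetical stand-ins for the relax current data pipe (cdp) and ExpInfo container.
class FakeExpInfo:
    def __init__(self):
        self.software = []

    def software_setup(self, name=None, version=None, **kwargs):
        # Store the software details as a simple record.
        self.software.append(dict(name=name, version=version, **kwargs))

class FakePipe:
    pass

cdp = FakePipe()

def software(name=None, version=None, **kwargs):
    # Create the experimental info container only when it is first needed.
    if not hasattr(cdp, 'exp_info'):
        cdp.exp_info = FakeExpInfo()
    cdp.exp_info.software_setup(name=name, version=version, **kwargs)

software(name="NMRPipe", version="2.1", tasks=["processing"])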
Example #2
def bmrb_write_citations(star):
    """Generate the Citations saveframe records.

    @param star:        The NMR-STAR dictionary object.
    @type star:         NMR_STAR instance
    """

    # Test if the current pipe exists.
    check_pipe()

    # Loop over the citations.
    if hasattr(cdp, 'exp_info') and hasattr(cdp.exp_info, 'citations'):
        for citations in cdp.exp_info.citations:
            # Rearrange the author list.
            author_given_name = []
            author_family_name = []
            author_first_init = []
            author_mid_init = []
            author_family_title = []
            for i in range(len(citations.authors)):
                author_given_name.append(citations.authors[i][0])
                author_family_name.append(citations.authors[i][1])
                author_first_init.append(citations.authors[i][2])
                author_mid_init.append(citations.authors[i][3])
                author_family_title.append(None)

            # Add the citation.
            star.citations.add(citation_label=citations.cite_id, author_given_name=author_given_name, author_family_name=author_family_name, author_first_init=author_first_init, author_mid_init=author_mid_init, author_family_title=author_family_title, doi=citations.doi, pubmed_id=citations.pubmed_id, full_citation=citations.full_citation, title=citations.title, status=citations.status, type=citations.type, journal_abbrev=citations.journal_abbrev, journal_full=citations.journal_full, volume=citations.volume, issue=citations.issue, page_first=citations.page_first, page_last=citations.page_last, year=citations.year)
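
The loop above unpacks each author tuple, (given name, family name, first initial, middle initials), into parallel per-field lists before passing them to star.citations.add(). A stripped-down, runnable sketch of that rearrangement with made-up author data:

# Made-up author tuples: (given name, family name, first initial, middle initials).
authors = [("Ann", "Smith", "A.", None), ("Ben", "Jones", "B.", "C.")]

author_given_name = [a[0] for a in authors]
author_family_name = [a[1] for a in authors]
author_first_init = [a[2] for a in authors]
author_mid_init = [a[3] for a in authors]
author_family_title = [None] * len(authors)

print(author_given_name)   # ['Ann', 'Ben']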
Example #3
def bmrb_write_software(star):
    """Generate the Software saveframe records.

    @param star:        The NMR-STAR dictionary object.
    @type star:         NMR_STAR instance
    @return:            A list of BMRB software IDs and a list of software labels.
    @rtype:             tuple of list of int and list of str
    """

    # Test if the current pipe exists.
    check_pipe()

    # Loop over the software.
    software_ids = []
    software_labels = []
    if hasattr(cdp, 'exp_info') and hasattr(cdp.exp_info, 'software'):
        for software in cdp.exp_info.software:
            # Get the citation ID numbers.
            cite_id_nums = []
            for cite in software.cite_ids:
                cite_id_nums.append(cdp.exp_info.get_cite_id_num(cite))

            # The program info.
            id = star.software.add(name=software.software_name, version=software.version, vendor_name=software.vendor_name, vendor_eaddress=software.url, task=software.tasks, cite_ids=cite_id_nums)

            # Append the software info.
            software_ids.append(id)
            software_labels.append(software.software_name)

    # relax cannot be the only program used!
    else:
        raise RelaxError("relax cannot be the only program used in the analysis - spectral analysis programs, etc. must also have been used.  Please use the relevant BMRB user functions to specify these.")

    # Return the software info.
    return software_ids, software_labels
Example #4
def select_model(model=None, spin_id=None):
    """Function for the selection of a preset model-free model.

    @param model:   The name of the model.
    @type model:    str
    @param spin_id: The spin identification string.
    @type spin_id:  str
    """

    # Test if the current data pipe exists.
    check_pipe()

    # Test if the pipe type is 'mf'.
    function_type = pipes.get_type()
    if function_type != "mf":
        raise RelaxFuncSetupError(specific_analyses.get_string(function_type))

    # Test if sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Obtain the model info.
    equation, params = model_map(model)

    # Set up the model.
    model_setup(model, equation, params, spin_id)
Example #5
File: uf.py Project: tlinnet/relax
def sobol_setup(max_num=200, oversample=100):
    """Oversampling setup for the quasi-random Sobol' sequence used for numerical PCS integration.

    @keyword max_num:       The maximum number of integration points N.
    @type max_num:          int
    @keyword oversample:    The oversampling factor Ov used for the N * Ov * 10**M, where M is the number of dimensions or torsion-tilt angles for the system.
    @type oversample:       int
    """

    # Test if the current data pipe exists.
    check_pipe()

    # Throw a warning to the user if not enough points are being used.
    if max_num < 200:
        warn(
            RelaxWarning(
                "To obtain reliable results in a frame order analysis, the maximum number of integration points should be greater than 200."
            ))

    # Store the values.
    cdp.sobol_max_points = max_num
    cdp.sobol_oversample = oversample

    # Count the number of Sobol' points for the current model.
    count_sobol_points()
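
As the docstring states, the Sobol' sequence is built from N * Ov * 10**M points, so even the defaults imply large sequences for high-dimensional models. A small arithmetic sketch, where the value of M is assumed for illustration:

# Illustration of the N * Ov * 10**M oversampling formula from the docstring.
max_num = 200       # N, the maximum number of integration points.
oversample = 100    # Ov, the oversampling factor.
M = 3               # Assumed number of torsion-tilt angles for the system.
total = max_num * oversample * 10**M
print(total)        # 20000000 points in the oversampled Sobol' sequence.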
Example #6
def write(ri_id=None, file=None, dir=None, bc=False, force=False):
    """Write relaxation data to a file.

    @keyword ri_id: The relaxation data ID string.
    @type ri_id:    str
    @keyword file:  The name of the file to create.
    @type file:     str
    @keyword dir:   The directory to write to.
    @type dir:      str or None
    @keyword bc:    A flag which if True will cause the back calculated relaxation data to be written.
    @type bc:       bool
    @keyword force: A flag which if True will cause any pre-existing file to be overwritten.
    @type force:    bool
    """

    # Test if the current pipe exists.
    check_pipe()

    # Test if the sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Test if data exists.
    if not hasattr(cdp, 'ri_ids') or ri_id not in cdp.ri_ids:
        raise RelaxNoRiError(ri_id)

    # Create the file name if none is given.
    if file == None:
        file = ri_id + ".out"

    # Write the data.
    value.write(param=ri_id, file=file, dir=dir, bc=bc, force=force, return_value=return_value, return_data_desc=return_data_desc)
Example #7
def peak_intensity_type(ri_id=None, type=None):
    """Set the type of intensity measured for the peaks.

    @keyword ri_id: The relaxation data ID string.
    @type ri_id:    str
    @keyword type:  The peak intensity type, one of 'height' or 'volume'.
    @type type:     str
    """

    # Test if the current pipe exists.
    check_pipe()

    # Test if sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Test if data exists.
    if not hasattr(cdp, 'ri_ids') or ri_id not in cdp.ri_ids:
        raise RelaxNoRiError(ri_id)

    # Check the values, and warn if not in the list.
    valid = ['height', 'volume']
    if type not in valid:
        raise RelaxError("The '%s' peak intensity type is unknown.  Please select one of %s." % (type, valid))

    # Set up the experimental info data container, if needed.
    if not hasattr(cdp, 'exp_info'):
        cdp.exp_info = ExpInfo()

    # Store the type.
    cdp.exp_info.setup_peak_intensity_type(ri_id, type)
Example #8
def type(ri_id=None, ri_type=None):
    """Set or reset the frequency associated with the ID.

    @param ri_id:   The relaxation data ID string.
    @type ri_id:    str
    @param ri_type: The relaxation data type, ie 'R1', 'R2', or 'NOE'.
    @type ri_type:  str
    """

    # Test if the current data pipe exists.
    check_pipe()

    # Test if sequence data exists.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Test if data exists.
    if not hasattr(cdp, 'ri_ids') or ri_id not in cdp.ri_ids:
        raise RelaxNoRiError(ri_id)

    # Check if the type is valid.
    if ri_type not in VALID_TYPES:
        raise RelaxError("The relaxation data type '%s' must be one of %s." % (ri_type, VALID_TYPES))

    # Initialise if needed.
    if not hasattr(cdp, 'ri_type'):
        cdp.ri_type = {}

    # Set the type.
    cdp.ri_type[ri_id] = ri_type
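
The function follows a validate-then-store pattern against the module-level VALID_TYPES constant. A standalone, runnable sketch of the same idiom, with a hypothetical VALID_TYPES list matching the types named in the docstring:

# Hypothetical stand-in for the module-level VALID_TYPES constant.
VALID_TYPES = ['R1', 'R2', 'NOE']

ri_type = {}

def set_ri_type(ri_id, value):
    # Reject unknown relaxation data types before storing anything.
    if value not in VALID_TYPES:
        raise ValueError("The relaxation data type '%s' must be one of %s." % (value, VALID_TYPES))
    ri_type[ri_id] = value

set_ri_type('R1_600', 'R1')
print(ri_type)   # {'R1_600': 'R1'}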
Example #9
def temp_control(ri_id=None, method=None):
    """Set the temperature control method.

    @keyword ri_id:     The relaxation data ID string.
    @type ri_id:        str
    @keyword method:    The temperature control method.
    @type method:       str
    """

    # Test if the current pipe exists.
    check_pipe()

    # Test if sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Test if data exists.
    if not hasattr(cdp, 'ri_ids') or ri_id not in cdp.ri_ids:
        raise RelaxNoRiError(ri_id)

    # Check the values, and warn if not in the list.
    valid = ['single scan interleaving', 'temperature compensation block', 'single scan interleaving and temperature compensation block', 'single fid interleaving', 'single experiment interleaving', 'no temperature control applied']
    if method not in valid:
        raise RelaxError("The '%s' method is unknown.  Please select one of %s." % (method, valid))

    # Set up the experimental info data container, if needed.
    if not hasattr(cdp, 'exp_info'):
        cdp.exp_info = ExpInfo()

    # Store the method.
    cdp.exp_info.temp_control_setup(ri_id, method)
Example #10
def select_model(model=None):
    """Select the N-state model type.

    @param model:   The N-state model type.  Can be one of '2-domain', 'population', or 'fixed'.
    @type model:    str
    """

    # Test if the current data pipe exists.
    check_pipe()

    # Test if the model name exists.
    if not model in ['2-domain', 'population', 'fixed']:
        raise RelaxError("The model name " + repr(model) + " is invalid.")

    # Test if the model is already set up.
    if hasattr(cdp, 'model'):
        warn(RelaxWarning("The N-state model has already been set up.  Switching from model '%s' to '%s'." % (cdp.model, model)))

    # Set the model
    cdp.model = model

    # Initialise the list of model parameters.
    cdp.params = []

    # Update the model.
    update_model()
Example #11
def temp_calibration(ri_id=None, method=None):
    """Set the temperature calibration method.

    @keyword ri_id:     The relaxation data ID string.
    @type ri_id:        str
    @keyword method:    The temperature calibration method.
    @type method:       str
    """

    # Test if the current pipe exists.
    check_pipe()

    # Test if sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Test if data exists.
    if not hasattr(cdp, 'ri_ids') or ri_id not in cdp.ri_ids:
        raise RelaxNoRiError(ri_id)

    # Check the values, and warn if not in the list.
    valid = ['methanol', 'monoethylene glycol', 'no calibration applied']
    if method not in valid:
        warn(RelaxWarning("The '%s' method is unknown.  Please try to use one of %s." % (method, valid)))

    # Set up the experimental info data container, if needed.
    if not hasattr(cdp, 'exp_info'):
        cdp.exp_info = ExpInfo()

    # Store the method.
    cdp.exp_info.temp_calibration_setup(ri_id, method)
Example #13
def cartoon():
    """Apply the PyMOL cartoon style and colour by secondary structure."""

    # Test if the current data pipe exists.
    check_pipe()

    # Test for the structure.
    if not hasattr(cdp, 'structure'):
        raise RelaxNoPdbError

    # Loop over the PDB files.
    open_files = []
    for model in cdp.structure.structural_data:
        for mol in model.mol:
            # Identifier.
            pdb_file = mol.file_name
            if mol.file_path:
                pdb_file = mol.file_path + sep + pdb_file
            id = file_root(pdb_file)

            # Already loaded.
            if pdb_file in open_files:
                continue

            # Add to the open file list.
            open_files.append(pdb_file)

            # Hide everything.
            pymol_obj.exec_cmd("cmd.hide('everything'," + repr(id) + ")")

            # Show the cartoon style.
            pymol_obj.exec_cmd("cmd.show('cartoon'," + repr(id) + ")")

            # Colour by secondary structure.
            pymol_obj.exec_cmd("util.cbss(" + repr(id) + ", 'red', 'yellow', 'green')")
Example #14
def set_temperature(id=None, temp=None):
    """Set the experimental temperature.

    @keyword id:    The experiment ID string (allowing for multiple experiments per data pipe).
    @type id:       str
    @keyword temp:  The temperature in Kelvin.
    @type temp:     float
    """

    # Test if the current data pipe exists.
    check_pipe()

    # Set up the dictionary data structure if it doesn't exist yet.
    if not hasattr(cdp, 'temperature'):
        cdp.temperature = {}

    # Convert to a float.
    temp = float(temp)

    # Test that the temperature has not already been set to a different value.
    if id in cdp.temperature and cdp.temperature[id] != temp:
        raise RelaxError("The temperature for the experiment '%s' has already been set to %s K." % (id, cdp.temperature[id]))

    # Set the temperature.
    cdp.temperature[id] = temp
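
A standalone sketch of the same guarded dictionary storage, using a plain dict in place of cdp.temperature, to show that re-setting an ID to the same value is allowed while a conflicting value raises an error:

# Plain-dict version of the guarded temperature storage above (IDs are hypothetical).
temperature = {}

def set_temp(id=None, temp=None):
    temp = float(temp)
    # Allow identical re-sets, but refuse to silently overwrite a different value.
    if id in temperature and temperature[id] != temp:
        raise ValueError("The temperature for '%s' is already set to %s K." % (id, temperature[id]))
    temperature[id] = temp

set_temp(id='NOE_600', temp=298.0)
set_temp(id='NOE_600', temp=298.0)   # Fine, same value.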
Example #15
def exists_data(pipe=None):
    """Determine if any interatomic data exists.

    @keyword pipe:      The data pipe in which the interatomic data will be checked for.
    @type pipe:         str
    @return:            The answer to the question about the existence of data.
    @rtype:             bool
    """

    # The current data pipe.
    if pipe == None:
        pipe = pipes.cdp_name()

    # Test the data pipe.
    check_pipe(pipe)

    # Get the data pipe.
    dp = pipes.get_pipe(pipe)

    # The interatomic data structure is empty.
    if dp.interatomic.is_empty():
        return False

    # Otherwise.
    return True
Example #16
def delete_frequencies(id=None):
    """Delete the spectrometer frequency corresponding to the experiment ID.

    @keyword id:    The experiment ID string.
    @type id:       str
    """

    # Checks.
    check_pipe()
    check_frequency(id=id)

    # Delete the frequency.
    frq = cdp.spectrometer_frq[id]
    del cdp.spectrometer_frq[id]

    # Update the structures as needed.
    if frq in cdp.spectrometer_frq_list and frq not in list(cdp.spectrometer_frq.values()):
        cdp.spectrometer_frq_list.pop(cdp.spectrometer_frq_list.index(frq))
    cdp.spectrometer_frq_count = len(cdp.spectrometer_frq_list)

    # Cleanup.
    if len(cdp.spectrometer_frq) == 0:
        del cdp.spectrometer_frq
        del cdp.spectrometer_frq_list
        del cdp.spectrometer_frq_count
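
The bookkeeping above keeps three structures consistent: the per-ID frequency dictionary, the list of unique frequencies, and the frequency count. A runnable sketch of that synchronisation with plain Python containers and hypothetical IDs:

# Hypothetical frequency data mirroring cdp.spectrometer_frq and friends.
spectrometer_frq = {'R1_600': 600.0e6, 'R2_600': 600.0e6, 'NOE_500': 500.0e6}
spectrometer_frq_list = [600.0e6, 500.0e6]

# Delete one ID, then drop its frequency from the unique list only if no other ID uses it.
frq = spectrometer_frq.pop('NOE_500')
if frq in spectrometer_frq_list and frq not in spectrometer_frq.values():
    spectrometer_frq_list.remove(frq)
spectrometer_frq_count = len(spectrometer_frq_list)

print(spectrometer_frq_list, spectrometer_frq_count)   # [600000000.0] 1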
Example #19
def ref_domain(ref=None):
    """Set the reference domain for the '2-domain' N-state model.

    @param ref: The reference domain.
    @type ref:  str
    """

    # Test if the current data pipe exists.
    check_pipe()

    # Test if the model is set up.
    if not hasattr(cdp, 'model'):
        raise RelaxNoModelError('N-state')

    # Test that the correct model is set.
    if cdp.model != '2-domain':
        raise RelaxError(
            "Setting the reference domain is only possible for the '2-domain' N-state model."
        )

    # Test if the reference domain exists.
    exists = False
    for tensor_cont in cdp.align_tensors:
        if tensor_cont.domain == ref:
            exists = True
    if not exists:
        raise RelaxError(
            "The reference domain cannot be found within any of the loaded tensors."
        )

    # Set the reference domain.
    cdp.ref_domain = ref

    # Update the model.
    update_model()
Example #21
def delete():
    """Delete all the model-free data."""

    # Test if the current pipe exists.
    check_pipe()

    # Test if the pipe type is set to 'mf'.
    function_type = pipes.get_type()
    if function_type != "mf":
        raise RelaxFuncSetupError(specific_analyses.setup.get_string(function_type))

    # Test if the sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Get all data structure names.
    names = api_model_free.data_names(scope="spin")

    # Loop over the spins.
    for spin in spin_loop():
        # Loop through the data structure names.
        for name in names:
            # Skip the data structure if it does not exist.
            if not hasattr(spin, name):
                continue

            # Delete the data.
            delattr(spin, name)
Example #24
File: uf.py Project: tlinnet/relax
def select_model(model=None):
    """Select the Frame Order model.

    @param model:   The Frame Order model.  This can be one of 'pseudo-ellipse', 'pseudo-ellipse, torsionless', 'pseudo-ellipse, free rotor', 'iso cone', 'iso cone, torsionless', 'iso cone, free rotor', 'rotor', 'rigid', 'free rotor', 'double rotor'.
    @type model:    str
    """

    # Test if the current data pipe exists.
    check_pipe()

    # Test if the model name exists.
    if not model in MODEL_LIST:
        raise RelaxError(
            "The model name '%s' is invalid, it must be one of %s." %
            (model, MODEL_LIST))

    # Set the model
    cdp.model = model

    # Initialise the list of model parameters.
    cdp.params = []

    # Set the integration method if needed.
    if not hasattr(cdp, 'quad_int'):
        # Scipy quadratic numerical integration.
        if cdp.model in []:
            cdp.quad_int = True

        # Quasi-random numerical integration.
        else:
            cdp.quad_int = False

    # Update the model.
    update_model()
Example #28
def baseplane_rmsd(error=0.0, spectrum_id=None, spin_id=None):
    """Set the peak intensity errors, as defined as the baseplane RMSD.

    @param error:           The peak intensity error value defined as the RMSD of the base plane
                            noise.
    @type error:            float
    @keyword spectrum_id:   The spectrum id.
    @type spectrum_id:      str
    @param spin_id:         The spin identification string.
    @type spin_id:          str
    """

    # Data checks.
    check_pipe()
    check_mol_res_spin_data()
    check_spectrum_id(spectrum_id)

    # The scaling by NC_proc.
    if hasattr(cdp, 'ncproc') and spectrum_id in cdp.ncproc:
        scale = 1.0 / 2**cdp.ncproc[spectrum_id]
    else:
        scale = 1.0

    # Loop over the spins.
    for spin in spin_loop(spin_id):
        # Skip deselected spins.
        if not spin.select:
            continue

        # Initialise or update the baseplane_rmsd data structure as necessary.
        if not hasattr(spin, 'baseplane_rmsd'):
            spin.baseplane_rmsd = {}

        # Set the error.
        spin.baseplane_rmsd[spectrum_id] = float(error) * scale
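
The NC_proc branch rescales the supplied RMSD before it is stored on each spin. A small arithmetic sketch of that scaling with hypothetical values:

# Hypothetical values illustrating the 1/2**NC_proc scaling above.
ncproc = 3
error = 2000.0
scale = 1.0 / 2**ncproc
print(error * scale)   # 250.0, the value stored in spin.baseplane_rmsd.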
Example #30
def bmrb_write_methods(star):
    """Generate the Software saveframe records.

    @param star:        The NMR-STAR dictionary object.
    @type star:         NMR_STAR instance
    @return:            A list BMRB software IDs and a list of software labels.
    @rtype:             tuple of list of int and list of str
    """

    # Test if the current pipe exists.
    check_pipe()

    # The scripts.
    if hasattr(cdp, 'exp_info') and hasattr(cdp.exp_info, 'scripts'):
        for script in cdp.exp_info.scripts:
            # Get the citation ID numbers.
            cite_id_nums = []
            if script.cite_ids:
                for cite in script.cite_ids:
                    cite_id_nums.append(cdp.exp_info.get_cite_id_num(cite))

            # The name.
            name = script.file + " relax script"

            # The method info.
            star.method.add(name=name, details=None, cite_ids=cite_id_nums, file_name=script.file, file_text=script.text)
Example #36
def angle_diff_frame():
    """Function for calculating the angle defining the XH vector in the diffusion frame."""

    # Test if the current data pipe exists.
    check_pipe()

    # Test if the PDB file has been loaded.
    if not hasattr(cdp, 'structure'):
        raise RelaxNoPdbError

    # Test if sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Test if the diffusion tensor data is loaded.
    if not hasattr(cdp, 'diff_tensor'):
        raise RelaxNoTensorError('diffusion')

    # Sphere.
    if cdp.diff_tensor.type == 'sphere':
        return

    # Spheroid.
    elif cdp.diff_tensor.type == 'spheroid':
        spheroid_frame()

    # Ellipsoid.
    elif cdp.diff_tensor.type == 'ellipsoid':
        raise RelaxError("Not coded yet.")
Example #38
def display():
    """Displaying the results/contents of the current data pipe."""

    # Test if the current data pipe exists.
    check_pipe()

    # Write the results.
    ds.to_xml(sys.stdout)
Example #40
def read_spins(file=None, dir=None, dim=1, spin_id_col=None, mol_name_col=None, res_num_col=None, res_name_col=None, spin_num_col=None, spin_name_col=None, sep=None, spin_id=None, verbose=True):
    """Read the peak intensity data.

    @keyword file:          The name of the file containing the peak intensities.
    @type file:             str
    @keyword dir:           The directory where the file is located.
    @type dir:              str
    @keyword dim:           The dimension of the peak list to associate the data with.
    @type dim:              int
    @keyword spin_id_col:   The column containing the spin ID strings (used by the generic intensity file format).  If supplied, the mol_name_col, res_name_col, res_num_col, spin_name_col, and spin_num_col arguments must be none.
    @type spin_id_col:      int or None
    @keyword mol_name_col:  The column containing the molecule name information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type mol_name_col:     int or None
    @keyword res_name_col:  The column containing the residue name information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type res_name_col:     int or None
    @keyword res_num_col:   The column containing the residue number information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type res_num_col:      int or None
    @keyword spin_name_col: The column containing the spin name information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type spin_name_col:    int or None
    @keyword spin_num_col:  The column containing the spin number information (used by the generic intensity file format).  If supplied, spin_id_col must be None.
    @type spin_num_col:     int or None
    @keyword sep:           The column separator which, if None, defaults to whitespace.
    @type sep:              str or None
    @keyword spin_id:       The spin ID string used to restrict data loading to a subset of all spins.  If 'auto' is provided for an NMRPipe seriesTab formatted file, the IDs are auto-generated in the form of Z_Ai.
    @type spin_id:          None or str
    @keyword verbose:       A flag which if True will cause all relaxation data loaded to be printed out.
    @type verbose:          bool
    """

    # Data checks.
    check_pipe()

    # Check the file name.
    if file == None:
        raise RelaxError("The file name must be supplied.")

    # Read the peak list data.
    peak_list = read_peak_list(file=file, dir=dir, spin_id_col=spin_id_col, mol_name_col=mol_name_col, res_num_col=res_num_col, res_name_col=res_name_col, spin_num_col=spin_num_col, spin_name_col=spin_name_col, sep=sep, spin_id=spin_id)

    # Loop over the peak_list.
    created_spins = []
    for assign in peak_list:
        mol_name = assign.mol_names[dim-1]
        res_num = assign.res_nums[dim-1]
        res_name = assign.res_names[dim-1]
        spin_num = assign.spin_nums[dim-1]
        spin_name = assign.spin_names[dim-1]

        # Generate the spin_id.
        spin_id = generate_spin_id_unique(mol_name=mol_name, res_num=res_num, res_name=res_name, spin_name=spin_name)

        # Check if the spin already exists.
        if return_spin(spin_id=spin_id) == None:
            # Create the spin if it does not exist.
            create_spin(spin_num=spin_num, spin_name=spin_name, res_num=res_num, res_name=res_name, mol_name=mol_name)

    # Test that data exists.
    check_mol_res_spin_data()
Example #41
def pdb_model(ave_pos="ave_pos",
              rep="frame_order",
              dir=None,
              compress_type=0,
              size=30.0,
              inc=36,
              model=1,
              force=False):
    """Create 3 different PDB files for representing the frame order dynamics of the system.

    @keyword ave_pos:       The file root for the average molecule structure.
    @type ave_pos:          str or None
    @keyword rep:           The file root of the PDB representation of the frame order dynamics to create.
    @type rep:              str or None
    @keyword dist:          The file root which will contain multiple models spanning the full dynamics distribution of the frame order model.
    @type dist:             str or None
    @keyword dir:           The name of the directory to place the PDB file into.
    @type dir:              str
    @keyword compress_type: The compression type.  The integer values correspond to the compression type: 0, no compression; 1, Bzip2 compression; 2, Gzip compression.
    @type compress_type:    int
    @keyword size:          The size of the geometric object in Angstroms.
    @type size:             float
    @keyword inc:           The number of increments for the filling of the cone objects.
    @type inc:              int
    @keyword model:         Only one model from an analysed ensemble can be used for the PDB representation of the Monte Carlo simulations, as these consist of one model per simulation.
    @type model:            int
    @keyword force:         Flag which if set to True will cause any pre-existing file to be overwritten.
    @type force:            bool
    """

    # Check that at least one PDB file name is given.
    if not ave_pos and not rep and not dist:
        raise RelaxError("Minimally one PDB file name must be supplied.")

    # Test if the current data pipe exists.
    check_pipe()

    # Create the average position structure.
    if ave_pos:
        create_ave_pos(file=ave_pos,
                       dir=dir,
                       compress_type=compress_type,
                       model=model,
                       force=force)

    # Nothing more to do for the rigid model.
    if cdp.model == MODEL_RIGID:
        return

    # Create the geometric representation.
    if rep:
        create_geometric_rep(file=rep,
                             dir=dir,
                             compress_type=compress_type,
                             size=size,
                             inc=inc,
                             force=force)
Example #42
def calc(verbosity=1):
    """Function for calculating the function value.

    @keyword verbosity: The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:    int
    """

    # Test if the current data pipe exists.
    check_pipe()

    # Reset the minimisation statistics.
    reset_min_stats(verbosity=verbosity)

    # The specific analysis API object.
    api = return_api()

    # Deselect spins lacking data:
    api.overfit_deselect()

    # Create the scaling matrix.
    scaling_matrix = assemble_scaling_matrix()

    # Get the Processor box singleton (it contains the Processor instance) and alias the Processor.
    processor_box = Processor_box()
    processor = processor_box.processor

    # Monte Carlo simulation calculation.
    if hasattr(cdp, 'sim_state') and cdp.sim_state == 1:
        # Loop over the simulations.
        for i in range(cdp.sim_number):
            # Status.
            if status.current_analysis:
                status.auto_analysis[status.current_analysis].mc_number = i
            else:
                status.mc_number = i

            # Calculation.
            api.calculate(verbosity=verbosity - 1,
                          sim_index=i,
                          scaling_matrix=scaling_matrix)

            # Print out.
            if verbosity and not processor.is_queued():
                print("Simulation " + repr(i + 1))

        # Unset the status.
        if status.current_analysis:
            status.auto_analysis[status.current_analysis].mc_number = None
        else:
            status.mc_number = None

    # Minimisation.
    else:
        api.calculate(verbosity=verbosity, scaling_matrix=scaling_matrix)

    # Execute any queued commands.
    processor.run_queue()
Example #43
def replicated(spectrum_ids=None):
    """Set which spectra are replicates.

    @keyword spectrum_ids:  A list of spectrum ids corresponding to replicated spectra.
    @type spectrum_ids:     list of str
    """

    # Test if the current pipe exists.
    check_pipe()

    # Test for None.
    if spectrum_ids == None:
        warn(RelaxWarning("The spectrum ID list cannot be None."))
        return

    # Test if spectra have been loaded.
    if not hasattr(cdp, 'spectrum_ids'):
        raise RelaxError(
            "No spectra have been loaded therefore replicates cannot be specified."
        )

    # Test the spectrum id strings.
    for spectrum_id in spectrum_ids:
        check_spectrum_id(spectrum_id)

    # Test for more than one element.
    if len(spectrum_ids) == 1:
        warn(
            RelaxWarning(
                "The number of spectrum IDs in the list %s must be greater than one."
                % spectrum_ids))
        return

    # Initialise.
    if not hasattr(cdp, 'replicates'):
        cdp.replicates = []

    # Check if the spectrum IDs are already in the list.
    found = False
    for i in range(len(cdp.replicates)):
        # Loop over all elements of the first.
        for j in range(len(spectrum_ids)):
            if spectrum_ids[j] in cdp.replicates[i]:
                found = True

        # One of the spectrum IDs already has a replicate specified.
        if found:
            # Add the remaining replicates to the list and quit this function.
            for j in range(len(spectrum_ids)):
                if spectrum_ids[j] not in cdp.replicates[i]:
                    cdp.replicates[i].append(spectrum_ids[j])

            # Nothing more to do.
            return

    # A new set of replicates.
    cdp.replicates.append(spectrum_ids)
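
The replicate bookkeeping either extends an existing group that shares an ID with the new list or appends the list as a new group. A simplified, runnable sketch of that grouping logic with hypothetical spectrum IDs:

# Simplified version of the replicate grouping above (cdp.replicates as a plain list).
replicates = [['A_1', 'A_2']]

def add_replicates(spectrum_ids):
    for group in replicates:
        if any(sid in group for sid in spectrum_ids):
            # One ID already has replicates: merge the rest into that group.
            for sid in spectrum_ids:
                if sid not in group:
                    group.append(sid)
            return
    # Otherwise this is a new set of replicates.
    replicates.append(list(spectrum_ids))

add_replicates(['A_2', 'A_3'])
print(replicates)   # [['A_1', 'A_2', 'A_3']]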
Example #44
def create(algor='LM', dir=None, force=False):
    """Create the Dasha script file 'dasha_script' for controlling the program.

    @keyword algor: The optimisation algorithm to use.  This can be the Levenberg-Marquardt algorithm 'LM' or the Newton-Raphson algorithm 'NR'.
    @type algor:    str
    @keyword dir:   The optional directory to place the script into.
    @type dir:      str or None
    @keyword force: A flag which if True will cause any pre-existing file to be overwritten.
    @type force:    bool
    """

    # Test if the current pipe exists.
    check_pipe()

    # Test if sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Determine the parameter set.
    model_type = determine_model_type()

    # Test if diffusion tensor data for the data_pipe exists.
    if model_type != 'local_tm' and not hasattr(cdp, 'diff_tensor'):
        raise RelaxNoTensorError('diffusion')

    # Test if the PDB file has been loaded (for the spheroid and ellipsoid).
    if model_type != 'local_tm' and cdp.diff_tensor.type != 'sphere' and not hasattr(
            cdp, 'structure'):
        raise RelaxNoPdbError

    # Test the optimisation algorithm.
    if algor not in ['LM', 'NR']:
        raise RelaxError(
            "The Dasha optimisation algorithm '%s' is unknown, it should either be 'LM' or 'NR'."
            % algor)

    # Deselect certain spins.
    __deselect_spins()

    # Directory creation.
    if dir == None:
        dir = pipes.cdp_name()
    mkdir_nofail(dir, verbosity=0)

    # Calculate the angle alpha of the XH vector in the spheroid diffusion frame.
    if cdp.diff_tensor.type == 'spheroid':
        angles.spheroid_frame()

    # Calculate the angles theta and phi of the XH vector in the ellipsoid diffusion frame.
    elif cdp.diff_tensor.type == 'ellipsoid':
        angles.ellipsoid_frame()

    # The 'dasha_script' file.
    script = open_write_file(file_name='dasha_script', dir=dir, force=force)
    create_script(script, model_type, algor)
    script.close()
Example #45
def create_rotor_pdb(file=None, dir=None, rotor_angle=None, axis=None, axis_pt=True, centre=None, span=2e-9, blade_length=5e-10, force=False, staggered=False):
    """Create a PDB representation of a rotor motional model.

    @keyword file:          The name of the PDB file to create.
    @type file:             str
    @keyword dir:           The name of the directory to place the PDB file into.
    @type dir:              str
    @keyword rotor_angle:   The angle of the rotor motion in degrees.
    @type rotor_angle:      float
    @keyword axis:          The vector defining the rotor axis.
    @type axis:             numpy rank-1, 3D array
    @keyword axis_pt:       A point lying anywhere on the rotor axis.  This is used to define the position of the axis in 3D space.
    @type axis_pt:          numpy rank-1, 3D array
    @keyword centre:        The central point of the representation.  If this point is not on the rotor axis, then the closest point on the axis will be used for the centre.
    @type centre:           numpy rank-1, 3D array
    @keyword span:          The distance from the central point to the rotor blades (meters).
    @type span:             float
    @keyword blade_length:  The length of the representative rotor blades.
    @type blade_length:     float
    @keyword force:         A flag which if set will overwrite any pre-existing file.
    @type force:            bool
    @keyword staggered:     A flag which if True will cause the rotor blades to be staggered.  This is used to avoid blade overlap.
    @type staggered:        bool
    """

    # Test if the current pipe exists.
    check_pipe()

    # Convert the angle to radians.
    rotor_angle = rotor_angle / 360.0 * 2.0 * pi

    # Create the structural object.
    structure = Internal()

    # Generate the rotor object.
    rotor(structure=structure, rotor_angle=rotor_angle, axis=axis, axis_pt=axis_pt, centre=centre, span=span, blade_length=blade_length, staggered=staggered)

    # Print out.
    print("\nGenerating the PDB file.")

    # Open the PDB file for writing.
    tensor_pdb_file = open_write_file(file, dir, force=force)

    # Write the data.
    structure.write_pdb(tensor_pdb_file)

    # Close the file.
    tensor_pdb_file.close()

    # Add the file to the results file list.
    if not hasattr(cdp, 'result_files'):
        cdp.result_files = []
    if dir == None:
        dir = getcwd()
    cdp.result_files.append(['rotor_pdb', 'Rotor PDB', get_file_path(file, dir)])
    status.observers.result_file.notify()
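
The only numeric conversion in the function is from degrees to radians before the rotor object is generated. A quick check of that conversion for a hypothetical 60 degree rotor angle:

from math import pi

# Hypothetical rotor angle in degrees, converted as in the function above.
rotor_angle_deg = 60.0
rotor_angle = rotor_angle_deg / 360.0 * 2.0 * pi
print(rotor_angle)   # ~1.0472 radians.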
Example #46
def check_pipe_setup(pipe=None, pcs_id=None, sequence=False, N=False, tensors=False, pcs=False, paramag_centre=False):
    """Check that the current data pipe has been setup sufficiently.

    @keyword pipe:              The data pipe to check, defaulting to the current pipe.
    @type pipe:                 None or str
    @keyword pcs_id:            The PCS ID string to check for in cdp.pcs_ids.
    @type pcs_id:               None or str
    @keyword sequence:          A flag which when True will invoke the sequence data check.
    @type sequence:             bool
    @keyword N:                 A flag which if True will check that cdp.N is set.
    @type N:                    bool
    @keyword tensors:           A flag which if True will check that alignment tensors exist.
    @type tensors:              bool
    @keyword pcs:               A flag which if True will check that PCSs exist.
    @type pcs:                  bool
    @keyword paramag_centre:    A flag which if True will check that the paramagnetic centre has been set.
    @type paramag_centre:       bool
    """

    # The data pipe.
    if pipe == None:
        pipe = pipes.cdp_name()

    # Get the data pipe.
    dp = pipes.get_pipe(pipe)

    # Test if the current data pipe exists.
    check_pipe(pipe)

    # Test if sequence data exists.
    if sequence and not exists_mol_res_spin_data(pipe):
        raise RelaxNoSequenceError(pipe)

    # Check for dp.N.
    if N and not hasattr(dp, 'N'):
        raise RelaxError("The number of states N has not been set.")

    # Check that alignment tensors are present.
    if tensors and (not hasattr(dp, 'align_tensors') or len(dp.align_tensors) == 0):
        raise RelaxNoTensorError('alignment')

    # Test for the alignment ID.
    if pcs_id and (not hasattr(dp, 'align_ids') or pcs_id not in dp.align_ids):
        raise RelaxNoAlignError(pcs_id, pipe)

    # Test if PCS data exists.
    if pcs and not hasattr(dp, 'align_ids'):
        raise RelaxNoAlignError()
    if pcs and not hasattr(dp, 'pcs_ids'):
        raise RelaxNoPCSError()
    elif pcs and pcs_id and pcs_id not in dp.pcs_ids:
        raise RelaxNoPCSError(pcs_id)

    # Test if the paramagnetic centre is set.
    if paramag_centre and not hasattr(cdp, 'paramagnetic_centre'):
        raise RelaxError("The paramagnetic centre has not been defined.")
Example #47
def cpmgfit_input(dir=None, binary='cpmgfit', spin_id=None, force=False):
    """Create the CPMGFit input files.

    @keyword dir:               The optional directory to place the files into.  If None, then the files will be placed into a directory named after the dispersion model.
    @type dir:                  str or None
    @keyword binary:            The name of the CPMGFit binary file.  This can include the path to the binary.
    @type binary:               str
    @keyword spin_id:           The spin ID string to restrict the file creation to.
    @type spin_id:              str
    @keyword force:             A flag which if True will cause all pre-existing files to be overwritten.
    @type force:                bool
    """

    # Test if the current pipe exists.
    check_pipe()

    # Test if sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Test if the experiment type has been set.
    if not hasattr(cdp, 'exp_type'):
        raise RelaxError("The relaxation dispersion experiment type has not been specified.")

    # Test if the model has been set.
    if not hasattr(cdp, 'model_type'):
        raise RelaxError("The relaxation dispersion model has not been specified.")

    # Directory creation.
    if dir != None:
        mkdir_nofail(dir, verbosity=0)

    # The 'batch_run.sh' script.
    batch = open_write_file('batch_run.sh', dir, force)
    batch.write("#! /bin/sh\n\n")

    # Generate the input files for each spin.
    for spin, spin_id in spin_loop(return_id=True, skip_desel=True):
        # Translate the model.
        function = translate_model(spin.model)

        # Create the input file.
        file_in = create_spin_input(function=function, spin=spin, spin_id=spin_id, dir=dir)

        # The output file name.
        file_out = spin_file_name(spin_id=spin_id, output=True)

        # Add the file to the batch script.
        batch.write("%s -grid -xmgr -f %s | tee %s\n" % (binary, file_in, file_out))

    # Close the batch script, then make it executable.
    batch.close()
    if dir:
        chmod(dir + sep + 'batch_run.sh', S_IRWXU|S_IRGRP|S_IROTH)
    else:
        chmod('batch_run.sh', S_IRWXU|S_IRGRP|S_IROTH)
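The batch-file generation above follows a common pattern: write one shell command per input file, then make the script executable. The sketch below reproduces that pattern using only the standard library; write_batch and its file naming scheme are hypothetical and are not the relax implementation.

from os import chmod, path
from stat import S_IRWXU, S_IRGRP, S_IROTH
import tempfile

def write_batch(items, directory, binary='cpmgfit'):
    # Write one shell command per item, capturing the output via tee.
    script = path.join(directory, 'batch_run.sh')
    with open(script, 'w') as fh:
        fh.write("#! /bin/sh\n\n")
        for name in items:
            fh.write("%s -grid -xmgr -f %s.in | tee %s.out\n" % (binary, name, name))

    # Make the finished script executable for the user, readable for others.
    chmod(script, S_IRWXU | S_IRGRP | S_IROTH)
    return script

# Usage with a temporary directory and two hypothetical input names.
print(write_batch(['spin1', 'spin2'], tempfile.mkdtemp()))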
Example #49
0
def calc(verbosity=1):
    """Function for calculating the function value.

    @keyword verbosity: The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:    int
    """

    # Test if the current data pipe exists.
    check_pipe()

    # Reset the minimisation statistics.
    reset_min_stats(verbosity=verbosity)

    # The specific analysis API object.
    api = return_api()

    # Deselect spins lacking data:
    api.overfit_deselect()

    # Create the scaling matrix.
    scaling_matrix = assemble_scaling_matrix()

    # Get the Processor box singleton (it contains the Processor instance) and alias the Processor.
    processor_box = Processor_box() 
    processor = processor_box.processor

    # Monte Carlo simulation calculation.
    if hasattr(cdp, 'sim_state') and cdp.sim_state == 1:
        # Loop over the simulations.
        for i in range(cdp.sim_number):
            # Status.
            if status.current_analysis:
                status.auto_analysis[status.current_analysis].mc_number = i
            else:
                status.mc_number = i

            # Calculation.
            api.calculate(verbosity=verbosity-1, sim_index=i, scaling_matrix=scaling_matrix)

            # Print out.
            if verbosity and not processor.is_queued():
                print("Simulation " + repr(i+1))

        # Unset the status.
        if status.current_analysis:
            status.auto_analysis[status.current_analysis].mc_number = None
        else:
            status.mc_number = None

    # Minimisation.
    else:
        api.calculate(verbosity=verbosity, scaling_matrix=scaling_matrix)

    # Execute any queued commands.
    processor.run_queue()
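The branching between a single calculation and a Monte Carlo loop is the core of the function above. A minimal sketch of that dispatch pattern follows; the Analysis class and run_calc function are hypothetical stand-ins for the analysis API object and do not belong to relax.

class Analysis:
    """A hypothetical stand-in for the specific analysis API object."""
    def calculate(self, sim_index=None, verbosity=1):
        if verbosity:
            print("Calculating (sim_index=%s)." % sim_index)

def run_calc(api, sim_state=False, sim_number=0, verbosity=1):
    if sim_state:
        # Repeat the calculation once per Monte Carlo simulation index,
        # lowering the verbosity inside the loop as the function above does.
        for i in range(sim_number):
            api.calculate(sim_index=i, verbosity=verbosity-1)
            if verbosity:
                print("Simulation %i" % (i+1))
    else:
        # A single calculation over the full data set.
        api.calculate(verbosity=verbosity)

# Usage:  three simulations, then one plain calculation.
run_calc(Analysis(), sim_state=True, sim_number=3)
run_calc(Analysis())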
Example #50
0
File: uf.py Project: tlinnet/relax
def remove_tm(spin_id=None):
    """Remove local tm from the set of model-free parameters for the given spins.

    @param spin_id: The spin identification string.
    @type spin_id:  str or None
    """

    # Test if the current data pipe exists.
    check_pipe()

    # Test if the pipe type is 'mf'.
    function_type = pipes.get_type()
    if function_type != 'mf':
        raise RelaxFuncSetupError(specific_analyses.get_string(function_type))

    # Test if sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Loop over the spins.
    for spin in spin_loop(spin_id):
        # Skip deselected spins.
        if not spin.select:
            continue

        # Test if a local tm parameter exists.
        if not hasattr(spin, 'params') or not 'local_tm' in spin.params:
            continue

        # Remove tm.
        spin.params.remove('local_tm')

        # Model name.
        if match('^tm', spin.model):
            spin.model = spin.model[1:]

        # Delete the local tm variable.
        del spin.local_tm

        # Set all the minimisation stats to None.
        spin.chi2 = None
        spin.iter = None
        spin.f_count = None
        spin.g_count = None
        spin.h_count = None
        spin.warning = None

    # Set the global minimisation stats to None.
    cdp.chi2 = None
    cdp.iter = None
    cdp.f_count = None
    cdp.g_count = None
    cdp.h_count = None
    cdp.warning = None
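The loop above removes one parameter from each spin, renames the model, and invalidates the fit statistics. A minimal standalone sketch of that pattern is given below; the Spin class and strip_param function are hypothetical and only illustrate the idea.

from re import match

class Spin:
    """A hypothetical stand-in for a spin container."""
    def __init__(self, model, params):
        self.select = True
        self.model = model
        self.params = list(params)
        self.chi2 = 12.3

def strip_param(spins, name='local_tm'):
    for spin in spins:
        # Skip deselected spins and spins lacking the parameter.
        if not spin.select or name not in spin.params:
            continue
        spin.params.remove(name)
        # Models prefixed with 'tm' lose that prefix, e.g. 'tm2' becomes 'm2'.
        if match('^tm', spin.model):
            spin.model = spin.model[1:]
        # The old statistics no longer correspond to the reduced model.
        spin.chi2 = None

spins = [Spin('tm2', ['local_tm', 's2', 'te'])]
strip_param(spins)
print(spins[0].model, spins[0].params)   # m2 ['s2', 'te']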
Example #52
0
def create(algor='LM', dir=None, force=False):
    """Create the Dasha script file 'dasha_script' for controlling the program.

    @keyword algor: The optimisation algorithm to use.  This can be the Levenberg-Marquardt algorithm 'LM' or the Newton-Raphson algorithm 'NR'.
    @type algor:    str
    @keyword dir:   The optional directory to place the script into.
    @type dir:      str or None
    @keyword force: A flag which if True will cause any pre-existing file to be overwritten.
    @type force:    bool
    """

    # Test if the current pipe exists.
    check_pipe()

    # Test if sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Determine the parameter set.
    model_type = determine_model_type()

    # Test if diffusion tensor data for the data_pipe exists.
    if model_type != 'local_tm' and not hasattr(cdp, 'diff_tensor'):
        raise RelaxNoTensorError('diffusion')

    # Test if the PDB file has been loaded (for the spheroid and ellipsoid).
    if model_type != 'local_tm' and cdp.diff_tensor.type != 'sphere' and not hasattr(cdp, 'structure'):
        raise RelaxNoPdbError

    # Test the optimisation algorithm.
    if algor not in ['LM', 'NR']:
        raise RelaxError("The Dasha optimisation algorithm '%s' is unknown, it should either be 'LM' or 'NR'." % algor)

    # Deselect certain spins.
    __deselect_spins()

    # Directory creation.
    if dir == None:
        dir = pipes.cdp_name()
    mkdir_nofail(dir, verbosity=0)

    # Calculate the angle alpha of the XH vector in the spheroid diffusion frame.
    if cdp.diff_tensor.type == 'spheroid':
        angles.spheroid_frame()

    # Calculate the angles theta and phi of the XH vector in the ellipsoid diffusion frame.
    elif cdp.diff_tensor.type == 'ellipsoid':
        angles.ellipsoid_frame()

    # The 'dasha_script' file.
    script = open_write_file(file_name='dasha_script', dir=dir, force=force)
    create_script(script, model_type, algor)
    script.close()
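The create() function validates its inputs, defaults the output directory, and only then writes the script file. The following sketch shows that validate-then-write pattern in isolation; write_script and its file contents are hypothetical and are not the real Dasha script.

import os, tempfile

def write_script(algor='LM', directory=None):
    # Reject unknown algorithm names before touching the file system.
    if algor not in ['LM', 'NR']:
        raise ValueError("Unknown optimisation algorithm '%s'." % algor)

    # Default the directory, then make sure it exists.
    if directory is None:
        directory = tempfile.mkdtemp()
    os.makedirs(directory, exist_ok=True)

    # Write the (placeholder) script contents.
    script_path = os.path.join(directory, 'dasha_script')
    with open(script_path, 'w') as fh:
        fh.write("# Optimisation algorithm: %s\n" % algor)
    return script_path

print(write_script('NR'))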
Example #53
0
def quad_int(flag=False):
    """Turn the high precision Scipy quadratic numerical integration on or off.

    @keyword flag:  The flag which if True will perform high precision numerical integration via the scipy.integrate quad(), dblquad() and tplquad() integration methods rather than the rough quasi-random numerical integration.
    @type flag:     bool
    """

    # Test if the current data pipe exists.
    check_pipe()

    # Store the flag.
    cdp.quad_int = flag
Example #54
0
def monte_carlo_on():
    """Turn simulations on."""

    # Test if the current data pipe exists.
    check_pipe()

    # Test if simulations have been set up.
    if not hasattr(cdp, 'sim_state'):
        raise RelaxError("Monte Carlo simulations have not been set up.")

    # Turn simulations on.
    cdp.sim_state = True
Example #56
0
def grid_zoom(level=0):
    """Store the grid zoom level.

    @keyword level: The zoom level.
    @type level:    int
    """

    # Test if the current data pipe exists.
    check_pipe()

    # Store the values.
    cdp.grid_zoom_level = level
Example #57
0
def get_frequency(id=None):
    """Return the frequency corresponding to the given ID.

    @param id:  The experiment ID string.
    @type id:   str
    @return:    The spectrometer proton frequency in Hertz for the given ID.
    @rtype:     float
    """

    # Checks.
    check_pipe()
    check_frequency(id=id)

    # Return the frequency in Hz.
    return cdp.spectrometer_frq[id]
Example #58
0
def monte_carlo_initial_values():
    """Set the initial simulation parameter values."""

    # Test if the current data pipe exists.
    check_pipe()

    # Test if simulations have been set up.
    if not hasattr(cdp, 'sim_state'):
        raise RelaxError("Monte Carlo simulations have not been set up.")

    # The specific analysis API object.
    api = return_api()

    # Set the initial parameter values.
    api.sim_init_values()
Example #59
0
def delete(spectrum_id=None):
    """Delete spectral data corresponding to the spectrum ID.

    @keyword spectrum_id:   The spectrum ID string.
    @type spectrum_id:      str
    """

    # Data checks.
    check_pipe()
    check_mol_res_spin_data()
    check_spectrum_id(spectrum_id)

    # Remove the ID.
    cdp.spectrum_ids.pop(cdp.spectrum_ids.index(spectrum_id))

    # The ncproc parameter.
    if hasattr(cdp, 'ncproc') and spectrum_id in cdp.ncproc:
        del cdp.ncproc[spectrum_id]

    # Replicates.
    if hasattr(cdp, 'replicates'):
        # Loop over the replicates.
        for i in range(len(cdp.replicates)):
            # The spectrum is replicated.
            if spectrum_id in cdp.replicates[i]:
                # Duplicate.
                if len(cdp.replicates[i]) == 2:
                    cdp.replicates.pop(i)

                # More than two spectra:
                else:
                    cdp.replicates[i].pop(cdp.replicates[i].index(spectrum_id))

                # No need to check further.
                break

    # Errors.
    if hasattr(cdp, 'sigma_I') and spectrum_id in cdp.sigma_I:
        del cdp.sigma_I[spectrum_id]
    if hasattr(cdp, 'var_I') and spectrum_id in cdp.var_I:
        del cdp.var_I[spectrum_id]

    # Loop over the spins.
    for spin in spin_loop():
        # Intensity data.
        if hasattr(spin, 'peak_intensity') and spectrum_id in spin.peak_intensity:
            del spin.peak_intensity[spectrum_id]
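The delete() function above removes a single ID from a list of IDs and from several optional per-ID dictionaries. A minimal sketch of that clean-up pattern follows; the Container class and delete_id function are hypothetical stand-ins for the current data pipe and the relax user function.

class Container:
    """A hypothetical stand-in for the current data pipe."""

def delete_id(cont, spectrum_id):
    # Remove the ID from the list of spectrum IDs.
    cont.spectrum_ids.remove(spectrum_id)

    # The per-ID dictionaries are optional, so each is checked before deletion.
    for attr in ('ncproc', 'sigma_I', 'var_I'):
        data = getattr(cont, attr, None)
        if data is not None and spectrum_id in data:
            del data[spectrum_id]

cont = Container()
cont.spectrum_ids = ['ref', 'sat']
cont.var_I = {'ref': 1.2, 'sat': 1.5}
delete_id(cont, 'sat')
print(cont.spectrum_ids, cont.var_I)   # ['ref'] {'ref': 1.2}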
Example #60
0
def pivot(pivot=None, order=1, fix=False):
    """Set the pivot point for the 2 body motion.

    @keyword pivot: The pivot point of the two bodies (domains, etc.) in the structural coordinate system.
    @type pivot:    list of num
    @keyword order: The ordinal number of the pivot point.  The value of 1 is for the first pivot point, the value of 2 for the second pivot point, and so on.
    @type order:    int
    @keyword fix:   A flag specifying if the pivot point should be fixed during optimisation.
    @type fix:      bool
    """

    # Test if the current data pipe exists.
    check_pipe()

    # Store the fixed flag.
    cdp.pivot_fixed = fix

    # No pivot given, so update the model if needed and quit.
    if pivot is None:
        if hasattr(cdp, 'model'):
            update_model()
        return

    # Convert the pivot to a numpy array.
    pivot = array(pivot, float64)

    # Check the pivot validity.
    is_float_array(pivot, name='pivot point', size=3)

    # Store the pivot point and fixed flag.
    if order == 1:
        cdp.pivot_x = pivot[0]
        cdp.pivot_y = pivot[1]
        cdp.pivot_z = pivot[2]
    else:
        # The variable names.
        name_x = 'pivot_x_%i' % order
        name_y = 'pivot_y_%i' % order
        name_z = 'pivot_z_%i' % order

        # Store the variables.
        setattr(cdp, name_x, pivot[0])
        setattr(cdp, name_y, pivot[1])
        setattr(cdp, name_z, pivot[2])

    # Update the model.
    if hasattr(cdp, 'model'):
        update_model()
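The pivot() function stores the first pivot under fixed variable names and later pivots under names carrying the ordinal number. The sketch below reproduces that setattr naming pattern; the Container class and store_pivot function are hypothetical, and NumPy is assumed to be available, as it is in relax itself.

from numpy import array, float64

class Container:
    """A hypothetical stand-in for the current data pipe."""

def store_pivot(cont, pivot, order=1):
    # Convert to a numpy array, as the function above does.
    pivot = array(pivot, float64)

    # The first pivot uses plain names, later pivots gain an ordinal suffix.
    suffix = '' if order == 1 else '_%i' % order
    for name, value in zip(('x', 'y', 'z'), pivot):
        setattr(cont, 'pivot_%s%s' % (name, suffix), value)

cont = Container()
store_pivot(cont, [1.0, 2.0, 3.0])
store_pivot(cont, [4.0, 5.0, 6.0], order=2)
print(cont.pivot_x, cont.pivot_z_2)   # 1.0 6.0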