Code example #1
File: nmrpipe.py Project: pombredanne/relax
def show_apod_extract(file_name=None, dir=None, path_to_command='showApod'):
    """Extract showApod information for spectrum fourier transformed with NMRPipe.

    @keyword file:              The filename of the NMRPipe fourier transformed file.
    @type file:                 str
    @keyword dir:               The directory where the file is located.
    @type dir:                  str
    @keyword path_to_command:   If showApod not in PATH, then specify absolute path as: /path/to/showApod
    @type path_to_command:      str
    @return:                    The output from showApod as list of lines.
    @rtype:                     list of lines
    """

    # Get the file path.
    file_path = get_file_path(file_name=file_name, dir=dir)

    if not subprocess_module:
        raise RelaxError("Python module 'subprocess' not found, cannot call showApod.")

    # Call the program.
    Temp = subprocess.Popen([path_to_command, file_path], stdout=subprocess.PIPE)

    # Communicate with the program, and get the output and exit code.
    (output, errput) = Temp.communicate()

    # Wait for finish and get return code.
    return_value = Temp.wait()

    # Python 3 support - convert byte arrays to text.
    if hasattr(output, 'decode'):
        output = output.decode()

    return output.splitlines()
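
The pattern above — Popen() with piped stdout, communicate() to collect the output, and a decode() for the bytes objects returned under Python 3 — is generic and worth isolating. A minimal stand-alone sketch, with echo standing in for showApod (note that communicate() already waits for the process to finish, so the extra wait() only fetches the return code):

import subprocess

def run_command(args):
    # Launch the process with its standard output piped back to us.
    proc = subprocess.Popen(args, stdout=subprocess.PIPE)

    # communicate() reads everything and waits for the process to exit.
    output, _ = proc.communicate()

    # Under Python 3 the captured output is bytes, so convert to text.
    if hasattr(output, 'decode'):
        output = output.decode()

    return output.splitlines()

print(run_command(['echo', 'hello world']))
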
Code example #2
File: main.py Project: belisario21/relax_trunk
def read_xyz(file=None, dir=None, read_mol=None, set_mol_name=None, read_model=None, set_model_num=None, verbosity=1, fail=True):
    """The XYZ loading function.


    @keyword file:          The name of the XYZ file to read.
    @type file:             str
    @keyword dir:           The directory where the XYZ file is located.  If set to None, then the
                            file will be searched for in the current directory.
    @type dir:              str or None
    @keyword read_mol:      The molecule(s) to read from the file, independent of model.
                            If set to None, then all molecules will be loaded.
    @type read_mol:         None, int, or list of int
    @keyword set_mol_name:  Set the names of the molecules which are loaded.  If set to None, then
                            the molecules will be automatically labelled based on the file name or
                            other information.
    @type set_mol_name:     None, str, or list of str
    @keyword read_model:    The XYZ model to extract from the file.  If set to None, then all models
                            will be loaded.
    @type read_model:       None, int, or list of int
    @keyword set_model_num: Set the model number of the loaded molecule.  If set to None, then the
                            XYZ model numbers will be preserved, if they exist.
    @type set_model_num:    None, int, or list of int
    @keyword fail:          A flag which, if True, will cause a RelaxError to be raised if the XYZ
                            file does not exist.  If False, then a RelaxWarning will be thrown
                            instead.
    @type fail:             bool
    @keyword verbosity:     The amount of information to print to screen.  Zero corresponds to
                            minimal output while higher values increase the amount of output.  The
                            default value is 1.
    @type verbosity:        int
    @raise RelaxFileError:  If the fail flag is set, then a RelaxFileError is raised if the XYZ
                            file does not exist.
    """

    # Test if the current data pipe exists.
    pipes.test()

    # The file path.
    file_path = get_file_path(file, dir)

    # Try adding '.xyz' to the end of the file path, if the file can't be found.
    if not access(file_path, F_OK):
        file_path_orig = file_path
        file_path = file_path + '.xyz'

    # Test if the file exists.
    if not access(file_path, F_OK):
        if fail:
            raise RelaxFileError('XYZ', file_path_orig)
        else:
            warn(RelaxNoPDBFileWarning(file_path))
            return

    # Place the structural object into the relax data store.
    if not hasattr(cdp, 'structure'):
        cdp.structure = Internal()

    # Load the structures.
    cdp.structure.load_xyz(file_path, read_mol=read_mol, set_mol_name=set_mol_name, read_model=read_model, set_model_num=set_model_num, verbosity=verbosity)
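
The fail keyword implements a raise-or-warn convention that recurs throughout these examples. A stand-alone sketch of just that convention, with a stdlib exception and warning standing in for RelaxFileError and RelaxNoPDBFileWarning:

from os import access, F_OK
from warnings import warn

def check_file(file_path, fail=True):
    # Raise on a missing file, or warn and let the caller bail out quietly.
    if access(file_path, F_OK):
        return True
    if fail:
        raise IOError("The file '%s' does not exist." % file_path)
    warn("The file '%s' does not exist." % file_path)
    return False
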
Code example #3
File: main.py Project: belisario21/relax_trunk
def read_pdb(file=None, dir=None, read_mol=None, set_mol_name=None, read_model=None, set_model_num=None, alt_loc=None, verbosity=1, merge=False, fail=True):
    """The PDB loading function.

    @keyword file:          The name of the PDB file to read.
    @type file:             str
    @keyword dir:           The directory where the PDB file is located.  If set to None, then the file will be searched for in the current directory.
    @type dir:              str or None
    @keyword read_mol:      The molecule(s) to read from the file, independent of model.  The molecules are numbered consecutively from 1.  If set to None, then all molecules will be loaded.
    @type read_mol:         None, int, or list of int
    @keyword set_mol_name:  Set the names of the molecules which are loaded.  If set to None, then the molecules will be automatically labelled based on the file name or other information.
    @type set_mol_name:     None, str, or list of str
    @keyword read_model:    The PDB model to extract from the file.  If set to None, then all models will be loaded.
    @type read_model:       None, int, or list of int
    @keyword set_model_num: Set the model number of the loaded molecule.  If set to None, then the PDB model numbers will be preserved, if they exist.
    @type set_model_num:    None, int, or list of int
    @keyword alt_loc:       The PDB ATOM record 'Alternate location indicator' field value to select which coordinates to use.
    @type alt_loc:          str or None
    @keyword verbosity:     The amount of information to print to screen.  Zero corresponds to minimal output while higher values increase the amount of output.  The default value is 1.
    @type verbosity:        int
    @keyword merge:         A flag which if set to True will try to merge the PDB structure into the currently loaded structures.
    @type merge:            bool
    @keyword fail:          A flag which, if True, will cause a RelaxError to be raised if the PDB file does not exist.  If False, then a RelaxWarning will be thrown instead.
    @type fail:             bool
    @raise RelaxFileError:  If the fail flag is set, then a RelaxFileError is raised if the PDB file does not exist.
    """

    # Test if the current data pipe exists.
    pipes.test()

    # The file path.
    file_path = get_file_path(file, dir)

    # Try adding file extensions to the end of the file path, if the file can't be found.
    file_path_orig = file_path
    if not access(file_path, F_OK):
        # List of possible extensions.
        for ext in ['.pdb', '.gz', '.pdb.gz', '.bz2', '.pdb.bz2']:
            # Add the extension if the file can be found.
            if access(file_path+ext, F_OK):
                file_path = file_path + ext

    # Test if the file exists.
    if not access(file_path, F_OK):
        if fail:
            raise RelaxFileError('PDB', file_path_orig)
        else:
            warn(RelaxNoPDBFileWarning(file_path))
            return

    # Place the internal structural object into the relax data store.
    if not hasattr(cdp, 'structure'):
        cdp.structure = Internal()

    # Load the structures.
    cdp.structure.load_pdb(file_path, read_mol=read_mol, set_mol_name=set_mol_name, read_model=read_model, set_model_num=set_model_num, alt_loc=alt_loc, verbosity=verbosity, merge=merge)

    # Load into Molmol (if running).
    molmol.molmol_obj.open_pdb()
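
One subtlety in the extension loop above: file_path is reassigned inside the loop without a break, so a later candidate is probed against the already-extended path. If the intent is "first match wins", an explicit break states that directly; a sketch where the helper name is illustrative, not relax API:

from os import access, F_OK

def probe_extensions(path, exts=('.pdb', '.gz', '.pdb.gz', '.bz2', '.pdb.bz2')):
    # Return the first existing candidate, or the original path if none match.
    if access(path, F_OK):
        return path
    for ext in exts:
        if access(path + ext, F_OK):
            return path + ext  # First match wins; stop probing.
    return path
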
Code example #4
File: main.py Project: belisario21/relax_trunk
def create_cone_pdb(mol=None, cone=None, start_res=1, apex=None, axis=None, R=None, inc=None, scale=30.0, distribution='regular', file=None, dir=None, force=False, axis_flag=True):
    """Create a PDB representation of the given cone object.

    @keyword mol:           The molecule container.
    @type mol:              MolContainer instance
    @keyword cone:          The cone object.  This should provide the limit_check() method which determines the limits of the distribution, accepting two arguments, the polar angle phi and the azimuthal angle theta, and returning True if the point is within the limits or False if outside.  It should also provide the theta_max() method for returning the theta value for the given phi, and the phi_max() method for returning the phi value for the given theta.
    @type cone:             class instance
    @keyword start_res:     The starting residue number.
    @type start_res:        int
    @keyword apex:          The apex of the cone.
    @type apex:             rank-1, 3D numpy array
    @keyword axis:          The central axis of the cone.  If not supplied, the z-axis will be used.
    @type axis:             rank-1, 3D numpy array
    @keyword R:             The rotation matrix.
    @type R:                rank-2, 3D numpy array
    @keyword inc:           The increment number used to determine the number of latitude and longitude lines.
    @type inc:              int
    @keyword scale:         The scaling factor to stretch the unit cone by.
    @type scale:            float
    @keyword distribution:  The type of point distribution to use.  This can be 'uniform' or 'regular'.
    @type distribution:     str
    @keyword file:          The name of the PDB file to create.
    @type file:             str
    @keyword dir:           The name of the directory to place the PDB file into.
    @type dir:              str
    @keyword force:         Flag which if set to True will overwrite any pre-existing file.
    @type force:            bool
    @keyword axis_flag:     A flag which if True will create the cone's axis.
    @type axis_flag:        bool
    """

    # No molecule supplied.
    if mol == None:
        # Create the structural object.
        structure = Internal()

        # Add a molecule.
        structure.add_molecule(name='cone')

        # Alias the single molecule from the single model.
        mol = structure.structural_data[0].mol[0]

    # Create the object.
    cone(mol=mol, cone=cone, start_res=start_res, apex=apex, axis=axis, R=R, inc=inc, scale=scale, distribution=distribution, axis_flag=axis_flag)

    # Create the PDB file.
    if file != None:
        print("\nGenerating the PDB file.")
        pdb_file = open_write_file(file_name=file, dir=dir, force=force)
        structure.write_pdb(pdb_file)
        pdb_file.close()

    # Add the file to the results file list.
    if not hasattr(cdp, 'result_files'):
        cdp.result_files = []
    cdp.result_files.append(['cone_pdb', 'Cone PDB', get_file_path(file, dir)])
    status.observers.result_file.notify()
Code example #5
File: geometric.py Project: belisario21/relax_trunk
def create_rotor_pdb(file=None, dir=None, rotor_angle=None, axis=None, axis_pt=True, centre=None, span=2e-9, blade_length=5e-10, force=False, staggered=False):
    """Create a PDB representation of a rotor motional model.

    @keyword file:          The name of the PDB file to create.
    @type file:             str
    @keyword dir:           The name of the directory to place the PDB file into.
    @type dir:              str
    @keyword rotor_angle:   The angle of the rotor motion in degrees.
    @type rotor_angle:      float
    @keyword axis:          The vector defining the rotor axis.
    @type axis:             numpy rank-1, 3D array
    @keyword axis_pt:       A point lying anywhere on the rotor axis.  This is used to define the position of the axis in 3D space.
    @type axis_pt:          numpy rank-1, 3D array
    @keyword centre:        The central point of the representation.  If this point is not on the rotor axis, then the closest point on the axis will be used for the centre.
    @type centre:           numpy rank-1, 3D array
    @keyword span:          The distance from the central point to the rotor blades (meters).
    @type span:             float
    @keyword blade_length:  The length of the representative rotor blades.
    @type blade_length:     float
    @keyword force:         A flag which if set will overwrite any pre-existing file.
    @type force:            bool
    @keyword staggered:     A flag which if True will cause the rotor blades to be staggered.  This is used to avoid blade overlap.
    @type staggered:        bool
    """

    # Test if the current pipe exists.
    pipes.test()

    # Convert the angle to radians.
    rotor_angle = rotor_angle / 360.0 * 2.0 * pi

    # Create the structural object.
    structure = Internal()

    # Generate the rotor object.
    rotor_pdb(structure=structure, rotor_angle=rotor_angle, axis=axis, axis_pt=axis_pt, centre=centre, span=span, blade_length=blade_length, staggered=staggered)

    # Print out.
    print("\nGenerating the PDB file.")

    # Open the PDB file for writing.
    tensor_pdb_file = open_write_file(file, dir, force=force)

    # Write the data.
    structure.write_pdb(tensor_pdb_file)

    # Close the file.
    tensor_pdb_file.close()

    # Add the file to the results file list.
    if not hasattr(cdp, 'result_files'):
        cdp.result_files = []
    if dir == None:
        dir = getcwd()
    cdp.result_files.append(['rotor_pdb', 'Rotor PDB', get_file_path(file, dir)])
    status.observers.result_file.notify()
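
The conversion rotor_angle / 360.0 * 2.0 * pi is simply degrees to radians; math.radians() expresses the same thing directly (equal to within floating-point rounding):

from math import pi, radians

deg = 10.5
assert abs(deg / 360.0 * 2.0 * pi - radians(deg)) < 1e-12
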
Code example #6
def macro_run(file=None, dir=None):
    """Execute the PyMOL macro from the given text file.

    @keyword file:          The name of the macro file to execute.
    @type file:             str
    @keyword dir:           The name of the directory where the macro file is located.
    @type dir:              str
    """

    # Open the file for reading.
    file_path = get_file_path(file, dir)
    file = open_read_file(file, dir)

    # Loop over the commands and apply them.
    for command in file.readlines():
        pymol_obj.exec_cmd(command)
Code example #7
def macro_run(file=None, dir=None):
    """Execute the Molmol macro from the given text file.

    @keyword file:          The name of the macro file to execute.
    @type file:             str
    @keyword dir:           The name of the directory where the macro file is located.
    @type dir:              str
    """

    # Open the file for reading.
    file_path = get_file_path(file, dir)
    file = open_read_file(file, dir)

    # Loop over the commands and apply them.
    for command in file.readlines():
        molmol_obj.exec_cmd(command)
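
The PyMOL and Molmol variants of macro_run() differ only in the executor object. The shared pattern, with the program-specific command executor injected as a parameter — a sketch, not relax API (iterating the file object directly also avoids loading the whole macro via readlines()):

def run_macro(path, exec_cmd):
    # Apply each line of the macro file through the supplied executor.
    with open(path) as macro:
        for command in macro:
            exec_cmd(command)

# A stand-in executor that just echoes each command:
# run_macro('macro.txt', exec_cmd=print)
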
Code example #8
File: results.py Project: tlinnet/relax
def read(file='results', dir=None):
    """Function for reading the data out of a file."""

    # Test if the current data pipe exists.
    check_pipe()

    # Make sure that the data pipe is empty.
    if not cdp.is_empty():
        raise RelaxError("The current data pipe is not empty.")

    # Get the full file path, for later use.
    file_path = get_file_path(file_name=file, dir=dir)

    # Open the file.
    file = open_read_file(file_name=file_path)

    # Determine the format of the file.
    format = determine_format(file)

    # XML results.
    if format == 'xml':
        ds.from_xml(file, dir=dirname(file_path), pipe_to=pipes.cdp_name())

    # Columnar results (for backwards compatibility with ancient relax results model-free files).
    elif format == 'columnar':
        # Extract the data from the file.
        file_data = extract_data(file=file)

        # Strip data.
        file_data = strip(file_data)

        # Do nothing if the file does not exist.
        if not file_data:
            raise RelaxFileEmptyError

        # Read the results.
        read_columnar_results(file_data)

    # Unknown results file.
    else:
        raise RelaxError("The format of the results file " + repr(file_path) + " cannot be determined.")

    # Update all of the required metadata structures.
    mol_res_spin.metadata_update()
    interatomic.metadata_update()
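
The determine_format() result feeding an if/elif chain is a dispatch table in disguise; a dict keeps the format-to-reader mapping in one place. A sketch with hypothetical reader callables standing in for the XML and columnar branches:

# Hypothetical readers standing in for the two branches above.
handlers = {
    'xml': lambda path: print('XML reader for', path),
    'columnar': lambda path: print('columnar reader for', path),
}

def read_results(fmt, path):
    # Unknown formats fail loudly, mirroring the final else branch.
    if fmt not in handlers:
        raise ValueError("The format of the results file %r cannot be determined." % path)
    handlers[fmt](path)

read_results('xml', 'results.bz2')
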
Code example #9
File: value.py Project: pombredanne/relax
def write(param=None, file=None, dir=None, scaling=1.0, return_value=None, return_data_desc=None, comment=None, bc=False, force=False):
    """Write data to a file.

    @keyword param:             The name of the parameter to write to file.
    @type param:                str
    @keyword file:              The file to write the data to.
    @type file:                 str
    @keyword dir:               The name of the directory to place the file into (defaults to the current directory).
    @type dir:                  str
    @keyword scaling:           The value to scale the parameter by.
    @type scaling:              float
    @keyword return_value:      An optional function which if supplied will override the default value returning function.
    @type return_value:         None or func
    @keyword return_data_desc:  An optional function which if supplied will override the default parameter description returning function.
    @type return_data_desc:     None or func
    @keyword comment:           Text which will be added to the start of the file as comments.  All lines will be prefixed by '# '.
    @type comment:              str
    @keyword bc:                A flag which if True will cause the back calculated values to be written.
    @type bc:                   bool
    @keyword force:             A flag which if True will cause any pre-existing file to be overwritten.
    @type force:                bool
    """

    # Test if the current pipe exists.
    check_pipe()

    # Test if the sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Open the file for writing.
    file_path = get_file_path(file, dir)
    file = open_write_file(file, dir, force)

    # Write the data.
    write_data(param=param, file=file, scaling=scaling, return_value=return_value, return_data_desc=return_data_desc, comment=comment, bc=bc)

    # Close the file.
    file.close()

    # Add the file to the results file list.
    add_result_file(type='text', label='Text', file=file_path)
Code example #10
File: grace.py Project: pombredanne/relax
def view(file=None, dir=None, grace_exe='xmgrace'):
    """Execute Grace.

    @keyword file:      The name of the file to open in Grace.
    @type file:         str
    @keyword dir:       The optional directory containing the file.
    @type dir:          str
    @keyword grace_exe: The name of the Grace executable file.  This should be located within the
                        system path.
    @type grace_exe:    str
    """

    # Test the binary file string corresponds to a valid executable.
    test_binary(grace_exe)

    # File path.
    file_path = get_file_path(file, dir)

    # Run Grace.
    system(grace_exe + " \"" + file_path + "\" &")
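
Building the command as a quoted string for system() backgrounds the viewer through a shell, but breaks on file names containing double quotes. subprocess.Popen with an argument list avoids both the shell and the quoting, and returns immediately so no trailing '&' is needed — a sketch, assuming the viewer's exit status is not needed:

import subprocess

def view(file_path, grace_exe='xmgrace'):
    # Popen() does not block, so the viewer is effectively backgrounded.
    subprocess.Popen([grace_exe, file_path])
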
Code example #11
File: results.py Project: pombredanne/relax
def read(file='results', dir=None):
    """Function for reading the data out of a file."""

    # Test if the current data pipe exists.
    check_pipe()

    # Make sure that the data pipe is empty.
    if not cdp.is_empty():
        raise RelaxError("The current data pipe is not empty.")

    # Get the full file path, for later use.
    file_path = get_file_path(file_name=file, dir=dir)

    # Open the file.
    file = open_read_file(file_name=file_path)

    # Determine the format of the file.
    format = determine_format(file)

    # XML results.
    if format == 'xml':
        ds.from_xml(file, dir=dirname(file_path), pipe_to=pipes.cdp_name())

    # Columnar results (for backwards compatibility with ancient relax results model-free files).
    elif format == 'columnar':
        # Extract the data from the file.
        file_data = extract_data(file=file)

        # Strip data.
        file_data = strip(file_data)

        # Do nothing if the file does not exist.
        if not file_data:
            raise RelaxFileEmptyError

        # Read the results.
        read_columnar_results(file_data)

    # Unknown results file.
    else:
        raise RelaxError("The format of the results file " + repr(file_path) + " cannot be determined.")
Code example #12
File: grace.py Project: tlinnet/relax
def create_grace2images(dir=None):
    """Create the grace2images.py executable script.

    @keyword dir:   The directory to place the script into.
    @type dir:      str
    """

    # Expand any ~ characters.
    dir = expanduser(dir)

    # Open the file.
    print("\nCreating the Python \"grace to PNG/EPS/SVG...\" conversion script.")
    file_name = "grace2images.py"
    file_path = get_file_path(file_name=file_name, dir=dir)
    file = open_write_file(file_name=file_name, dir=dir, force=True)

    # Write the Python "grace to PNG/EPS/SVG..." conversion script.
    script_grace2images(file=file)

    # Close the batch script, then make it executable (expanding any ~ characters).
    file.close()
    chmod(file_path, S_IRWXU|S_IRGRP|S_IROTH)
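
The chmod() call above grants the owner read/write/execute and everyone else read-only access. Spelled out with the stat constants (the combination equals mode 0o744):

from os import chmod
from stat import S_IRWXU, S_IRGRP, S_IROTH

def make_executable(path):
    # S_IRWXU (0o700) | S_IRGRP (0o040) | S_IROTH (0o004) == 0o744.
    chmod(path, S_IRWXU | S_IRGRP | S_IROTH)
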
Code example #13
File: script.py Project: edward-dauvergne/relax
def script(file=None, dir=None):
    """Function for executing a script file.

    @keyword file:  The name of the script to execute.
    @type file:     str
    @keyword dir:   The name of the directory in which the script is located.
    @type dir:      str or None
    """

    # File argument.
    if file == None:
        raise RelaxNoneError('file')
    elif not isinstance(file, str):
        raise RelaxStrError('file', file)

    # The full path.
    file_path = get_file_path(file, dir)

    # Test if the script file exists.
    if not access(file_path, F_OK):
        raise RelaxError("The script file '%s' does not exist." % file_path)

    # Turn on the function intro flag.
    orig_intro_state = status.uf_intro
    status.uf_intro = True

    # Load the interpreter.
    interpreter = prompt.interpreter.Interpreter(show_script=False, raise_relax_error=True)
    interpreter.populate_self()
    interpreter.on(verbose=False)

    # Execute the script.
    prompt.interpreter.run_script(local=interpreter._locals, script_file=file_path)

    # Return the function intro flag to the original value.
    status.uf_intro = orig_intro_state

    # Make a notification of pipe_alteration, so the GUI will update.
    status.observers.pipe_alteration.notify()
Code example #14
def read(file=None, dir=None, version=None, sample_conditions=None):
    """Read the contents of a BMRB NMR-STAR formatted file.

    @keyword file:              The name of the BMRB STAR formatted file.
    @type file:                 str
    @keyword dir:               The directory where the file is located.
    @type dir:                  None or str
    @keyword version:           The BMRB version to force the reading.
    @type version:              None or str
    @keyword sample_conditions: The sample condition label to read.  Only one sample condition can be read per data pipe.
    @type sample_conditions:    None or str
    """

    # Test if bmrblib is installed.
    if not dep_check.bmrblib_module:
        raise RelaxNoModuleInstallError('BMRB library', 'bmrblib')

    # Test if the current data pipe exists.
    pipe_name = cdp_name()
    if not pipe_name:
        raise RelaxNoPipeError

    # Make sure that the data pipe is empty.
    if not ds[pipe_name].is_empty():
        raise RelaxError("The current data pipe is not empty.")

    # Get the full file path.
    file_path = get_file_path(file_name=file, dir=dir)

    # Fail if the file does not exist.
    if not access(file_path, F_OK):
        raise RelaxFileError(file_path)

    # Read the results.
    api = return_api(pipe_name=pipe_name)
    api.bmrb_read(file_path,
                  version=version,
                  sample_conditions=sample_conditions)
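
dep_check.bmrblib_module is presumably a flag set by a guarded import at start-up; the convention itself is only a few lines. A sketch of the pattern, not relax's actual dep_check module:

# Record availability once at import time, then test the flag before use.
try:
    import bmrblib
    bmrblib_module = True
except ImportError:
    bmrblib_module = False

def require_bmrblib():
    if not bmrblib_module:
        raise ImportError("The BMRB library 'bmrblib' is not installed.")
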
Code example #15
File: bmrb.py Project: belisario21/relax_trunk
def read(file=None, dir=None, version=None, sample_conditions=None):
    """Read the contents of a BMRB NMR-STAR formatted file.

    @keyword file:              The name of the BMRB STAR formatted file.
    @type file:                 str
    @keyword dir:               The directory where the file is located.
    @type dir:                  None or str
    @keyword version:           The BMRB version to force the reading.
    @type version:              None or str
    @keyword sample_conditions: The sample condition label to read.  Only one sample condition can be read per data pipe.
    @type sample_conditions:    None or str
    """

    # Test if bmrblib is installed.
    if not dep_check.bmrblib_module:
        raise RelaxNoModuleInstallError('BMRB library', 'bmrblib')

    # Test if the current data pipe exists.
    pipe_name = cdp_name()
    if not pipe_name:
        raise RelaxNoPipeError

    # Make sure that the data pipe is empty.
    if not ds[pipe_name].is_empty():
        raise RelaxError("The current data pipe is not empty.")

    # Get the full file path.
    file_path = get_file_path(file_name=file, dir=dir)

    # Fail if the file does not exist.
    if not access(file_path, F_OK):
        raise RelaxFileError(file_path)

    # Specific results reading function.
    read_function = specific_analyses.setup.get_specific_fn('bmrb_read', ds[pipe_name].pipe_type)

    # Read the results.
    read_function(file_path, version=version, sample_conditions=sample_conditions)
Code example #16
File: main.py Project: belisario21/relax_trunk
def write_pdb(file=None, dir=None, model_num=None, compress_type=0, force=False):
    """The PDB writing function.

    @keyword file:          The name of the PDB file to write.  This can also be a file instance.
    @type file:             str or file instance
    @keyword dir:           The directory where the PDB file will be placed.  If set to None, then the file will be placed in the current directory.
    @type dir:              str or None
    @keyword model_num:     The model to place into the PDB file.  If not supplied, then all models will be placed into the file.
    @type model_num:        None or int
    @keyword compress_type: The compression type.  The integer values correspond to the compression type: 0, no compression; 1, Bzip2 compression; 2, Gzip compression.
    @type compress_type:    int
    @keyword force:         The force flag which if True will cause the file to be overwritten.
    @type force:            bool
    """

    # Test if the current data pipe exists.
    pipes.test()

    # Check if the structural object exists.
    if not hasattr(cdp, 'structure'):
        raise RelaxError("No structural data is present in the current data pipe.")

    # Path handling.
    if isinstance(file, str):
        # The file path.
        file = get_file_path(file, dir)

        # Add '.pdb' to the end of the file path if it isn't there yet.
        if not search(".pdb$", file):
            file = file + '.pdb'

    # Open the file for writing.
    file = open_write_file(file, compress_type=compress_type, force=force)

    # Write the structures.
    cdp.structure.write_pdb(file, model_num=model_num)
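
A note on search(".pdb$", file): the unescaped dot matches any character, so a name ending in 'xpdb' would also pass. str.endswith() is both stricter and simpler:

def ensure_suffix(path, suffix='.pdb'):
    # Append the suffix only when it is genuinely missing.
    if not path.endswith(suffix):
        path += suffix
    return path

assert ensure_suffix('model') == 'model.pdb'
assert ensure_suffix('model.pdb') == 'model.pdb'
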
Code example #17
File: relax_disp.py Project: tlinnet/relax
    def optimise(self, model=None, model_path=None):
        """Optimise the model, taking model nesting into account.

        @keyword model:         The model to be optimised.
        @type model:            str
        @keyword model_path:    The folder name for the model, where possible spaces have been replaced with underscores.
        @type model_path:       str
        """

        # Printout.
        section(file=sys.stdout, text="Optimisation", prespace=2)

        # Deselect insignificant spins.
        if model not in [MODEL_R2EFF, MODEL_NOREX]:
            self.interpreter.relax_disp.insignificance(
                level=self.insignificance)

        # Speed up the grid search by using the minimum R2eff value.
        if self.set_grid_r20 and model != MODEL_R2EFF:
            self.interpreter.relax_disp.r20_from_min_r2eff(force=True)

        # Use pre-run results as the optimisation starting point.
        # Test if file exists.
        if self.pre_run_dir:
            path = self.pre_run_dir + sep + model_path
            # File path.
            file_path = get_file_path('results', path)

            # Test if the file exists and determine the compression type.
            try:
                compress_type, file_path = determine_compression(file_path)
                res_file_exists = True

            except RelaxFileError:
                res_file_exists = False

        if self.pre_run_dir and res_file_exists:
            self.pre_run_parameters(model=model, model_path=model_path)

        # Otherwise use the normal nesting check and grid search if not nested.
        else:
            # Nested model simplification.
            nested = self.nesting(model=model)

            # Otherwise use a grid search of default values to start optimisation with.
            if not nested:
                # Grid search.
                if self.grid_inc:
                    self.interpreter.minimise.grid_search(inc=self.grid_inc)

                # Default values.
                else:
                    # The standard parameters.
                    for param in MODEL_PARAMS[model]:
                        self.interpreter.value.set(param=param, index=None)

                    # The optional R1 parameter.
                    if is_r1_optimised(model=model):
                        self.interpreter.value.set(param='r1', index=None)

        # 'R2eff' model minimisation flags.
        do_minimise = False
        if model == MODEL_R2EFF:
            # The constraints flag.
            constraints = False

            # The minimisation algorithm to use.
            # Both the Jacobian and Hessian matrices have been specified for exponential curve-fitting, allowing the much faster algorithms to be used.
            min_algor = 'Newton'

            # Check if all spins contain 'r2eff' and its associated error.
            has_r2eff = False

            # Loop over all spins.
            for cur_spin, spin_id in spin_loop(return_id=True,
                                               skip_desel=True):
                # Check 'r2eff'
                if hasattr(cur_spin, 'r2eff') and hasattr(
                        cur_spin, 'r2eff_err'):
                    has_r2eff = True
                else:
                    has_r2eff = False
                    break

            # Skip optimisation if 'r2eff' and 'r2eff_err' are present and the flag forcing optimisation is not set.
            if has_r2eff and not self.optimise_r2eff:
                pass

            # Do optimisation if 'r2eff' and 'r2eff_err' are present and the flag forcing optimisation is set.
            elif has_r2eff and self.optimise_r2eff:
                do_minimise = True

            # Optimise if no R2eff value or error is present.
            elif not has_r2eff:
                do_minimise = True

        # Dispersion model minimisation flags.
        else:
            do_minimise = True
            constraints = True
            # The minimisation algorithm to use.  If the Jacobian and Hessian matrices have not been specified for fitting, 'simplex' should be used.
            min_algor = 'simplex'

        # Do the minimisation.
        if do_minimise:
            self.interpreter.minimise.execute(min_algor=min_algor,
                                              func_tol=self.opt_func_tol,
                                              max_iter=self.opt_max_iterations,
                                              constraints=constraints)

        # Model elimination.
        if self.eliminate:
            self.interpreter.eliminate()

        # Monte Carlo simulations.
        do_monte_carlo = False
        if model == MODEL_R2EFF:
            # The constraints flag.
            constraints = False

            # Both the Jacobian and Hessian matrices have been specified for exponential curve-fitting, allowing the much faster algorithms to be used.
            min_algor = 'Newton'

            # Skip optimisation if 'r2eff' and 'r2eff_err' are present and the flag forcing optimisation is not set.
            if has_r2eff and not self.optimise_r2eff:
                pass

            # Do optimisation if 'r2eff' and 'r2eff_err' are present and the flag forcing optimisation is set.
            elif has_r2eff and self.optimise_r2eff:
                do_monte_carlo = True

            # Optimise if no R2eff value or error is present.
            elif not has_r2eff:
                do_monte_carlo = True

        elif self.mc_sim_all_models or len(self.models) < 2:
            do_monte_carlo = True
            # The constraints flag.
            constraints = True
            # The minimisation algorithm to use.  If the Jacobian and Hessian matrices have not been specified for fitting, 'simplex' should be used.
            min_algor = 'simplex'

        # Error estimation by Monte Carlo simulations.
        if do_monte_carlo:
            # Set the number of Monte-Carlo simulations.
            monte_carlo_sim = self.mc_sim_num

            # If the number for exponential curve fitting has been set.
            if model == MODEL_R2EFF and self.exp_mc_sim_num != None:
                monte_carlo_sim = self.exp_mc_sim_num

            # When set to minus 1, the errors will instead be estimated from the covariance matrix.
            # This is HIGHLY likely to be wrong, but can be used in an initial test phase.
            if model == MODEL_R2EFF and self.exp_mc_sim_num == -1:
                # Print
                subsection(file=sys.stdout,
                           text="Estimating errors from Covariance matrix",
                           prespace=1)

                # Raise warning.
                text = 'Estimating errors from the Covariance matrix is highly likely to be "quite" wrong.  Use only with extreme care, and for initial rapid testing of your data.'
                warn(RelaxWarning(text))

                # Estimate errors
                self.interpreter.relax_disp.r2eff_err_estimate()
            else:
                self.interpreter.monte_carlo.setup(number=monte_carlo_sim)
                self.interpreter.monte_carlo.create_data()
                self.interpreter.monte_carlo.initial_values()
                self.interpreter.minimise.execute(
                    min_algor=min_algor,
                    func_tol=self.opt_func_tol,
                    max_iter=self.opt_max_iterations,
                    constraints=constraints)
                if self.eliminate:
                    self.interpreter.eliminate()
                self.interpreter.monte_carlo.error_analysis()
Code example #18
File: geometric.py Project: tlinnet/relax
def create_vector_dist(length=None,
                       symmetry=True,
                       file=None,
                       dir=None,
                       force=False):
    """Create a PDB representation of the vector distribution.

    @keyword length:    The length to set the vectors to in the PDB file.
    @type length:       float
    @keyword symmetry:  The symmetry flag which if set will create a second PDB chain 'B' which is the same as chain 'A' but with the vectors reversed.
    @type symmetry:     bool
    @keyword file:      The name of the PDB file to create.
    @type file:         str
    @keyword dir:       The name of the directory to place the PDB file into.
    @type dir:          str
    @keyword force:     Flag which if set will overwrite any pre-existing file.
    @type force:        bool
    """

    # Test if the current pipe exists.
    check_pipe()

    # Test if a structure has been loaded.
    if not hasattr(cdp, 'structure') or not cdp.structure.num_models() > 0:
        raise RelaxNoPdbError

    # Test if sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Test if unit vectors exist.
    vectors = False
    for interatom in interatomic_loop():
        if hasattr(interatom, 'vector'):
            vectors = True
            break
    if not vectors:
        raise RelaxNoVectorsError

    # Initialise.
    #############

    # Create the structural object.
    structure = Internal()

    # Add a structure.
    structure.add_molecule(name='vector_dist')

    # Alias the single molecule from the single model.
    mol = structure.structural_data[0].mol[0]

    # Initialise the residue and atom numbers.
    res_num = 1
    atom_num = 1

    # Centre of mass.
    #################

    # Calculate the centre of mass.
    R = pipe_centre_of_mass()

    # Increment the residue number.
    res_num = res_num + 1

    # The vectors.
    ##############

    # Loop over the interatomic data containers.
    for interatom in interatomic_loop():
        # Get the spins.
        spin1 = return_spin(spin_hash=interatom._spin_hash1)
        spin2 = return_spin(spin_hash=interatom._spin_hash2)

        # Skip deselected spin systems.
        if not spin1.select or not spin2.select:
            continue

        # Skip containers missing vectors.
        if not hasattr(interatom, 'vector'):
            continue

        # Scale the vector.
        vector = interatom.vector * length * 1e10

        # Add the first spin as the central atom.
        mol.atom_add(pdb_record='ATOM',
                     atom_num=atom_num,
                     atom_name=spin1.name,
                     res_name=spin1._res_name,
                     chain_id='A',
                     res_num=spin1._res_num,
                     pos=R,
                     segment_id=None,
                     element=spin1.element)

        # Add the second spin as the end atom.
        mol.atom_add(pdb_record='ATOM',
                     atom_num=atom_num + 1,
                     atom_name=spin2.name,
                     res_name=spin2._res_name,
                     chain_id='A',
                     res_num=spin2._res_num,
                     pos=R + vector,
                     segment_id=None,
                     element=spin2.element)

        # Connect the two atoms.
        mol.atom_connect(index1=atom_num - 1, index2=atom_num)

        # Increment the atom number.
        atom_num = atom_num + 2

    # Symmetry chain.
    if symmetry:
        # Loop over the interatomic data containers.
        for interatom in interatomic_loop():
            # Get the spins.
            spin1 = return_spin(spin_hash=interatom._spin_hash1)
            spin2 = return_spin(spin_hash=interatom._spin_hash2)

            # Skip deselected spin systems.
            if not spin1.select or not spin2.select:
                continue

            # Skip containers missing vectors.
            if not hasattr(interatom, 'vector'):
                continue

            # Scale the vector.
            vector = interatom.vector * length * 1e10

            # Add the first spin as the central atom.
            mol.atom_add(pdb_record='ATOM',
                         atom_num=atom_num,
                         atom_name=spin1.name,
                         res_name=spin1._res_name,
                         chain_id='B',
                         res_num=spin1._res_num,
                         pos=R,
                         segment_id=None,
                         element=spin1.element)

            # Add the second spin as the end atom.
            mol.atom_add(pdb_record='ATOM',
                         atom_num=atom_num + 1,
                         atom_name=spin2.name,
                         res_name=spin2._res_name,
                         chain_id='B',
                         res_num=spin2._res_num,
                         pos=R - vector,
                         segment_id=None,
                         element=spin2.element)

            # Connect the two atoms.
            mol.atom_connect(index1=atom_num - 1, index2=atom_num)

            # Increment the atom number.
            atom_num = atom_num + 2

    # Create the PDB file.
    ######################

    # Print out.
    print("\nGenerating the PDB file.")

    # Open the PDB file for writing.
    tensor_pdb_file = open_write_file(file, dir, force=force)

    # Write the data.
    structure.write_pdb(tensor_pdb_file)

    # Close the file.
    tensor_pdb_file.close()

    # Add the file to the results file list.
    if not hasattr(cdp, 'result_files'):
        cdp.result_files = []
    if dir == None:
        dir = getcwd()
    cdp.result_files.append([
        'vector_dist_pdb', 'Vector distribution PDB',
        get_file_path(file, dir)
    ])
    status.observers.result_file.notify()
Code example #19
def write(file=None, dir=None, version='3.1', force=False):
    """Create a BMRB NMR-STAR formatted file.

    @keyword file:      The name of the file to create or a file object.
    @type file:         str or file object
    @keyword dir:       The optional directory to place the file into.  If set to 'pipe_name', then it will be placed in a directory with the same name as the current data pipe.
    @type dir:          str or None
    @keyword version:   The NMR-STAR version to create.  This can be either '2.1', '3.0', or '3.1'.
    @type version:      str
    @keyword force:     A flag which if True will allow a currently existing file to be overwritten.
    @type force:        bool
    """

    # Test if bmrblib is installed.
    if not dep_check.bmrblib_module:
        raise RelaxNoModuleInstallError('BMRB library', 'bmrblib')

    # Test if the current data pipe exists.
    pipe_name = cdp_name()
    if not pipe_name:
        raise RelaxNoPipeError

    # Check the file name.
    if file == None:
        raise RelaxError("The file name must be specified.")

    # A file object.
    if isinstance(file, str):
        # The special data pipe name directory.
        if dir == 'pipe_name':
            dir = pipe_name

        # Get the full file path.
        file = get_file_path(file, dir)

        # Fail if the file already exists and the force flag is False.
        if access(file, F_OK) and not force:
            raise RelaxFileOverwriteError(file, 'force flag')

        # Print out.
        print("Opening the file '%s' for writing." % file)

        # Create the directories.
        mkdir_nofail(dir, verbosity=0)

    # Get the info box.
    info = Info_box()

    # Add the relax citations.
    for id, key in zip(['relax_ref1', 'relax_ref2'],
                       ['dAuvergneGooley08a', 'dAuvergneGooley08b']):
        # Alias the bib entry.
        bib = info.bib[key]

        # Add.
        exp_info.citation(cite_id=id,
                          authors=bib.author2,
                          doi=bib.doi,
                          pubmed_id=bib.pubmed_id,
                          full_citation=bib.cite_short(doi=False, url=False),
                          title=bib.title,
                          status=bib.status,
                          type=bib.type,
                          journal_abbrev=bib.journal,
                          journal_full=bib.journal_full,
                          volume=bib.volume,
                          issue=bib.number,
                          page_first=bib.page_first,
                          page_last=bib.page_last,
                          year=bib.year)

    # Add the relax software package.
    exp_info.software(name=exp_info.SOFTWARE['relax'].name,
                      version=version_full(),
                      vendor_name=exp_info.SOFTWARE['relax'].authors,
                      url=exp_info.SOFTWARE['relax'].url,
                      cite_ids=['relax_ref1', 'relax_ref2'],
                      tasks=exp_info.SOFTWARE['relax'].tasks)

    # Execute the specific BMRB writing code.
    api = return_api(pipe_name=pipe_name)
    api.bmrb_write(file, version=version)

    # Add the file to the results file list.
    if isinstance(file, str):
        add_result_file(type='text', label='BMRB', file=file)
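
The force-flag overwrite guard (access() plus F_OK, then raise unless force is set) is another recurring idiom in these examples. A stand-alone sketch with a stdlib exception standing in for RelaxFileOverwriteError:

from os import access, F_OK

def check_overwrite(file_path, force=False):
    # Refuse to clobber an existing file unless the caller passes force=True.
    if access(file_path, F_OK) and not force:
        raise IOError("The file '%s' already exists; set the force flag to overwrite it." % file_path)
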
Code example #20
File: bmrb.py Project: belisario21/relax_trunk
def write(file=None, dir=None, version='3.1', force=False):
    """Create a BMRB NMR-STAR formatted file.

    @keyword file:      The name of the file to create or a file object.
    @type file:         str or file object
    @keyword dir:       The optional directory to place the file into.  If set to 'pipe_name', then it will be placed in a directory with the same name as the current data pipe.
    @type dir:          str or None
    @keyword version:   The NMR-STAR version to create.  This can be either '2.1', '3.0', or '3.1'.
    @type version:      str
    @keyword force:     A flag which if True will allow a currently existing file to be overwritten.
    @type force:        bool
    """

    # Test if bmrblib is installed.
    if not dep_check.bmrblib_module:
        raise RelaxNoModuleInstallError('BMRB library', 'bmrblib')

    # Test if the current data pipe exists.
    pipe_name = cdp_name()
    if not pipe_name:
        raise RelaxNoPipeError

    # Check the file name.
    if file == None:
        raise RelaxError("The file name must be specified.")

    # A file object.
    if isinstance(file, str):
        # The special data pipe name directory.
        if dir == 'pipe_name':
            dir = pipe_name

        # Get the full file path.
        file = get_file_path(file, dir)

        # Fail if the file already exists and the force flag is False.
        if access(file, F_OK) and not force:
            raise RelaxFileOverwriteError(file, 'force flag')

        # Print out.
        print("Opening the file '%s' for writing." % file)

        # Create the directories.
        mkdir_nofail(dir, verbosity=0)

    # Specific results writing function.
    write_function = specific_analyses.setup.get_specific_fn('bmrb_write', ds[pipe_name].pipe_type)

    # Get the info box.
    info = Info_box()

    # Add the relax citations.
    for id, key in zip(['relax_ref1', 'relax_ref2'], ['dAuvergneGooley08a', 'dAuvergneGooley08b']):
        # Alias the bib entry.
        bib = info.bib[key]

        # Add.
        exp_info.citation(cite_id=id, authors=bib.author2, doi=bib.doi, pubmed_id=bib.pubmed_id, full_citation=bib.cite_short(doi=False, url=False), title=bib.title, status=bib.status, type=bib.type, journal_abbrev=bib.journal, journal_full=bib.journal_full, volume=bib.volume, issue=bib.number, page_first=bib.page_first, page_last=bib.page_last, year=bib.year)

    # Add the relax software package.
    exp_info.software(name=exp_info.SOFTWARE['relax'].name, version=version_full(), vendor_name=exp_info.SOFTWARE['relax'].authors, url=exp_info.SOFTWARE['relax'].url, cite_ids=['relax_ref1', 'relax_ref2'], tasks=exp_info.SOFTWARE['relax'].tasks)

    # Execute the specific BMRB writing code.
    write_function(file, version=version)

    # Add the file to the results file list.
    if isinstance(file, str):
        add_result_file(type='text', label='BMRB', file=file)
Code example #21
def write_xy(format='grace', x_data_type='res_num', y_data_type=None, spin_id=None, plot_data='value', norm_type='first', file=None, dir=None, force=False, norm=True):
    """Writing data to a file.

    @keyword format:        The specific backend to use.  The only currently supported backend is 'grace'.
    @type format:           str
    @keyword x_data_type:   The category of the X-axis data.
    @type x_data_type:      str
    @keyword y_data_type:   The category of the Y-axis data.
    @type y_data_type:      str
    @keyword spin_id:       The spin identification string.
    @type spin_id:          str
    @keyword plot_data:     The type of the plotted data, one of 'value', 'error', or 'sim'.
    @type plot_data:        str
    @keyword norm_type:     The point to normalise to 1.  This can be 'first' or 'last'.
    @type norm_type:        str
    @keyword file:          The name of the Grace file to create.
    @type file:             str
    @keyword dir:           The optional directory to place the file into.
    @type dir:              str
    @keyword force:         Boolean argument which if True causes the file to be overwritten if it already exists.
    @type force:            bool
    @keyword norm:          The normalisation flag which if set to True will cause all graphs to be normalised to a starting value of 1.
    @type norm:             bool
    """

    # Checks.
    check_pipe()
    check_mol_res_spin_data()

    # Test if the plot_data argument is one of 'value', 'error', or 'sim'.
    if plot_data not in ['value', 'error', 'sim']:
        raise RelaxError("The plot data argument " + repr(plot_data) + " must be set to either 'value', 'error', 'sim'.")

    # Test if the simulations exist.
    if plot_data == 'sim' and not hasattr(cdp, 'sim_number'):
        raise RelaxNoSimError

    # Open the file for writing.
    file_path = get_file_path(file, dir)
    file = open_write_file(file, dir, force)

    # Get the data.
    data, set_names, graph_type = assemble_data(spin_id, x_data_name=x_data_type, y_data_name=y_data_type, plot_data=plot_data)

    # Convert the graph type.
    if graph_type == 'X,Y':
        graph_type = 'xy'
    elif graph_type == 'X,Y,dX':
        graph_type = 'xydx'
    elif graph_type == 'X,Y,dY':
        graph_type = 'xydy'
    elif graph_type == 'X,Y,dX,dY':
        graph_type = 'xydxdy'

    # No data, so close the empty file and exit.
    if not len(data) or not len(data[0]) or not len(data[0][0]):
        warn(RelaxWarning("No data could be found, creating an empty file."))
        file.close()
        return

    # Get the axis information.
    data_type = [x_data_type, y_data_type]
    seq_type, axis_labels = axis_setup(data_type=data_type, norm=norm)

    # Write the header.
    write_xy_header(format=format, file=file, data_type=data_type, seq_type=seq_type, sets=[len(data[0])], set_names=[set_names], axis_labels=[axis_labels], norm=[norm])

    # Write the data.
    write_xy_data(format=format, data=data, file=file, graph_type=graph_type, norm_type=norm_type, norm=[norm])

    # Close the file.
    file.close()

    # Add the file to the results file list.
    label = None
    if format == 'grace':
        label = 'Grace'
    add_result_file(type=format, label=label, file=file_path)
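
The graph-type conversion above is a fixed mapping, which a dict expresses directly; .get(x, x) leaves unrecognised types unchanged, matching the behaviour of the if/elif chain:

GRAPH_TYPES = {'X,Y': 'xy', 'X,Y,dX': 'xydx', 'X,Y,dY': 'xydy', 'X,Y,dX,dY': 'xydxdy'}

graph_type = 'X,Y,dY'
graph_type = GRAPH_TYPES.get(graph_type, graph_type)  # -> 'xydy'
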
Code example #22
File: geometric.py Project: tlinnet/relax
def create_rotor_pdb(file=None,
                     dir=None,
                     rotor_angle=None,
                     axis=None,
                     axis_pt=True,
                     centre=None,
                     span=2e-9,
                     blade_length=5e-10,
                     force=False,
                     staggered=False):
    """Create a PDB representation of a rotor motional model.

    @keyword file:          The name of the PDB file to create.
    @type file:             str
    @keyword dir:           The name of the directory to place the PDB file into.
    @type dir:              str
    @keyword rotor_angle:   The angle of the rotor motion in degrees.
    @type rotor_angle:      float
    @keyword axis:          The vector defining the rotor axis.
    @type axis:             numpy rank-1, 3D array
    @keyword axis_pt:       A point lying anywhere on the rotor axis.  This is used to define the position of the axis in 3D space.
    @type axis_pt:          numpy rank-1, 3D array
    @keyword centre:        The central point of the representation.  If this point is not on the rotor axis, then the closest point on the axis will be used for the centre.
    @type centre:           numpy rank-1, 3D array
    @keyword span:          The distance from the central point to the rotor blades (meters).
    @type span:             float
    @keyword blade_length:  The length of the representative rotor blades.
    @type blade_length:     float
    @keyword force:         A flag which if set will overwrite any pre-existing file.
    @type force:            bool
    @keyword staggered:     A flag which if True will cause the rotor blades to be staggered.  This is used to avoid blade overlap.
    @type staggered:        bool
    """

    # Test if the current pipe exists.
    check_pipe()

    # Convert the angle to radians.
    rotor_angle = rotor_angle / 360.0 * 2.0 * pi

    # Create the structural object.
    structure = Internal()

    # Generate the rotor object.
    rotor(structure=structure,
          rotor_angle=rotor_angle,
          axis=axis,
          axis_pt=axis_pt,
          centre=centre,
          span=span,
          blade_length=blade_length,
          staggered=staggered)

    # Print out.
    print("\nGenerating the PDB file.")

    # Open the PDB file for writing.
    tensor_pdb_file = open_write_file(file, dir, force=force)

    # Write the data.
    structure.write_pdb(tensor_pdb_file)

    # Close the file.
    tensor_pdb_file.close()

    # Add the file to the results file list.
    if not hasattr(cdp, 'result_files'):
        cdp.result_files = []
    if dir is None:
        dir = getcwd()
    cdp.result_files.append(
        ['rotor_pdb', 'Rotor PDB',
         get_file_path(file, dir)])
    status.observers.result_file.notify()
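
A hedged usage sketch for create_rotor_pdb() above: the argument values are illustrative assumptions, not relax defaults, and a current data pipe must already exist for check_pipe() to pass.

# Illustrative call only; all values below are assumptions.
from numpy import array, float64

create_rotor_pdb(file='rotor.pdb',                     # The PDB file to create.
                 rotor_angle=60.0,                     # The rotor motion angle in degrees.
                 axis=array([0, 0, 1], float64),       # The rotor axis vector.
                 axis_pt=array([0, 0, 0], float64),    # A point on the axis.
                 centre=array([0, 0, 0], float64),     # The centre of the representation.
                 force=True)                           # Overwrite any pre-existing file.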
Code example #27
File: geometric.py Project: belisario21/relax_trunk
def create_vector_dist(length=None, symmetry=True, file=None, dir=None, force=False):
    """Create a PDB representation of the vector distribution.

    @keyword length:    The length to set the vectors to in the PDB file.
    @type length:       float
    @keyword symmetry:  The symmetry flag which if set will create a second PDB chain 'B' which is the same as chain 'A' but with the vectors reversed.
    @type symmetry:     bool
    @keyword file:      The name of the PDB file to create.
    @type file:         str
    @keyword dir:       The name of the directory to place the PDB file into.
    @type dir:          str
    @keyword force:     Flag which if set will overwrite any pre-existing file.
    @type force:        bool
    """

    # Test if the current pipe exists.
    pipes.test()

    # Test if a structure has been loaded.
    if not hasattr(cdp, 'structure') or not cdp.structure.num_models() > 0:
        raise RelaxNoPdbError

    # Test if sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Test if unit vectors exist.
    vectors = False
    for interatom in interatomic_loop():
        if hasattr(interatom, 'vector'):
            vectors = True
            break
    if not vectors:
        raise RelaxNoVectorsError


    # Initialise.
    #############

    # Create the structural object.
    structure = Internal()

    # Add a structure.
    structure.add_molecule(name='vector_dist')

    # Alias the single molecule from the single model.
    mol = structure.structural_data[0].mol[0]

    # Initialise the residue and atom numbers.
    res_num = 1
    atom_num = 1


    # Centre of mass.
    #################

    # Calculate the centre of mass.
    R = pipe_centre_of_mass()

    # Increment the residue number.
    res_num = res_num + 1


    # The vectors.
    ##############

    # Loop over the interatomic data containers.
    for interatom in interatomic_loop():
        # Get the spins.
        spin1 = return_spin(interatom.spin_id1)
        spin2 = return_spin(interatom.spin_id2)

        # Skip deselected spin systems.
        if not spin1.select or not spin2.select:
            continue

        # Skip containers missing vectors.
        if not hasattr(interatom, 'vector'):
            continue

        # Scale the vector.
        vector = interatom.vector * length * 1e10

        # Add the first spin as the central atom.
        mol.atom_add(pdb_record='ATOM', atom_num=atom_num, atom_name=spin1.name, res_name=spin1._res_name, chain_id='A', res_num=spin1._res_num, pos=R, segment_id=None, element=spin1.element)

        # Add the second spin as the end atom.
        mol.atom_add(pdb_record='ATOM', atom_num=atom_num+1, atom_name=spin2.name, res_name=spin2._res_name, chain_id='A', res_num=spin2._res_num, pos=R+vector, segment_id=None, element=spin2.element)

        # Connect the two atoms.
        mol.atom_connect(index1=atom_num-1, index2=atom_num)

        # Increment the atom number.
        atom_num = atom_num + 2

    # Symmetry chain.
    if symmetry:
        # Loop over the interatomic data containers.
        for interatom in interatomic_loop():
            # Get the spins.
            spin1 = return_spin(interatom.spin_id1)
            spin2 = return_spin(interatom.spin_id2)

            # Skip deselected spin systems.
            if not spin1.select or not spin2.select:
                continue

            # Skip containers missing vectors.
            if not hasattr(interatom, 'vector'):
                continue

            # Scale the vector.
            vector = interatom.vector * length * 1e10

            # Add the first spin as the central atom.
            mol.atom_add(pdb_record='ATOM', atom_num=atom_num, atom_name=spin1.name, res_name=spin1._res_name, chain_id='B', res_num=spin1._res_num, pos=R, segment_id=None, element=spin1.element)

            # Add the second spin as the end atom.
            mol.atom_add(pdb_record='ATOM', atom_num=atom_num+1, atom_name=spin2.name, res_name=spin2._res_name, chain_id='B', res_num=spin2._res_num, pos=R-vector, segment_id=None, element=spin2.element)

            # Connect the two atoms.
            mol.atom_connect(index1=atom_num-1, index2=atom_num)

            # Increment the atom number.
            atom_num = atom_num + 2


    # Create the PDB file.
    ######################

    # Print out.
    print("\nGenerating the PDB file.")

    # Open the PDB file for writing.
    tensor_pdb_file = open_write_file(file, dir, force=force)

    # Write the data.
    structure.write_pdb(tensor_pdb_file)

    # Close the file.
    tensor_pdb_file.close()

    # Add the file to the results file list.
    if not hasattr(cdp, 'result_files'):
        cdp.result_files = []
    if dir is None:
        dir = getcwd()
    cdp.result_files.append(['vector_dist_pdb', 'Vector distribution PDB', get_file_path(file, dir)])
    status.observers.result_file.notify()
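
A minimal sketch of calling create_vector_dist(), assuming a data pipe with a structure, sequence data, and interatomic vectors already loaded; the file name is a placeholder.  Note that length is given in metres, with the 1e10 factor above converting it to Angstroms for the PDB coordinates.

# Illustrative call only.
create_vector_dist(length=2e-9,           # 20 Angstrom vectors in the PDB frame.
                   symmetry=True,         # Also create the reversed chain 'B'.
                   file='vect_dist.pdb',  # Assumed output file name.
                   force=True)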
Code example #28
File: main.py Project: belisario21/relax_trunk
def web_of_motion(file=None, dir=None, models=None, force=False):
    """Create a PDB representation of the motion between a set of models.

    This will create a PDB file containing the atoms of all models, with identical atoms linked using CONECT records.  This function only supports the internal structural object.

    @keyword file:          The name of the PDB file to write.
    @type file:             str
    @keyword dir:           The directory where the PDB file will be placed.  If set to None, then the file will be placed in the current directory.
    @type dir:              str or None
    @keyword models:        The optional list of models to restrict this to.
    @type models:           list of int or None
    @keyword force:         The force flag which if True will cause the file to be overwritten.
    @type force:            bool
    """

    # Test if the current data pipe exists.
    pipes.test()

    # Test if the structure exists.
    if not hasattr(cdp, 'structure') or not cdp.structure.num_models() or not cdp.structure.num_molecules():
        raise RelaxNoPdbError

    # Validate the models.
    cdp.structure.validate_models()

    # Initialise the structural object.
    web = Internal()

    # The model list.
    if models is None:
        models = []
        for k in range(len(cdp.structure.structural_data)):
            models.append(cdp.structure.structural_data[k].num)

    # Loop over the molecules.
    for i in range(len(cdp.structure.structural_data[0].mol)):
        # Alias the molecule of the first model.
        mol1 = cdp.structure.structural_data[0].mol[i]

        # Loop over the atoms.
        for j in range(len(mol1.atom_name)):
            # Loop over the models.
            for k in range(len(cdp.structure.structural_data)):
                # Skip the model.
                if cdp.structure.structural_data[k].num not in models:
                    continue

                # Alias.
                mol = cdp.structure.structural_data[k].mol[i]

                # Add the atom.
                web.add_atom(mol_name=mol1.mol_name, atom_name=mol.atom_name[j], res_name=mol.res_name[j], res_num=mol.res_num[j], pos=[mol.x[j], mol.y[j], mol.z[j]], element=mol.element[j], chain_id=mol.chain_id[j], segment_id=mol.seg_id[j], pdb_record=mol.pdb_record[j])

            # Loop over the models again, this time twice.
            for k in range(len(models)):
                for l in range(len(models)):
                    # Skip identical atoms.
                    if k == l:
                        continue

                    # The atom index.
                    index1 = j*len(models) + k
                    index2 = j*len(models) + l

                    # Connect to the previous atoms.
                    web.connect_atom(mol_name=mol1.mol_name, index1=index1, index2=index2)

    # Append the PDB extension if needed.
    if isinstance(file, str):
        # The file path.
        file = get_file_path(file, dir)

        # Add '.pdb' to the end of the file path if it isn't there yet.
        if not search(".pdb$", file):
            file += '.pdb'

    # Open the file for writing.
    file = open_write_file(file, force=force)

    # Write the structure.
    web.write_pdb(file)
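
The CONECT bookkeeping above relies on the atoms being added model by model for each atom index j, so that atom j of model k lands at index j*len(models) + k.  A standalone sketch of that layout, runnable without relax:

# Index arithmetic used by web_of_motion() when connecting atoms across models.
models = [1, 2, 3]
for j in range(2):                       # Two atoms per model.
    for k in range(len(models)):
        index = j * len(models) + k      # Atom j of model k.
        print("atom %i, model %i -> index %i" % (j, models[k], index))
# Atom 0 occupies indices 0-2 and atom 1 indices 3-5, so all copies of one
# atom are consecutive and can be cross-linked with CONECT records.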
Code example #29
def macro_write(data_type=None, style="classic", colour_start_name=None, colour_start_rgb=None, colour_end_name=None, colour_end_rgb=None, colour_list=None, file=None, dir=None, force=False):
    """Create a PyMOL macro file.

    @keyword data_type:         The data type to map to the structure.
    @type data_type:            str
    @keyword style:             The style of the macro.
    @type style:                str
    @keyword colour_start_name: The name of the starting colour of the linear gradient.
    @type colour_start_name:    str
    @keyword colour_start_rgb:  The RGB array starting colour of the linear gradient.
    @type colour_start_rgb:     RGB colour array (len 3 with vals from 0 to 1)
    @keyword colour_end_name:   The name of the ending colour of the linear gradient.
    @type colour_end_name:      str
    @keyword colour_end_rgb:    The RGB array ending colour of the linear gradient.
    @type colour_end_rgb:       RGB colour array (len 3 with vals from 0 to 1)
    @keyword colour_list:       The colour list to search for the colour names.  Can be either 'molmol' or 'x11'.
    @type colour_list:          str or None
    @keyword file:              The name of the macro file to create.
    @type file:                 str
    @keyword dir:               The name of the directory to place the macro file into.
    @type dir:                  str
    @keyword force:             Flag which if set to True will cause any pre-existing file to be overwritten.
    @type force:                bool
    """

    # Test if the current data pipe exists.
    pipes.test()

    # Test if sequence data exists.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Check the arguments.
    if colour_start_name is not None and colour_start_rgb is not None:
        raise RelaxError("The starting colour name and RGB colour array cannot both be supplied.")
    if colour_end_name is not None and colour_end_rgb is not None:
        raise RelaxError("The ending colour name and RGB colour array cannot both be supplied.")

    # Merge the colour args.
    if colour_start_name is not None:
        colour_start = colour_start_name
    else:
        colour_start = colour_start_rgb
    if colour_end_name is not None:
        colour_end = colour_end_name
    else:
        colour_end = colour_end_rgb

    # Create the macro.
    commands = create_macro(data_type=data_type, style=style, colour_start=colour_start, colour_end=colour_end, colour_list=colour_list)

    # File name.
    if file is None:
        file = data_type + '.pml'

    # Open the file for writing.
    file_path = get_file_path(file, dir)
    file = open_write_file(file, dir, force)

    # Loop over the commands and write them.
    for command in commands:
        file.write(command + "\n")

    # Close the file.
    file.close()

    # Add the file to the results file list.
    add_result_file(type='pymol', label='PyMOL', file=file_path)
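
A hedged usage sketch; the 's2' data type and the output directory are assumptions for illustration only.

# Illustrative call only; assumes spin data carrying an 's2' value is loaded.
macro_write(data_type='s2',     # The parameter to map onto the structure.
            style='classic',
            colour_list='x11',  # Resolve colour names via the X11 list.
            dir='pymol',        # The macro file defaults to the name 's2.pml'.
            force=True)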
Code example #30
File: gnuplot.py Project: tlinnet/relax
def correlation_matrix(matrix=None,
                       labels=None,
                       file=None,
                       dir=None,
                       force=False):
    """Gnuplot plotting function for representing correlation matrices.

    @keyword matrix:    The correlation matrix.  This must be a square matrix.
    @type matrix:       numpy rank-2 array.
    @keyword labels:    The labels for each element of the matrix.  The same label is assumed for each [i, i] pair in the matrix.
    @type labels:       list of str
    @keyword file:      The name of the file to create.
    @type file:         str
    @keyword dir:       The directory where the files will be placed.  If set to None, then the files will be placed in the current directory.
    @type dir:          str or None
    @keyword force:     Flag which if set to True will cause any pre-existing files to be overwritten.
    @type force:        bool
    """

    # The dimensions.
    n = len(matrix)

    # Generate the text file for loading into gnuplot.
    text.correlation_matrix(matrix=matrix,
                            labels=labels,
                            file=file,
                            dir=dir,
                            force=force)

    # The script file name with the extension swapped.
    file_name = swap_extension(file=file, ext='gnu')

    # Open the script file for writing.
    output = open_write_file(file_name, dir=dir, force=force)

    # Gnuplot script setup.
    output.write("#!/usr/bin/env gnuplot\n\n")

    # Set up the terminal type and make the plot square.
    output.write("# Set up the terminal type and make the plot square.\n")
    output.write(
        "set terminal postscript eps size 10,10 enhanced color font 'Helvetica,20' linewidth 0.1\n"
    )
    output.write("set size square\n")

    # The colour map.
    output.write("\n# Blue-red colour map.\n")
    output.write("set palette model RGB\n")
    output.write("set palette defined\n")

    # The labels.
    if labels is not None:
        output.write("\n# Labels.\n")
        for axis in ['x', 'y']:
            output.write("set %stics out " % axis)
            if axis == 'x':
                output.write("rotate ")
            output.write("font \",8\" (")
            for i in range(n):
                if i != 0:
                    output.write(", ")
                output.write("\"%s\" %s" % (format_enhanced(labels[i]), i))
            output.write(")\n")

    # Output to EPS.
    output.write("\n# Output to EPS.\n")
    output.write("set output \"%s.eps\"\n" % file_root(file))

    # Load and show the text data.
    output.write("\n# Load and show the text data\n")
    output.write("plot \"%s\" matrix with image\n" % file)

    # Close the file.
    output.close()

    # Make the script executable.
    chmod(get_file_path(file_name=file_name, dir=dir),
          S_IRWXU | S_IRGRP | S_IROTH)
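
A hedged usage sketch with a small synthetic matrix; the file name is an assumption.  Given file='corr.txt', the call writes the text data file plus an executable gnuplot script corr.gnu, which renders corr.eps via "plot ... matrix with image".

# Illustrative call only.
from numpy import corrcoef
from numpy.random import rand

matrix = corrcoef(rand(4, 50))       # A synthetic 4x4 correlation matrix.
correlation_matrix(matrix=matrix,
                   labels=['w', 'x', 'y', 'z'],
                   file='corr.txt',  # Data file; the script becomes corr.gnu.
                   force=True)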
Code example #31
File: relax_disp.py Project: pombredanne/relax
    def optimise(self, model=None, model_path=None):
        """Optimise the model, taking model nesting into account.

        @keyword model:         The model to be optimised.
        @type model:            str
        @keyword model_path:    The folder name for the model, where any spaces have been replaced with underscores.
        @type model_path:       str
        """

        # Printout. 
        section(file=sys.stdout, text="Optimisation", prespace=2)

        # Deselect insignificant spins.
        if model not in [MODEL_R2EFF, MODEL_NOREX]:
            self.interpreter.relax_disp.insignificance(level=self.insignificance)

        # Speed up the grid search by using the minimum R2eff value.
        if self.set_grid_r20 and model != MODEL_R2EFF:
            self.interpreter.relax_disp.r20_from_min_r2eff(force=True)

        # Use pre-run results as the optimisation starting point.
        # Test if file exists.
        if self.pre_run_dir:
            path = self.pre_run_dir + sep + model_path
            # File path.
            file_path = get_file_path('results', path)

            # Test if the file exists and determine the compression type.
            try:
                compress_type, file_path = determine_compression(file_path)
                res_file_exists = True

            except RelaxFileError:
                res_file_exists = False

        if self.pre_run_dir and res_file_exists:
            self.pre_run_parameters(model=model, model_path=model_path)

        # Otherwise use the normal nesting check and grid search if not nested.
        else:
            # Nested model simplification.
            nested = self.nesting(model=model)

            # Otherwise use a grid search of default values to start optimisation with.
            if not nested:
                # Grid search.
                if self.grid_inc:
                    self.interpreter.minimise.grid_search(inc=self.grid_inc)

                # Default values.
                else:
                    # The standard parameters.
                    for param in MODEL_PARAMS[model]:
                        self.interpreter.value.set(param=param, index=None)

                    # The optional R1 parameter.
                    if is_r1_optimised(model=model):
                        self.interpreter.value.set(param='r1', index=None)

        # 'R2eff' model minimisation flags.
        do_minimise = False
        if model == MODEL_R2EFF:
            # The constraints flag.
            constraints = False

            # The minimisation algorithm to use.
            # Both the Jacobian and Hessian matrices have been specified for exponential curve-fitting, allowing the much faster algorithms to be used.
            min_algor = 'Newton'

            # Check if all spins contain 'r2eff' and its associated error.
            has_r2eff = False

            # Loop over all spins.
            for cur_spin, spin_id in spin_loop(return_id=True, skip_desel=True):
                # Check 'r2eff'
                if hasattr(cur_spin, 'r2eff') and hasattr(cur_spin, 'r2eff_err'):
                    has_r2eff = True
                else:
                    has_r2eff = False
                    break

            # Skip optimisation if 'r2eff' and 'r2eff_err' are present and the flag forcing optimisation is not set.
            if has_r2eff and not self.optimise_r2eff:
                pass

            # Do the optimisation if 'r2eff' and 'r2eff_err' are present and the flag forcing optimisation is set.
            elif has_r2eff and self.optimise_r2eff:
                do_minimise = True

            # Optimise if no R2eff values and errors are present.
            elif not has_r2eff:
                do_minimise = True

        # Dispersion model minimisation flags.
        else:
            do_minimise = True
            constraints = True
            # The minimisation algorithm to use.  If the Jacobian and Hessian matrices have not been specified for fitting, 'simplex' should be used.
            min_algor = 'simplex'

        # Do the minimisation.
        if do_minimise:
            self.interpreter.minimise.execute(min_algor=min_algor, func_tol=self.opt_func_tol, max_iter=self.opt_max_iterations, constraints=constraints)

        # Model elimination.
        if self.eliminate:
            self.interpreter.eliminate()

        # Monte Carlo simulations.
        do_monte_carlo = False
        if model == MODEL_R2EFF:
            # The constraints flag.
            constraints = False

            # Both the Jacobian and Hessian matrices have been specified for exponential curve-fitting, allowing the much faster algorithms to be used.
            min_algor = 'Newton'

            # Skip the Monte Carlo simulations if 'r2eff' and 'r2eff_err' are present and the flag forcing optimisation is not set.
            if has_r2eff and not self.optimise_r2eff:
                pass

            # Do the simulations if 'r2eff' and 'r2eff_err' are present and the flag forcing optimisation is set.
            elif has_r2eff and self.optimise_r2eff:
                do_monte_carlo = True

            # Simulate if no R2eff values and errors are present.
            elif not has_r2eff:
                do_monte_carlo = True

        elif self.mc_sim_all_models or len(self.models) < 2:
            do_monte_carlo = True
            # The constraints flag.
            constraints = True
            # The minimisation algorithm to use.  If the Jacobian and Hessian matrices have not been specified for fitting, 'simplex' should be used.
            min_algor = 'simplex'

        # Error estimation by Monte Carlo simulations.
        if do_monte_carlo:
            # Set the number of Monte-Carlo simulations.
            monte_carlo_sim = self.mc_sim_num

            # If the number for exponential curve fitting has been set.
            if model == MODEL_R2EFF and self.exp_mc_sim_num is not None:
                monte_carlo_sim = self.exp_mc_sim_num

            # When set to minus 1, the errors will instead be estimated from the covariance matrix.
            # This is HIGHLY likely to be wrong, but can be used in an initial test phase.
            if model == MODEL_R2EFF and self.exp_mc_sim_num == -1:
                # Print
                subsection(file=sys.stdout, text="Estimating errors from Covariance matrix", prespace=1)

                # Raise warning.
                text = 'Estimating errors from the Covariance matrix is highly likely to be "quite" wrong.  Use only with extreme care, and for initial rapid testing of your data.'
                warn(RelaxWarning(text))

                # Estimate errors
                self.interpreter.relax_disp.r2eff_err_estimate()
            else:
                self.interpreter.monte_carlo.setup(number=monte_carlo_sim)
                self.interpreter.monte_carlo.create_data()
                self.interpreter.monte_carlo.initial_values()
                self.interpreter.minimise.execute(min_algor=min_algor, func_tol=self.opt_func_tol, max_iter=self.opt_max_iterations, constraints=constraints)
                if self.eliminate:
                    self.interpreter.eliminate()
                self.interpreter.monte_carlo.error_analysis()
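
The branching above reduces to a small decision table.  A standalone illustration follows; the helper name and the model string are assumptions for this sketch and are not part of relax.

# Standalone sketch of how min_algor, constraints and the optimisation flag
# are chosen above (the same logic also drives the Monte Carlo block).
def optimisation_settings(model, has_r2eff, optimise_r2eff):
    """Return the (run_optimisation, min_algor, constraints) triplet."""
    if model == 'R2eff':
        # Newton is usable because the exponential curves supply the Jacobian
        # and Hessian; skip only when R2eff values and errors already exist
        # and re-optimisation is not forced.
        run = optimise_r2eff or not has_r2eff
        return run, 'Newton', False
    # All dispersion models: always optimise, with constraints, via simplex.
    return True, 'simplex', True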
Code example #32
File: gnuplot.py Project: pombredanne/relax
def correlation_matrix(matrix=None, labels=None, file=None, dir=None, force=False):
    """Gnuplot plotting function for representing correlation matrices.

    @keyword matrix:    The correlation matrix.  This must be a square matrix.
    @type matrix:       numpy rank-2 array.
    @keyword labels:    The labels for each element of the matrix.  The same label is assumed for each [i, i] pair in the matrix.
    @type labels:       list of str
    @keyword file:      The name of the file to create.
    @type file:         str
    @keyword dir:       The directory where the files will be placed.  If set to None, then the files will be placed in the current directory.
    @type dir:          str or None
    @keyword force:     Flag which if set to True will cause any pre-existing files to be overwritten.
    @type force:        bool
    """

    # The dimensions.
    n = len(matrix)

    # Generate the text file for loading into gnuplot.
    text.correlation_matrix(matrix=matrix, labels=labels, file=file, dir=dir, force=force)

    # The script file name with the extension swapped.
    file_name = swap_extension(file=file, ext='gnu')

    # Open the script file for writing.
    output = open_write_file(file_name, dir=dir, force=force)

    # Gnuplot script setup. 
    output.write("#!/usr/bin/env gnuplot\n\n")


    # Set up the terminal type and make the plot square.
    output.write("# Set up the terminal type and make the plot square.\n")
    output.write("set terminal postscript eps size 10,10 enhanced color font 'Helvetica,20' linewidth 0.1\n")
    output.write("set size square\n")

    # The colour map.
    output.write("\n# Blue-red colour map.\n")
    output.write("set palette model RGB\n")
    output.write("set palette defined\n")

    # The labels.
    if labels is not None:
        output.write("\n# Labels.\n")
        for axis in ['x', 'y']:
            output.write("set %stics out " % axis)
            if axis == 'x':
                output.write("rotate ")
            output.write("font \",8\" (")
            for i in range(n):
                if i != 0:
                    output.write(", ")
                output.write("\"%s\" %s" % (format_enhanced(labels[i]), i))
            output.write(")\n")

    # Output to EPS.
    output.write("\n# Output to EPS.\n")
    output.write("set output \"%s.eps\"\n" % file_root(file))

    # Load and show the text data.
    output.write("\n# Load and show the text data\n")
    output.write("plot \"%s\" matrix with image\n" % file)

    # Close the file.
    output.close()

    # Make the script executable.
    chmod(get_file_path(file_name=file_name, dir=dir), S_IRWXU|S_IRGRP|S_IROTH)
Code example #33
def macro_write(data_type=None,
                style="classic",
                colour_start_name=None,
                colour_start_rgb=None,
                colour_end_name=None,
                colour_end_rgb=None,
                colour_list=None,
                file=None,
                dir=None,
                force=False):
    """Create a Molmol macro.

    @keyword data_type:         The data type to map to the structure.
    @type data_type:            str
    @keyword style:             The style of the macro.
    @type style:                str
    @keyword colour_start_name: The name of the starting colour of the linear gradient.
    @type colour_start_name:    str
    @keyword colour_start_rgb:  The RGB array starting colour of the linear gradient.
    @type colour_start_rgb:     RGB colour array (len 3 with vals from 0 to 1)
    @keyword colour_end_name:   The name of the ending colour of the linear gradient.
    @type colour_end_name:      str
    @keyword colour_end_rgb:    The RGB array ending colour of the linear gradient.
    @type colour_end_rgb:       RGB colour array (len 3 with vals from 0 to 1)
    @keyword colour_list:       The colour list to search for the colour names.  Can be either 'molmol' or 'x11'.
    @type colour_list:          str or None
    @keyword file:              The name of the macro file to create.
    @type file:                 str
    @keyword dir:               The name of the directory to place the macro file into.
    @type dir:                  str
    @keyword force:             Flag which if set to True will cause any pre-existing file to be overwritten.
    @type force:                bool
    """

    # Test if the current data pipe exists.
    check_pipe()

    # Test if sequence data exists.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Check the arguments.
    if colour_start_name is not None and colour_start_rgb is not None:
        raise RelaxError(
            "The starting colour name and RGB colour array cannot both be supplied."
        )
    if colour_end_name is not None and colour_end_rgb is not None:
        raise RelaxError(
            "The ending colour name and RGB colour array cannot both be supplied."
        )

    # Merge the colour args.
    if colour_start_name is not None:
        colour_start = colour_start_name
    else:
        colour_start = colour_start_rgb
    if colour_end_name is not None:
        colour_end = colour_end_name
    else:
        colour_end = colour_end_rgb

    # Create the macro.
    commands = create_macro(data_type=data_type,
                            style=style,
                            colour_start=colour_start,
                            colour_end=colour_end,
                            colour_list=colour_list)

    # File name.
    if file is None:
        file = data_type + '.mac'

    # Open the file for writing.
    file_path = get_file_path(file, dir)
    file = open_write_file(file, dir, force)

    # Loop over the commands and write them.
    for command in commands:
        file.write(command + "\n")

    # Close the file.
    file.close()

    # Add the file to the results file list.
    add_result_file(type='molmol', label='Molmol', file=file_path)
Code example #34
File: main.py Project: belisario21/relax_trunk
def create_diff_tensor_pdb(scale=1.8e-6, file=None, dir=None, force=False):
    """Create the PDB representation of the diffusion tensor.

    @keyword scale: The scaling factor for the diffusion tensor.
    @type scale:    float
    @keyword file:  The name of the PDB file to create.
    @type file:     str
    @keyword dir:   The name of the directory to place the PDB file into.
    @type dir:      str
    @keyword force: Flag which if set to True will overwrite any pre-existing file.
    @type force:    bool
    """

    # Test if the current data pipe exists.
    pipes.test()

    # Calculate the centre of mass.
    com = pipe_centre_of_mass()

    # Create the structural object.
    structure = Internal()

    # Create an array of data pipes to loop over (hybrid support).
    if cdp.pipe_type == 'hybrid':
        pipe_list = cdp.hybrid_pipes
    else:
        pipe_list = [pipes.cdp_name()]

    # The molecule names.
    if cdp.pipe_type == 'hybrid':
        mol_names = []
        for pipe in pipe_list:
            mol_names.append('diff_tensor_%s' % pipe)
    else:
        mol_names = ['diff_tensor']

    # Loop over the pipes.
    for pipe_index in range(len(pipe_list)):
        # Get the pipe container.
        pipe = pipes.get_pipe(pipe_list[pipe_index])

        # Test if the diffusion tensor data is loaded.
        if not hasattr(pipe, 'diff_tensor'):
            raise RelaxNoTensorError('diffusion')

        # Test if a structure has been loaded.
        if not hasattr(cdp, 'structure'):
            raise RelaxNoPdbError

        # Add a new structure.
        structure.add_molecule(name=mol_names[pipe_index])

        # Alias the single molecule from the single model.
        mol = structure.get_molecule(mol_names[pipe_index])

        # The diffusion tensor type.
        diff_type = pipe.diff_tensor.type
        if diff_type == 'spheroid':
            diff_type = pipe.diff_tensor.spheroid_type

        # Simulation info.
        sim_num = None
        if hasattr(pipe.diff_tensor, 'tm_sim'):
            # The number.
            sim_num = len(pipe.diff_tensor.tm_sim)

        # Tensor axes.
        axes = []
        sim_axes = []
        if diff_type in ['oblate', 'prolate']:
            axes.append(pipe.diff_tensor.Dpar * pipe.diff_tensor.Dpar_unit)
            if sim_num is not None:
                sim_axes.append([])
                for i in range(sim_num):
                    sim_axes[0].append(pipe.diff_tensor.Dpar_sim[i] * pipe.diff_tensor.Dpar_unit_sim[i])

        if diff_type == 'ellipsoid':
            axes.append(pipe.diff_tensor.Dx * pipe.diff_tensor.Dx_unit)
            axes.append(pipe.diff_tensor.Dy * pipe.diff_tensor.Dy_unit)
            axes.append(pipe.diff_tensor.Dz * pipe.diff_tensor.Dz_unit)
            if sim_num is not None:
                sim_axes.append([])
                sim_axes.append([])
                sim_axes.append([])
                for i in range(sim_num):
                    sim_axes[0].append(pipe.diff_tensor.Dx_sim[i] * pipe.diff_tensor.Dx_unit_sim[i])
                    sim_axes[1].append(pipe.diff_tensor.Dy_sim[i] * pipe.diff_tensor.Dy_unit_sim[i])
                    sim_axes[2].append(pipe.diff_tensor.Dz_sim[i] * pipe.diff_tensor.Dz_unit_sim[i])

        # Create the object.
        diffusion_tensor(mol=mol, tensor=pipe.diff_tensor.tensor, tensor_diag=pipe.diff_tensor.tensor_diag, diff_type=diff_type, rotation=pipe.diff_tensor.rotation, axes=axes, sim_axes=sim_axes, com=com, scale=scale)


    # Create the PDB file.
    ######################

    # Print out.
    print("\nGenerating the PDB file.")

    # Create the PDB file.
    tensor_pdb_file = open_write_file(file, dir, force=force)
    structure.write_pdb(tensor_pdb_file)
    tensor_pdb_file.close()

    # Add the file to the results file list.
    if not hasattr(cdp, 'result_files'):
        cdp.result_files = []
    if dir is None:
        dir = getcwd()
    cdp.result_files.append(['diff_tensor_pdb', 'Diffusion tensor PDB', get_file_path(file, dir)])
    status.observers.result_file.notify()
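
A hedged usage sketch; it requires a data pipe with both a diffusion tensor and a structure loaded, and the file name is a placeholder.

# Illustrative call only.
create_diff_tensor_pdb(scale=1.8e-6,       # The default scaling factor above.
                       file='tensor.pdb',  # Assumed output file name.
                       force=True)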
Code example #35
File: relax_fit.py Project: pombredanne/relax
    def run(self):
        """Set up and run the curve-fitting."""

        # Peak intensity error analysis.
        self.error_analysis()

        # Grid search.
        self.interpreter.minimise.grid_search(inc=self.grid_inc)

        # Minimise.
        self.interpreter.minimise.execute('newton', scaling=False, constraints=False)

        # Monte Carlo simulations.
        self.interpreter.monte_carlo.setup(number=self.mc_sim_num)
        self.interpreter.monte_carlo.create_data()
        self.interpreter.monte_carlo.initial_values()
        self.interpreter.minimise.execute('newton', scaling=False, constraints=False)
        self.interpreter.monte_carlo.error_analysis()

        # Determine the normalisation type and whether the Iinf parameter exists.
        norm_type = 'last'
        iinf = True
        for spin in spin_loop(skip_desel=True):
            if spin.model not in ['sat', 'inv']:
                norm_type = 'first'
                iinf = False
                break

        # Save the relaxation rates and other parameter values.
        self.interpreter.value.write(param='i0', file='i0.out', dir=self.results_dir, force=True)
        if iinf:
            self.interpreter.value.write(param='iinf', file='iinf.out', dir=self.results_dir, force=True)
        self.interpreter.value.write(param='rx', file=self.file_root+'.out', dir=self.results_dir, force=True)

        # Save the results.
        self.interpreter.results.write(file='results', dir=self.results_dir, force=True)

        # Create Grace plots of the data.
        self.interpreter.grace.write(y_data_type='chi2', file='chi2.agr', dir=self.grace_dir, force=True)    # Minimised chi-squared value.
        self.interpreter.grace.write(y_data_type='i0', file='i0.agr', dir=self.grace_dir, force=True)    # Initial peak intensity.
        if iinf:
            self.interpreter.grace.write(y_data_type='iinf', file='iinf.agr', dir=self.grace_dir, force=True)    # Infinite peak intensity.
        self.interpreter.grace.write(y_data_type='rx', file=self.file_root+'.agr', dir=self.grace_dir, force=True)    # Relaxation rate.
        self.interpreter.grace.write(x_data_type='relax_times', y_data_type='peak_intensity', file='intensities.agr', dir=self.grace_dir, force=True)    # Average peak intensities.
        self.interpreter.grace.write(x_data_type='relax_times', y_data_type='peak_intensity', norm_type=norm_type, norm=True, file='intensities_norm.agr', dir=self.grace_dir, force=True)    # Average peak intensities (normalised).

        # Write a python "grace to PNG/EPS/SVG..." conversion script.
        # Open the file for writing.
        file_name = "grace2images.py"
        file_path = get_file_path(file_name=file_name, dir=self.grace_dir)
        file = open_write_file(file_name=file_name, dir=self.grace_dir, force=True)

        # Write the file.
        script_grace2images(file=file)

        # Close the batch script, then make it executable (expanding any ~ characters).
        file.close()

        if self.grace_dir:
            dir = expanduser(self.grace_dir)
            chmod(dir + sep + file_name, S_IRWXU|S_IRGRP|S_IROTH)
        else:
            file_name = expanduser(file_name)
            chmod(file_name, S_IRWXU|S_IRGRP|S_IROTH)


        # Display the Grace plots if selected.
        if self.view_plots:
            self.interpreter.grace.view(file='chi2.agr', dir=self.grace_dir)
            self.interpreter.grace.view(file='i0.agr', dir=self.grace_dir)
            self.interpreter.grace.view(file=self.file_root+'.agr', dir=self.grace_dir)
            self.interpreter.grace.view(file='intensities.agr', dir=self.grace_dir)
            self.interpreter.grace.view(file='intensities_norm.agr', dir=self.grace_dir)

        # Save the program state.
        self.interpreter.state.save(state=self.file_root+'.save', dir=self.results_dir, force=True)
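
The normalisation decision inside run() above can be read as a small predicate: normalise to the last point, and write the Iinf parameter, only when every selected spin uses one of the 'sat' or 'inv' models.  A standalone sketch, with the model strings taken from the code above:

# Mirror of the norm_type/iinf logic in run(); runnable without relax.
def normalisation(models):
    """Return (norm_type, iinf) for a list of per-spin model names."""
    if all(m in ['sat', 'inv'] for m in models):
        return 'last', True
    return 'first', False

print(normalisation(['sat', 'inv']))   # ('last', True)
print(normalisation(['sat', 'exp']))   # ('first', False), as the models are mixed.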
Code example #36
File: plotting.py Project: pombredanne/relax
def write_xy(format='grace', x_data_type='res_num', y_data_type=None, spin_id=None, plot_data='value', norm_type='first', file=None, dir=None, force=False, norm=True):
    """Writing data to a file.

    @keyword format:        The specific backend to use.  The only currently supported backend is 'grace'.
    @type format:           str
    @keyword x_data_type:   The category of the X-axis data.
    @type x_data_type:      str
    @keyword y_data_type:   The category of the Y-axis data.
    @type y_data_type:      str
    @keyword spin_id:       The spin identification string.
    @type spin_id:          str
    @keyword plot_data:     The type of the plotted data, one of 'value', 'error', or 'sim'.
    @type plot_data:        str
    @keyword norm_type:     The point to normalise to 1.  This can be 'first' or 'last'.
    @type norm_type:        str
    @keyword file:          The name of the Grace file to create.
    @type file:             str
    @keyword dir:           The optional directory to place the file into.
    @type dir:              str
    @keyword force:         Boolean argument which if True causes the file to be overwritten if it already exists.
    @type force:            bool
    @keyword norm:          The normalisation flag which if set to True will cause all graphs to be normalised to a starting value of 1.
    @type norm:             bool
    """

    # Checks.
    check_pipe()
    check_mol_res_spin_data()

    # Test if the plot_data argument is one of 'value', 'error', or 'sim'.
    if plot_data not in ['value', 'error', 'sim']:
        raise RelaxError("The plot data argument " + repr(plot_data) + " must be set to either 'value', 'error', 'sim'.")

    # Test if the simulations exist.
    if plot_data == 'sim' and not hasattr(cdp, 'sim_number'):
        raise RelaxNoSimError

    # Open the file for writing.
    file_path = get_file_path(file, dir)
    file = open_write_file(file, dir, force)

    # Get the data.
    data, set_names, graph_type = assemble_data(spin_id, x_data_name=x_data_type, y_data_name=y_data_type, plot_data=plot_data)

    # Convert the graph type.
    if graph_type == 'X,Y':
        graph_type = 'xy'
    elif graph_type == 'X,Y,dX':
        graph_type = 'xydx'
    elif graph_type == 'X,Y,dY':
        graph_type = 'xydy'
    elif graph_type == 'X,Y,dX,dY':
        graph_type = 'xydxdy'

    # No data, so close the empty file and exit.
    if not len(data) or not len(data[0]) or not len(data[0][0]):
        warn(RelaxWarning("No data could be found, creating an empty file."))
        file.close()
        return

    # Get the axis information.
    data_type = [x_data_type, y_data_type]
    seq_type, axis_labels = axis_setup(data_type=data_type, norm=norm)

    # Write the header.
    write_xy_header(format=format, file=file, data_type=data_type, seq_type=seq_type, sets=[len(data[0])], set_names=[set_names], axis_labels=[axis_labels], norm=[norm])

    # Write the data.
    write_xy_data(format=format, data=data, file=file, graph_type=graph_type, norm_type=norm_type, norm=[norm])

    # Close the file.
    file.close()

    # Add the file to the results file list.
    label = None
    if format == 'grace':
        label = 'Grace'
    add_result_file(type=format, label=label, file=file_path)
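
Finally, a hedged usage sketch for write_xy(); the 'rx' data type matches the relax_fit example further above, while the remaining values are assumptions.

# Illustrative call only; assumes fitted 'rx' values exist in the current pipe.
write_xy(format='grace',
         x_data_type='res_num',  # Residue number on the X axis.
         y_data_type='rx',       # Relaxation rate on the Y axis.
         file='rx.agr',
         dir='grace',
         norm=False,             # No normalisation for a rate-per-residue plot.
         force=True)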