Example #1
def write_param_files(global_file=None, kex=1500.0, pA=0.95, set_file=None, dir=None, r1=False, force=False):
    """Create the CATIA parameter files.

    @keyword global_file:   The name of the global parameter file.
    @type global_file:      str
    @keyword kex:           The kex starting value.
    @type kex:              float
    @keyword pA:            The pA starting value.
    @type pA:               float
    @keyword set_file:      The name of the parameter set file.
    @type set_file:         str
    @keyword dir:           The base directory to place the files into.
    @type dir:              str
    @keyword r1:            A flag which if True will cause the R1 data to be used for off-resonance effects.
    @type r1:               bool
    @keyword force:         A flag which if True will cause a pre-existing file to be overwritten.
    @type force:            bool
    """

    # Open the global parameter file.
    param_file = open_write_file(file_name=global_file, dir=dir, force=force)

    # Set the starting values.
    param_file.write("kex = %s\n" % kex)
    param_file.write("pb = %s\n" % (1.0-pA))

    # Close the file.
    param_file.close()

    # Open the 1st parameter set file.
    set_file = open_write_file(file_name=set_file, dir=dir, force=force)

    # The parameter format.
    params = ['Delta0']
    values = [0.5]
    if r1:
        for frq in loop_frq():
            params.append("R1iph_%s" % frq_label(frq))
            values.append(1.5)
    for frq in loop_frq():
        params.append("R0iph_%s" % frq_label(frq))
        values.append(5.0)
    if r1:
        params.append("Omega")
        values.append(0.0)

    # Write out the format.
    set_file.write("format = (")
    for i in range(len(params)):
        if i != 0:
            set_file.write(';')
        set_file.write(params[i])
    set_file.write(")\n")

    # The parameter values (their exact meaning in the CATIA format is unclear; possibly the average values).
    set_file.write("* = (")
    for i in range(len(values)):
        if i != 0:
            set_file.write(';')
        set_file.write("%s" % values[i])
    set_file.write(")\n")

    # Close the file.
    set_file.close()
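A hypothetical call of the above, assuming a relax dispersion data pipe is loaded so that loop_frq() and frq_label() can operate; the file and directory names are illustrative only:

write_param_files(global_file='global_params.inp', set_file='set1.inp', dir='catia', kex=1500.0, pA=0.95, r1=True, force=True)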
Example #2
    def visualisation(self, model=None):
        """Create visual representations of the frame order results for the given model.

        This includes a PDB representation of the motions (the 'cone.pdb' file located in each model directory) as well as a relax script for displaying the average domain positions together with the cone/motion representation in PyMOL (the 'pymol_display.py' file, also created in the model directory).

        @keyword model:     The frame order model to visualise.  This should match the model of the current data pipe, unless the special value of 'final' is used to indicate the visualisation of the final results.
        @type model:        str
        """

        # Sanity check.
        if model != 'final' and model != cdp.model:
            raise RelaxError("The model '%s' does not match the model '%s' of the current data pipe." % (model, cdp.model))

        # The PDB representation of the model.
        self.interpreter.frame_order.pdb_model(dir=self.results_dir+model, force=True)

        # Create the visualisation script.
        subsection(file=sys.stdout, text="Creating a PyMOL visualisation script.")
        script = open_write_file(file_name='pymol_display.py', dir=self.results_dir+model, force=True)

        # Add a comment for the user.
        script.write("# relax script for displaying the frame order results of this '%s' model in PyMOL.\n\n" % model)

        # The script contents.
        script.write("# PyMOL visualisation.\n")
        script.write("pymol.view()\n")
        script.write("pymol.command('show spheres')\n")
        script.write("pymol.frame_order(file='frame_order.pdb', dist_file='frame_order_distribution.pdb')\n")

        # Close the file.
        script.close()
Example #3
    def __init__(self):
        """Convert the final results into a LaTeX table."""

        # Create the data pipe.
        pipe.create('latex', 'mf')

        # Load the model-free results.
        results.read(file='final_results_trunc_1.3_v2', dir=DATA_PATH)

        # Open the file.
        self.file = open_write_file('devnull')

        # LaTeX header.
        self.latex_header()

        # Table headings.
        self.headings()

        # Table footer (longtable repeating footers come just after the headings).
        self.footer()

        # The table body.
        self.table_body()

        # LaTeX ending.
        self.latex_ending()

        # Close the file.
        self.file.close()
Example #4
def save_state(state=None, dir=None, compress_type=1, verbosity=1, force=False):
    """Function for saving the program state.

    @keyword state:         The saved state file.
    @type state:            str
    @keyword dir:           The path of the state file.
    @type dir:              str
    @keyword verbosity:     The verbosity level.
    @type verbosity:        int
    @keyword force:         Boolean argument which if True causes the file to be overwritten if it
                            already exists.
    @type force:            bool
    @keyword compress_type: The compression type.  The integer values correspond to the compression
                            type: 0, no compression; 1, Bzip2 compression; 2, Gzip compression.
    @type compress_type:    int
    """

    # Open the file for writing.
    file = open_write_file(file_name=state, dir=dir, verbosity=verbosity, force=force, compress_type=compress_type)

    # Save as XML.
    ds.to_xml(file)

    # Close the file.
    file.close()
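For context, a minimal standard-library sketch of the compress_type convention documented above (0, none; 1, Bzip2; 2, Gzip); this is an illustration only, not relax's actual open_write_file() implementation:

import bz2
import gzip

def open_compressed(file_name, compress_type=0):
    """Open a text file for writing, optionally Bzip2 or Gzip compressed."""
    if compress_type == 1:
        return bz2.open(file_name + '.bz2', 'wt')    # Bzip2 compression.
    elif compress_type == 2:
        return gzip.open(file_name + '.gz', 'wt')    # Gzip compression.
    return open(file_name, 'w')                      # No compression.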
Example #5
def save_state(state=None,
               dir=None,
               compress_type=1,
               verbosity=1,
               force=False):
    """Function for saving the program state.

    @keyword state:         The saved state file.
    @type state:            str
    @keyword dir:           The path of the state file.
    @type dir:              str
    @keyword verbosity:     The verbosity level.
    @type verbosity:        int
    @keyword force:         Boolean argument which if True causes the file to be overwritten if it
                            already exists.
    @type force:            bool
    @keyword compress_type: The compression type.  The integer values correspond to the compression
                            type: 0, no compression; 1, Bzip2 compression; 2, Gzip compression.
    @type compress_type:    int
    """

    # Open the file for writing.
    file = open_write_file(file_name=state,
                           dir=dir,
                           verbosity=verbosity,
                           force=force,
                           compress_type=compress_type)

    # Save as XML.
    ds.to_xml(file)

    # Close the file.
    file.close()
Example #6
def write(align_id=None, file=None, dir=None, bc=False, force=False):
    """Display the RDC data corresponding to the alignment ID.

    @keyword align_id:  The alignment tensor ID string.
    @type align_id:     str
    @keyword file:      The file name or object to write to.
    @type file:         str or file object
    @keyword dir:       The name of the directory to place the file into (defaults to the current directory).
    @type dir:          str
    @keyword bc:        The back-calculation flag which if True will cause the back-calculated rather than measured data to be written.
    @type bc:           bool
    @keyword force:     A flag which if True will cause any pre-existing file to be overwritten.
    @type force:        bool
    """

    # Check the pipe setup.
    check_pipe_setup(sequence=True, rdc_id=align_id, rdc=True)

    # Open the file for writing.
    file = open_write_file(file, dir, force)

    # Loop over the interatomic data containers and collect the data.
    data = []
    for interatom in interatomic_loop():
        # Skip deselected containers.
        if not interatom.select:
            continue

        # Skip containers with no RDCs.
        if not bc and (not hasattr(interatom, 'rdc') or align_id not in interatom.rdc.keys()):
            continue
        elif bc and (not hasattr(interatom, 'rdc_bc') or align_id not in interatom.rdc_bc.keys()):
            continue

        # Append the spin data.
        data.append([])
        data[-1].append(interatom.spin_id1)
        data[-1].append(interatom.spin_id2)

        # Handle the missing rdc_data_types variable.
        data_type = None
        if hasattr(interatom, 'rdc_data_types'):
            data_type = interatom.rdc_data_types[align_id]

        # The value.
        if bc:
            data[-1].append(repr(convert(interatom.rdc_bc[align_id], data_type, align_id)))
        else:
            data[-1].append(repr(convert(interatom.rdc[align_id], data_type, align_id)))

        # The error.
        if hasattr(interatom, 'rdc_err') and align_id in interatom.rdc_err.keys():
            data[-1].append(repr(convert(interatom.rdc_err[align_id], data_type, align_id)))
        else:
            data[-1].append(repr(None))

    # Write out.
    write_data(out=file, headings=["Spin_ID1", "Spin_ID2", "RDCs", "RDC_error"], data=data)
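A hypothetical usage of the above; as the docstring notes, the file argument also accepts an open file object, so the data can be sent straight to standard output (the alignment ID 'Dy' is illustrative):

import sys
write(align_id='Dy', file=sys.stdout)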
Example #7
def create_cone_pdb(mol=None, cone=None, start_res=1, apex=None, axis=None, R=None, inc=None, scale=30.0, distribution='regular', file=None, dir=None, force=False, axis_flag=True):
    """Create a PDB representation of the given cone object.

    @keyword mol:           The molecule container.
    @type mol:              MolContainer instance
    @keyword cone:          The cone object.  This should provide the limit_check() method which determines the limits of the distribution, accepting two arguments, the polar angle phi and the azimuthal angle theta, and returning True if the point is within the limits or False if outside.  It should also provide the theta_max() method for returning the theta value for the given phi, and the phi_max() method for returning the phi value for the given theta.
    @type cone:             class instance
    @keyword start_res:     The starting residue number.
    @type start_res:        int
    @keyword apex:          The apex of the cone.
    @type apex:             rank-1, 3D numpy array
    @keyword axis:          The central axis of the cone.  If not supplied, the z-axis will be used.
    @type axis:             rank-1, 3D numpy array
    @keyword R:             The rotation matrix.
    @type R:                rank-2, 3D numpy array
    @keyword inc:           The increment number used to determine the number of latitude and longitude lines.
    @type inc:              int
    @keyword scale:         The scaling factor to stretch the unit cone by.
    @type scale:            float
    @keyword distribution:  The type of point distribution to use.  This can be 'uniform' or 'regular'.
    @type distribution:     str
    @keyword file:          The name of the PDB file to create.
    @type file:             str
    @keyword dir:           The name of the directory to place the PDB file into.
    @type dir:              str
    @keyword force:         Flag which if set to True will overwrite any pre-existing file.
    @type force:            bool
    @keyword axis_flag:     A flag which if True will create the cone's axis.
    @type axis_flag:        bool
    """

    # No molecule supplied.
    if mol == None:
        # Create the structural object.
        structure = Internal()

        # Add a molecule.
        structure.add_molecule(name='cone')

        # Alias the single molecule from the single model.
        mol = structure.structural_data[0].mol[0]

    # Create the object.
    cone(mol=mol, cone=cone, start_res=start_res, apex=apex, axis=axis, R=R, inc=inc, scale=scale, distribution=distribution, axis_flag=axis_flag)

    # Create the PDB file.
    if file != None:
        print("\nGenerating the PDB file.")
        pdb_file = open_write_file(file_name=file, dir=dir, force=force)
        structure.write_pdb(pdb_file)
        pdb_file.close()

    # Add the file to the results file list.
    if not hasattr(cdp, 'result_files'):
        cdp.result_files = []
    cdp.result_files.append(['cone_pdb', 'Cone PDB', get_file_path(file, dir)])
    status.observers.result_file.notify()
Example #8
def pdb(r=1.02, file_name='uniform.pdb', inc=None):
    """Create the bond vector distribution and save the PDB file."""

    # Create the structural object.
    structure = Internal()

    # Add a molecule.
    structure.add_molecule(name='dist')

    # Alias the single molecule from the single model.
    mol = structure.structural_data[0].mol[0]

    # Get the polar and azimuthal angles for the distribution.
    phi, theta = angles_uniform(inc)

    # Get the uniform vector distribution.
    vectors = vect_dist_spherical_angles(inc=inc, distribution='uniform')

    # Loop over the radial array of vectors (change in longitude).
    atom_num = 1
    new_vectors = []
    for i in range(len(theta)):
        # Loop over the vectors of the radial array (change in latitude).
        for j in range(len(phi)):
            # The index.
            index = i + j*len(theta)

            # The atomic positions.
            pos1 = vectors[index] * 10
            pos2 = pos1 + vectors[index] * r

            # Store the rearranged vector (truncated as in the PDB).
            trunc_vect = zeros(3, float64)
            for k in range(3):
                trunc_vect[k] = float("%.3f" % pos2[k]) - float("%.3f" % pos1[k])
            new_vectors.append(trunc_vect)

            # Residue number.
            res = (atom_num + 1) // 2

            # Add the vector as a N-H atom pair.
            mol.atom_add(pdb_record='ATOM', atom_num=atom_num,   atom_name='N', res_name=AA_TABLE[SEQ[index]].upper(), res_num=res, pos=pos1, element='N')
            mol.atom_add(pdb_record='ATOM', atom_num=atom_num+1, atom_name='H', res_name=AA_TABLE[SEQ[index]].upper(), res_num=res, pos=pos2,   element='H')

            # Connect.
            mol.atom_connect(atom_num-1, atom_num)

            # Move 2 atoms forwards.
            atom_num += 2

    # The PDB file.
    file = open_write_file(file_name, force=True)
    structure.write_pdb(file)
    file.close()

    # Return the vectors in the diffusion frame.
    return new_vectors
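The truncation step above, in isolation: the stored vectors are recomputed from coordinates rounded to the 0.001 Angstrom precision kept by the PDB format, so that they match the file contents exactly (illustrative numbers):

pos1, pos2 = 1.23456, 2.34567
trunc = float("%.3f" % pos2) - float("%.3f" % pos1)    # ~1.111 rather than 1.11111.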
Example #9
def cpmgfit_input(dir=None, binary='cpmgfit', spin_id=None, force=False):
    """Create the CPMGFit input files.

    @keyword dir:               The optional directory to place the files into.  If None, then the files will be placed into a directory named after the dispersion model.
    @type dir:                  str or None
    @keyword binary:            The name of the CPMGFit binary file.  This can include the path to the binary.
    @type binary:               str
    @keyword spin_id:           The spin ID string to restrict the file creation to.
    @type spin_id:              str
    @keyword force:             A flag which if True will cause all pre-existing files to be overwritten.
    @type force:                bool
    """

    # Test if the current pipe exists.
    check_pipe()

    # Test if sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Test if the experiment type has been set.
    if not hasattr(cdp, 'exp_type'):
        raise RelaxError("The relaxation dispersion experiment type has not been specified.")

    # Test if the model has been set.
    if not hasattr(cdp, 'model_type'):
        raise RelaxError("The relaxation dispersion model has not been specified.")

    # Directory creation.
    if dir != None:
        mkdir_nofail(dir, verbosity=0)

    # The 'batch_run.sh' script.
    batch = open_write_file('batch_run.sh', dir, force)
    batch.write("#! /bin/sh\n\n")

    # Generate the input files for each spin.
    for spin, spin_id in spin_loop(return_id=True, skip_desel=True):
        # Translate the model.
        function = translate_model(spin.model)

        # Create the input file.
        file_in = create_spin_input(function=function, spin=spin, spin_id=spin_id, dir=dir)

        # The output file name.
        file_out = spin_file_name(spin_id=spin_id, output=True)

        # Add the file to the batch script.
        batch.write("%s -grid -xmgr -f %s | tee %s\n" % (binary, file_in, file_out))

    # Close the batch script, then make it executable.
    batch.close()
    if dir:
        chmod(dir + sep + 'batch_run.sh', S_IRWXU|S_IRGRP|S_IROTH)
    else:
        chmod('batch_run.sh', S_IRWXU|S_IRGRP|S_IROTH)
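A hypothetical call of the above, assuming a dispersion data pipe with the experiment type and spin models already set; the binary path is illustrative:

cpmgfit_input(dir='cpmgfit', binary='/usr/local/bin/cpmgfit', force=True)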
Example #10
def create_rotor_pdb(file=None, dir=None, rotor_angle=None, axis=None, axis_pt=True, centre=None, span=2e-9, blade_length=5e-10, force=False, staggered=False):
    """Create a PDB representation of a rotor motional model.

    @keyword file:          The name of the PDB file to create.
    @type file:             str
    @keyword dir:           The name of the directory to place the PDB file into.
    @type dir:              str
    @keyword rotor_angle:   The angle of the rotor motion in degrees.
    @type rotor_angle:      float
    @keyword axis:          The vector defining the rotor axis.
    @type axis:             numpy rank-1, 3D array
    @keyword axis_pt:       A point lying anywhere on the rotor axis.  This is used to define the position of the axis in 3D space.
    @type axis_pt:          numpy rank-1, 3D array
    @keyword centre:        The central point of the representation.  If this point is not on the rotor axis, then the closest point on the axis will be used for the centre.
    @type centre:           numpy rank-1, 3D array
    @keyword span:          The distance from the central point to the rotor blades (meters).
    @type span:             float
    @keyword blade_length:  The length of the representative rotor blades.
    @type blade_length:     float
    @keyword force:         A flag which if set will overwrite any pre-existing file.
    @type force:            bool
    @keyword staggered:     A flag which if True will cause the rotor blades to be staggered.  This is used to avoid blade overlap.
    @type staggered:        bool
    """

    # Test if the current pipe exists.
    pipes.test()

    # Convert the angle to radians.
    rotor_angle = rotor_angle / 360.0 * 2.0 * pi

    # Create the structural object.
    structure = Internal()

    # Generate the rotor object.
    rotor_pdb(structure=structure, rotor_angle=rotor_angle, axis=axis, axis_pt=axis_pt, centre=centre, span=span, blade_length=blade_length, staggered=staggered)

    # Print out.
    print("\nGenerating the PDB file.")

    # Open the PDB file for writing.
    tensor_pdb_file = open_write_file(file, dir, force=force)

    # Write the data.
    structure.write_pdb(tensor_pdb_file)

    # Close the file.
    tensor_pdb_file.close()

    # Add the file to the results file list.
    if not hasattr(cdp, 'result_files'):
        cdp.result_files = []
    if dir == None:
        dir = getcwd()
    cdp.result_files.append(['rotor_pdb', 'Rotor PDB', get_file_path(file, dir)])
    status.observers.result_file.notify()
Example #11
def create(algor='LM', dir=None, force=False):
    """Create the Dasha script file 'dasha_script' for controlling the program.

    @keyword algor: The optimisation algorithm to use.  This can be the Levenberg-Marquardt algorithm 'LM' or the Newton-Raphson algorithm 'NR'.
    @type algor:    str
    @keyword dir:   The optional directory to place the script into.
    @type dir:      str or None
    @keyword force: A flag which if True will cause any pre-existing file to be overwritten.
    @type force:    bool
    """

    # Test if the current pipe exists.
    check_pipe()

    # Test if sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Determine the parameter set.
    model_type = determine_model_type()

    # Test if diffusion tensor data for the data_pipe exists.
    if model_type != 'local_tm' and not hasattr(cdp, 'diff_tensor'):
        raise RelaxNoTensorError('diffusion')

    # Test if the PDB file has been loaded (for the spheroid and ellipsoid).
    if model_type != 'local_tm' and cdp.diff_tensor.type != 'sphere' and not hasattr(
            cdp, 'structure'):
        raise RelaxNoPdbError

    # Test the optimisation algorithm.
    if algor not in ['LM', 'NR']:
        raise RelaxError(
            "The Dasha optimisation algorithm '%s' is unknown, it should either be 'LM' or 'NR'."
            % algor)

    # Deselect certain spins.
    __deselect_spins()

    # Directory creation.
    if dir == None:
        dir = pipes.cdp_name()
    mkdir_nofail(dir, verbosity=0)

    # Calculate the angle alpha of the XH vector in the spheroid diffusion frame.
    if cdp.diff_tensor.type == 'spheroid':
        angles.spheroid_frame()

    # Calculate the angles theta and phi of the XH vector in the ellipsoid diffusion frame.
    elif cdp.diff_tensor.type == 'ellipsoid':
        angles.ellipsoid_frame()

    # The 'dasha_script' file.
    script = open_write_file(file_name='dasha_script', dir=dir, force=force)
    create_script(script, model_type, algor)
    script.close()
Example #12
def create(algor='LM', dir=None, force=False):
    """Create the Dasha script file 'dasha_script' for controlling the program.

    @keyword algor: The optimisation algorithm to use.  This can be the Levenberg-Marquardt algorithm 'LM' or the Newton-Raphson algorithm 'NR'.
    @type algor:    str
    @keyword dir:   The optional directory to place the script into.
    @type dir:      str or None
    @keyword force: A flag which if True will cause any pre-existing file to be overwritten.
    @type force:    bool
    """

    # Test if the current pipe exists.
    check_pipe()

    # Test if sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Determine the parameter set.
    model_type = determine_model_type()

    # Test if diffusion tensor data for the data_pipe exists.
    if model_type != 'local_tm' and not hasattr(cdp, 'diff_tensor'):
        raise RelaxNoTensorError('diffusion')

    # Test if the PDB file has been loaded (for the spheroid and ellipsoid).
    if model_type != 'local_tm' and cdp.diff_tensor.type != 'sphere' and not hasattr(cdp, 'structure'):
        raise RelaxNoPdbError

    # Test the optimisation algorithm.
    if algor not in ['LM', 'NR']:
        raise RelaxError("The Dasha optimisation algorithm '%s' is unknown, it should either be 'LM' or 'NR'." % algor)

    # Deselect certain spins.
    __deselect_spins()

    # Directory creation.
    if dir == None:
        dir = pipes.cdp_name()
    mkdir_nofail(dir, verbosity=0)

    # Calculate the angle alpha of the XH vector in the spheroid diffusion frame.
    if cdp.diff_tensor.type == 'spheroid':
        angles.spheroid_frame()

    # Calculate the angles theta and phi of the XH vector in the ellipsoid diffusion frame.
    elif cdp.diff_tensor.type == 'ellipsoid':
        angles.ellipsoid_frame()

    # The 'dasha_script' file.
    script = open_write_file(file_name='dasha_script', dir=dir, force=force)
    create_script(script, model_type, algor)
    script.close()
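A hypothetical call of the above selecting the Newton-Raphson algorithm, with the script placed into an illustrative 'dasha' directory:

create(algor='NR', dir='dasha', force=True)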
Example #13
    def create_par_chi2(self, file_prefix, par_chi2_vals):
        """Function for creating file with parameters and the chi2 value."""

        # Print out.
        print("\nCreating the file with parameters and the chi2 value.")

        # Open the file.
        par_file = open_write_file(file_name=file_prefix + '.par',
                                   dir=self.dir,
                                   force=True)

        # Copy the nested list to sort it.
        par_chi2_vals_sort = deepcopy(par_chi2_vals)

        # Then sort the value.
        par_chi2_vals_sort.sort(key=lambda values: values[4])

        # Collect the data structure, which is a list of list of strings.
        data = []
        for i, line in enumerate(par_chi2_vals):
            line_sort = par_chi2_vals_sort[i]

            # Convert values to strings.
            line_str = ["%3.5f" % j for j in line]
            line_sort_str = ["%3.5f" % j for j in line_sort]

            # Convert the index from float to int.
            line_str[0] = "%i" % line[0]
            line_sort_str[0] = "%i" % line_sort[0]

            # Merge the two lists and append to data.
            data_list = line_str + line_sort_str
            data.append(data_list)

        # Make the headings.
        headings = ['i'] + self.params + ['chi2']
        headings += headings

        # Add "_sort" to headings.
        headings[5] = headings[5] + "_sort"
        headings[6] = headings[6] + "_sort"
        headings[7] = headings[7] + "_sort"
        headings[8] = headings[8] + "_sort"
        headings[9] = headings[9] + "_sort"

        # Write the parameters and chi2 values to file.
        write_data(out=par_file, headings=headings, data=data)

        # Close the file.
        par_file.close()
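The sort used above, in isolation: the rows are ordered by their chi2 column, which sits at index 4 (the data here is illustrative):

rows = [[0, 0.1, 0.2, 0.3, 9.0], [1, 0.1, 0.2, 0.3, 2.5]]
rows.sort(key=lambda values: values[4])    # The chi2=2.5 row now comes first.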
Example #14
    def create_map(self):
        """Function for creating the map."""

        # Print out.
        print("\nCreating the map.")

        # Open the file.
        map_file = open_write_file(file_name=self.file_prefix, dir=self.dir, force=True)

        # Generate and write the text of the map.
        self.map_3D_text(map_file)

        # Close the file.
        map_file.close()
Example #15
def show_apod_rmsd_to_file(file_name=None, dir=None, path_to_command='showApod', outdir=None, force=False):
    """Extract showApod 'Noise Std Dev' from showApod, and write to file with same filename and ending '.rmsd'

    @keyword file:              The filename of the NMRPipe fourier transformed file.
    @type file:                 str
    @keyword dir:               The directory where the file is located.
    @type dir:                  str
    @keyword path_to_command:   If showApod not in PATH, then specify absolute path as: /path/to/showApod
    @type path_to_command:      str
    @keyword outdir:            The directory where to write the file.  If 'None', then write in same directory.
    @type outdir:               str
    @param force:               Boolean argument which if True causes the file to be overwritten if it already exists.
    @type force:                bool
    @return:                    Write the 'Noise Std Dev' from showApod to a file with same file filename, with ending '.rmsd'.  This will be a file path.
    @rtype:                     str
    """

    # Call extract function.
    apod_rmsd = show_apod_rmsd(file_name=file_name, dir=dir, path_to_command=path_to_command)

    # Get the filename stripped of extension details.
    file_name_root = file_root(file_name)

    # Define extension.
    extension = ".rmsd"

    # Define file name for writing.
    file_name_out = file_name_root + extension

    # Define folder to write to.
    if outdir == None:
        write_outdir = dir
    else:
        write_outdir = outdir

    # Open the file for writing.
    wfile, wfile_path = open_write_file(file_name=file_name_out, dir=write_outdir, force=force, verbosity=1, return_path=True)

    # Write to file.
    out_write_data = [['%s'%apod_rmsd]]

    # Write the data.
    write_data(out=wfile, headings=None, data=out_write_data, sep=None)

    # Close file.
    wfile.close()

    # Return path to file.
    return wfile_path
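A hypothetical usage of the above, assuming the showApod program is available in the PATH; the spectrum file name is illustrative:

rmsd_file = show_apod_rmsd_to_file(file_name='test.ft2', dir='spectra', force=True)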
Example #16
    def create_par_chi2(self, file_prefix, par_chi2_vals):
        """Function for creating file with parameters and the chi2 value."""

        # Print out.
        print("\nCreating the file with parameters and the chi2 value.")

        # Open the file.
        par_file = open_write_file(file_name=file_prefix+'.par', dir=self.dir, force=True)

        # Copy the nested list to sort it.
        par_chi2_vals_sort = deepcopy(par_chi2_vals)

        # Then sort the value.
        par_chi2_vals_sort.sort(key=lambda values: values[4])

        # Collect the data structure, which is a list of list of strings.
        data = []
        for i, line in enumerate(par_chi2_vals):
            line_sort = par_chi2_vals_sort[i]

            # Convert values to strings.
            line_str = ["%3.5f"%j for j in line]
            line_sort_str = ["%3.5f"%j for j in line_sort]

            # Convert the index from float to int.
            line_str[0] = "%i" % line[0]
            line_sort_str[0] = "%i" % line_sort[0]

            # Merge the two lists and append to data.
            data_list = line_str + line_sort_str
            data.append(data_list)

        # Make the headings.
        headings = ['i'] + self.params + ['chi2']
        headings += headings

        # Add "_sort" to headings.
        headings[5] = headings[5] + "_sort"
        headings[6] = headings[6] + "_sort"
        headings[7] = headings[7] + "_sort"
        headings[8] = headings[8] + "_sort"
        headings[9] = headings[9] + "_sort"

        # Write the parameters and chi2 values to file.
        write_data(out=par_file, headings=headings, data=data)

        # Close the file.
        par_file.close()
Example #17
    def create_map(self):
        """Function for creating the map."""

        # Print out.
        print("\nCreating the map.")

        # Open the file.
        map_file = open_write_file(file_name=self.file_prefix,
                                   dir=self.dir,
                                   force=True)

        # Generate and write the text of the map.
        self.map_3D_text(map_file)

        # Close the file.
        map_file.close()
Example #18
def nessy_input(file='save.NESSY', dir=None, spin_id=None, force=False):
    """Create the NESSY input files.

    @keyword file:              The name of the NESSY save file to create.
    @type file:                 str
    @keyword dir:               The optional directory to place the file into.  If None, then the file will be placed into the current directory.
    @type dir:                  str or None
    @keyword spin_id:           The spin ID string to restrict the file creation to.
    @type spin_id:              str
    @keyword force:             A flag which if True will cause all pre-existing files to be overwritten.
    @type force:                bool
    """

    # Test if the current pipe exists.
    check_pipe()

    # Test if sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Test if the experiment type has been set.
    if not hasattr(cdp, 'exp_type'):
        raise RelaxError(
            "The relaxation dispersion experiment type has not been specified."
        )

    # Directory creation.
    if dir != None:
        mkdir_nofail(dir, verbosity=0)

    # The save file.
    save_file = open_write_file(file, dir, force)

    # Create the NESSY data object.
    data = Nessy_data(spin_id=spin_id)

    # Create the NESSY file.
    write_program_setup(file=save_file, dir=dir, data=data)

    # Loop over the experiments.
    for ei in range(data.num_exp):
        write_sequence(file=save_file, data=data, ei=ei)
        write_cpmg_datasets(file=save_file, data=data, ei=ei)
        write_spinlock_datasets(file=save_file, data=data, ei=ei)
        write_experiment_setup(file=save_file, data=data, ei=ei)
Example #19
def write(file="results", dir=None, force=False, compress_type=1, verbosity=1):
    """Create the results file."""

    # Test if the current data pipe exists.
    check_pipe()

    # The special data pipe name directory.
    if dir == 'pipe_name':
        dir = pipes.cdp_name()

    # Open the file for writing.
    results_file = open_write_file(file_name=file, dir=dir, force=force, compress_type=compress_type, verbosity=verbosity)

    # Write the results.
    ds.to_xml(results_file, pipes=pipes.cdp_name())

    # Close the results file.
    results_file.close()
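A hypothetical call of the above using the special 'pipe_name' directory value handled in the code, writing a Bzip2 compressed results file:

write(file='results', dir='pipe_name', compress_type=1, force=True)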
Example #20
def write(file=None, dir=None, force=False):
    """Write the J coupling data to file.

    @keyword file:      The file name or object to write to.
    @type file:         str or file object
    @keyword dir:       The name of the directory to place the file into (defaults to the current directory).
    @type dir:          str
    @keyword force:     A flag which if True will cause any pre-existing file to be overwritten.
    @type force:        bool
    """

    # Check the pipe setup.
    check_pipe_setup(sequence=True, j=True)

    # Open the file for writing.
    file = open_write_file(file, dir, force)

    # Loop over the interatomic data containers and collect the data.
    data = []
    for interatom in interatomic_loop():
        # Skip deselected containers.
        if not interatom.select:
            continue

        # Skip containers with no J coupling.
        if not hasattr(interatom, 'j_coupling'):
            continue

        # Append the spin data.
        data.append([])
        data[-1].append(interatom.spin_id1)
        data[-1].append(interatom.spin_id2)

        # The value.
        data[-1].append(repr(interatom.j_coupling))

        # The error.
        if hasattr(interatom, 'j_coupling_err'):
            data[-1].append(repr(interatom.j_coupling_err))
        else:
            data[-1].append(repr(None))

    # Write out.
    write_data(out=file, headings=["Spin_ID1", "Spin_ID2", "J coupling", "J coupling error"], data=data)
Example #21
def nessy_input(file='save.NESSY', dir=None, spin_id=None, force=False):
    """Create the NESSY input files.

    @keyword file:              The name of the NESSY save file to create.
    @type file:                 str
    @keyword dir:               The optional directory to place the file into.  If None, then the file will be placed into the current directory.
    @type dir:                  str or None
    @keyword spin_id:           The spin ID string to restrict the file creation to.
    @type spin_id:              str
    @keyword force:             A flag which if True will cause all pre-existing files to be overwritten.
    @type force:                bool
    """

    # Test if the current pipe exists.
    pipes.test()

    # Test if sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Test if the experiment type has been set.
    if not hasattr(cdp, 'exp_type'):
        raise RelaxError("The relaxation dispersion experiment type has not been specified.")

    # Directory creation.
    if dir != None:
        mkdir_nofail(dir, verbosity=0)

    # The save file.
    save_file = open_write_file(file, dir, force)

    # Create the NESSY data object.
    data = Nessy_data(spin_id=spin_id)

    # Create the NESSY file.
    write_program_setup(file=save_file, dir=dir, data=data)

    # Loop over the experiments.
    for ei in range(data.num_exp):
        write_sequence(file=save_file, data=data, ei=ei)
        write_cpmg_datasets(file=save_file, data=data, ei=ei)
        write_spinlock_datasets(file=save_file, data=data, ei=ei)
        write_experiment_setup(file=save_file, data=data, ei=ei)
Example #22
def write(param=None, file=None, dir=None, scaling=1.0, return_value=None, return_data_desc=None, comment=None, bc=False, force=False):
    """Write data to a file.

    @keyword param:             The name of the parameter to write to file.
    @type param:                str
    @keyword file:              The file to write the data to.
    @type file:                 str
    @keyword dir:               The name of the directory to place the file into (defaults to the current directory).
    @type dir:                  str
    @keyword scaling:           The value to scale the parameter by.
    @type scaling:              float
    @keyword return_value:      An optional function which if supplied will override the default value returning function.
    @type return_value:         None or func
    @keyword return_data_desc:  An optional function which if supplied will override the default parameter description returning function.
    @type return_data_desc:     None or func
    @keyword comment:           Text which will be added to the start of the file as comments.  All lines will be prefixed by '# '.
    @type comment:              str
    @keyword bc:                A flag which if True will cause the back calculated values to be written.
    @type bc:                   bool
    @keyword force:             A flag which if True will cause any pre-existing file to be overwritten.
    @type force:                bool
    """

    # Test if the current pipe exists.
    check_pipe()

    # Test if the sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Open the file for writing.
    file_path = get_file_path(file, dir)
    file = open_write_file(file, dir, force)

    # Write the data.
    write_data(param=param, file=file, scaling=scaling, return_value=return_value, return_data_desc=return_data_desc, comment=comment, bc=bc)

    # Close the file.
    file.close()

    # Add the file to the results file list.
    add_result_file(type='text', label='Text', file=file_path)
Example #23
def correlation_matrix(matrix=None, labels=None, file=None, dir=None, force=False):
    """Gnuplot plotting function for representing correlation matrices.

    @keyword matrix:    The correlation matrix.  This must be a square matrix.
    @type matrix:       numpy rank-2 array.
    @keyword labels:    The labels for each element of the matrix.  The same label is assumed for each [i, i] pair in the matrix.
    @type labels:       list of str
    @keyword file:      The name of the file to create.
    @type file:         str
    @keyword dir:       The directory where the file will be placed.  If set to None, then the file will be placed in the current directory.
    @type dir:          str or None
    @keyword force:     A flag which if True will cause any pre-existing file to be overwritten.
    @type force:        bool
    """

    # Open the text file for writing.
    output = open_write_file(file, dir=dir, force=force)

    # The dimensions.
    n = len(matrix)

    # The header line.
    output.write('#')
    for i in range(n):
        if i == 0:
            output.write(" %18s" % labels[i])
        else:
            output.write(" %20s" % labels[i])
    output.write('\n')

    # Output the matrix.
    for i in range(n):
        for j in range(n):
            # Output the matrix.
            if j == 0:
                output.write("%20.15f" % matrix[i, j])
            else:
                output.write(" %20.15f" % matrix[i, j])

        # End of the current line.
        output.write('\n')

    # Close the file.
    output.close()
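A hypothetical usage of the above with a small 2x2 identity matrix and illustrative labels:

from numpy import eye
correlation_matrix(matrix=eye(2), labels=['x', 'y'], file='corr.dat', force=True)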
Example #24
def create_grace2images(dir=None):
    """Create the grace2images.py executable script.

    @keyword dir:   The directory to place the script into.
    @type dir:      str
    """

    # Expand any ~ characters.
    dir = expanduser(dir)

    # Open the file.
    print("\nCreating the Python \"grace to PNG/EPS/SVG...\" conversion script.")
    file_name = "grace2images.py"
    file_path = get_file_path(file_name=file_name, dir=dir)
    file = open_write_file(file_name=file_name, dir=dir, force=True)

    # Write the Python "grace to PNG/EPS/SVG..." conversion script.
    script_grace2images(file=file)

    # Close the script, then make it executable.
    file.close()
    chmod(file_path, S_IRWXU|S_IRGRP|S_IROTH)
Example #25
def write_pdb(file=None, dir=None, model_num=None, compress_type=0, force=False):
    """The PDB writing function.

    @keyword file:          The name of the PDB file to write.  This can also be a file instance.
    @type file:             str or file instance
    @keyword dir:           The directory where the PDB file will be placed.  If set to None, then the file will be placed in the current directory.
    @type dir:              str or None
    @keyword model_num:     The model to place into the PDB file.  If not supplied, then all models will be placed into the file.
    @type model_num:        None or int
    @keyword compress_type: The compression type.  The integer values correspond to the compression type: 0, no compression; 1, Bzip2 compression; 2, Gzip compression.
    @type compress_type:    int
    @keyword force:         The force flag which if True will cause the file to be overwritten.
    @type force:            bool
    """

    # Test if the current data pipe exists.
    pipes.test()

    # Check if the structural object exists.
    if not hasattr(cdp, 'structure'):
        raise RelaxError("No structural data is present in the current data pipe.")

    # Path handling.
    if isinstance(file, str):
        # The file path.
        file = get_file_path(file, dir)

        # Add '.pdb' to the end of the file path if it isn't there yet.
        if not search(".pdb$", file):
            file = file + '.pdb'

    # Open the file for writing.
    file = open_write_file(file, compress_type=compress_type, force=force)

    # Write the structures.
    cdp.structure.write_pdb(file, model_num=model_num)
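A hypothetical call of the above; the '.pdb' extension is appended automatically and Gzip compression is selected via compress_type=2:

write_pdb(file='structure', dir='models', model_num=1, compress_type=2, force=True)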
Example #26
def write_r2eff_files(input_dir=None, base_dir=None, force=False):
    """Create the CATIA R2eff input files.

    @keyword input_dir: The special directory for the R2eff input files.
    @type input_dir:    str
    @keyword base_dir:  The base directory to place the files into.
    @type base_dir:     str
    @keyword force:     A flag which if True will cause a pre-existing file to be overwritten.
    @type force:        bool
    """

    # Create the directory for the R2eff files for each field and spin.
    dir = base_dir + sep + input_dir
    mkdir_nofail(dir, verbosity=0)

    # Determine the isotope information.
    isotope = None
    for spin in spin_loop(skip_desel=True):
        if hasattr(spin, 'isotope'):
            if isotope == None:
                isotope = spin.isotope
            elif spin.isotope != isotope:
                raise RelaxError("CATIA only supports one spin type.")
    if isotope == None:
        raise RelaxError("The spin isotopes have not been specified.")

    # Isotope translation.
    if isotope == '1H':
        isotope = 'H1'
    elif isotope == '13C':
        isotope = 'C13'
    elif isotope == '15N':
        isotope = 'N15'

    # Loop over the frequencies.
    for frq, mi in loop_frq(return_indices=True):
        # The spectrometer frequency in MHz, as an integer.
        frq_string = int(frq*1e-6)

        # The set files.
        file_name = "data_set_%i.inp" % frq_string
        set_file = open_write_file(file_name=file_name, dir=base_dir, force=force)
        id = frq_string
        set_file.write("ID=%s\n" % id)
        set_file.write("Sfrq = %s\n" % frq_string)
        set_file.write("Temperature = %s\n" % 0.0)
        set_file.write("Nucleus = %s\n" % isotope)
        set_file.write("Couplednucleus = %s\n" % 'H1')
        set_file.write("Time_equil = %s\n" % 0.0)
        set_file.write("Pwx_cp = %s\n" % 0.0)
        set_file.write("Taub = %s\n" % 0.0)
        set_file.write("Time_T2 = %s\n"% cdp.relax_time_list[0])
        set_file.write("Xcar = %s\n" % 0.0)
        set_file.write("Seqfil = %s\n" % 'CW_CPMG')
        set_file.write("Minerror = %s\n" % "(2.%;0.5/s)")
        set_file.write("Basis = (%s)\n" % "Iph_7")
        set_file.write("Format = (%i;%i;%i)\n" % (0, 1, 2))
        set_file.write("DataDirectory = %s\n" % (dir+sep))
        set_file.write("Data = (\n")

        # Loop over the spins.
        for spin, mol_name, res_num, res_name, spin_id in spin_loop(full_info=True, return_id=True, skip_desel=True):
            # The file.
            file_name = "spin%s_%i.cpmg" % (spin_id.replace('#', '_').replace(':', '_').replace('@', '_'), frq_string)
            spin_file = open_write_file(file_name=file_name, dir=dir, force=force)

            # Write the header.
            spin_file.write("# %18s %20s %20s\n" % ("nu_cpmg(Hz)", "R2(1/s)", "Esd(R2)"))

            # Loop over the dispersion points.
            for offset, point, oi, di in loop_offset_point(exp_type=EXP_TYPE_CPMG_SQ, frq=frq, return_indices=True):
                # The key.
                key = return_param_key_from_data(exp_type=EXP_TYPE_CPMG_SQ, frq=frq, offset=offset, point=point)

                # No data.
                if key not in spin.r2eff:
                    continue

                # Write out the data.
                spin_file.write("%20.15f %20.15f %20.15f\n" % (point, spin.r2eff[key], spin.r2eff_err[key]))

            # Close the file.
            spin_file.close()

            # Add the file name to the set.
            catia_spin_id = "%i%s" % (res_num, spin.name)
            set_file.write(" [%s;%s];\n" % (catia_spin_id, file_name))

        # Terminate the set file.
        set_file.write(")\n")
        set_file.close()
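A hypothetical call of the above, assuming a CPMG dispersion data pipe with R2eff values and the spin isotopes set; the directory names are illustrative:

write_r2eff_files(input_dir='r2eff', base_dir='catia', force=True)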
Example #27
def create_vector_dist(length=None,
                       symmetry=True,
                       file=None,
                       dir=None,
                       force=False):
    """Create a PDB representation of the vector distribution.

    @keyword length:    The length to set the vectors to in the PDB file.
    @type length:       float
    @keyword symmetry:  The symmetry flag which if set will create a second PDB chain 'B' which is the same as chain 'A' but with the vectors reversed.
    @type symmetry:     bool
    @keyword file:      The name of the PDB file to create.
    @type file:         str
    @keyword dir:       The name of the directory to place the PDB file into.
    @type dir:          str
    @keyword force:     Flag which if set will overwrite any pre-existing file.
    @type force:        bool
    """

    # Test if the current pipe exists.
    check_pipe()

    # Test if a structure has been loaded.
    if not hasattr(cdp, 'structure') or not cdp.structure.num_models() > 0:
        raise RelaxNoPdbError

    # Test if sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Test if unit vectors exist.
    vectors = False
    for interatom in interatomic_loop():
        if hasattr(interatom, 'vector'):
            vectors = True
            break
    if not vectors:
        raise RelaxNoVectorsError

    # Initialise.
    #############

    # Create the structural object.
    structure = Internal()

    # Add a structure.
    structure.add_molecule(name='vector_dist')

    # Alias the single molecule from the single model.
    mol = structure.structural_data[0].mol[0]

    # Initialise the residue and atom numbers.
    res_num = 1
    atom_num = 1

    # Centre of mass.
    #################

    # Calculate the centre of mass.
    R = pipe_centre_of_mass()

    # Increment the residue number.
    res_num = res_num + 1

    # The vectors.
    ##############

    # Loop over the interatomic data containers.
    for interatom in interatomic_loop():
        # Get the spins.
        spin1 = return_spin(spin_hash=interatom._spin_hash1)
        spin2 = return_spin(spin_hash=interatom._spin_hash2)

        # Skip deselected spin systems.
        if not spin1.select or not spin2.select:
            continue

        # Skip containers missing vectors.
        if not hasattr(interatom, 'vector'):
            continue

        # Scale the vector.
        vector = interatom.vector * length * 1e10

        # Add the first spin as the central atom.
        mol.atom_add(pdb_record='ATOM',
                     atom_num=atom_num,
                     atom_name=spin1.name,
                     res_name=spin1._res_name,
                     chain_id='A',
                     res_num=spin1._res_num,
                     pos=R,
                     segment_id=None,
                     element=spin1.element)

        # Add the second spin as the end atom.
        mol.atom_add(pdb_record='ATOM',
                     atom_num=atom_num + 1,
                     atom_name=spin2.name,
                     res_name=spin2._res_name,
                     chain_id='A',
                     res_num=spin2._res_num,
                     pos=R + vector,
                     segment_id=None,
                     element=spin2.element)

        # Connect the two atoms.
        mol.atom_connect(index1=atom_num - 1, index2=atom_num)

        # Increment the atom number.
        atom_num = atom_num + 2

    # Symmetry chain.
    if symmetry:
        # Loop over the interatomic data containers.
        for interatom in interatomic_loop():
            # Get the spins.
            spin1 = return_spin(spin_hash=interatom._spin_hash1)
            spin2 = return_spin(spin_hash=interatom._spin_hash2)

            # Skip deselected spin systems.
            if not spin1.select or not spin2.select:
                continue

            # Skip containers missing vectors.
            if not hasattr(interatom, 'vector'):
                continue

            # Scale the vector.
            vector = interatom.vector * length * 1e10

            # Add the first spin as the central atom.
            mol.atom_add(pdb_record='ATOM',
                         atom_num=atom_num,
                         atom_name=spin1.name,
                         res_name=spin1._res_name,
                         chain_id='B',
                         res_num=spin1._res_num,
                         pos=R,
                         segment_id=None,
                         element=spin1.element)

            # Add the second spin as the end atom.
            mol.atom_add(pdb_record='ATOM',
                         atom_num=atom_num + 1,
                         atom_name=spin2.name,
                         res_name=spin2._res_name,
                         chain_id='B',
                         res_num=spin2._res_num,
                         pos=R - vector,
                         segment_id=None,
                         element=spin2.element)

            # Connect the two atoms.
            mol.atom_connect(index1=atom_num - 1, index2=atom_num)

            # Increment the atom number.
            atom_num = atom_num + 2

    # Create the PDB file.
    ######################

    # Print out.
    print("\nGenerating the PDB file.")

    # Open the PDB file for writing.
    tensor_pdb_file = open_write_file(file, dir, force=force)

    # Write the data.
    structure.write_pdb(tensor_pdb_file)

    # Close the file.
    tensor_pdb_file.close()

    # Add the file to the results file list.
    if not hasattr(cdp, 'result_files'):
        cdp.result_files = []
    if dir == None:
        dir = getcwd()
    cdp.result_files.append([
        'vector_dist_pdb', 'Vector distribution PDB',
        get_file_path(file, dir)
    ])
    status.observers.result_file.notify()
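A minimal usage sketch follows.  It assumes an active relax data pipe with a structure, sequence data, and interatomic unit vectors already loaded; the length appears to be in meters, as the function scales the vectors by 1e10 to convert to Angstroms:

# Hedged usage sketch: a 2 nm vector representation with the mirrored 'B' chain.
create_vector_dist(length=2e-9, symmetry=True, file='vector_dist.pdb', dir='output', force=True)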
Example #34
def create_rotor_pdb(file=None,
                     dir=None,
                     rotor_angle=None,
                     axis=None,
                     axis_pt=True,
                     centre=None,
                     span=2e-9,
                     blade_length=5e-10,
                     force=False,
                     staggered=False):
    """Create a PDB representation of a rotor motional model.

    @keyword file:          The name of the PDB file to create.
    @type file:             str
    @keyword dir:           The name of the directory to place the PDB file into.
    @type dir:              str
    @keyword rotor_angle:   The angle of the rotor motion in degrees.
    @type rotor_angle:      float
    @keyword axis:          The vector defining the rotor axis.
    @type axis:             numpy rank-1, 3D array
    @keyword axis_pt:       A point lying anywhere on the rotor axis.  This is used to define the position of the axis in 3D space.
    @type axis_pt:          numpy rank-1, 3D array
    @keyword centre:        The central point of the representation.  If this point is not on the rotor axis, then the closest point on the axis will be used for the centre.
    @type centre:           numpy rank-1, 3D array
    @keyword span:          The distance from the central point to the rotor blades (meters).
    @type span:             float
    @keyword blade_length:  The length of the representative rotor blades.
    @type blade_length:     float
    @keyword force:         A flag which if set will overwrite any pre-existing file.
    @type force:            bool
    @keyword staggered:     A flag which if True will cause the rotor blades to be staggered.  This is used to avoid blade overlap.
    @type staggered:        bool
    """

    # Test if the current pipe exists.
    check_pipe()

    # Convert the angle to radians.
    rotor_angle = rotor_angle / 360.0 * 2.0 * pi

    # Create the structural object.
    structure = Internal()

    # Generate the rotor object.
    rotor(structure=structure,
          rotor_angle=rotor_angle,
          axis=axis,
          axis_pt=axis_pt,
          centre=centre,
          span=span,
          blade_length=blade_length,
          staggered=staggered)

    # Print out.
    print("\nGenerating the PDB file.")

    # Open the PDB file for writing.
    tensor_pdb_file = open_write_file(file, dir, force=force)

    # Write the data.
    structure.write_pdb(tensor_pdb_file)

    # Close the file.
    tensor_pdb_file.close()

    # Add the file to the results file list.
    if not hasattr(cdp, 'result_files'):
        cdp.result_files = []
    if dir == None:
        dir = getcwd()
    cdp.result_files.append(
        ['rotor_pdb', 'Rotor PDB',
         get_file_path(file, dir)])
    status.observers.result_file.notify()
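As a hedged usage sketch, a hypothetical rotor along the laboratory z-axis could be generated as follows.  The rotor_angle is in degrees (converted to radians internally), while span and blade_length are in meters; all values here are illustrative only:

from numpy import array, float64

# Hedged sketch: a 60 degree rotor about the z-axis through the origin.
create_rotor_pdb(file='rotor.pdb', dir='output', rotor_angle=60.0, axis=array([0.0, 0.0, 1.0], float64), axis_pt=array([0.0, 0.0, 0.0], float64), centre=array([0.0, 0.0, 0.0], float64), force=True)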
Example #35
def sherekhan_input(spin_id=None, force=False, dir='ShereKhan'):
    """Create the ShereKhan input files.

    @keyword spin_id:           The spin ID string to restrict the file creation to.
    @type spin_id:              str
    @keyword force:             A flag which if True will cause all pre-existing files to be overwritten.
    @type force:                bool
    @keyword dir:               The optional directory to place the files into.  If None, then the files will be placed into the current directory.
    @type dir:                  str or None
    """

    # Test if the current pipe exists.
    check_pipe()

    # Test if sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Test if the experiment type has been set.
    if not hasattr(cdp, 'exp_type'):
        raise RelaxError("The relaxation dispersion experiment type has not been specified.")

    # Test if the model has been set.
    if not hasattr(cdp, 'model_type'):
        raise RelaxError("The relaxation dispersion model has not been specified.")

    # Directory creation.
    if dir != None:
        mkdir_nofail(dir, verbosity=0)

    # Loop over the spin blocks.
    cluster_index = 0
    for spin_ids in loop_cluster():
        # The spin containers.
        spins = spin_ids_to_containers(spin_ids)

        # Loop over the magnetic fields.
        for exp_type, frq, ei, mi in loop_exp_frq(return_indices=True):
            # Loop over the time, and count it.
            time_i = 0
            for time, ti in loop_time(exp_type=exp_type, frq=frq, return_indices=True):
                time_i += 1

            # Check that not more than one time point is returned.
            if time_i > 1:
                raise RelaxError("Number of returned time poins is %i. Only 1 time point is expected."%time_i)

            # The ShereKhan input file for the spin cluster.
            file_name = 'sherekhan_frq%s.in' % (mi+1)
            if dir != None:
                dir_name = dir + sep + 'cluster%s' % (cluster_index+1)
            else:
                dir_name = 'cluster%s' % (cluster_index+1)
            file = open_write_file(file_name=file_name, dir=dir_name, force=force)

            # The B0 field for the nuclei of interest in MHz (must be positive to be accepted by the server).
            file.write("%.10f\n" % abs(frq / periodic_table.gyromagnetic_ratio('1H') * periodic_table.gyromagnetic_ratio('15N') / 1e6))

            # The constant relaxation time for the CPMG experiment in seconds.
            file.write("%s\n" % (time))

            # The comment line.
            file.write("# %-18s %-20s %-20s\n" % ("nu_cpmg (Hz)", "R2eff (rad/s)", "Error"))

            # Loop over the spins of the cluster.
            for i in range(len(spins)):
                # Get the residue container.
                res = return_residue(spin_ids[i])

                # Name the residue if needed.
                res_name = res.name
                if res_name == None:
                    res_name = 'X'

                # Initialise the lines to output (to be able to catch missing data).
                lines = []

                # The residue ID line.
                lines.append("# %s%s\n" % (res_name, res.num))

                # Loop over the dispersion points.
                for offset, point in loop_offset_point(exp_type=exp_type, frq=frq, skip_ref=True):
                    # The parameter key.
                    param_key = return_param_key_from_data(exp_type=exp_type, frq=frq, offset=offset, point=point)

                    # No data.
                    if param_key not in spins[i].r2eff:
                        continue

                    # Store the data.
                    lines.append("%20.15g %20.13g %20.13g\n" % (point, spins[i].r2eff[param_key], spins[i].r2eff_err[param_key]))

                # No data.
                if len(lines) == 1:
                    continue

                # Write out the data.
                for line in lines:
                    file.write(line)

            # Close the file.
            file.close()

        # Increment the cluster index.
        cluster_index += 1
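Based on the write calls above, a generated 'sherekhan_frq1.in' file would have roughly the following shape: one line with the 15N field strength in MHz, one line with the constant relaxation time, a comment header, and then one block per residue of the cluster.  All numbers and the residue name below are hypothetical, and the column whitespace is approximate:

60.7957020000
0.04
# nu_cpmg (Hz)       R2eff (rad/s)        Error
# G70
                  50      7.0443222804      0.1139128444
                 100      6.8672419550      0.1122533310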
Example #36
def correlation_matrix(matrix=None,
                       labels=None,
                       file=None,
                       dir=None,
                       force=False):
    """Gnuplot plotting function for representing correlation matrices.

    @keyword matrix:    The correlation matrix.  This must be a square matrix.
    @type matrix:       numpy rank-2 array.
    @keyword labels:    The labels for each element of the matrix.  The same label is assumed for each [i, i] pair in the matrix.
    @type labels:       list of str
    @keyword file:      The name of the file to create.
    @type file:         str
    @keyword dir:       The directory where the file will be placed.  If set to None, then the file will be placed in the current directory.
    @type dir:          str or None
    @keyword force:     A flag which if set to True will cause any pre-existing file to be overwritten.
    @type force:        bool
    """

    # The dimensions.
    n = len(matrix)

    # Generate the text file for loading into gnuplot.
    text.correlation_matrix(matrix=matrix,
                            labels=labels,
                            file=file,
                            dir=dir,
                            force=force)

    # The script file name with the extension swapped.
    file_name = swap_extension(file=file, ext='gnu')

    # Open the script file for writing.
    output = open_write_file(file_name, dir=dir, force=force)

    # Gnuplot script setup.
    output.write("#!/usr/bin/env gnuplot\n\n")

    # Set up the terminal type and make the plot square.
    output.write("# Set up the terminal type and make the plot square.\n")
    output.write(
        "set terminal postscript eps size 10,10 enhanced color font 'Helvetica,20' linewidth 0.1\n"
    )
    output.write("set size square\n")

    # The colour map.
    output.write("\n# Blue-red colour map.\n")
    output.write("set palette model RGB\n")
    output.write("set palette defined\n")

    # The labels.
    if labels != None:
        output.write("\n# Labels.\n")
        for axis in ['x', 'y']:
            output.write("set %stics out " % axis)
            if axis == 'x':
                output.write("rotate ")
            output.write("font \",8\" (")
            for i in range(n):
                if i != 0:
                    output.write(", ")
                output.write("\"%s\" %s" % (format_enhanced(labels[i]), i))
            output.write(")\n")

    # Output to EPS.
    output.write("\n# Output to EPS.\n")
    output.write("set output \"%s.eps\"\n" % file_root(file))

    # Load and show the text data.
    output.write("\n# Load and show the text data\n")
    output.write("plot \"%s\" matrix with image\n" % file)

    # Close the file.
    output.close()

    # Make the script executable.
    chmod(get_file_path(file_name=file_name, dir=dir),
          S_IRWXU | S_IRGRP | S_IROTH)
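A hedged usage sketch with a hypothetical 2x2 correlation matrix.  Alongside the text data file, the function writes an executable gnuplot script with the same name but the extension swapped to '.gnu':

from numpy import array

# Hypothetical correlation matrix for two model-free parameters.
R = array([[1.0, -0.3],
           [-0.3, 1.0]])
correlation_matrix(matrix=R, labels=['tm', 'S2'], file='correlation_matrix.txt', dir='plots', force=True)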
Example #37
def macro_write(data_type=None,
                style="classic",
                colour_start_name=None,
                colour_start_rgb=None,
                colour_end_name=None,
                colour_end_rgb=None,
                colour_list=None,
                file=None,
                dir=None,
                force=False):
    """Create a Molmol macro.

    @keyword data_type:         The data type to map to the structure.
    @type data_type:            str
    @keyword style:             The style of the macro.
    @type style:                str
    @keyword colour_start_name: The name of the starting colour of the linear gradient.
    @type colour_start_name:    str
    @keyword colour_start_rgb:  The RGB array starting colour of the linear gradient.
    @type colour_start_rgb:     RGB colour array (len 3 with vals from 0 to 1)
    @keyword colour_end_name:   The name of the ending colour of the linear gradient.
    @type colour_end_name:      str
    @keyword colour_end_rgb:    The RGB array ending colour of the linear gradient.
    @type colour_end_rgb:       RGB colour array (len 3 with vals from 0 to 1)
    @keyword colour_list:       The colour list to search for the colour names.  Can be either 'molmol' or 'x11'.
    @type colour_list:          str or None
    @keyword file:              The name of the macro file to create.
    @type file:                 str
    @keyword dir:               The name of the directory to place the macro file into.
    @type dir:                  str
    @keyword force:             Flag which if set to True will cause any pre-existing file to be overwritten.
    @type force:                bool
    """

    # Test if the current data pipe exists.
    check_pipe()

    # Test if sequence data exists.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Check the arguments.
    if colour_start_name != None and colour_start_rgb != None:
        raise RelaxError(
            "The starting colour name and RGB colour array cannot both be supplied."
        )
    if colour_end_name != None and colour_end_rgb != None:
        raise RelaxError(
            "The ending colour name and RGB colour array cannot both be supplied."
        )

    # Merge the colour args.
    if colour_start_name != None:
        colour_start = colour_start_name
    else:
        colour_start = colour_start_rgb
    if colour_end_name != None:
        colour_end = colour_end_name
    else:
        colour_end = colour_end_rgb

    # Create the macro.
    commands = create_macro(data_type=data_type,
                            style=style,
                            colour_start=colour_start,
                            colour_end=colour_end,
                            colour_list=colour_list)

    # File name.
    if file == None:
        file = data_type + '.mac'

    # Open the file for writing.
    file_path = get_file_path(file, dir)
    file = open_write_file(file, dir, force)

    # Loop over the commands and write them.
    for command in commands:
        file.write(command + "\n")

    # Close the file.
    file.close()

    # Add the file to the results file list.
    add_result_file(type='molmol', label='Molmol', file=file_path)
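A hedged usage sketch; the data_type value and colour names are hypothetical, and the file argument is omitted to show the '<data_type>.mac' default naming:

# Colour hypothetical 'S2' values from white to red using the X11 colour list
# (assumes a relax pipe with sequence data; writes 'S2.mac' by default).
macro_write(data_type='S2', colour_start_name='white', colour_end_name='red', colour_list='x11', dir='molmol', force=True)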
Example #38
def macro_write(data_type=None, style="classic", colour_start_name=None, colour_start_rgb=None, colour_end_name=None, colour_end_rgb=None, colour_list=None, file=None, dir=None, force=False):
    """Create a PyMOL macro file.

    @keyword data_type:         The data type to map to the structure.
    @type data_type:            str
    @keyword style:             The style of the macro.
    @type style:                str
    @keyword colour_start_name: The name of the starting colour of the linear gradient.
    @type colour_start_name:    str
    @keyword colour_start_rgb:  The RGB array starting colour of the linear gradient.
    @type colour_start_rgb:     RGB colour array (len 3 with vals from 0 to 1)
    @keyword colour_end_name:   The name of the ending colour of the linear gradient.
    @type colour_end_name:      str
    @keyword colour_end_rgb:    The RGB array ending colour of the linear gradient.
    @type colour_end_rgb:       RGB colour array (len 3 with vals from 0 to 1)
    @keyword colour_list:       The colour list to search for the colour names.  Can be either 'molmol' or 'x11'.
    @type colour_list:          str or None
    @keyword file:              The name of the macro file to create.
    @type file:                 str
    @keyword dir:               The name of the directory to place the macro file into.
    @type dir:                  str
    @keyword force:             Flag which if set to True will cause any pre-existing file to be overwritten.
    @type force:                bool
    """

    # Test if the current data pipe exists.
    pipes.test()

    # Test if sequence data exists.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Check the arguments.
    if colour_start_name != None and colour_start_rgb != None:
        raise RelaxError("The starting colour name and RGB colour array cannot both be supplied.")
    if colour_end_name != None and colour_end_rgb != None:
        raise RelaxError("The ending colour name and RGB colour array cannot both be supplied.")

    # Merge the colour args.
    if colour_start_name != None:
        colour_start = colour_start_name
    else:
        colour_start = colour_start_rgb
    if colour_end_name != None:
        colour_end = colour_end_name
    else:
        colour_end = colour_end_rgb

    # Create the macro.
    commands = create_macro(data_type=data_type, style=style, colour_start=colour_start, colour_end=colour_end, colour_list=colour_list)

    # File name.
    if file == None:
        file = data_type + '.pml'

    # Open the file for writing.
    file_path = get_file_path(file, dir)
    file = open_write_file(file, dir, force)

    # Loop over the commands and write them.
    for command in commands:
        file.write(command + "\n")

    # Close the file.
    file.close()

    # Add the file to the results file list.
    add_result_file(type='pymol', label='PyMOL', file=file_path)
Example #39
def create_spin_input(function=None, spin=None, spin_id=None, dir=None):
    """Generate the CPMGFit file for the given spin.

    @keyword function:  The CPMGFit model or function name.
    @type function:     str
    @keyword spin:      The spin container to generate the input file for.
    @type spin:         SpinContainer instance
    @keyword spin_id:   The spin ID string corresponding to the spin container.
    @type spin_id:      str
    @keyword dir:       The directory to place the file into.
    @type dir:          str or None
    @return:            The name of the file created.
    @rtype:             str
    """

    # The output file.
    file_name = spin_file_name(spin_id=spin_id)
    file = open_write_file(file_name=file_name, dir=dir, force=True)

    # The title.
    file.write("title %s\n" % spin_id)

    # The proton frequencies.
    frq = get_frequencies(units='T')

    # The frequency info.
    file.write("fields %s" % len(frq))
    for i in range(len(frq)):
        file.write(" %.10f" % frq[i])
    file.write("\n")

    # The function and parameters.
    if function == 'CPMG':
        # Function.
        file.write("function CPMG\n")

        # Parameters.
        file.write("R2 1 10 20\n")
        file.write("Rex 0 100.0 100\n")
        file.write("Tau 0 10.0 100\n")

    # The function and parameters.
    elif function == 'Full_CPMG':
        # Function.
        file.write("function Full_CPMG\n")

        # Parameters.
        file.write("R2 1 10 20\n")
        file.write("papb 0.01 0.49 20\n")
        file.write("dw 0 10.0 100\n")
        file.write("kex 0.1 1.0 100\n")

    # The function and parameters.
    elif function == "Ishima":
        # Function.
        file.write("function Ishima\n")

        # Parameters.
        file.write("R2 1 10 20\n")
        file.write("Rex 0 100.0 50\n")
        file.write("PaDw 2 10.0 50\n")
        file.write("Tau 0.1 10.0 50\n")

    # The function and parameters.
    elif function == '3-site_CPMG':
        # Function.
        file.write("function 3-site_CPMG\n")

        # Parameters.
        file.write("R2 1 10 20\n")
        file.write("Rex1 0 100.0 20\n")
        file.write("Tau1 0 10.0 20\n")
        file.write("Rex2 0 100.0 20\n")
        file.write("Tau2 0 10.0 20\n")

    # The Grace setup.
    file.write("xmgr\n")
    file.write("@ xaxis label \"1/tcp (1/ms)\"\n")
    file.write("@ yaxis label \"R2(tcp) (rad/s)\"\n")
    file.write("@ xaxis ticklabel format decimal\n")
    file.write("@ yaxis ticklabel format decimal\n")
    file.write("@ xaxis ticklabel char size 0.8\n")
    file.write("@ yaxis ticklabel char size 0.8\n")
    file.write("@ world xmin 0.0\n")

    # The data.
    file.write("data\n")
    for exp_type, frq, offset, point in loop_exp_frq_offset_point():
        # The parameter key.
        param_key = return_param_key_from_data(exp_type=exp_type, frq=frq, offset=offset, point=point)

        # No data.
        if param_key not in spin.r2eff:
            continue

        # Tesla units.
        B0 = frq * 2.0 * pi / periodic_table.gyromagnetic_ratio('1H')

        # The X value of 1/tcp (or 1/tau_CPMG) in ms.  This assumes Art's usage of the definition that nu_CPMG = 1 / (2 * tau_CPMG).
        x = 2.0 * point / 1000.0

        # Write out the data and error.
        file.write("%-20f %-20f %-20f %-20f\n" % (x, spin.r2eff[param_key], spin.r2eff_err[param_key], B0))

    # Close the file and return its name.
    file.close()
    return file_name
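A hedged usage sketch, assuming relax's spin_loop() generator with its return_id and skip_desel keywords is available in this namespace:

# Generate one CPMGFit input file per selected spin.
for spin, spin_id in spin_loop(return_id=True, skip_desel=True):
    name = create_spin_input(function='CPMG', spin=spin, spin_id=spin_id, dir='cpmgfit')
    print("Created %s" % name)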
Example #40
def create_geometric_rep(format='PDB',
                         file=None,
                         dir=None,
                         compress_type=0,
                         size=30.0,
                         inc=36,
                         force=False):
    """Create a PDB file containing a geometric object representing the frame order dynamics.

    @keyword format:        The format for outputting the geometric representation.  Currently only the 'PDB' format is supported.
    @type format:           str
    @keyword file:          The name of the file of the PDB representation of the frame order dynamics to create.
    @type file:             str
    @keyword dir:           The name of the directory to place the PDB file into.
    @type dir:              str
    @keyword compress_type: The compression type.  The integer values correspond to the compression type: 0, no compression; 1, Bzip2 compression; 2, Gzip compression.
    @type compress_type:    int
    @keyword size:          The size of the geometric object in Angstroms.
    @type size:             float
    @keyword inc:           The number of increments for the filling of the cone objects.
    @type inc:              int
    @keyword force:         Flag which if set to True will cause any pre-existing file to be overwritten.
    @type force:            bool
    """

    # Printout.
    subsection(
        file=sys.stdout,
        text=
        "Creating a PDB file containing a geometric object representing the frame order dynamics."
    )

    # Checks.
    check_parameters(escalate=2)

    # Initialise.
    titles = []
    structures = []
    representation = []
    sims = []
    file_root = []

    # Symmetry for inverted representations?
    sym = True
    if cdp.model in [MODEL_ROTOR, MODEL_FREE_ROTOR, MODEL_DOUBLE_ROTOR]:
        sym = False

    # The standard representation.
    titles.append("Representation A")
    structures.append(Internal())
    if sym:
        representation.append('A')
        file_root.append("%s_A" % file)
    else:
        representation.append(None)
        file_root.append(file)
    sims.append(False)

    # The inverted representation.
    if sym:
        titles.append("Representation A")
        structures.append(Internal())
        representation.append('B')
        file_root.append("%s_B" % file)
        sims.append(False)

    # The standard MC simulation representation.
    if hasattr(cdp, 'sim_number'):
        titles.append("MC simulation representation A")
        structures.append(Internal())
        if sym:
            representation.append('A')
            file_root.append("%s_sim_A" % file)
        else:
            representation.append(None)
            file_root.append("%s_sim" % file)
        sims.append(True)

    # The inverted MC simulation representation.
    if hasattr(cdp, 'sim_number') and sym:
        titles.append("MC simulation representation B")
        structures.append(Internal())
        representation.append('B')
        file_root.append("%s_sim_B" % file)
        sims.append(True)

    # Loop over each structure and add the contents.
    for i in range(len(structures)):
        # Printout.
        subsubsection(file=sys.stdout, text="Creating the %s." % titles[i])

        # Create a model for each Monte Carlo simulation.
        if sims[i]:
            for sim_i in range(cdp.sim_number):
                structures[i].add_model(model=sim_i + 1)

        # Add the pivots.
        add_pivots(structure=structures[i], sims=sims[i])

        # Add all rotor objects.
        add_rotors(structure=structures[i],
                   representation=representation[i],
                   size=size,
                   sims=sims[i])

        # Add the axis systems.
        add_axes(structure=structures[i],
                 representation=representation[i],
                 size=size,
                 sims=sims[i])

        # Add the cone objects.
        if cdp.model not in [
                MODEL_ROTOR, MODEL_FREE_ROTOR, MODEL_DOUBLE_ROTOR
        ]:
            add_cones(structure=structures[i],
                      representation=representation[i],
                      size=size,
                      inc=inc,
                      sims=sims[i])

        # Add atoms for creating titles.
        add_titles(structure=structures[i],
                   representation=representation[i],
                   displacement=size + 10,
                   sims=sims[i])

        # Create the PDB file.
        if format == 'PDB':
            pdb_file = open_write_file(file_root[i] + '.pdb',
                                       dir,
                                       compress_type=compress_type,
                                       force=force)
            structures[i].write_pdb(pdb_file)
            pdb_file.close()
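A hedged usage sketch.  Because the '_A'/'_B' suffixes and the '.pdb' extension are appended internally, the file argument behaves as a file root rather than a full file name:

# Hypothetical call for a frame order pipe with a completed analysis.
create_geometric_rep(format='PDB', file='frame_order', dir='output', size=30.0, inc=36, force=True)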
Example #41
def corr_plot(format=None, file=None, dir=None, force=False):
    """Generate a correlation plot of the measured vs. back-calculated RDCs.

    @keyword format:    The format for the plot file.  The following values are accepted: 'grace', a Grace plot; None, a plain text file.
    @type format:       str or None
    @keyword file:      The file name or object to write to.
    @type file:         str or file object
    @keyword dir:       The name of the directory to place the file into (defaults to the current directory).
    @type dir:          str
    @keyword force:     A flag which if True will cause any pre-existing file to be overwritten.
    @type force:        bool
    """

    # Check the pipe setup.
    check_pipe_setup(sequence=True)

    # Does RDC data exist?
    if not hasattr(cdp, 'rdc_ids') or not cdp.rdc_ids:
        warn(RelaxWarning("No RDC data exists, skipping file creation."))
        return

    # Open the file for writing.
    file = open_write_file(file, dir, force)

    # Init.
    data = []

    # The diagonal.
    data.append([[-100, -100, 0], [100, 100, 0]])

    # Loop over the RDC data.
    for align_id in cdp.rdc_ids:
        # Append a new list for this alignment.
        data.append([])

        # Errors present?
        err_flag = False
        for interatom in interatomic_loop():
            # Error present.
            if hasattr(interatom, 'rdc_err') and align_id in interatom.rdc_err.keys():
                err_flag = True
                break

        # Loop over the interatomic data.
        for interatom in interatomic_loop():
            # Skip if data is missing.
            if not hasattr(interatom, 'rdc') or not hasattr(interatom, 'rdc_bc') or not align_id in interatom.rdc.keys() or not align_id in interatom.rdc_bc.keys():
                continue

            # Append the data.
            rdc_bc = convert(interatom.rdc_bc[align_id], interatom.rdc_data_types[align_id], align_id)
            rdc = convert(interatom.rdc[align_id], interatom.rdc_data_types[align_id], align_id)
            if hasattr(interatom, 'rdc_data_types') and align_id in interatom.rdc_data_types and interatom.rdc_data_types[align_id] == 'T':
                rdc_bc -= interatom.j_coupling
                rdc -= interatom.j_coupling
            data[-1].append([rdc_bc, rdc])

            # Errors.
            if err_flag:
                if hasattr(interatom, 'rdc_err') and align_id in interatom.rdc_err.keys():
                    data[-1][-1].append(convert(interatom.rdc_err[align_id], interatom.rdc_data_types[align_id], align_id))
                else:
                    data[-1][-1].append(None)

            # Label.
            data[-1][-1].append("%s-%s" % (interatom.spin_id1, interatom.spin_id2))

    # The data size.
    size = len(data)

    # Only one data set.
    data = [data]

    # Graph type.
    if err_flag:
        graph_type = 'xydy'
    else:
        graph_type = 'xy'

    # Grace file.
    if format == 'grace':
        # The header.
        grace.write_xy_header(file=file, title="RDC correlation plot", sets=[size], set_names=[[None]+cdp.rdc_ids], linestyle=[[2]+[0]*size], data_type=['rdc_bc', 'rdc'], legend_pos=[[1, 0.5]])

        # The main data.
        grace.write_xy_data(data=data, file=file, graph_type=graph_type)
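A hedged usage sketch for a data pipe containing both measured and back-calculated RDC values; the file name is hypothetical:

# Write a Grace plot of measured vs. back-calculated RDCs.
corr_plot(format='grace', file='rdc_corr.agr', dir='plots', force=True)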
Example #42
def write_spin_data(file, dir=None, sep=None, spin_ids=None, mol_names=None, res_nums=None, res_names=None, spin_nums=None, spin_names=None, force=False, data=None, data_name=None, error=None, error_name=None, float_format="%20.15g"):
    """Generator function for reading the spin specific data from file.

    Description
    ===========

    This function writes a columnar formatted file where each line corresponds to a spin system.  Spin identification is either through a spin ID string or through columns containing the molecule name, residue name and number, and/or spin name and number.


    @param file:            The name of the file to write the data to (or alternatively an already opened file object).
    @type file:             str or file object
    @keyword dir:           The directory to place the file into (defaults to the current directory if None and the file argument is not a file object).
    @type dir:              str or None
    @keyword sep:           The column separator which, if None, defaults to whitespace.
    @type sep:              str or None
    @keyword spin_ids:      The list of spin ID strings.
    @type spin_ids:         None or list of str
    @keyword mol_names:     The list of molecule names.
    @type mol_names:        None or list of str
    @keyword res_nums:      The list of residue numbers.
    @type res_nums:         None or list of int
    @keyword res_names:     The list of residue names.
    @type res_names:        None or list of str
    @keyword spin_nums:     The list of spin numbers.
    @type spin_nums:        None or list of int
    @keyword spin_names:    The list of spin names.
    @type spin_names:       None or list of str
    @keyword force:         A flag which if True will cause an existing file to be overwritten.
    @type force:            bool
    @keyword data:          A list of the data to write out.  The first dimension corresponds to the spins.  A second dimension can also be given if multiple data sets across multiple columns are desired.
    @type data:             list or list of lists
    @keyword data_name:     A name corresponding to the data argument.  If the data argument is a list of lists, then this must also be a list with the same length as the second dimension of the data arg.
    @type data_name:        str or list of str
    @keyword error:         A list of the errors to write out.  The first dimension corresponds to the spins.  A second dimension can also be given if multiple data sets across multiple columns are desired.  These will be interspersed between the data columns, if the data is given.  If the data arg is not None, then this must have the same dimensions as that object.
    @type error:            list or list of lists
    @keyword error_name:    A name corresponding to the error argument.  If the error argument is a list of lists, then this must also be a list with the same length as the second dimension of the error arg.
    @type error_name:       str or list of str
    @keyword float_format:  A float formatting string to use for the data and error whenever a float is found.
    @type float_format:     str
    """

    # Data argument tests.
    if data:
        # Data is a list of lists.
        if isinstance(data[0], list):
            # Data and data_name don't match.
            if not isinstance(data_name, list):
                raise RelaxError("The data_name arg '%s' must be a list as the data argument is a list of lists." % data_name)

            # Error doesn't match.
            if error and (len(data) != len(error) or len(data[0]) != len(error[0])):
                raise RelaxError("The data arg:\n%s\n\ndoes not have the same dimensions as the error arg:\n%s." % (data, error))

        # Data is a simple list.
        else:
            # Data and data_name don't match.
            if not isinstance(data_name, str):
                raise RelaxError("The data_name arg '%s' must be a string as the data argument is a simple list." % data_name)

            # Error doesn't match.
            if error and len(data) != len(error):
                raise RelaxError("The data arg:\n%s\n\ndoes not have the same dimensions as the error arg:\n%s." % (data, error))

    # Error argument tests.
    if error:
        # Error is a list of lists.
        if isinstance(error[0], list):
            # Error and error_name don't match.
            if not isinstance(error_name, list):
                raise RelaxError("The error_name arg '%s' must be a list as the error argument is a list of lists." % error_name)

        # Error is a simple list.
        else:
            # Error and error_name don't match.
            if not isinstance(error_name, str):
                raise RelaxError("The error_name arg '%s' must be a string as the error argument is a simple list." % error_name)

    # Number of spins check.
    args = [spin_ids, mol_names, res_nums, res_names, spin_nums, spin_names]
    arg_names = ['spin_ids', 'mol_names', 'res_nums', 'res_names', 'spin_nums', 'spin_names']
    N = None
    first_arg = None
    first_arg_name = None
    for i in range(len(args)):
        if isinstance(args[i], list):
            # First list match.
            if N == None:
                N = len(args[i])
                first_arg = args[i]
                first_arg_name = arg_names[i]

            # Length check.
            if len(args[i]) != N:
                raise RelaxError("The %s and %s arguments do not have the same number of spins ('%s' vs. '%s' respectively)." % (first_arg_name, arg_names[i], len(first_arg), len(args[i])))

    # Nothing?!?
    if N == None:
        raise RelaxError("No spin ID data is present.")

    # Data and error length check.
    if data and len(data) != N:
        raise RelaxError("The %s and data arguments do not have the same number of spins ('%s' vs. '%s' respectively)." % (first_arg_name, len(first_arg), len(data)))
    if error and len(error) != N:
        raise RelaxError("The %s and error arguments do not have the same number of spins ('%s' vs. '%s' respectively)." % (first_arg_name, len(first_arg), len(error)))

    # The spin arguments.
    args = [spin_ids, mol_names, res_nums, res_names, spin_nums, spin_names]
    arg_names = ['spin_id', 'mol_name', 'res_num', 'res_name', 'spin_num', 'spin_name']


    # Init.
    headings = []
    file_data = []

    # Headers - the spin ID info.
    for i in range(len(args)):
        if args[i]:
            headings.append(arg_names[i])

    # Headers - the data.
    if data:
        # List of lists.
        if isinstance(data[0], list):
            # Loop over the list.
            for i in range(len(data[0])):
                # The data.
                headings.append(data_name[i])

                # The error.
                if error:
                    headings.append(error_name[i])

        # Simple list.
        else:
            # The data.
            headings.append(data_name)

            # The error.
            if error:
                headings.append(error_name)

    # Headers - only errors.
    elif error:
        # List of lists.
        if isinstance(error[0], list):
            for i in range(len(error[0])):
                headings.append(error_name[i])

        # Simple list.
        else:
            headings.append(error_name)

    # No headings.
    if headings == []:
        headings = None

    # Spin specific data.
    for spin_index in range(N):
        # Append a new data row.
        file_data.append([])

        # The spin ID info.
        for i in range(len(args)):
            if args[i]:
                value = args[i][spin_index]
                if not isinstance(value, str):
                    value = repr(value)
                file_data[-1].append(value)

        # The data.
        if data:
            # List of lists.
            if isinstance(data[0], list):
                # Loop over the list.
                for i in range(len(data[0])):
                    # The data.
                    if is_float(data[spin_index][i]):
                        file_data[-1].append(float_format % data[spin_index][i])
                    else:
                        file_data[-1].append(repr(data[spin_index][i]))

                    # The error.
                    if error:
                        if is_float(error[spin_index][i]):
                            file_data[-1].append(float_format % error[spin_index][i])
                        else:
                            file_data[-1].append(repr(error[spin_index][i]))

            # Simple list.
            else:
                # The data.
                if is_float(data[spin_index]):
                    file_data[-1].append(float_format % data[spin_index])
                else:
                    file_data[-1].append(repr(data[spin_index]))

                # The error.
                if error:
                    if is_float(error[spin_index]):
                        file_data[-1].append(float_format % error[spin_index])
                    else:
                        file_data[-1].append(repr(error[spin_index]))

        # Only errors.
        elif error:
            # List of lists.
            if isinstance(error[0], list):
                for i in range(len(error[0])):
                    file_data[-1].append(repr(error[spin_index][i]))

            # Simple list.
            else:
                file_data[-1].append(repr(error[spin_index]))

    # No data to write, so do nothing!
    if file_data == [] or file_data == [[]]:
        return

    # Open the file for writing.
    file = open_write_file(file_name=file, dir=dir, force=force)

    # Write out the file data.
    write_data(out=file, headings=headings, data=file_data, sep=sep)
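A hedged usage sketch with a simple (non-nested) data list; the res_nums, res_names, spin_names, data, and error lists must all have one entry per spin, and all values here are hypothetical:

# Two hypothetical spins with one data column and one error column.
write_spin_data(file='r2eff.txt', dir='results', res_nums=[70, 71], res_names=['G', 'A'], spin_names=['N', 'N'], data=[12.3, 11.8], data_name='R2eff', error=[0.2, 0.3], error_name='sd(R2eff)', force=True)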
Example #43
def create(dir=None,
           binary=None,
           diff_search=None,
           sims=None,
           sim_type=None,
           trim=None,
           steps=None,
           heteronuc_type=None,
           atom1=None,
           atom2=None,
           spin_id=None,
           force=False,
           constraints=True):
    """Create the Modelfree4 input files.

    The following files are created:
        - dir/mfin
        - dir/mfdata
        - dir/mfpar
        - dir/mfmodel
        - dir/run.sh

    @keyword dir:               The optional directory to place the files into.  If None, then the files will be placed into a directory named after the current data pipe.
    @type dir:                  str or None
    @keyword binary:            The name of the Modelfree4 binary file.  This can include the path to the binary.
    @type binary:               str
    @keyword diff_search:       The diffusion tensor search algorithm (see the Modelfree4 manual for details).
    @type diff_search:          str
    @keyword sims:              The number of Monte Carlo simulations to perform.
    @type sims:                 int
    @keyword sim_type:          The type of simulation to perform (see the Modelfree4 manual for details).
    @type sim_type:             str
    @keyword trim:              Trimming of the Monte Carlo simulations (see the Modelfree4 manual for details).
    @type trim:                 int
    @keyword steps:             The grid search size (see the Modelfree4 manual for details).
    @type steps:                int
    @keyword heteronuc_type:    The Modelfree4 three letter code for the heteronucleus type, e.g. '15N', '13C', etc.
    @type heteronuc_type:       str
    @keyword atom1:             The name of the heteronucleus in the PDB file.
    @type atom1:                str
    @keyword atom2:             The name of the proton in the PDB file.
    @type atom2:                str
    @keyword spin_id:           The spin identification string.
    @type spin_id:              str
    @keyword force:             A flag which if True will cause all pre-existing files to be overwritten.
    @type force:                bool
    @keyword constraints:       A flag which if True will result in constrained optimisation.
    @type constraints:          bool
    """

    # Test if the current pipe exists.
    check_pipe()

    # Test if sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Test if the PDB file is loaded (for the spheroid and ellipsoid).
    if hasattr(cdp, 'diff_tensor') and cdp.diff_tensor.type != 'sphere' and not hasattr(cdp, 'structure'):
        raise RelaxNoPdbError

    # Deselect certain spins.
    __deselect_spins()

    # Directory creation.
    if dir == None:
        dir = pipes.cdp_name()
    mkdir_nofail(dir, verbosity=0)

    # Number of field strengths and values.
    frq = []
    for ri_id in cdp.ri_ids:
        # New frequency.
        if cdp.spectrometer_frq[ri_id] not in frq:
            frq.append(cdp.spectrometer_frq[ri_id])

    # The 'mfin' file.
    mfin = open_write_file('mfin', dir, force)
    create_mfin(mfin,
                diff_search=diff_search,
                sims=sims,
                sim_type=sim_type,
                trim=trim,
                num_frq=len(frq),
                frq=frq)
    mfin.close()

    # Open the 'mfdata', 'mfmodel', and 'mfpar' files.
    mfdata = open_write_file('mfdata', dir, force)
    mfmodel = open_write_file('mfmodel', dir, force)
    mfpar = open_write_file('mfpar', dir, force)

    # Loop over the sequence.
    for spin, mol_name, res_num, res_name, id in spin_loop(spin_id,
                                                           full_info=True,
                                                           return_id=True):
        # Skip deselected spins.
        if not spin.select:
            continue

        # The 'mfdata' file.
        if not create_mfdata(
                mfdata, spin=spin, spin_id=id, num_frq=len(frq), frq=frq):
            continue

        # The 'mfmodel' file.
        create_mfmodel(mfmodel,
                       spin=spin,
                       spin_id=id,
                       steps=steps,
                       constraints=constraints)

        # The 'mfpar' file.
        create_mfpar(mfpar,
                     spin=spin,
                     spin_id=id,
                     res_num=res_num,
                     atom1=atom1,
                     atom2=atom2)

    # Close the 'mfdata', 'mfmodel', and 'mfpar' files.
    mfdata.close()
    mfmodel.close()
    mfpar.close()

    # The 'run.sh' script.
    run = open_write_file('run.sh', dir, force)
    create_run(run, binary=binary, dir=dir)
    run.close()
    chmod(dir + sep + 'run.sh', S_IRWXU | S_IRGRP | S_IROTH)
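A hedged usage sketch; all argument values below are hypothetical placeholders, and the correct diff_search, sim_type, trim, and steps settings should be taken from the Modelfree4 manual for a real analysis:

# Hypothetical Modelfree4 setup for a 15N analysis.
create(dir='mf_run', binary='modelfree4', diff_search='none', sims=500, sim_type='pred', trim=0, steps=20, heteronuc_type='15N', atom1='N', atom2='H', force=True)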
Example #44
def simulate(file="simulation.pdb.bz2", dir=None, step_size=2.0, snapshot=10, total=1000, model=1, force=True):
    """Pseudo-Brownian dynamics simulation of the frame order motions.

    @keyword file:      The PDB file for storing the frame order pseudo-Brownian dynamics simulation.  The compression is determined automatically by the file extensions '*.pdb', '*.pdb.gz', and '*.pdb.bz2'.
    @type file:         str
    @keyword dir:       The directory name to place the file into.
    @type dir:          str or None
    @keyword step_size: The rotation will be of a random direction but with this fixed angle.  The value is in degrees.
    @type step_size:    float
    @keyword snapshot:  The number of simulation steps between each snapshot.
    @type snapshot:     int
    @keyword total:     The total number of snapshots to take before stopping the simulation.
    @type total:        int
    @keyword model:     Only one model from an analysed ensemble of structures can be used for the pseudo-Brownian simulation, as the simulation and corresponding PDB file consist of one model per simulation.
    @type model:        int
    @keyword force:     A flag which, if set to True, will overwrite any pre-existing file.
    @type force:        bool
    """

    # Printout.
    print("Pseudo-Brownian dynamics simulation of the frame order motions.")

    # Checks.
    check_pipe()
    check_model()
    check_domain()
    check_parameters()
    check_pivot()

    # Skip the rigid model.
    if cdp.model == MODEL_RIGID:
        print("Skipping the rigid model.")
        return

    # Open the output file.
    file = open_write_file(file_name=file, dir=dir, force=force)

    # The parameter values.
    values = assemble_param_vector()
    params = {}
    i = 0
    for name in cdp.params:
        params[name] = values[i]
        i += 1

    # The structure.
    structure = deepcopy(cdp.structure)
    if structure.num_models() > 1:
        structure.collapse_ensemble(model_num=model)

    # The pivot points.
    num_states = 1
    if cdp.model == MODEL_DOUBLE_ROTOR:
        num_states = 2
    pivot = zeros((num_states, 3), float64)
    for i in range(num_states):
        pivot[i] = generate_pivot(order=i+1, pdb_limit=True)

    # Shift to the average position.
    average_position(structure=structure, models=[None])

    # The motional eigenframe.
    frame = generate_axis_system()

    # Create the distribution.
    brownian(file=file, model=cdp.model, structure=structure, parameters=params, eigenframe=frame, pivot=pivot, atom_id=domain_moving(), step_size=step_size, snapshot=snapshot, total=total)

    # Close the file.
    file.close()
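A hedged usage sketch; under the snapshot reading above, snapshot=20 and total=500 give 20 * 500 = 10000 random rotation steps, and the '.gz' extension selects Gzip compression automatically:

# Hypothetical pseudo-Brownian simulation of a fitted frame order model.
simulate(file='brownian_sim.pdb.gz', dir='output', step_size=1.0, snapshot=20, total=500, model=1, force=True)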
Example #45
def distribute(file="distribution.pdb.bz2", dir=None, atom_id=None, total=1000, max_rotations=100000, model=1, force=True):
    """Create a uniform distribution of structures for the frame order motions.

    @keyword file:          The PDB file for storing the frame order motional distribution.  The compression is determined automatically by the file extensions '*.pdb', '*.pdb.gz', and '*.pdb.bz2'.
    @type file:             str
    @keyword dir:           The directory name to place the file into.
    @type dir:              str or None
    @keyword atom_id:       The atom identification string to allow the distribution to be a subset of all atoms.
    @type atom_id:          None or str
    @keyword total:         The total number of states/model/structures in the distribution.
    @type total:            int
    @keyword max_rotations: The maximum number of rotations to generate the distribution from.  This prevents the function from executing for an unbounded amount of time when a frame order amplitude parameter is close to zero, as then almost none of the random rotations fall within the distribution.
    @type max_rotations:    int
    @keyword model:         Only one model from an analysed ensemble of structures can be used for the distribution, as the corresponding PDB file consists of one model per state.
    @type model:            int
    @keyword force:         A flag which, if set to True, will overwrite any pre-existing file.
    @type force:            bool
    """

    # Printout.
    print("Uniform distribution of structures representing the frame order motions.")

    # Check the total.
    if total > 9999:
        raise RelaxError("A maximum of 9999 models is allowed in the PDB format.")

    # Checks.
    check_pipe()
    check_model()
    check_domain()
    check_parameters()
    check_pivot()

    # Skip the rigid model.
    if cdp.model == MODEL_RIGID:
        print("Skipping the rigid model.")
        return

    # Open the output file.
    file = open_write_file(file_name=file, dir=dir, force=force)

    # The parameter values.
    values = assemble_param_vector()
    params = {}
    i = 0
    for name in cdp.params:
        params[name] = values[i]
        i += 1

    # The structure.
    structure = deepcopy(cdp.structure)
    if structure.num_models() > 1:
        structure.collapse_ensemble(model_num=model)

    # The pivot points.
    num_states = 1
    if cdp.model == MODEL_DOUBLE_ROTOR:
        num_states = 2
    pivot = zeros((num_states, 3), float64)
    for i in range(num_states):
        pivot[i] = generate_pivot(order=i+1, pdb_limit=True)

    # Shift to the average position.
    average_position(structure=structure, models=[None])

    # The motional eigenframe.
    frame = generate_axis_system()

    # Only work with a subset.
    if atom_id:
        # The inverted selection.
        selection = structure.selection(atom_id=atom_id, inv=True)

        # Delete the data.
        structure.delete(selection=selection, verbosity=0)

    # Create the distribution.
    uniform_distribution(file=file, model=cdp.model, structure=structure, parameters=params, eigenframe=frame, pivot=pivot, atom_id=domain_moving(), total=total, max_rotations=max_rotations)

    # Close the file.
    file.close()
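A hedged usage sketch restricting the distribution to a subset of atoms via a hypothetical relax selection string (':1-100' selecting residues 1 to 100):

# 500 states of the moving domain, residues 1-100 only.
distribute(file='distribution.pdb.bz2', dir='output', atom_id=':1-100', total=500, force=True)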
Example #47
def distribute(file="distribution.pdb.bz2",
               dir=None,
               atom_id=None,
               total=1000,
               max_rotations=100000,
               model=1,
               force=True):
    """Create a uniform distribution of structures for the frame order motions.

    @keyword file:          The PDB file for storing the frame order motional distribution.  The compression is determined automatically by the file extensions '*.pdb', '*.pdb.gz', and '*.pdb.bz2'.
    @type file:             str
    @keyword dir:           The directory name to place the file into.
    @type dir:              str or None
    @keyword atom_id:       The atom identification string to allow the distribution to be a subset of all atoms.
    @type atom_id:          None or str
    @keyword total:         The total number of states/model/structures in the distribution.
    @type total:            int
    @keyword max_rotations: The maximum number of rotations to generate the distribution from.  This prevents the function from executing for an unbounded amount of time when a frame order amplitude parameter is close to zero, as then almost none of the random rotations fall within the distribution.
    @type max_rotations:    int
    @keyword model:         Only one model from an analysed ensemble of structures can be used for the distribution, as the corresponding PDB file consists of one model per state.
    @type model:            int
    @keyword force:         A flag which, if set to True, will overwrite any pre-existing file.
    @type force:            bool
    """

    # Printout.
    print(
        "Uniform distribution of structures representing the frame order motions."
    )

    # Check the total.
    if total > 9999:
        raise RelaxError(
            "A maximum of 9999 models is allowed in the PDB format.")

    # Checks.
    check_pipe()
    check_model()
    check_domain()
    check_parameters()
    check_pivot()

    # Skip the rigid model.
    if cdp.model == MODEL_RIGID:
        print("Skipping the rigid model.")
        return

    # Open the output file.
    file = open_write_file(file_name=file, dir=dir, force=force)

    # The parameter values.
    values = assemble_param_vector()
    params = {}
    i = 0
    for name in cdp.params:
        params[name] = values[i]
        i += 1

    # The structure.
    structure = deepcopy(cdp.structure)
    if structure.num_models() > 1:
        structure.collapse_ensemble(model_num=model)

    # The pivot points.
    num_states = 1
    if cdp.model == MODEL_DOUBLE_ROTOR:
        num_states = 2
    pivot = zeros((num_states, 3), float64)
    for i in range(num_states):
        pivot[i] = generate_pivot(order=i + 1, pdb_limit=True)

    # Shift to the average position.
    average_position(structure=structure, models=[None])

    # The motional eigenframe.
    frame = generate_axis_system()

    # Only work with a subset.
    if atom_id:
        # The inverted selection.
        selection = structure.selection(atom_id=atom_id, inv=True)

        # Delete the data.
        structure.delete(selection=selection, verbosity=0)

    # Create the distribution.
    uniform_distribution(file=file,
                         model=cdp.model,
                         structure=structure,
                         parameters=params,
                         eigenframe=frame,
                         pivot=pivot,
                         atom_id=domain_moving(),
                         total=total,
                         max_rotations=max_rotations)

    # Close the file.
    file.close()
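The index-based loop that packs the parameter vector into the params dictionary above can be written more compactly with zip(), provided the names and values stay aligned.  A sketch with hypothetical stand-ins for the relax data structures:

# Hypothetical parameter names and values standing in for cdp.params and
# assemble_param_vector().
param_names = ['ave_pos_x', 'ave_pos_y', 'cone_theta']
values = [1.2, -0.4, 0.8]
params = dict(zip(param_names, values))    # {'ave_pos_x': 1.2, 'ave_pos_y': -0.4, 'cone_theta': 0.8}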
Example #48
0
def sherekhan_input(spin_id=None, force=False, dir='ShereKhan'):
    """Create the ShereKhan input files.

    @keyword spin_id:           The spin ID string to restrict the file creation to.
    @type spin_id:              str
    @keyword force:             A flag which if True will cause all pre-existing files to be overwritten.
    @type force:                bool
    @keyword dir:               The optional directory to place the files into.  If None, then the files will be placed into the current directory.
    @type dir:                  str or None
    """

    # Test if the current pipe exists.
    check_pipe()

    # Test if sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Test if the experiment type has been set.
    if not hasattr(cdp, 'exp_type'):
        raise RelaxError("The relaxation dispersion experiment type has not been specified.")

    # Test if the model has been set.
    if not hasattr(cdp, 'model_type'):
        raise RelaxError("The relaxation dispersion model has not been specified.")

    # Directory creation.
    if dir != None:
        mkdir_nofail(dir, verbosity=0)

    # Loop over the spin blocks.
    cluster_index = 0
    for spin_ids in loop_cluster():
        # The spin containers.
        spins = spin_ids_to_containers(spin_ids)

        # Loop over the magnetic fields.
        for exp_type, frq, ei, mi in loop_exp_frq(return_indices=True):
            # Loop over the time, and count it.
            time_i = 0
            for time, ti in loop_time(exp_type=exp_type, frq=frq, return_indices=True):
                time_i += 1

            # Check that no more than one time point is returned.
            if time_i > 1:
                raise RelaxError("The number of returned time points is %i, but only 1 is expected." % time_i)

            # The ShereKhan input file for the spin cluster.
            file_name = 'sherekhan_frq%s.in' % (mi+1)
            if dir != None:
                dir_name = dir + sep + 'cluster%s' % (cluster_index+1)
            else:
                dir_name = 'cluster%s' % (cluster_index+1)
            file = open_write_file(file_name=file_name, dir=dir_name, force=force)

            # The B0 field for the nuclei of interest in MHz (must be positive to be accepted by the server).
            file.write("%.10f\n" % abs(frq / periodic_table.gyromagnetic_ratio('1H') * periodic_table.gyromagnetic_ratio('15N') / 1e6))

            # The constant relaxation time for the CPMG experiment in seconds.
            file.write("%s\n" % (time))

            # The comment line.
            file.write("# %-18s %-20s %-20s\n" % ("nu_cpmg (Hz)", "R2eff (rad/s)", "Error"))

            # Loop over the spins of the cluster.
            for i in range(len(spins)):
                # Get the residue container.
                res = return_residue(spin_ids[i])

                # Name the residue if needed.
                res_name = res.name
                if res_name == None:
                    res_name = 'X'

                # Initialise the lines to output (to be able to catch missing data).
                lines = []

                # The residue ID line.
                lines.append("# %s%s\n" % (res_name, res.num))

                # Loop over the dispersion points.
                for offset, point in loop_offset_point(exp_type=exp_type, frq=frq, skip_ref=True):
                    # The parameter key.
                    param_key = return_param_key_from_data(exp_type=exp_type, frq=frq, offset=offset, point=point)

                    # No data.
                    if param_key not in spins[i].r2eff:
                        continue

                    # Store the data.
                    lines.append("%20.15g %20.13g %20.13g\n" % (point, spins[i].r2eff[param_key], spins[i].r2eff_err[param_key]))

                # No data.
                if len(lines) == 1:
                    continue

                # Write out the data.
                for line in lines:
                    file.write(line)

            # Close the file.
            file.close()

        # Increment the cluster index.
        cluster_index += 1
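A hypothetical call of the function above, assuming a data pipe with dispersion data and spin clustering already set up.  With the defaults, one input file is created per magnetic field and per cluster, e.g. 'ShereKhan/cluster1/sherekhan_frq1.in', 'ShereKhan/cluster1/sherekhan_frq2.in', 'ShereKhan/cluster2/sherekhan_frq1.in', and so on:

sherekhan_input(force=True, dir='ShereKhan')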
Example #49
0
    def matplotlib_surface_plot(self):
        """Function to write matplotlib script to plot surfaces of parameters."""

        # Add ".par" to file prefix
        mapfile_name = '"%s.par"' % self.file_prefix

        # If the point file is not None.
        if self.point_file != None:
            pointfile_name = '"%s.par"' % self.point_file
        else:
            pointfile_name = "None"

        # Open the file.
        plot_file = open_write_file(file_name=self.file_prefix+'.py', dir=self.dir, force=True)

        matplotlib_file = [
            'from copy import deepcopy'+"\n",
            'import numpy as np'+"\n",
            'import scipy.interpolate'+"\n",
            'from numpy.ma import masked_where'+"\n",
            ''+"\n",
            'from mpl_toolkits.mplot3d import axes3d'+"\n",
            'import matplotlib.pyplot as plt'+"\n",
            'from matplotlib import cm'+"\n",
            ''+"\n",
            '# Open file and get header.'+"\n",
            'mapfile_name = %s'%mapfile_name+"\n",
            'pointfile_name = %s'%pointfile_name+"\n",
            ''+"\n",
            'mapfile = open(mapfile_name, "r")'+"\n",
            'lines = mapfile.readlines()'+"\n",
            'mapfile.close()'+"\n",
            'header = lines[0].split()[1:]'+"\n",
            ''+"\n",
            '# Prepare the dtype for reading file.'+"\n",
            'dtype_str = "i8,f8,f8,f8,f8,i8,f8,f8,f8,f8"'+"\n",
            ''+"\n",
            'print("Fileheader is: %s"%header)'+"\n",
            'print("Value types are: %s"%dtype_str)'+"\n",
            ''+"\n",
            '# Load the data.'+"\n",
            'data = np.genfromtxt(fname=mapfile_name, dtype=dtype_str, names=header)'+"\n",
            ''+"\n",
            '# Load the point data'+"\n",
            'if pointfile_name:'+"\n",
            '    # Load the point data.'+"\n",
            '    data_p = np.genfromtxt(fname=pointfile_name, dtype=dtype_str, names=header)'+"\n",
            '    '+"\n",
            ''+"\n",
            '# Define where to cut the data, as the minimum.'+"\n",
            'header_min = header[6:10]'+"\n",
            ''+"\n",
            '# Define to cut at min map point.'+"\n",
            'map_min_par0 = data[header_min[0]][0]'+"\n",
            'map_min_par1 = data[header_min[1]][0]'+"\n",
            'map_min_par2 = data[header_min[2]][0]'+"\n",
            'map_min_chi2 = data[header_min[3]][0]'+"\n",
            ''+"\n",
            '# Now get the headers for the data.'+"\n",
            'header_val = header[1:5]'+"\n",
            ''+"\n",
            '# Define which 2D maps to create, as a list of 2 parameters, and at which third parameter to cut the values.'+"\n",
            'maps_xy = [header_val[0], header_val[1], header_val[2], map_min_par2]'+"\n",
            'maps_xz = [header_val[0], header_val[2], header_val[1], map_min_par1]'+"\n",
            'maps_yz = [header_val[1], header_val[2], header_val[0], map_min_par0]'+"\n",
            ''+"\n",
            'maps = [maps_xy, maps_xz, maps_yz]'+"\n",
            ''+"\n",
            '# Nr of columns is number of maps.'+"\n",
            'nr_cols = 1'+"\n",
            '# Nr of rows is 2, for the 3d projection and imshow'+"\n",
            'nr_rows = 2'+"\n",
            ''+"\n",
            '# Loop over the maps:'+"\n",
            'for x_par, y_par, z_par, z_cut in maps:'+"\n",
            '    # Define figure'+"\n",
            '    fig = plt.figure()'+"\n",
            ''+"\n",
            '    # Define c_par'+"\n",
            '    c_par = header_val[3]'+"\n",
            ''+"\n",
            '    # Now get the values for the map.'+"\n",
            '    map_x = data[x_par]'+"\n",
            '    map_y = data[y_par]'+"\n",
            '    map_z = data[z_par]'+"\n",
            '    map_c = data[c_par]'+"\n",
            ''+"\n",
            '    # Now define which map to create.'+"\n",
            '    mask_xy = masked_where(map_z == z_cut, map_z)'+"\n",
            '    map_mask_x = map_x[mask_xy.mask]'+"\n",
            '    map_mask_y = map_y[mask_xy.mask]'+"\n",
            '    map_mask_c = map_c[mask_xy.mask]'+"\n",
            ''+"\n",
            '    # Define min and max values.'+"\n",
            '    map_mask_x_min = map_mask_x.min()'+"\n",
            '    map_mask_x_max = map_mask_x.max()'+"\n",
            '    map_mask_y_min = map_mask_y.min()'+"\n",
            '    map_mask_y_max = map_mask_y.max()'+"\n",
            '    map_mask_c_min = map_mask_c.min()'+"\n",
            '    map_mask_c_max = map_mask_c.max()'+"\n",
            ''+"\n",
            '    # Set up a regular grid of interpolation points'+"\n",
            '    int_points = 300'+"\n",
            '    xi, yi = np.linspace(map_mask_x_min, map_mask_x_max, int_points), np.linspace(map_mask_y_min, map_mask_y_max, int_points)'+"\n",
            '    xi, yi = np.meshgrid(xi, yi)'+"\n",
            ''+"\n",
            '    # Interpolate to create grid'+"\n",
            '    ci = scipy.interpolate.griddata((map_mask_x, map_mask_y), map_mask_c, (xi, yi), method="linear")'+"\n",
            ''+"\n",
            '    # Set which x, y, z to plot'+"\n",
            '    x_p = xi'+"\n",
            '    y_p = yi'+"\n",
            '    c_p = deepcopy(ci)'+"\n",
            ''+"\n",
            '    # Cut map at a certain height.'+"\n",
            '    # First get index of largest values'+"\n",
            '    #z_max = map_mask_c_max'+"\n",
            '    z_max = map_mask_c_min + 0.5*map_mask_c_min'+"\n",
            '    ci_mask = masked_where(ci >= z_max, ci)'+"\n",
            ''+"\n",
            '    # Replace with 0.0'+"\n",
            '    c_p[ci_mask.mask] = 0.0'+"\n",
            '    # Find new max'+"\n",
            '    new_max = np.max(c_p)'+"\n",
            ''+"\n",
            '    # Insert values in array.'+"\n",
            '    c_p[ci_mask.mask] = new_max'+"\n",
            ''+"\n",
            '    # Define min.'+"\n",
            '    z_min = map_mask_c_min - 0.5*map_mask_c_min'+"\n",
            ''+"\n",
            '    # Create figure and plot'+"\n",
            '    ax = fig.add_subplot(nr_rows, nr_cols, 1, projection="3d")'+"\n",
            '    ax.plot_surface(x_p, y_p, c_p, rstride=8, cstride=8, alpha=0.3)'+"\n",
            ''+"\n",
            '    # Possibly add scatter points for the map.'+"\n",
            '    #ax.scatter(map_x, map_y, map_c, c="b", marker="o", s=5)'+"\n",
            ''+"\n",
            '    # One could also make the mesh just from the values, but this requires much memory.'+"\n",
            '    ##ax.scatter(x_p, y_p, c_p, c="y", marker="o", s=5)'+"\n",
            ''+"\n",
            '    # Add contour levels on sides.'+"\n",
            '    ax.contour(x_p, y_p, c_p, zdir="z", offset=z_min, cmap=cm.coolwarm)'+"\n",
            '    ax.contour(x_p, y_p, c_p, zdir="x", offset=map_mask_x_min, cmap=cm.coolwarm)'+"\n",
            '    ax.contour(x_p, y_p, c_p, zdir="y", offset=map_mask_y_min, cmap=cm.coolwarm)'+"\n",
            ''+"\n",
            '    # Add scatter values, for 5 lowest values.'+"\n",
            '    x_par_min = x_par + "_sort"'+"\n",
            '    y_par_min = y_par + "_sort"'+"\n",
            '    c_par_min = c_par + "_sort"'+"\n",
            '    mp_x = data[x_par_min][0:5]'+"\n",
            '    mp_y = data[y_par_min][0:5]'+"\n",
            '    mp_c = data[c_par_min][0:5]'+"\n",
            '    ax.scatter(mp_x[0], mp_y[0], mp_c[0], c="r", marker="o", s=200)'+"\n",
            '    ax.scatter(mp_x[1:], mp_y[1:], mp_c[1:], c="g", marker="o", s=100)'+"\n",
            ''+"\n",
            '    # Add points from file, as the closest point in map.'+"\n",
            '    if pointfile_name:'+"\n",
            '        if data_p[x_par].ndim == 0:'+"\n",
            '            points_x = np.asarray([data_p[x_par]])'+"\n",
            '            points_y = np.asarray([data_p[y_par]])'+"\n",
            '        else:'+"\n",
            '            points_x = data_p[x_par]'+"\n",
            '            points_y = data_p[y_par]'+"\n",
            ''+"\n",
            '        # Normalize, by division of largest number of map'+"\n",
            '        points_x_norm = points_x / map_mask_x_max'+"\n",
            '        points_y_norm = points_y / map_mask_y_max'+"\n",
            '        map_mask_x_norm = map_mask_x / map_mask_x_max'+"\n",
            '        map_mask_y_norm = map_mask_y / map_mask_y_max'+"\n",
            ''+"\n",
            '        p_x = []'+"\n",
            '        p_y = []'+"\n",
            '        p_c = []'+"\n",
            '        # Now calculate the Euclidean distance in the space, to find the map point that best represents the point.'+"\n",
            '        for i, point_x_norm in enumerate(points_x_norm):'+"\n",
            '            point_y_norm = points_y_norm[i]'+"\n",
            ''+"\n",
            '            # Get the distance.'+"\n",
            '            dist = np.sqrt( (map_mask_x_norm - point_x_norm)**2 + (map_mask_y_norm - point_y_norm)**2)'+"\n",
            ''+"\n",
            '            # Return the indices of the minimum values along an axis.'+"\n",
            '            min_index = np.argmin(dist)'+"\n",
            '            p_x.append(map_mask_x[min_index])'+"\n",
            '            p_y.append(map_mask_y[min_index])'+"\n",
            '            p_c.append(map_mask_c[min_index])'+"\n",
            ''+"\n",
            '        # Convert to numpy array'+"\n",
            '        p_x = np.asarray(p_x)'+"\n",
            '        p_y = np.asarray(p_y)'+"\n",
            '        p_c = np.asarray(p_c)'+"\n",
            ''+"\n",
            '        # Plot points'+"\n",
            '        ax.scatter(p_x, p_y, p_c, c="m", marker="o", s=50)'+"\n",
            ''+"\n",
            ''+"\n",
            '    # Set label'+"\n",
            '    ax.set_xlabel("%s"%x_par)'+"\n",
            '    ax.set_ylabel("%s"%y_par)'+"\n",
            '    ax.set_zlabel("%s"%c_par)'+"\n",
            ''+"\n",
            ''+"\n",
            '    # Set limits'+"\n",
            '    ax.set_zlim(z_min, z_max)'+"\n",
            ''+"\n",
            ''+"\n",
            '    # Create figure and plot'+"\n",
            '    ax = fig.add_subplot(nr_rows, nr_cols, 2)'+"\n",
            '    fig_imshow = ax.imshow(ci, vmin=map_mask_c_min, vmax=map_mask_c_max, origin="lower", extent=[map_mask_x_min, map_mask_x_max, map_mask_y_min, map_mask_y_max])'+"\n",
            ''+"\n",
            '    # Add scatter values, for 5 lowest values.'+"\n",
            '    ax.scatter(mp_x[0], mp_y[0], c=mp_c[0], marker="o", s=200)'+"\n",
            '    ax.scatter(mp_x[1:], mp_y[1:], c="g", marker="o", s=100)'+"\n",
            ''+"\n",
            '    # Also add point to this map.'+"\n",
            '    if pointfile_name:'+"\n",
            '        # Plot points'+"\n",
            '        ax.scatter(p_x, p_y, c="m", marker="o", s=50)'+"\n",
            ''+"\n",
            '    # Set label'+"\n",
            '    ax.set_xlabel("%s"%x_par)'+"\n",
            '    ax.set_ylabel("%s"%y_par)'+"\n",
            ''+"\n",
            '    # Add colorbar.'+"\n",
            '    fig.subplots_adjust(right=0.8)'+"\n",
            '    cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.3])'+"\n",
            '    fig.colorbar(fig_imshow, cax=cbar_ax)'+"\n",
            ''+"\n",
            '# Show plot.'+"\n",
            'plt.show()'+"\n",
            ''+"\n",
        ]

        # Loop over the lines and write.
        for line in matplotlib_file:
            plot_file.write(line)

        # Close the file.
        plot_file.close()
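The central trick in the generated script is the masked_where() slicing, which cuts a 2D plane out of the grid of sampled parameter values by selecting the points where the third parameter equals its value at the chi-squared minimum.  A minimal sketch of that idiom in isolation:

import numpy as np
from numpy.ma import masked_where

map_z = np.array([0.1, 0.2, 0.1, 0.3])      # The third parameter on the grid.
map_c = np.array([10.0, 20.0, 30.0, 40.0])  # The chi2 values on the grid.
mask_xy = masked_where(map_z == 0.1, map_z)
print(map_c[mask_xy.mask])                  # [10. 30.], the chi2 slice at z == 0.1.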
Example #50
0
def simulate(file="simulation.pdb.bz2",
             dir=None,
             step_size=2.0,
             snapshot=10,
             total=1000,
             model=1,
             force=True):
    """Pseudo-Brownian dynamics simulation of the frame order motions.

    @keyword file:      The PDB file for storing the frame order pseudo-Brownian dynamics simulation.  The compression is determined automatically by the file extensions '*.pdb', '*.pdb.gz', and '*.pdb.bz2'.
    @type file:         str
    @keyword dir:       The directory name to place the file into.
    @type dir:          str or None
    @keyword step_size: The rotation will be of a random direction but with this fixed angle.  The value is in degrees.
    @type step_size:    float
    @keyword snapshot:  The number of steps in the simulation when snapshots will be taken.
    @type snapshot:     int
    @keyword total:     The total number of snapshots to take before stopping the simulation.
    @type total:        int
    @keyword model:     Only one model from an analysed ensemble of structures can be used for the pseudo-Brownian simulation, as the simulation and its corresponding PDB file consist of one model per simulation snapshot.
    @type model:        int
    @keyword force:     A flag which, if set to True, will overwrite any pre-existing file.
    @type force:        bool
    """

    # Printout.
    print("Pseudo-Brownian dynamics simulation of the frame order motions.")

    # Checks.
    check_pipe()
    check_model()
    check_domain()
    check_parameters()
    check_pivot()

    # Skip the rigid model.
    if cdp.model == MODEL_RIGID:
        print("Skipping the rigid model.")
        return

    # Open the output file.
    file = open_write_file(file_name=file, dir=dir, force=force)

    # The parameter values.
    values = assemble_param_vector()
    params = {}
    i = 0
    for name in cdp.params:
        params[name] = values[i]
        i += 1

    # The structure.
    structure = deepcopy(cdp.structure)
    if structure.num_models() > 1:
        structure.collapse_ensemble(model_num=model)

    # The pivot points.
    num_states = 1
    if cdp.model == MODEL_DOUBLE_ROTOR:
        num_states = 2
    pivot = zeros((num_states, 3), float64)
    for i in range(num_states):
        pivot[i] = generate_pivot(order=i + 1, pdb_limit=True)

    # Shift to the average position.
    average_position(structure=structure, models=[None])

    # The motional eigenframe.
    frame = generate_axis_system()

    # Create the distribution.
    brownian(file=file,
             model=cdp.model,
             structure=structure,
             parameters=params,
             eigenframe=frame,
             pivot=pivot,
             atom_id=domain_moving(),
             step_size=step_size,
             snapshot=snapshot,
             total=total)

    # Close the file.
    file.close()
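The step_size, snapshot and total keywords interact: a snapshot is saved every snapshot steps and the simulation stops after total snapshots, so the hypothetical call below performs 10 * 1000 = 10000 random rotation steps of 2.0 degrees each.  A completed frame order analysis is assumed to exist in the current data pipe:

simulate(file='simulation.pdb.bz2', dir='results', step_size=2.0, snapshot=10, total=1000, force=True)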
Example #51
0
File: uf.py Project: tlinnet/relax
def cone_pdb(cone_type=None, scale=1.0, file=None, dir=None, force=False):
    """Create a PDB file containing a geometric object representing the various cone models.

    Currently the only cone types supported are 'diff in cone' and 'diff on cone'.


    @param cone_type:   The type of cone model to represent.
    @type cone_type:    str
    @param scale:       The size of the geometric object is equal to the average pivot-CoM
                        vector length multiplied by this scaling factor.
    @type scale:        float
    @param file:        The name of the PDB file to create.
    @type file:         str
    @param dir:         The name of the directory to place the PDB file into.
    @type dir:          str
    @param force:       Flag which if set to True will cause any pre-existing file to be
                        overwritten.
    @type force:        bool
    """

    # Test if the cone models have been determined.
    if cone_type == 'diff in cone':
        if not hasattr(cdp, 'S_diff_in_cone'):
            raise RelaxError(
                "The diffusion in a cone model has not yet been determined.")
    elif cone_type == 'diff on cone':
        if not hasattr(cdp, 'S_diff_on_cone'):
            raise RelaxError(
                "The diffusion on a cone model has not yet been determined.")
    else:
        raise RelaxError("The cone type " + repr(cone_type) + " is unknown.")

    # The number of increments for the filling of the cone objects.
    inc = 20

    # The rotation matrix.
    R = zeros((3, 3), float64)
    two_vect_to_R(array([0, 0, 1], float64),
                  cdp.ave_pivot_CoM / norm(cdp.ave_pivot_CoM), R)

    # The isotropic cone object.
    if cone_type == 'diff in cone':
        angle = cdp.theta_diff_in_cone
    elif cone_type == 'diff on cone':
        angle = cdp.theta_diff_on_cone
    cone_obj = Iso_cone(angle)

    # Create the structural object.
    structure = Internal()

    # Add a structure.
    structure.add_molecule(name='cone')

    # Alias the single molecule from the single model.
    mol = structure.structural_data[0].mol[0]

    # Add the pivot point.
    mol.atom_add(pdb_record='HETATM',
                 atom_num=1,
                 atom_name='R',
                 res_name='PIV',
                 res_num=1,
                 pos=cdp.pivot_point,
                 element='C')

    # Generate the average pivot-CoM vectors.
    print("\nGenerating the average pivot-CoM vectors.")
    sim_vectors = None
    if hasattr(cdp, 'ave_pivot_CoM_sim'):
        sim_vectors = cdp.ave_pivot_CoM_sim
    res_num = generate_vector_residues(mol=mol,
                                       vector=cdp.ave_pivot_CoM,
                                       atom_name='Ave',
                                       res_name_vect='AVE',
                                       sim_vectors=sim_vectors,
                                       res_num=2,
                                       origin=cdp.pivot_point,
                                       scale=scale)

    # Generate the cone outer edge.
    print("\nGenerating the cone outer edge.")
    cap_start_atom = mol.atom_num[-1] + 1
    cone_edge(mol=mol,
              cone_obj=cone_obj,
              res_name='CON',
              res_num=3,
              apex=cdp.pivot_point,
              R=R,
              scale=norm(cdp.pivot_CoM),
              inc=inc)

    # Generate the cone cap, and stitch it to the cone edge.
    if cone_type == 'diff in cone':
        print("\nGenerating the cone cap.")
        cone_start_atom = mol.atom_num[-1] + 1
        generate_vector_dist(mol=mol,
                             res_name='CON',
                             res_num=3,
                             centre=cdp.pivot_point,
                             R=R,
                             phi_max_fn=cone_obj.phi_max,
                             scale=norm(cdp.pivot_CoM),
                             inc=inc)

    # Create the PDB file.
    print("\nGenerating the PDB file.")
    pdb_file = open_write_file(file, dir, force=force)
    structure.write_pdb(pdb_file)
    pdb_file.close()
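A hypothetical user call matching the signature above; the 'diff in cone' results (cdp.S_diff_in_cone, cdp.theta_diff_in_cone, the pivot point and the pivot-CoM vectors) must already exist in the current data pipe:

cone_pdb(cone_type='diff in cone', scale=1.5, file='cone.pdb', dir='pdb', force=True)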
Example #52
0
def correlation_matrix(matrix=None, labels=None, file=None, dir=None, force=False):
    """Gnuplot plotting function for representing correlation matrices.

    @keyword matrix:    The correlation matrix.  This must be a square matrix.
    @type matrix:       numpy rank-2 array.
    @keyword labels:    The labels for each element of the matrix.  The same label is assumed for each [i, i] pair in the matrix.
    @type labels:       list of str
    @keyword file:      The name of the file to create.
    @type file:         str
    @keyword dir:       The directory where the file will be placed.  If set to None, then the file will be placed in the current directory.
    @type dir:          str or None
    @keyword force:     A flag which if True will cause any pre-existing file to be overwritten.
    @type force:        bool
    """

    # The dimensions.
    n = len(matrix)

    # Generate the text file for loading into gnuplot.
    text.correlation_matrix(matrix=matrix, labels=labels, file=file, dir=dir, force=force)

    # The script file name with the extension swapped.
    file_name = swap_extension(file=file, ext='gnu')

    # Open the script file for writing.
    output = open_write_file(file_name, dir=dir, force=force)

    # Gnuplot script setup.
    output.write("#!/usr/bin/env gnuplot\n\n")

    # Set up the terminal type and make the plot square.
    output.write("# Set up the terminal type and make the plot square.\n")
    output.write("set terminal postscript eps size 10,10 enhanced color font 'Helvetica,20' linewidth 0.1\n")
    output.write("set size square\n")

    # The colour map.
    output.write("\n# Blue-red colour map.\n")
    output.write("set palette model RGB\n")
    output.write("set palette defined\n")

    # The labels.
    if labels != None:
        output.write("\n# Labels.\n")
        for axis in ['x', 'y']:
            output.write("set %stics out " % axis)
            if axis == 'x':
                output.write("rotate ")
            output.write("font \",8\" (")
            for i in range(n):
                if i != 0:
                    output.write(", ")
                output.write("\"%s\" %s" % (format_enhanced(labels[i]), i))
            output.write(")\n")

    # Output to EPS.
    output.write("\n# Output to EPS.\n")
    output.write("set output \"%s.eps\"\n" % file_root(file))

    # Load and show the text data.
    output.write("\n# Load and show the text data\n")
    output.write("plot \"%s\" matrix with image\n" % file)

    # Close the file.
    output.close()

    # Make the script executable.
    chmod(get_file_path(file_name=file_name, dir=dir), S_IRWXU|S_IRGRP|S_IROTH)
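A minimal usage sketch, assuming the relax environment supplies the text.correlation_matrix() backend called above.  Both the 'corr.txt' data file and the executable 'corr.gnu' gnuplot script are then created:

from numpy import array

matrix = array([[1.0, -0.5], [-0.5, 1.0]])
correlation_matrix(matrix=matrix, labels=['tm', 'S2'], file='corr.txt', force=True)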
Example #53
0
def write_list(file_prefix=None,
               dir=None,
               res_names=None,
               res_nums=None,
               atom1_names=None,
               atom2_names=None,
               w1=None,
               w2=None,
               data_height=None,
               force=True):
    """Create a Sparky .list file.

    @keyword file_prefix:   The base part of the file name without the .list extension.
    @type file_prefix:      str
    @keyword dir:           The directory to place the file in.
    @type dir:              str or None
    @keyword res_names:     The residue name list for each peak entry.
    @type res_names:        list of str
    @keyword res_nums:      The residue number list for each peak entry.
    @type res_nums:         list of int
    @keyword atom1_names:   The atom name list for w1 for each peak entry.
    @type atom1_names:      list of str
    @keyword atom2_names:   The atom name list for w2 for each peak entry.
    @type atom2_names:      list of str
    @keyword w1:            The w1 chemical shift list in ppm for each peak entry.
    @type w1:               list of float
    @keyword w2:            The w2 chemical shift list in ppm for each peak entry.
    @type w2:               list of float
    @keyword data_height:   The optional data height list for each peak entry.
    @type data_height:      None or list of float
    @keyword force:         A flag which if True will cause any pre-existing files to be overwritten.
    @type force:            bool
    """

    # Check that the input lists all have the same number of entries.
    N = len(w1)
    if len(res_names) != N:
        raise RelaxError("The number of residue names (%s) does not match the number of entries (%s)." % (len(res_names), N))
    if len(res_nums) != N:
        raise RelaxError("The number of residue numbers (%s) does not match the number of entries (%s)." % (len(res_nums), N))
    if len(atom1_names) != N:
        raise RelaxError("The number of w1 atom names (%s) does not match the number of entries (%s)." % (len(atom1_names), N))
    if len(atom2_names) != N:
        raise RelaxError("The number of w2 atom names (%s) does not match the number of entries (%s)." % (len(atom2_names), N))
    if len(w2) != N:
        raise RelaxError("The number of w2 chemical shifts (%s) does not match the number of entries (%s)." % (len(w2), N))
    if data_height and len(data_height) != N:
        raise RelaxError("The number of data heights (%s) does not match the number of entries (%s)." % (len(data_height), N))

    # Printout.
    print("Creating the Sparky list file.")

    # Open the file.
    if isinstance(file_prefix, str):
        file = open_write_file(file_name=file_prefix + ".list",
                               dir=dir,
                               force=force)
    else:
        file = file_prefix

    # The header.
    file.write("%17s %10s %10s" % ("Assignment ", "w1 ", "w2 "))
    if data_height != None:
        file.write(" %12s" % "Data Height")
    file.write("\n\n")

    # The data.
    for i in range(N):
        # Generate the assignment.
        assign = "%s%i%s-%s" % (res_names[i], res_nums[i], atom1_names[i],
                                atom2_names[i])

        # Write out the line.
        file.write("%17s %10.3f %10.3f" % (assign, w1[i], w2[i]))
        if data_height != None:
            file.write(" %12i" % data_height[i])
        file.write("\n")
Example #54
0
####### Now doing the back calculation of R2eff values.
# First create fake data and read it in.
for exp_type, frq, ei, mi in loop_exp_frq(return_indices=True):
    exp_id = exp_ids[mi]
    exp = exps[mi]
    sfrq, time_T2, ncycs, r2eff_errs = exp

    # Then loop over the spins.
    for res_name, res_num, spin_name, params in cur_spins:
        cur_spin_id = ":%i@%s"%(res_num, spin_name)
        cur_spin = return_spin(spin_id=cur_spin_id)

        ## First do a fake R2eff structure.
        # Define file name
        file_name = "%s%s.txt" % (exp_id, cur_spin_id .replace('#', '_').replace(':', '_').replace('@', '_'))
        file = open_write_file(file_name=file_name, dir=ds.tmpdir, force=True)

        # Then loop over the points, make a fake R2eff value.
        for offset, point, oi, di in loop_offset_point(exp_type=EXP_TYPE_CPMG_SQ, frq=frq, return_indices=True):
            string = "%.15f 1.0 %.3f\n"%(point, ds.r2eff_err)
            file.write(string)

        # Close file.
        file.close()

        # Read in the R2eff file to create the structure.
        # This is a trick, or else relax complains.
        relax_disp.r2eff_read_spin(id=exp_id, spin_id=cur_spin_id, file=file_name, dir=ds.tmpdir, disp_point_col=1, data_col=2, error_col=3)


# Now back-calculate.
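Each fake R2eff file written above contains one line per dispersion point, matching the disp_point_col=1, data_col=2, error_col=3 layout used when reading it back in.  With made-up nu_CPMG values and an error of 0.25, a generated file would look like:

50.000000000000000 1.0 0.250
100.000000000000000 1.0 0.250
200.000000000000000 1.0 0.250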
Example #55
0
def pdb(r=1.02, file_name='uniform.pdb', inc=None):
    """Create the bond vector distribution and save the PDB file."""

    # Create the structural object.
    structure = Internal()

    # Add a molecule.
    structure.add_molecule(name='dist')

    # Alias the single molecule from the single model.
    mol = structure.structural_data[0].mol[0]

    # Get the polar and azimuthal angles for the distribution.
    phi, theta = angles_uniform(inc)

    # Get the uniform vector distribution.
    vectors = vect_dist_spherical_angles(inc=inc, distribution='uniform')

    # Loop over the radial array of vectors (change in longitude).
    atom_num = 1
    new_vectors = []
    for i in range(len(theta)):
        # Loop over the vectors of the radial array (change in latitude).
        for j in range(len(phi)):
            # The index.
            index = i + j * len(theta)

            # The atomic positions.
            pos1 = vectors[index] * 10
            pos2 = pos1 + vectors[index] * r

            # Store the rearranged vector (truncated as in the PDB).
            trunc_vect = zeros(3, float64)
            for k in range(3):
                trunc_vect[k] = float("%.3f" % pos2[k]) - float(
                    "%.3f" % pos1[k])
            new_vectors.append(trunc_vect)

            # Residue number (integer division, as two atoms are added per residue).
            res = (atom_num + 1) // 2

            # Add the vector as a N-H atom pair.
            mol.atom_add(pdb_record='ATOM',
                         atom_num=atom_num,
                         atom_name='N',
                         res_name=AA_TABLE[SEQ[index]].upper(),
                         res_num=res,
                         pos=pos1,
                         element='N')
            mol.atom_add(pdb_record='ATOM',
                         atom_num=atom_num + 1,
                         atom_name='H',
                         res_name=AA_TABLE[SEQ[index]].upper(),
                         res_num=res,
                         pos=pos2,
                         element='H')

            # Connect.
            mol.atom_connect(atom_num - 1, atom_num)

            # Move 2 atoms forwards.
            atom_num += 2

    # The PDB file.
    file = open_write_file(file_name, force=True)
    structure.write_pdb(file)
    file.close()

    # Return the vectors in the diffusion frame.
    return new_vectors
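The truncation step above mimics the three-decimal precision of PDB coordinates, so the returned vectors match what a PDB reader would reconstruct from the written atom positions.  The same idiom in isolation:

from numpy import around, array

pos1 = array([1.23456, 2.34567, 3.45678])
pos2 = pos1 + array([0.0, 0.0, 1.02])
trunc_vect = around(pos2, 3) - around(pos1, 3)    # The vector between the rounded positions.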
Example #56
0
        # Save all values of chi2.  To help find a reasonable level for the Innermost, Inner, Middle and Outer Isosurface.
        all_chi.append(chi2)

        # Increment the value of the second parameter.
        values[1] = values[1] + step_size[1]

        counter += 1

    # Increment the value of the first parameter.
    values[0] = values[0] + step_size[0]

print("\nMin cluster point %s=%3.3f, %s=%3.3f, with chi2=%3.3f" % (params[0], pcm[0], params[1], pcm[1], pre_chi2))

# Open file
file_name = '3_simulate_graphs_S65_dw_r2a_FT128.txt'
surface_file = open_write_file(file_name=file_name, dir=None, force=True)
write_data(out=surface_file, headings=headings, data=data)

# Close file
surface_file.close()

# Check spins.
display_spin()

# Now de-select spins from cluster.
for spin_id in cur_spin_ids:
    deselect.spin(spin_id=spin_id)

relax_disp.plot_disp_curves(dir='grace', y_axis='r2_eff', x_axis='disp', num_points=1000, extend_hz=500.0, extend_ppm=500.0, interpolate='disp', force=True)
Example #57
0
File: uf.py Project: tlinnet/relax
def decompose(root="decomposed", dir=None, atom_id=None, model=1, force=True):
    """Structural representation of the individual frame order motional components.

    @keyword root:          The file root for the PDB files created.  Each motional component will be represented by a different PDB file appended with '_mode1.pdb', '_mode2.pdb', '_mode3.pdb', etc.
    @type root:             str
    @keyword dir:           The directory name to place the file into.
    @type dir:              str or None
    @keyword atom_id:       The atom identification string to allow the decomposition to be applied to subset of all atoms.
    @type atom_id:          None or str
    @keyword model:         Only one model from an analysed ensemble of structures can be used for the decomposition, as the corresponding PDB file consists of one model per state.
    @type model:            int
    @keyword force:         A flag which, if set to True, will overwrite any pre-existing file.
    @type force:            bool
    """

    # Printout.
    print(
        "PDB representation of the individual components of the frame order motions."
    )

    # Checks.
    check_pipe()
    check_model()
    check_domain()
    check_parameters()
    check_pivot()

    # Skip any unsupported models.
    unsupported = [MODEL_RIGID, MODEL_DOUBLE_ROTOR]
    if cdp.model in unsupported:
        print("Skipping the unsupported '%s' model." % cdp.model)
        return

    # Initialise the angle vector (cone opening angle 1, cone opening angle 2, torsion angle).
    angles = zeros(3, float64)

    # Cone opening.
    if cdp.model in MODEL_LIST_ISO_CONE:
        angles[0] = angles[1] = cdp.cone_theta
    elif cdp.model in MODEL_LIST_PSEUDO_ELLIPSE:
        angles[0] = cdp.cone_theta_y
        angles[1] = cdp.cone_theta_x

    # Non-zero torsion angle.
    if cdp.model in MODEL_LIST_FREE_ROTORS:
        angles[2] = pi
    elif cdp.model in MODEL_LIST_RESTRICTED_TORSION:
        angles[2] = cdp.cone_sigma_max

    # The motional eigenframe.
    frame = generate_axis_system()

    # Mode ordering from largest to smallest.
    indices = argsort(angles)
    angles = angles[indices[::-1]]
    frame = transpose(transpose(frame)[indices[::-1]])

    # The pivot point.
    pivot = generate_pivot(order=1, pdb_limit=True)

    # Loop over each mode.
    for i in range(3):
        # Skip modes with no motion.
        if angles[i] < 1e-7:
            continue

        # Open the output file.
        file_name = "%s_mode%i.pdb" % (root, i + 1)
        file = open_write_file(file_name=file_name, dir=dir, force=force)

        # The structure.
        structure = deepcopy(cdp.structure)
        if structure.num_models() > 1:
            structure.collapse_ensemble(model_num=model)

        # Shift to the average position.
        average_position(structure=structure, models=[None])

        # Create the representation.
        mode_distribution(file=file,
                          structure=structure,
                          axis=frame[:, i],
                          angle=angles[i],
                          pivot=pivot,
                          atom_id=domain_moving())

        # Close the file.
        file.close()
Example #58
0
def write_main_file(file=None, dir=None, output_dir=None, f_tol=1e-25, max_iter=10000000, r1=False, force=False):
    """Create the main CATIA execution file.

    @keyword file:          The main CATIA execution file.
    @type file:             str
    @keyword dir:           The directory to place the files into.
    @type dir:              str or None
    @keyword output_dir:    The CATIA output directory, located within the directory specified by the dir argument.  This directory will be created.
    @type output_dir:       str
    @keyword f_tol:         The function tolerance cut-off for the minimisation.
    @type f_tol:            float
    @keyword max_iter:      The maximum number of iterations for the minimisation.
    @type max_iter:         int
    @keyword r1:            A flag which if True will cause the R1 data to be used for off-resonance effects.
    @type r1:               bool
    @keyword force:         A flag which if True will cause a pre-existing file to be overwritten.
    @type force:            bool
    """

    # The file.
    catia_in = open_write_file(file_name=file, dir=dir, force=force)

    # The R2eff input sets.
    for frq in loop_frq():
        frq_label = int(frq*1e-6)
        file_name = "data_set_%i.inp" % frq_label
        catia_in.write("ReadDataset(%s)\n" % file_name)

    # Write out the data.
    catia_in.write("ReadParam_Global(ParamGlobal.inp)\n")
    catia_in.write("ReadParam_Local(ParamSet1.inp)\n")
    catia_in.write("\n")

    # The R1 data for off-resonance effects (the shift_file variable is assumed to be defined at the module level).
    if r1:
        catia_in.write("ReadParam(Omega;%s;0;1)\n" % shift_file)
        for frq in loop_frq():
            frq_label = int(frq*1e-6)
            param = "R1iph_%s" % frq_label
            r1_file = "R1_%s.out" % frq_label
            catia_in.write("ReadParam(%s;%s;0;1)\n" % (param, r1_file))
        catia_in.write("\n")

    # Fix these off-resonance parameters.
    catia_in.write("FreeLocalParam(all;Omega;false)\n")
    for frq in loop_frq():
        frq_label = int(frq*1e-6)
        param = "R1iph_%s" % frq_label
        catia_in.write("FreeLocalParam(all;%s;false)\n" % param)
    catia_in.write("\n")

    # Minimisation.
    catia_in.write("Minimize(print=y;tol=%s;maxiter=%i)\n" % (f_tol, max_iter))
    catia_in.write("\n")

    # Plotting.
    catia_in.write("PrintParam(%s/GlobalParam.fit;global)\n" % output_dir)
    catia_in.write("PrintParam(%s/DeltaOmega.fit;DeltaO)\n" % output_dir)
    catia_in.write("PrintData(%s/)\n" % output_dir)
    catia_in.write("\n")

    # Calculate the chi-squared value (not sure why, it's calculated in the minimisation).
    catia_in.write("ChiSq(all;all)\n")

    # Exit the program.
    catia_in.write("exit(0)\n")

    # Close the file.
    catia_in.close()
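For a single 600 MHz field with r1=False, output_dir='output' and the default tolerances, the generated main file would contain the following (a sketch assembled from the write statements above):

ReadDataset(data_set_600.inp)
ReadParam_Global(ParamGlobal.inp)
ReadParam_Local(ParamSet1.inp)

FreeLocalParam(all;Omega;false)
FreeLocalParam(all;R1iph_600;false)

Minimize(print=y;tol=1e-25;maxiter=10000000)

PrintParam(output/GlobalParam.fit;global)
PrintParam(output/DeltaOmega.fit;DeltaO)
PrintData(output/)

ChiSq(all;all)
exit(0)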
Example #59
0
    def matplotlib_surface_plot(self):
        """Function to write matplotlib script to plot surfaces of parameters."""

        # Add ".par" to file prefix
        mapfile_name = '"%s.par"' % self.file_prefix

        # If the point file is not None.
        if self.point_file != None:
            pointfile_name = '"%s.par"' % self.point_file
        else:
            pointfile_name = "None"

        # Open the file.
        plot_file = open_write_file(file_name=self.file_prefix + '.py',
                                    dir=self.dir,
                                    force=True)

        matplotlib_file = [
            'from copy import deepcopy' + "\n",
            'import numpy as np' + "\n",
            'import scipy.interpolate' + "\n",
            'from numpy.ma import masked_where' + "\n",
            '' + "\n",
            'from mpl_toolkits.mplot3d import axes3d' + "\n",
            'import matplotlib.pyplot as plt' + "\n",
            'from matplotlib import cm' + "\n",
            '' + "\n",
            '# Open file and get header.' + "\n",
            'mapfile_name = %s' % mapfile_name + "\n",
            'pointfile_name = %s' % pointfile_name + "\n",
            '' + "\n",
            'mapfile = open(mapfile_name, "r")' + "\n",
            'lines = mapfile.readlines()' + "\n",
            'mapfile.close()' + "\n",
            'header = lines[0].split()[1:]' + "\n",
            '' + "\n",
            '# Prepare the dtype for reading file.' + "\n",
            'dtype_str = "i8,f8,f8,f8,f8,i8,f8,f8,f8,f8"' + "\n",
            '' + "\n",
            'print("Fileheader is: %s"%header)' + "\n",
            'print("Value types are: %s"%dtype_str)' + "\n",
            '' + "\n",
            '# Load the data.' + "\n",
            'data = np.genfromtxt(fname=mapfile_name, dtype=dtype_str, names=header)'
            + "\n",
            '' + "\n",
            '# Load the point data' + "\n",
            'if pointfile_name:' + "\n",
            '    # Load the point data.' + "\n",
            '    data_p = np.genfromtxt(fname=pointfile_name, dtype=dtype_str, names=header)'
            + "\n",
            '    ' + "\n",
            '' + "\n",
            '# Define where to cut the data, as the minimum.' + "\n",
            'header_min = header[6:10]' + "\n",
            '' + "\n",
            '# Define to cut at min map point.' + "\n",
            'map_min_par0 = data[header_min[0]][0]' + "\n",
            'map_min_par1 = data[header_min[1]][0]' + "\n",
            'map_min_par2 = data[header_min[2]][0]' + "\n",
            'map_min_chi2 = data[header_min[3]][0]' + "\n",
            '' + "\n",
            '# Now get the headers for the data.' + "\n",
            'header_val = header[1:5]' + "\n",
            '' + "\n",
            '# Define which 2D maps to create, as a list of 2 parameters, and at which third parameter to cut the values.'
            + "\n",
            'maps_xy = [header_val[0], header_val[1], header_val[2], map_min_par2]'
            + "\n",
            'maps_xz = [header_val[0], header_val[2], header_val[1], map_min_par1]'
            + "\n",
            'maps_yz = [header_val[1], header_val[2], header_val[0], map_min_par0]'
            + "\n",
            '' + "\n",
            'maps = [maps_xy, maps_xz, maps_yz]' + "\n",
            '' + "\n",
            '# Nr of columns is number of maps.' + "\n",
            'nr_cols = 1' + "\n",
            '# Nr of rows is 2, for the 3d projection and imshow' + "\n",
            'nr_rows = 2' + "\n",
            '' + "\n",
            '# Loop over the maps:' + "\n",
            'for x_par, y_par, z_par, z_cut in maps:' + "\n",
            '    # Define figure' + "\n",
            '    fig = plt.figure()' + "\n",
            '' + "\n",
            '    # Define c_par' + "\n",
            '    c_par = header_val[3]' + "\n",
            '' + "\n",
            '    # Now get the values for the map.' + "\n",
            '    map_x = data[x_par]' + "\n",
            '    map_y = data[y_par]' + "\n",
            '    map_z = data[z_par]' + "\n",
            '    map_c = data[c_par]' + "\n",
            '' + "\n",
            '    # Now define which map to create.' + "\n",
            '    mask_xy = masked_where(map_z == z_cut, map_z)' + "\n",
            '    map_mask_x = map_x[mask_xy.mask]' + "\n",
            '    map_mask_y = map_y[mask_xy.mask]' + "\n",
            '    map_mask_c = map_c[mask_xy.mask]' + "\n",
            '' + "\n",
            '    # Define min and max values.' + "\n",
            '    map_mask_x_min = map_mask_x.min()' + "\n",
            '    map_mask_x_max = map_mask_x.max()' + "\n",
            '    map_mask_y_min = map_mask_y.min()' + "\n",
            '    map_mask_y_max = map_mask_y.max()' + "\n",
            '    map_mask_c_min = map_mask_c.min()' + "\n",
            '    map_mask_c_max = map_mask_c.max()' + "\n",
            '' + "\n",
            '    # Set up a regular grid of interpolation points' + "\n",
            '    int_points = 300' + "\n",
            '    xi, yi = np.linspace(map_mask_x_min, map_mask_x_max, int_points), np.linspace(map_mask_y_min, map_mask_y_max, int_points)'
            + "\n",
            '    xi, yi = np.meshgrid(xi, yi)' + "\n",
            '' + "\n",
            '    # Interpolate to create grid' + "\n",
            '    ci = scipy.interpolate.griddata((map_mask_x, map_mask_y), map_mask_c, (xi, yi), method="linear")'
            + "\n",
            '' + "\n",
            '    # Set which x, y, z to plot' + "\n",
            '    x_p = xi' + "\n",
            '    y_p = yi' + "\n",
            '    c_p = deepcopy(ci)' + "\n",
            '' + "\n",
            '    # Cut map at a certain height.' + "\n",
            '    # First get index of largest values' + "\n",
            '    #z_max = map_mask_c_max' + "\n",
            '    z_max = map_mask_c_min + 0.5*map_mask_c_min' + "\n",
            '    ci_mask = masked_where(ci >= z_max, ci)' + "\n",
            '' + "\n",
            '    # Replace with 0.0' + "\n",
            '    c_p[ci_mask.mask] = 0.0' + "\n",
            '    # Find new max' + "\n",
            '    new_max = np.max(c_p)' + "\n",
            '' + "\n",
            '    # Insert values in array.' + "\n",
            '    c_p[ci_mask.mask] = new_max' + "\n",
            '' + "\n",
            '    # Define min.' + "\n",
            '    z_min = map_mask_c_min - 0.5*map_mask_c_min' + "\n",
            '' + "\n",
            '    # Create figure and plot' + "\n",
            '    ax = fig.add_subplot(nr_rows, nr_cols, 1, projection="3d")' +
            "\n",
            '    ax.plot_surface(x_p, y_p, c_p, rstride=8, cstride=8, alpha=0.3)'
            + "\n",
            '' + "\n",
            '    # Possibly add scatter points for the map.' + "\n",
            '    #ax.scatter(map_x, map_y, map_c, c="b", marker="o", s=5)' +
            "\n",
            '' + "\n",
            '    # One could also make the mesh just from the values, but this requires much memory.'
            + "\n",
            '    ##ax.scatter(x_p, y_p, c_p, c="y", marker="o", s=5)' + "\n",
            '' + "\n",
            '    # Add contour levels on sides.' + "\n",
            '    ax.contour(x_p, y_p, c_p, zdir="z", offset=z_min, cmap=cm.coolwarm)'
            + "\n",
            '    ax.contour(x_p, y_p, c_p, zdir="x", offset=map_mask_x_min, cmap=cm.coolwarm)'
            + "\n",
            '    ax.contour(x_p, y_p, c_p, zdir="y", offset=map_mask_y_min, cmap=cm.coolwarm)'
            + "\n",
            '' + "\n",
            '    # Add scatter values, for 5 lowest values.' + "\n",
            '    x_par_min = x_par + "_sort"' + "\n",
            '    y_par_min = y_par + "_sort"' + "\n",
            '    c_par_min = c_par + "_sort"' + "\n",
            '    mp_x = data[x_par_min][0:5]' + "\n",
            '    mp_y = data[y_par_min][0:5]' + "\n",
            '    mp_c = data[c_par_min][0:5]' + "\n",
            '    ax.scatter(mp_x[0], mp_y[0], mp_c[0], c="r", marker="o", s=200)'
            + "\n",
            '    ax.scatter(mp_x[1:], mp_y[1:], mp_c[1:], c="g", marker="o", s=100)'
            + "\n",
            '' + "\n",
            '    # Add points from file, as the closest point in map.' + "\n",
            '    if pointfile_name:' + "\n",
            '        if data_p[x_par].ndim == 0:' + "\n",
            '            points_x = np.asarray([data_p[x_par]])' + "\n",
            '            points_y = np.asarray([data_p[y_par]])' + "\n",
            '        else:' + "\n",
            '            points_x = data_p[x_par]' + "\n",
            '            points_y = data_p[y_par]' + "\n",
            '' + "\n",
            '        # Normalize, by division of largest number of map' + "\n",
            '        points_x_norm = points_x / map_mask_x_max' + "\n",
            '        points_y_norm = points_y / map_mask_y_max' + "\n",
            '        map_mask_x_norm = map_mask_x / map_mask_x_max' + "\n",
            '        map_mask_y_norm = map_mask_y / map_mask_y_max' + "\n",
            '' + "\n",
            '        p_x = []' + "\n",
            '        p_y = []' + "\n",
            '        p_c = []' + "\n",
            '        # Now calculate the Euclidean distance in the space, to find the map point that best represents the point.'
            + "\n",
            '        for i, point_x_norm in enumerate(points_x_norm):' + "\n",
            '            point_y_norm = points_y_norm[i]' + "\n",
            '' + "\n",
            '            # Get the distance.' + "\n",
            '            dist = np.sqrt( (map_mask_x_norm - point_x_norm)**2 + (map_mask_y_norm - point_y_norm)**2)'
            + "\n",
            '' + "\n",
            '            # Return the indices of the minimum values along an axis.'
            + "\n",
            '            min_index = np.argmin(dist)' + "\n",
            '            p_x.append(map_mask_x[min_index])' + "\n",
            '            p_y.append(map_mask_y[min_index])' + "\n",
            '            p_c.append(map_mask_c[min_index])' + "\n",
            '' + "\n",
            '        # Convert to numpy array' + "\n",
            '        p_x = np.asarray(p_x)' + "\n",
            '        p_y = np.asarray(p_y)' + "\n",
            '        p_c = np.asarray(p_c)' + "\n",
            '' + "\n",
            '        # Plot points' + "\n",
            '        ax.scatter(p_x, p_y, p_c, c="m", marker="o", s=50)' +
            "\n",
            '' + "\n",
            '' + "\n",
            '    # Set label' + "\n",
            '    ax.set_xlabel("%s"%x_par)' + "\n",
            '    ax.set_ylabel("%s"%y_par)' + "\n",
            '    ax.set_zlabel("%s"%c_par)' + "\n",
            '' + "\n",
            '' + "\n",
            '    # Set limits' + "\n",
            '    ax.set_zlim(z_min, z_max)' + "\n",
            '' + "\n",
            '' + "\n",
            '    # Create figure and plot' + "\n",
            '    ax = fig.add_subplot(nr_rows, nr_cols, 2)' + "\n",
            '    fig_imshow = ax.imshow(ci, vmin=map_mask_c_min, vmax=map_mask_c_max, origin="lower", extent=[map_mask_x_min, map_mask_x_max, map_mask_y_min, map_mask_y_max])'
            + "\n",
            '' + "\n",
            '    # Add scatter values, for 5 lowest values.' + "\n",
            '    ax.scatter(mp_x[0], mp_y[0], c=mp_c[0], marker="o", s=200)' +
            "\n",
            '    ax.scatter(mp_x[1:], mp_y[1:], c="g", marker="o", s=100)' +
            "\n",
            '' + "\n",
            '    # Also add point to this map.' + "\n",
            '    if pointfile_name:' + "\n",
            '        # Plot points' + "\n",
            '        ax.scatter(p_x, p_y, c="m", marker="o", s=50)' + "\n",
            '' + "\n",
            '    # Set label' + "\n",
            '    ax.set_xlabel("%s"%x_par)' + "\n",
            '    ax.set_ylabel("%s"%y_par)' + "\n",
            '' + "\n",
            '    # Add colorbar.' + "\n",
            '    fig.subplots_adjust(right=0.8)' + "\n",
            '    cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.3])' + "\n",
            '    fig.colorbar(fig_imshow, cax=cbar_ax)' + "\n",
            '' + "\n",
            '# Show plot.' + "\n",
            'plt.show()' + "\n",
            '' + "\n",
        ]

        # Loop over the lines and write.
        for line in matplotlib_file:
            plot_file.write(line)

        # Close the file.
        plot_file.close()
Example #60
0
        #print(k_stat, n_stat, chi2, "point is %s=%3.3f, %s=%3.3f"% (params[0], values[0], params[1], values[1]))

        # Progress incrementation and printout.
        percent = percent + percent_inc
        print(
            "%-10s%8.3f%-8s%-8g" %
            ("Progress:", percent, "%,  " + repr(values) + ",  f(x): ", chi2))

        # Append to data.
        data.append(["%3.3f" % values[0], "%3.3f" % values[1], "%3.3f" % chi2])

        # Save all values of chi2.  To help find a reasonable level for the Innermost, Inner, Middle and Outer Isosurface.
        all_chi.append(chi2)

        # Increment the value of the second parameter.
        values[1] = values[1] + step_size[1]

    # Increment the value of the first parameter.
    values[0] = values[0] + step_size[0]

print("\nMin cluster point %s=%3.3f, %s=%3.3f, with chi2=%3.3f" %
      (params[0], pcm[0], params[1], pcm[1], pre_chi2))

# Open file
file_name = '1_create_surface_data_S65_dw_r2a_FT128.txt'
surface_file = open_write_file(file_name=file_name, dir=None, force=True)
write_data(out=surface_file, headings=headings, data=data)

# Close file
surface_file.close()
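The truncated grid-scan fragments above (Examples #56 and #60) both come from the same kind of two-parameter scan over a chi-squared surface.  A minimal, self-contained reconstruction of the loop structure they imply, with a placeholder chi-squared function and hypothetical start values and step sizes:

# Hypothetical grid dimensions, step sizes and starting values.
num_steps = [10, 10]
step_size = [0.1, 0.5]
start = [1.0, 10.0]

data = []
all_chi = []
values = list(start)
for i in range(num_steps[0]):
    values[1] = start[1]
    for j in range(num_steps[1]):
        # Placeholder chi2 surface standing in for the relax target function.
        chi2 = (values[0] - 1.5)**2 + (values[1] - 12.0)**2

        # Append to data and save all values of chi2.
        data.append(["%3.3f" % values[0], "%3.3f" % values[1], "%3.3f" % chi2])
        all_chi.append(chi2)

        # Increment the value of the second parameter.
        values[1] += step_size[1]

    # Increment the value of the first parameter.
    values[0] += step_size[0]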