Example #1
    def test_count_spins(self):
        """Test that the number of spins can be properly counted.

        The function tested is pipe_control.mol_res_spin.count_spins().
        """

        # Test the number of spins counted.
        self.assertEqual(mol_res_spin.count_spins(), 4)
        self.assertEqual(mol_res_spin.count_spins(skip_desel=False), 8)
        self.assertEqual(mol_res_spin.count_spins(selection='@N5'), 1)
        self.assertEqual(mol_res_spin.count_spins(selection='@N5', skip_desel=False), 2)
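The test above exercises pipe_control.mol_res_spin.count_spins() against a fixture containing eight spins, four of which are deselected. As a minimal sketch of the same call patterns outside of a unit test, assuming an interactive relax session with sequence data already loaded into the current data pipe:

from pipe_control import mol_res_spin

# Count only the selected spins of the current data pipe (the default behaviour).
num_selected = mol_res_spin.count_spins()

# Count every spin, deselected ones included.
num_all = mol_res_spin.count_spins(skip_desel=False)

# Restrict the count to a spin ID selection string ('@N5' is just an illustrative ID).
num_n5 = mol_res_spin.count_spins(selection='@N5')

print("%i of %i spins are selected; %i match '@N5'." % (num_selected, num_all, num_n5))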
Example #2
def compare_sequence(pipe1=None, pipe2=None, fail=True):
    """Compare the sequence in two data pipes.

    @keyword pipe1:     The name of the first data pipe.
    @type pipe1:        str
    @keyword pipe2:     The name of the second data pipe.
    @type pipe2:        str
    @keyword fail:      A flag which if True causes a RelaxError to be raised.
    @type fail:         bool
    @return:            1 if the sequence is the same, 0 if different.
    @rtype:             int
    @raises RelaxError: If the sequence is different and the fail flag is True.
    """

    # Failure status.
    status = 1

    # Molecule number.
    if count_molecules(pipe=pipe1) != count_molecules(pipe=pipe2):
        status = 0
        if fail:
            raise RelaxDiffMolNumError(pipe1, pipe2)

    # Residue number.
    if count_residues(pipe=pipe1) != count_residues(pipe=pipe2):
        status = 0
        if fail:
            raise RelaxDiffResNumError(pipe1, pipe2)

    # Spin number.
    if count_spins(pipe=pipe1) != count_spins(pipe=pipe2):
        status = 0
        if fail:
            raise RelaxDiffSpinNumError(pipe1, pipe2)

    # Create a string representation of the 2 sequences.
    seq1 = ''
    seq2 = ''
    for spin, spin_id in spin_loop(return_id=True, pipe=pipe1):
        seq1 = seq1 + spin_id + '\n'
    for spin, spin_id in spin_loop(return_id=True, pipe=pipe2):
        seq2 = seq2 + spin_id + '\n'

    # Sequence check.
    if seq1 != seq2:
        status = 0
        if fail:
            raise RelaxDiffSeqError(pipe1, pipe2)

    # Return the status.
    return status
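A minimal usage sketch for compare_sequence(), assuming a relax session in which two hypothetical data pipes named 'free' and 'bound' have already been created and loaded with sequence data, and that the function is available in the current namespace:

# Compare the two sequences without raising a RelaxError, then report the outcome.
if compare_sequence(pipe1='free', pipe2='bound', fail=False):
    print("The molecule, residue and spin sequences of the two pipes match.")
else:
    print("The sequences of the two pipes differ.")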
Example #3
def determine_seq_type(spin_id=None):
    """Determine the spin sequence data type.

    The purpose is to identify systems whereby only spins or only residues exist.

    @keyword spin_id:   The spin identification string.
    @type spin_id:      str
    @return:            The spin sequence data type.  This can be one of 'spin', 'res' or 'mixed'.
    @rtype:             str
    """

    # Count the molecules, residues, and spins.
    num_mol = count_molecules(spin_id)
    num_res = count_residues(spin_id)
    num_spin = count_spins(spin_id)

    # Only residues.
    if num_mol == 1 and num_spin == 1:
        return 'res'

    # Only spins.
    if num_mol == 1 and num_res == 1:
        return 'spin'

    # Mixed.
    return 'mixed'
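A minimal sketch of how the returned data type might be used, assuming a relax session with sequence data loaded and an illustrative spin ID string:

# Classify the sequence data for a hypothetical residue range selection.
seq_type = determine_seq_type(spin_id=':1-10')

if seq_type == 'spin':
    print("Only spin information is present.")
elif seq_type == 'res':
    print("Only residue information is present.")
else:
    print("Both residue and spin information are present.")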
Example #4
    def test_pcs_load(self):
        """Test for the loading of some PCS data with the spin ID format."""

        # Create a data pipe.
        self.interpreter.pipe.create('test', 'N-state')

        # Data directory.
        dir = status.install_path + sep+'test_suite'+sep+'shared_data'+sep+'align_data'+sep

        # Load the spins.
        self.interpreter.sequence.read(file='pcs.txt', dir=dir, spin_name_col=1)
        self.interpreter.sequence.display()

        # Load the PCSs.
        self.interpreter.pcs.read(align_id='tb', file='pcs.txt', dir=dir, spin_name_col=1, data_col=2)
        self.interpreter.sequence.display()

        # The PCSs.
        pcs = [0.004, 0.008, 0.021, 0.029, 0.016, 0.010, 0.008, 0.003, 0.006, 0.003, 0.007, 0.005, 0.001, 0.070, None, 0.025, 0.098, 0.054, 0.075, 0.065, None, 0.070, 0.015, 0.098, 0.060, 0.120]

        # Checks.
        self.assertEqual(count_spins(), 26)
        self.assertEqual(len(cdp.interatomic), 0)
        i = 0
        for spin in spin_loop():
            self.assertEqual(pcs[i], spin.pcs['tb'])
            i += 1
Example #5
def display(sep=None, mol_name_flag=False, res_num_flag=False, res_name_flag=False, spin_num_flag=False, spin_name_flag=False):
    """Display the molecule, residue, and/or spin sequence data.

    This calls the write() function to do most of the work.


    @keyword sep:               The column separator which, if None, defaults to whitespace.
    @type sep:                  str or None
    @keyword mol_name_flag:     A flag which if True will cause the molecule name column to be
                                written.
    @type mol_name_flag:        bool
    @keyword res_num_flag:      A flag which if True will cause the residue number column to be
                                written.
    @type res_num_flag:         bool
    @keyword res_name_flag:     A flag which if True will cause the residue name column to be
                                written.
    @type res_name_flag:        bool
    @keyword spin_name_flag:    A flag which if True will cause the spin name column to be written.
    @type spin_name_flag:       bool
    @keyword spin_num_flag:     A flag which if True will cause the spin number column to be
                                written.
    @type spin_num_flag:        bool
    """

    # Test if the sequence data is loaded.
    if not count_spins():
        raise RelaxNoSequenceError

    # Write the data.
    write(file=sys.stdout, sep=sep, mol_name_flag=mol_name_flag, res_num_flag=res_num_flag, res_name_flag=res_name_flag, spin_num_flag=spin_num_flag, spin_name_flag=spin_name_flag)
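A minimal usage sketch for display(), assuming sequence data has already been loaded into the current data pipe:

# Print the sequence to standard output as a comma-separated table,
# limited to the residue and spin columns.
display(sep=',', mol_name_flag=False, res_num_flag=True, res_name_flag=True, spin_num_flag=True, spin_name_flag=True)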
Example #6
    def test_rdc_load(self):
        """Test for the loading of some RDC data with the spin ID format."""

        # Create a data pipe.
        self.interpreter.pipe.create('test', 'N-state')

        # Data directory.
        dir = status.install_path + sep+'test_suite'+sep+'shared_data'+sep+'align_data'+sep

        # Load the spins.
        self.interpreter.sequence.read(file='tb.txt', dir=dir, spin_id_col=1)
        self.interpreter.sequence.attach_protons()
        self.interpreter.sequence.display()

        # Load the RDCs.
        self.interpreter.rdc.read(align_id='tb', file='tb.txt', dir=dir, spin_id1_col=1, spin_id2_col=2, data_col=3, error_col=4)
        self.interpreter.sequence.display()

        # The RDCs.
        rdcs = [ -26.2501958629, 9.93081766942, 7.26317614156, -1.24840526981, 5.31803314334, 14.0362909456, 1.33652530397, -1.6021670281]

        # Checks.
        self.assertEqual(count_spins(), 16)
        self.assertEqual(len(cdp.interatomic), 8)
        i = 0
        for interatom in interatomic_loop():
            self.assertAlmostEqual(rdcs[i], interatom.rdc['tb'])
            i += 1
Example #7
def write(file, dir=None, sep=None, mol_name_flag=True, res_num_flag=True, res_name_flag=True, spin_num_flag=True, spin_name_flag=True, force=False):
    """Write the molecule, residue, and/or sequence data.

    This calls the lib.io.write_spin_data() function to do most of the work.


    @param file:                The name of the file to write the data to.
    @type file:                 str
    @keyword dir:               The directory to contain the file (defaults to the current directory if None).
    @type dir:                  str or None
    @keyword sep:               The column separator which, if None, defaults to whitespace.
    @type sep:                  str or None
    @keyword mol_name_flag:     A flag which if True will cause the molecule name column to be written.
    @type mol_name_flag:        bool
    @keyword res_num_flag:      A flag which if True will cause the residue number column to be written.
    @type res_num_flag:         bool
    @keyword res_name_flag:     A flag which if True will cause the residue name column to be written.
    @type res_name_flag:        bool
    @keyword spin_name_flag:    A flag which if True will cause the spin name column to be written.
    @type spin_name_flag:       bool
    @keyword spin_num_flag:     A flag which if True will cause the spin number column to be written.
    @type spin_num_flag:        bool
    @keyword force:             A flag which if True will cause an existing file to be overwritten.
    @type force:                bool
    """

    # Test if the sequence data is loaded.
    if not count_spins():
        raise RelaxNoSequenceError

    # Init the data.
    mol_names = []
    res_nums = []
    res_names = []
    spin_nums = []
    spin_names = []

    # Spin loop.
    for spin, mol_name, res_num, res_name in spin_loop(full_info=True):
        mol_names.append(mol_name)
        res_nums.append(res_num)
        res_names.append(res_name)
        spin_nums.append(spin.num)
        spin_names.append(spin.name)

    # Remove unwanted data.
    if not mol_name_flag:
        mol_names = None
    if not res_num_flag:
        res_nums = None
    if not res_name_flag:
        res_names = None
    if not spin_num_flag:
        spin_nums = None
    if not spin_name_flag:
        spin_names = None

    # Write the data.
    write_spin_data(file=file, dir=dir, sep=sep, mol_names=mol_names, res_nums=res_nums, res_names=res_names, spin_nums=spin_nums, spin_names=spin_names, force=force)
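A minimal usage sketch for write(), assuming sequence data is loaded; the file name is purely illustrative:

# Write the full molecule, residue and spin table to 'sequence.txt' in the
# current directory, overwriting any existing file.
write(file='sequence.txt', dir=None, force=True)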
Example #8
    def _num_instances_spin(self):
        """Return the number of instances, equal to the number of selected spins.

        @return:    The number of instances (equal to the number of spins).
        @rtype:     int
        """

        # Test if sequence data is loaded.
        if not exists_mol_res_spin_data():
            raise RelaxNoSequenceError

        # Return the number of spins.
        return count_spins()
Example #9
    def test_pcs_copy_different_spins(self):
        """Test the operation of the pcs.copy user function for two data pipes with different spin system."""

        # Data directory.
        dir = status.install_path + sep+'test_suite'+sep+'shared_data'+sep+'align_data'+sep

        # Set up two identical data pipes.
        pipes = ['orig', 'new']
        delete = ['@C2', '@H17']
        for i in range(2):
            # Create a data pipe.
            self.interpreter.pipe.create(pipes[i], 'N-state')

            # Load the spins.
            self.interpreter.sequence.read(file='pcs.txt', dir=dir, spin_name_col=1)

            # Delete the spin.
            self.interpreter.spin.delete(delete[i])
            self.interpreter.sequence.display()

        # Load the PCSs into the first data pipe.
        self.interpreter.pipe.switch('orig')
        self.interpreter.pcs.read(align_id='tb', file='pcs.txt', dir=dir, spin_name_col=1, data_col=2)

        # Copy the PCSs into the second data pipe.
        self.interpreter.pcs.copy(pipe_from='orig', pipe_to='new', align_id='tb')

        # Checks.
        pcs = [
            [0.004, 0.021, 0.029, 0.016, 0.010, 0.008, 0.003, 0.006, 0.003, 0.007, 0.005, 0.001, 0.070, None, 0.025, 0.098, 0.054, 0.075, 0.065, None, 0.070, 0.015, 0.098, 0.060, 0.120],
            [0.004, 0.008, 0.021, 0.029, 0.016, 0.010, 0.008, 0.003, 0.006, 0.003, 0.007, 0.005, 0.001, 0.070, None, 0.025, 0.098, 0.054, 0.075, 0.065, None, 0.070, 0.015, 0.098, 0.120]
        ]
        for i in range(2):
            print("\nChecking data pipe '%s'." % pipes[i])
            self.assert_(hasattr(ds[pipes[i]], 'align_ids'))
            self.assert_('tb' in ds[pipes[i]].align_ids)
            self.assert_(hasattr(ds[pipes[i]], 'pcs_ids'))
            self.assert_('tb' in ds[pipes[i]].pcs_ids)
            self.assertEqual(count_spins(), 25)
            self.assertEqual(len(cdp.interatomic), 0)
            j = 0
            for spin in spin_loop(pipe=pipes[i]):
                # Atom C2 in the 'new' data pipe has no PCSs.
                if i == 1 and j == 1:
                    self.assert_(not hasattr(spin, 'pcs'))
                else:
                    if pcs[i][j] == None:
                        self.assertEqual(pcs[i][j], spin.pcs['tb'])
                    else:
                        self.assertAlmostEqual(pcs[i][j], spin.pcs['tb'])
                j += 1
Example #10
    def test_rdc_copy(self):
        """Test the operation of the rdc.copy user function."""

        # Create a data pipe.
        self.interpreter.pipe.create('orig', 'N-state')

        # Data directory.
        dir = status.install_path + sep + 'test_suite' + sep + 'shared_data' + sep + 'align_data' + sep

        # Load the spins.
        self.interpreter.sequence.read(file='tb.txt', dir=dir, spin_id_col=1)
        self.interpreter.sequence.attach_protons()
        self.interpreter.sequence.display()

        # Load the RDCs.
        self.interpreter.rdc.read(align_id='tb',
                                  file='tb.txt',
                                  dir=dir,
                                  spin_id1_col=1,
                                  spin_id2_col=2,
                                  data_col=3,
                                  error_col=4)
        self.interpreter.sequence.display()

        # The RDCs.
        rdcs = [
            -26.2501958629, 9.93081766942, 7.26317614156, -1.24840526981,
            5.31803314334, 14.0362909456, 1.33652530397, -1.6021670281
        ]

        # Create a new data pipe by copying the old, then switch to it.
        self.interpreter.pipe.copy(pipe_from='orig', pipe_to='new')
        self.interpreter.pipe.switch(pipe_name='new')

        # Delete the RDC data.
        self.interpreter.rdc.delete()

        # Copy the RDCs.
        self.interpreter.rdc.copy(pipe_from='orig', align_id='tb')

        # Checks.
        self.assert_(hasattr(cdp, 'align_ids'))
        self.assert_('tb' in cdp.align_ids)
        self.assert_(hasattr(cdp, 'rdc_ids'))
        self.assert_('tb' in cdp.rdc_ids)
        self.assertEqual(count_spins(), 16)
        self.assertEqual(len(cdp.interatomic), 8)
        i = 0
        for interatom in interatomic_loop():
            self.assertAlmostEqual(rdcs[i], interatom.rdc['tb'])
            i += 1
Example #11
    def test_pcs_copy(self):
        """Test the operation of the pcs.copy user function."""

        # Create a data pipe.
        self.interpreter.pipe.create('orig', 'N-state')

        # Data directory.
        dir = status.install_path + sep + 'test_suite' + sep + 'shared_data' + sep + 'align_data' + sep

        # Load the spins.
        self.interpreter.sequence.read(file='pcs.txt',
                                       dir=dir,
                                       spin_name_col=1)
        self.interpreter.sequence.display()

        # Load the PCSs.
        self.interpreter.pcs.read(align_id='tb',
                                  file='pcs.txt',
                                  dir=dir,
                                  spin_name_col=1,
                                  data_col=2)
        self.interpreter.sequence.display()

        # The PCSs.
        pcs = [
            0.004, 0.008, 0.021, 0.029, 0.016, 0.010, 0.008, 0.003, 0.006,
            0.003, 0.007, 0.005, 0.001, 0.070, None, 0.025, 0.098, 0.054,
            0.075, 0.065, None, 0.070, 0.015, 0.098, 0.060, 0.120
        ]

        # Create a new data pipe by copying the old, then switch to it.
        self.interpreter.pipe.copy(pipe_from='orig', pipe_to='new')
        self.interpreter.pipe.switch(pipe_name='new')

        # Delete the PCS data.
        self.interpreter.pcs.delete()

        # Copy the PCSs.
        self.interpreter.pcs.copy(pipe_from='orig', align_id='tb')

        # Checks.
        self.assert_(hasattr(cdp, 'align_ids'))
        self.assert_('tb' in cdp.align_ids)
        self.assert_(hasattr(cdp, 'pcs_ids'))
        self.assert_('tb' in cdp.pcs_ids)
        self.assertEqual(count_spins(), 26)
        self.assertEqual(len(cdp.interatomic), 0)
        i = 0
        for spin in spin_loop():
            self.assertEqual(pcs[i], spin.pcs['tb'])
            i += 1
Example #12
    def test_count_no_spins(self):
        """Test that the number of spins (zero) can be properly counted.

        The function tested is pipe_control.mol_res_spin.count_spins().
        """

        # Reset relax.
        reset()

        # Add a data pipe to the data store.
        ds.add(pipe_name='orig', pipe_type='mf')

        # Test the number of spins counted.
        self.assertEqual(mol_res_spin.count_spins(), 0)
Example #13
    def spin_count(self):
        """Count the number of loaded spins, returning a string formatted as 'xxx spins loaded'.

        @return:    The number of loaded spins in the format 'xxx spins loaded'.
        @rtype:     str
        """

        # The data pipe.
        if hasattr(self.data, 'pipe_name'):
            pipe = self.data.pipe_name
        else:
            pipe = cdp_name()

        # The count.
        if not has_pipe(pipe):
            num = 0
        else:
            num = count_spins(pipe=pipe)

        # Return the formatted string.
        return "%s spins loaded and selected" % num
Example #14
def copy(pipe_from=None, pipe_to=None, spin_id1=None, spin_id2=None, verbose=True):
    """Copy the interatomic data from one data pipe to another.

    @keyword pipe_from:         The data pipe to copy the interatomic data from.  This defaults to the current data pipe.
    @type pipe_from:            str
    @keyword pipe_to:           The data pipe to copy the interatomic data to.  This defaults to the current data pipe.
    @type pipe_to:              str
    @keyword spin_id1:          The spin ID string of the first atom.
    @type spin_id1:             str
    @keyword spin_id2:          The spin ID string of the second atom.
    @type spin_id2:             str
    @keyword verbose:           A flag which if True will cause info about each spin pair to be printed out.
    @type verbose:              bool
    """

    # Defaults.
    if pipe_from == None and pipe_to == None:
        raise RelaxError("The pipe_from and pipe_to arguments cannot both be set to None.")
    elif pipe_from == None:
        pipe_from = pipes.cdp_name()
    elif pipe_to == None:
        pipe_to = pipes.cdp_name()

    # Test if the pipe_from and pipe_to data pipes exist.
    pipes.test(pipe_from)
    pipes.test(pipe_to)

    # Check that the spin IDs exist.
    if spin_id1:
        if count_spins(selection=spin_id1, pipe=pipe_from, skip_desel=False) == 0:
            raise RelaxNoSpinError(spin_id1, pipe_from)
        if count_spins(selection=spin_id1, pipe=pipe_to, skip_desel=False) == 0:
            raise RelaxNoSpinError(spin_id1, pipe_to)
    if spin_id2:
        if count_spins(selection=spin_id2, pipe=pipe_from, skip_desel=False) == 0:
            raise RelaxNoSpinError(spin_id2, pipe_from)
        if count_spins(selection=spin_id2, pipe=pipe_to, skip_desel=False) == 0:
            raise RelaxNoSpinError(spin_id2, pipe_to)

    # Check for the sequence data in the target pipe if no spin IDs are given.
    if not spin_id1 and not spin_id2:
        for spin, spin_id in spin_loop(pipe=pipe_from, return_id=True):
            if not return_spin(spin_id, pipe=pipe_to):
                raise RelaxNoSpinError(spin_id, pipe_to)

    # Test if pipe_from contains interatomic data (skipping the rest of the function if it is missing).
    if not exists_data(pipe_from):
        return

    # Loop over the interatomic data of the pipe_from data pipe.
    ids = []
    for interatom in interatomic_loop(selection1=spin_id1, selection2=spin_id2, pipe=pipe_from):
        # Create a new container.
        new_interatom = create_interatom(spin_id1=interatom.spin_id1, spin_id2=interatom.spin_id2, pipe=pipe_to)

        # Duplicate all the objects of the container.
        for name in dir(interatom):
            # Skip special objects.
            if search('^_', name):
                continue

            # Skip the spin IDs.
            if name in ['spin_id1', 'spin_id2']:
                continue

            # Skip class methods.
            if name in list(interatom.__class__.__dict__.keys()):
                continue

            # Duplicate all other objects.
            obj = deepcopy(getattr(interatom, name))
            setattr(new_interatom, name, obj)

        # Store the IDs for the printout.
        ids.append([repr(interatom.spin_id1), repr(interatom.spin_id2)])

    # Print out.
    if verbose:
        write_data(out=sys.stdout, headings=["Spin_ID_1", "Spin_ID_2"], data=ids)
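A minimal usage sketch for the interatomic copy() function above, assuming a relax session with two hypothetical data pipes 'orig' and 'new' that share the same spin system:

# Copy every interatomic data container from 'orig' into 'new', printing the
# spin ID pairs as they are duplicated.
copy(pipe_from='orig', pipe_to='new', verbose=True)

# Copy only the containers linking the backbone amide spins (the '@N' and '@H'
# spin IDs are illustrative).
copy(pipe_from='orig', pipe_to='new', spin_id1='@N', spin_id2='@H')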
Example #15
    def test_rdc_copy_back_calc(self):
        """Test the operation of the rdc.copy user function for back-calculated values."""

        # Data directory.
        dir = status.install_path + sep+'test_suite'+sep+'shared_data'+sep+'align_data'+sep

        # Set up two identical data pipes.
        pipes = ['orig', 'new']
        delete = [':6', ':11']
        for i in range(2):
            # Create a data pipe.
            self.interpreter.pipe.create(pipes[i], 'N-state')

            # Load the spins.
            self.interpreter.sequence.read(file='tb.txt', dir=dir, spin_id_col=1)
            self.interpreter.spin.element('N')

            # Delete the residue.
            self.interpreter.residue.delete(delete[i])

            # Attach protons.
            self.interpreter.sequence.attach_protons()
            self.interpreter.sequence.display()

            # Create the interatomic data containers.
            self.interpreter.interatom.define(spin_id1='@N', spin_id2='@H')

        # Printout.
        print("\n\nInteratomic data containers for the 'orig' data pipe:")
        for interatom in interatomic_loop(pipe='orig'):
            print("'%s' '%s'" % (interatom.spin_id1, interatom.spin_id2))
        print("\nInteratomic data containers for the 'new' data pipe:")
        for interatom in interatomic_loop(pipe='new'):
            print("'%s' '%s'" % (interatom.spin_id1, interatom.spin_id2))

        # Load the RDCs into the first data pipe.
        self.interpreter.pipe.switch('orig')
        self.interpreter.rdc.read(align_id='tb', file='tb.txt', dir=dir, spin_id1_col=1, spin_id2_col=2, data_col=3, error_col=4)

        # Create back-calculated RDC values from the real values.
        for interatom in interatomic_loop():
            if hasattr(interatom, 'rdc'):
                if not hasattr(interatom, 'rdc_bc'):
                    interatom.rdc_bc = {}
                interatom.rdc_bc['tb'] = interatom.rdc['tb'] + 1.0

        # Copy the RDCs, including back-calculated values, into the second data pipe.
        self.interpreter.rdc.copy(pipe_from='orig', pipe_to='new', align_id='tb', back_calc=True)

        # Checks.
        rdcs = [
            [ -26.2501958629, 7.26317614156, -1.24840526981, 5.31803314334, 14.0362909456, 1.33652530397, -1.6021670281],
            [ -26.2501958629, 9.93081766942, 7.26317614156, -1.24840526981, 5.31803314334, 14.0362909456, -1.6021670281]
        ]
        for i in range(2):
            print("\nChecking data pipe '%s'." % pipes[i])

            # Metadata.
            self.assert_(hasattr(ds[pipes[i]], 'align_ids'))
            self.assert_('tb' in ds[pipes[i]].align_ids)
            self.assert_(hasattr(ds[pipes[i]], 'rdc_ids'))
            self.assert_('tb' in ds[pipes[i]].rdc_ids)

            # Spin data.
            self.assertEqual(count_spins(pipe=pipes[i]), 14)
            self.assertEqual(len(ds[pipes[i]].interatomic), 7)
            j = 0
            for interatom in interatomic_loop(pipe=pipes[i]):
                # Residue 6 in the 'new' data pipe has no RDCs.
                if i == 1 and j == 1:
                    self.assert_(not hasattr(interatom, 'rdc'))
                    self.assert_(not hasattr(interatom, 'rdc_data_types'))
                    self.assert_(not hasattr(interatom, 'absolute_rdc'))
                else:
                    self.assertAlmostEqual(rdcs[i][j], interatom.rdc['tb'])
                    self.assertAlmostEqual(rdcs[i][j]+1.0, interatom.rdc_bc['tb'])
                    self.assert_(hasattr(interatom, 'rdc_data_types'))
                    self.assert_('tb' in interatom.rdc_data_types)
                    self.assertEqual(interatom.rdc_data_types['tb'], 'D')
                    self.assert_(hasattr(interatom, 'absolute_rdc'))
                    self.assert_('tb' in interatom.absolute_rdc)
                    self.assertEqual(interatom.absolute_rdc['tb'], False)
                j += 1
Example #16
    def test_curve_fitting_height_estimate_error(self):
        """Test the relaxation curve fitting C modules and estimate error."""

        # Reset
        self.interpreter.reset()

        # Create pipe.
        pipe_name = 'base pipe'
        pipe_bundle = 'relax_fit'
        self.interpreter.pipe.create(pipe_name=pipe_name, bundle=pipe_bundle, pipe_type='relax_fit')

        # The intensity type.
        ds.int_type = 'height'

        # Create the data pipe and load the base data.
        data_path = status.install_path + sep+'test_suite'+sep+'shared_data'+sep+'curve_fitting'

        # Create the spins
        self.interpreter.spectrum.read_spins(file="T2_ncyc1_ave.list", dir=data_path)

        # Relaxation times (in seconds).
        times = [
            0.0176,
            0.0176,
            0.0352,
            0.0704,
            0.0704,
            0.1056,
            0.1584,
            0.1584,
            0.1936,
            0.1936
            ]

        # Spectrum names.
        names = [
            'T2_ncyc1_ave',
            'T2_ncyc1b_ave',
            'T2_ncyc2_ave',
            'T2_ncyc4_ave',
            'T2_ncyc4b_ave',
            'T2_ncyc6_ave',
            'T2_ncyc9_ave',
            'T2_ncyc9b_ave',
            'T2_ncyc11_ave',
            'T2_ncyc11b_ave'
        ]


        # Loop over Spectrum names.
        for i, sname in enumerate(names):
            # Get the time.
            time = times[i]

            # Load the peak intensities.
            self.interpreter.spectrum.read_intensities(file=sname+'.list', dir=data_path, spectrum_id=sname, int_method=ds.int_type)

            # Set the relaxation times.
            self.interpreter.relax_fit.relax_time(time=time, spectrum_id=sname)

        self.interpreter.deselect.spin(':3,11,18,19,23,31,42,44,54,66,82,92,94,99,101,113,124,126,136,141,145,147,332,345,346,358,361')

        GRID_INC = 11
        MC_SIM = 3
        results_dir = mkdtemp()
        #min_method = 'simplex'
        #min_method = 'BFGS'
        min_method = 'newton'

        # Deselect one more spin.
        self.interpreter.deselect.spin(':512@ND2')

        # Set the relaxation curve type.
        self.interpreter.relax_fit.select_model('exp')

        # Do the automatic analysis.
        if True:
            relax_fit.Relax_fit(pipe_name=pipe_name, pipe_bundle=pipe_bundle, file_root='R2', results_dir=results_dir, grid_inc=GRID_INC, mc_sim_num=MC_SIM, view_plots=False)

        else:
            # Prepare for finding duplicates.

            # Collect all times, and matching spectrum ID.
            all_times = []
            all_id = []
            for spectrum_id in cdp.relax_times:
                all_times.append(cdp.relax_times[spectrum_id])
                all_id.append(spectrum_id)
    
            # Get the duplicates.
            dublicates = [(val, [i for i in range(len(all_times)) if all_times[i] == val]) for val in all_times]
    
            # Loop over the list of the mapping of times and duplications.
            list_dub_mapping = []
            for i, dub in enumerate(dublicates):
                # Get the current spectrum ID.
                cur_spectrum_id = all_id[i]
    
                # Get the tuple of time and indexes of duplications.
                time, list_index_occur = dub
    
                # Collect mapping of index to id.
                id_list = []
                if len(list_index_occur) > 1:
                    for list_index in list_index_occur:
                        id_list.append(all_id[list_index])
    
                # Store to list
                list_dub_mapping.append((cur_spectrum_id, id_list))
    
            # Assign the duplicates.
            for spectrum_id, dub_pair in list_dub_mapping:
                if len(dub_pair) > 0:
                    self.interpreter.spectrum.replicated(spectrum_ids=dub_pair)
    
            # Test that the number of replicates stored in cdp is 4.
            self.assertEqual(len(cdp.replicates), 4)


            # Peak intensity error analysis.
            self.interpreter.spectrum.error_analysis()

            # Grid search.
            self.interpreter.minimise.grid_search(inc=GRID_INC)

            # Minimise.
            self.interpreter.minimise.execute(min_method, scaling=False, constraints=False)

            # Monte Carlo simulations.
            self.interpreter.monte_carlo.setup(number=MC_SIM)
            self.interpreter.monte_carlo.create_data()
            self.interpreter.monte_carlo.initial_values()
            self.interpreter.minimise.execute(min_method, scaling=False, constraints=False)
            self.interpreter.monte_carlo.error_analysis()

        # Test seq
        tseq = [ [4, 'GLY', ':4@N'],
                 [5, 'SER', ':5@N'],
                 [6, 'MET', ':6@N'],
                 [7, 'ASP', ':7@N'],
                 [8, 'SER', ':8@N'],
                 [12, 'GLY', ':12@N']]

        # Print spins
        i = 0
        for cur_spin, mol_name, resi, resn, spin_id in spin_loop(full_info=True, return_id=True, skip_desel=True):
            print(resi, resn, spin_id)
            self.assertEqual(resi, tseq[i][0])
            self.assertEqual(resn, tseq[i][1])
            self.assertEqual(spin_id, tseq[i][2])

            i += 1

        # Test the number of spins.
        self.assertEqual(count_spins(), 6)

        # Check the curve-fitting results.
        self.check_curve_fitting_manual()

        # Compare rx errors.
        if True:
            # Estimate rx and i0 errors.
            self.interpreter.error_analysis.covariance_matrix()

            # Collect:
            i0_est = []
            i0_err_est = []
            rx_est = []
            rx_err_est = []
            for cur_spin, mol_name, resi, resn, spin_id in spin_loop(full_info=True, return_id=True, skip_desel=True):
                i0_est.append(cur_spin.i0)
                i0_err_est.append(cur_spin.i0_err)
                rx_est.append(cur_spin.rx)
                rx_err_est.append(cur_spin.rx_err)

            # Set the number of MC simulations.
            MC_SIM = 200

            # Monte Carlo simulations.
            self.interpreter.monte_carlo.setup(number=MC_SIM)
            self.interpreter.monte_carlo.create_data()
            self.interpreter.monte_carlo.initial_values()
            self.interpreter.minimise.execute(min_method, scaling=False, constraints=False)
            self.interpreter.monte_carlo.error_analysis()

            # Collect:
            i0_mc = []
            i0_err_mc = []
            rx_mc = []
            rx_err_mc = []
            for cur_spin, mol_name, resi, resn, spin_id in spin_loop(full_info=True, return_id=True, skip_desel=True):
                i0_mc.append(cur_spin.i0)
                i0_err_mc.append(cur_spin.i0_err)
                rx_mc.append(cur_spin.rx)
                rx_err_mc.append(cur_spin.rx_err)

            # Now print and compare
            i = 0
            print("Comparison between error estimation from Jacobian co-variance matrix and Monte-Carlo simulations.")
            print("Spin ID: rx_err_diff=est-MC, i0_err_diff=est-MC, rx_err=est/MC, i0_err=est/MC, i0=est/MC, rx=est/MC.")
            for cur_spin, mol_name, resi, resn, spin_id in spin_loop(full_info=True, return_id=True, skip_desel=True):
                # Extract for estimation.
                i0_est_i = i0_est[i]
                i0_err_est_i = i0_err_est[i]
                rx_est_i = rx_est[i]
                rx_err_est_i = rx_err_est[i]

                # Extract from monte carlo.
                i0_mc_i = i0_mc[i]
                i0_err_mc_i = i0_err_mc[i]
                rx_mc_i = rx_mc[i]
                rx_err_mc_i = rx_err_mc[i]

                # Add to counter.
                i += 1

                # Prepare text.
                rx_err_diff = rx_err_est_i - rx_err_mc_i
                i0_err_diff = i0_err_est_i - i0_err_mc_i

                text = "Spin '%s': rx_err_diff=%3.4f, i0_err_diff=%3.3f, rx_err=%3.4f/%3.4f, i0_err=%3.3f/%3.3f, rx=%3.3f/%3.3f, i0=%3.3f/%3.3f" % (spin_id, rx_err_diff, i0_err_diff, rx_err_est_i, rx_err_mc_i, i0_err_est_i, i0_err_mc_i, rx_est_i, rx_mc_i, i0_est_i, i0_mc_i)
                print(text)
Example #17
    def test_pcs_copy_back_calc(self):
        """Test the operation of the pcs.copy user function for back-calculated values."""

        # Data directory.
        dir = status.install_path + sep + 'test_suite' + sep + 'shared_data' + sep + 'align_data' + sep

        # Set up two identical data pipes.
        pipes = ['orig', 'new']
        delete = ['@C2', '@H17']
        for i in range(2):
            # Create a data pipe.
            self.interpreter.pipe.create(pipes[i], 'N-state')

            # Load the spins.
            self.interpreter.sequence.read(file='pcs.txt',
                                           dir=dir,
                                           spin_name_col=1)

            # Delete the spin.
            self.interpreter.spin.delete(delete[i])
            self.interpreter.sequence.display()

        # Load the PCSs into the first data pipe.
        self.interpreter.pipe.switch('orig')
        self.interpreter.pcs.read(align_id='tb',
                                  file='pcs.txt',
                                  dir=dir,
                                  spin_name_col=1,
                                  data_col=2)

        # Create back-calculated PCS values from the real values.
        for spin in spin_loop():
            if hasattr(spin, 'pcs'):
                if not hasattr(spin, 'pcs_bc'):
                    spin.pcs_bc = {}
                spin.pcs_bc['tb'] = spin.pcs['tb']
                if spin.pcs_bc['tb'] != None:
                    spin.pcs_bc['tb'] += 1.0

        # Copy the PCSs into the second data pipe.
        self.interpreter.pcs.copy(pipe_from='orig',
                                  pipe_to='new',
                                  align_id='tb',
                                  back_calc=True)

        # Checks.
        pcs = [
            [0.004, 0.021, 0.029, 0.016, 0.010, 0.008, 0.003, 0.006, 0.003, 0.007, 0.005, 0.001, 0.070, None, 0.025, 0.098, 0.054, 0.075, 0.065, None, 0.070, 0.015, 0.098, 0.060, 0.120],
            [0.004, 0.008, 0.021, 0.029, 0.016, 0.010, 0.008, 0.003, 0.006, 0.003, 0.007, 0.005, 0.001, 0.070, None, 0.025, 0.098, 0.054, 0.075, 0.065, None, 0.070, 0.015, 0.098, 0.120]
        ]
        for i in range(2):
            print("\nChecking data pipe '%s'." % pipes[i])
            self.assert_(hasattr(ds[pipes[i]], 'align_ids'))
            self.assert_('tb' in ds[pipes[i]].align_ids)
            self.assert_(hasattr(ds[pipes[i]], 'pcs_ids'))
            self.assert_('tb' in ds[pipes[i]].pcs_ids)
            self.assertEqual(count_spins(pipe=pipes[i]), 25)
            self.assertEqual(len(ds[pipes[i]].interatomic), 0)
            j = 0
            for spin in spin_loop(pipe=pipes[i]):
                # Atom C2 in the 'new' data pipe has no PCSs.
                if i == 1 and j == 1:
                    self.assert_(not hasattr(spin, 'pcs'))
                else:
                    if pcs[i][j] is None:
                        self.assertEqual(None, spin.pcs['tb'])
                        self.assertEqual(None, spin.pcs_bc['tb'])
                    else:
                        self.assertAlmostEqual(pcs[i][j], spin.pcs['tb'])
                        self.assertAlmostEqual(pcs[i][j] + 1.0,
                                               spin.pcs_bc['tb'])
                j += 1
Exemplo n.º 31
0
def copy(pipe_from=None, pipe_to=None, spin_id1=None, spin_id2=None, verbose=True):
    """Copy the interatomic data from one data pipe to another.

    @keyword pipe_from:         The data pipe to copy the interatomic data from.  This defaults to the current data pipe.
    @type pipe_from:            str
    @keyword pipe_to:           The data pipe to copy the interatomic data to.  This defaults to the current data pipe.
    @type pipe_to:              str
    @keyword spin_id1:          The spin ID string of the first atom.
    @type spin_id1:             str
    @keyword spin_id2:          The spin ID string of the second atom.
    @type spin_id2:             str
    @keyword verbose:           A flag which if True will cause info about each spin pair to be printed out.
    @type verbose:              bool
    """

    # Defaults.
    if pipe_from is None and pipe_to is None:
        raise RelaxError("The pipe_from and pipe_to arguments cannot both be set to None.")
    elif pipe_from is None:
        pipe_from = pipes.cdp_name()
    elif pipe_to is None:
        pipe_to = pipes.cdp_name()

    # Test if the pipe_from and pipe_to data pipes exist.
    check_pipe(pipe_from)
    check_pipe(pipe_to)

    # Check that the spin IDs exist.
    if spin_id1:
        if count_spins(selection=spin_id1, pipe=pipe_from, skip_desel=False) == 0:
            raise RelaxNoSpinError(spin_id1, pipe_from)
        if count_spins(selection=spin_id1, pipe=pipe_to, skip_desel=False) == 0:
            raise RelaxNoSpinError(spin_id1, pipe_to)
    if spin_id2:
        if count_spins(selection=spin_id2, pipe=pipe_from, skip_desel=False) == 0:
            raise RelaxNoSpinError(spin_id2, pipe_from)
        if count_spins(selection=spin_id2, pipe=pipe_to, skip_desel=False) == 0:
            raise RelaxNoSpinError(spin_id2, pipe_to)

    # Check for the sequence data in the target pipe if no spin IDs are given.
    if not spin_id1 and not spin_id2:
        for spin, spin_id in spin_loop(pipe=pipe_from, return_id=True):
            if not return_spin(spin_id=spin_id, pipe=pipe_to):
                raise RelaxNoSpinError(spin_id, pipe_to)

    # Test if pipe_from contains interatomic data (skipping the rest of the function if it is missing).
    if not exists_data(pipe_from):
        return

    # Loop over the interatomic data of the pipe_from data pipe.
    ids = []
    for interatom in interatomic_loop(selection1=spin_id1, selection2=spin_id2, pipe=pipe_from):
        # Create a new container.
        new_interatom = create_interatom(spin_id1=interatom.spin_id1, spin_id2=interatom.spin_id2, pipe=pipe_to)

        # Duplicate all the objects of the container.
        for name in dir(interatom):
            # Skip special objects.
            if search('^_', name):
                continue

            # Skip the spin IDs.
            if name in ['spin_id1', 'spin_id2']:
                continue

            # Skip class methods.
            if name in interatom.__class__.__dict__:
                continue

            # Duplicate all other objects.
            obj = deepcopy(getattr(interatom, name))
            setattr(new_interatom, name, obj)

        # Store the IDs for the printout.
        ids.append([repr(interatom.spin_id1), repr(interatom.spin_id2)])

        # Reconfigure the spin hashes.
        hash_update(interatom=new_interatom, pipe=pipe_to)

    # Print out.
    if verbose:
        write_data(out=sys.stdout, headings=["Spin_ID_1", "Spin_ID_2"], data=ids)
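
A minimal usage sketch of the copy() function above follows, assuming a relax session in which the data pipes 'orig' and 'new' already exist and share the same spin sequence; the import path pipe_control.interatomic is an assumption and is not confirmed by this snippet:

# Hedged usage sketch: the two pipes are assumed to exist and to contain the
# same spins, and the module path below is an assumption.
from pipe_control.interatomic import copy

# Copy all interatomic data containers for N-H spin pairs from 'orig' to
# 'new', printing each copied spin pair.
copy(pipe_from='orig', pipe_to='new', spin_id1='@N', spin_id2='@H', verbose=True)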
Exemplo n.º 32
0
    def test_rdc_copy_back_calc(self):
        """Test the operation of the rdc.copy user function for back-calculated values."""

        # Data directory.
        dir = status.install_path + sep + 'test_suite' + sep + 'shared_data' + sep + 'align_data' + sep

        # Set up two identical data pipes.
        pipes = ['orig', 'new']
        delete = [':6', ':11']
        for i in range(2):
            # Create a data pipe.
            self.interpreter.pipe.create(pipes[i], 'N-state')

            # Load the spins.
            self.interpreter.sequence.read(file='tb.txt', dir=dir, spin_id_col=1)
            self.interpreter.spin.element('N')

            # Delete the residue.
            self.interpreter.residue.delete(delete[i])

            # Attach protons.
            self.interpreter.sequence.attach_protons()
            self.interpreter.sequence.display()

            # Create the interatomic data containers.
            self.interpreter.interatom.define(spin_id1='@N', spin_id2='@H')

        # Printout.
        print("\n\nInteratomic data containers for the 'orig' data pipe:")
        for interatom in interatomic_loop(pipe='orig'):
            print("'%s' '%s'" % (interatom.spin_id1, interatom.spin_id2))
        print("\nInteratomic data containers for the 'new' data pipe:")
        for interatom in interatomic_loop(pipe='new'):
            print("'%s' '%s'" % (interatom.spin_id1, interatom.spin_id2))

        # Load the RDCs into the first data pipe.
        self.interpreter.pipe.switch('orig')
        self.interpreter.rdc.read(align_id='tb', file='tb.txt', dir=dir, spin_id1_col=1, spin_id2_col=2, data_col=3, error_col=4)

        # Create back-calculated RDC values from the real values.
        for interatom in interatomic_loop():
            if hasattr(interatom, 'rdc'):
                if not hasattr(interatom, 'rdc_bc'):
                    interatom.rdc_bc = {}
                interatom.rdc_bc['tb'] = interatom.rdc['tb'] + 1.0

        # Copy the RDCs, including back-calculated values, into the second data pipe.
        self.interpreter.rdc.copy(pipe_from='orig', pipe_to='new', align_id='tb', back_calc=True)

        # Checks.
        rdcs = [
            [ -26.2501958629, 7.26317614156, -1.24840526981, 5.31803314334, 14.0362909456, 1.33652530397, -1.6021670281],
            [ -26.2501958629, 9.93081766942, 7.26317614156, -1.24840526981, 5.31803314334, 14.0362909456, -1.6021670281]
        ]
        for i in range(2):
            print("\nChecking data pipe '%s'." % pipes[i])

            # Metadata.
            self.assert_(hasattr(ds[pipes[i]], 'align_ids'))
            self.assert_('tb' in ds[pipes[i]].align_ids)
            self.assert_(hasattr(ds[pipes[i]], 'rdc_ids'))
            self.assert_('tb' in ds[pipes[i]].rdc_ids)

            # Spin data.
            self.assertEqual(count_spins(pipe=pipes[i]), 14)
            self.assertEqual(len(ds[pipes[i]].interatomic), 7)
            j = 0
            for interatom in interatomic_loop(pipe=pipes[i]):
                # Residue 6 in the 'new' data pipe has no RDCs.
                if i == 1 and j == 1:
                    self.assert_(not hasattr(interatom, 'rdc'))
                    self.assert_(not hasattr(interatom, 'rdc_data_types'))
                    self.assert_(not hasattr(interatom, 'absolute_rdc'))
                else:
                    self.assertAlmostEqual(rdcs[i][j], interatom.rdc['tb'])
                    self.assertAlmostEqual(rdcs[i][j]+1.0, interatom.rdc_bc['tb'])
                    self.assert_(hasattr(interatom, 'rdc_data_types'))
                    self.assert_('tb' in interatom.rdc_data_types)
                    self.assertEqual(interatom.rdc_data_types['tb'], 'D')
                    self.assert_(hasattr(interatom, 'absolute_rdc'))
                    self.assert_('tb' in interatom.absolute_rdc)
                    self.assertEqual(interatom.absolute_rdc['tb'], False)
                j += 1
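
Both copy tests in this listing seed synthetic back-calculated values by mirroring the measured value in a dictionary keyed by the alignment ID and shifting it by a constant offset, with None entries propagated unchanged (as in the PCS test). Stripped of the relax data structures, a hypothetical standalone version of that pattern is:

# A minimal sketch of the back-calculation seeding pattern used in the tests
# above: build a dict keyed by the alignment ID that mirrors the measured
# value shifted by a constant offset, propagating missing (None) values.
def seed_back_calc(measured, align_id='tb', offset=1.0):
    value = measured.get(align_id)
    if value is None:
        return {align_id: None}
    return {align_id: value + offset}

# Hypothetical values.
print(seed_back_calc({'tb': 2.5}))    # {'tb': 3.5}
print(seed_back_calc({'tb': None}))   # {'tb': None}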