Example #1
    def export_atoms(self,
                     filePath,
                     indexesOffset=1,
                     format="NAMD_PSF",
                     closeFile=True):
        """
        Exports atoms to ascii file.\n

        :Parameters:
            #. filePath (path): the file path.
            #. indexesOffset (int): atoms indexing starts from zero; this adds an offset.
            #. format (str): The export format. Existing formats are: NAMD_PSF
        """
        try:
            fd = open(filePath, 'w')
        except:
            raise Logger.error("cannot open file %r for writing" % filePath)

        if format is "NAMD_PSF":
            self.__NAMD_PSF_export_atoms__(fd, indexesOffset=indexesOffset)
        else:
            fd.close()
            raise Logger.error("format %r is not defined" % format)
        # close file
        if closeFile:
            fd.close()
Example #2
    def __load_ascii__(self, path):
        tempDir = tempfile.gettempdir()
        # create temp dir
        tempDir = os.path.join(tempDir, "pdbParserTmpDir")
        if not os.path.exists(tempDir):
            try:
                os.makedirs(tempDir)
            except:
                raise Logger.error(
                    "Couldn't create temporary folder %r to extract data" %
                    tempDir)
        # open zipfile
        try:
            zf = zipfile.ZipFile(path, 'r')
        except:
            raise Logger.error("Couldn't open analysis ascii zip file %r." %
                               path)

        # get files names and extract them all to tempDir
        files = zf.namelist()
        zf.extractall(tempDir)

        # read analysis files data
        for fname in files:
            if os.path.basename(fname) in self.results:
                Logger.warn(
                    "analysis name %r already exists. Previous values will be erased and updated with the new ones"
                    % fname)
            self.results[os.path.basename(fname)] = np.loadtxt(
                os.path.join(tempDir, fname))
Example #3
    def export_dihedrals(self,
                         filePath,
                         indexesOffset=1,
                         key="atom_name",
                         format="NAMD_PSF",
                         closeFile=True):
        """
        Exports dihedrals to ascii file.\n

        :Parameters:
            #. filePath (path): the file path.
            #. indexesOffset (int): atoms indexing starts from zero; this adds an offset. Applies only to NAMD_PSF.
            #. key (str): any pdbParser.records attribute. Applies only to NAMD_TOP.
            #. format (str): The export format. Existing formats are: NAMD_PSF, NAMD_TOP
        """
        try:
            fd = open(filePath, 'w')
        except:
            raise Logger.error("cannot open file %r for writing" % filePath)
        if format is "NAMD_PSF":
            self.__NAMD_PSF_export_dihedrals__(fd, indexesOffset=indexesOffset)
        elif format is "NAMD_TOP":
            self.__NAMD_TOP_export_dihedrals__(fd, key=key)
        else:
            fd.close()
            raise Logger.error("format %r is not defined" % format)
        # close file
        if closeFile:
            fd.close()
Example #4
 def __load_datasheet__(self, path):
     # open file
     try:
         fd = open(path, 'r')
     except:
         raise Logger.error("Couldn't open analysis file %r." % path)
     # read keys
     firstLine = fd.readline()
     fd.close()
     try:
         keys = [key.strip() for key in firstLine.split('#')[1].split(';')]
     except:
          raise Logger.error(
              "Couldn't read the first line of analysis datasheet file %r." %
              path)
     # read values
     try:
         values = np.loadtxt(path, delimiter=";")
     except:
         raise Logger.error(
             "Couldn't read the 'data' from analysis datasheet file %r." %
             path)
     # test size
     if values.shape[1] != len(keys):
          raise Logger.error(
              "values and keys lengths don't match, datasheet file %r seems corrupted."
              % path)
     # update results
      for idx, key in enumerate(keys):
          if key in self.results:
             Logger.warn(
                 "analysis name %r already exists. Previous values will be erased and updated with the new ones"
                 % key)
         self.results[key] = values[:, idx]
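The datasheet parsed above is plain text: the first line is '# key1;key2;...' and the remaining lines are ';'-delimited numeric columns. A small sketch that writes such a file and reloads it the same way; the path and column names are hypothetical.

import numpy as np

path = "analysis.xls"  # hypothetical datasheet path
data = np.array([[0.0, 1.0], [0.5, 0.8], [1.0, 0.2]])
with open(path, 'w') as fd:
    fd.write("# time;msd\n")
    for row in data:
        fd.write(";".join("%f" % v for v in row) + "\n")

# parse it the same way __load_datasheet__ does
with open(path, 'r') as fd:
    firstLine = fd.readline()
keys = [key.strip() for key in firstLine.split('#')[1].split(';')]
values = np.loadtxt(path, delimiter=";")  # loadtxt skips the '#' comment line
results = {key: values[:, idx] for idx, key in enumerate(keys)}
print(results["msd"])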
Example #5
def get_atoms_indexes(pdb, indexes):
    """
    check and return indexes after verifying they fall within the trajectory's number of atoms range.\n
 
    :Parameters:
        #. pdb (pdbParser, pdbTrajectory): the pdbParser or pdbTrajectory instance.
        #. indexes (list): The list of indexes.
            
    :Returns:
        #. indexes (list): the verified list of indexes
    """
    assert pdb.__class__.__name__ in (
        "pdbParser", "pdbTrajectory"
    ), Logger.error("pdb must be pdbParser or pdbTrajectory instance")
    assert isinstance(indexes, (list, set, tuple)), Logger.error(
        "indexes must be a list of positive integers smaller than number of atoms"
    )
    indexes = sorted(set(indexes))
    nAtoms = pdb.numberOfAtoms
    assert all(idx % 1 == 0 and 0 <= idx < nAtoms for idx in indexes), Logger.error(
        "indexes must be a list of positive integers smaller than number of atoms"
    )
    return [int(idx) for idx in indexes]
Example #6
 def __setattr__(self, name, value):
     if name == "pdb":
          raise Logger.error(
              "attribute %r is protected, use set_pdb method instead" %
              name)
     else:
         object.__setattr__(self, name, value)
Example #7
 def __init__(self, filename):
     """
     The constructor. 
     
     :Parameters:
         #. filename (string): the binary input file
     """
     # time unit in charmm
     self.charmmTimeToPs = 0.0488882129084
      # Identify the byte order of the file by trial-and-error
      self.__byteOrder = None
      data = open(filename, 'rb').read(4)
     for byte_order in ['<', '>']:
         reclen = struct.unpack(byte_order + 'i', data)[0]
         if reclen == 84:
             self.__byteOrder = byte_order
             break
     if self.__byteOrder is None:
         raise Logger.error("%s is not a DCD file" % filename)
     # Open the file
     self.__binary = FortranBinaryFile(filename, self.__byteOrder)
     # Read the header information
     header_data = self.__binary.next()
     if header_data[:4] != 'CORD':
         raise Logger.error("%s is not a DCD file" % filename)
     self.header = struct.unpack(self.__byteOrder + '9id9i',
                                 header_data[4:])
     self.numberOfConfigurations = self.header[0]
     self.istart = self.header[1]
     self.nsavc = self.header[2]
     self.namnf = self.header[8]
     self.charmmVersion = self.header[-1]
     self.has_pbc_data = False
     self.has_4d = False
     if self.charmmVersion != 0:
         self.header = struct.unpack(self.__byteOrder + '9if10i',
                                     header_data[4:])
         if self.header[10] != 0:
             self.has_pbc_data = True
         if self.header[11] != 0:
             self.has_4d = True
     self.delta = self.header[9] * self.charmmTimeToPs
     # Read the title
     title_data = self.__binary.next()
     nlines = struct.unpack(self.__byteOrder + 'i', title_data[:4])[0]
     assert len(title_data) == 80 * nlines + 4, Logger.error(
         "%s is not a DCD file" % filename)
     title_data = title_data[4:]
     title = []
     for i in range(nlines):
         title.append(title_data[:80].rstrip())
         title_data = title_data[80:]
     self.title = '\n'.join(title)
     # Read the number of atoms.
     self.natoms = self.__binary.get_record('i')[0]
     # Stop if there are fixed atoms.
     if self.namnf > 0:
         raise Logger.error("NAMD converter can not handle fixed atoms yet")
Example #8
 def __get_number_of_types__(self, lines):
     ntypes = False
     line = lines.pop(0)
     if "types of molecules" in line:
         ntypes = int(line.split()[0])
      else:
          raise Logger.error("number of 'types of molecules' not found.")
     return ntypes
Example #9
 def __get_number_of_records__(self, lines):
     nrecords = False
     while lines:
         line = lines.pop(0)
         if "molecules of all types" in line:
             nrecords = int(line.split()[0])
             break
      if not nrecords:
          raise Logger.error("number of 'molecules of all types' not found.")
     return nrecords
Example #10
 def __initialize_variables__(self, clusterToBoxCenter, fold):
     # referenceIndex
     self.restOfAtomsIndexes = list(
         set(self._trajectory.atomsIndexes) - set(self.clusterIndexes))
     # translateToCenter
     assert isinstance(
         clusterToBoxCenter,
         bool), Logger.error("clusterToBoxCenter must be boolean")
     self.clusterToBoxCenter = clusterToBoxCenter
     # fold
     assert isinstance(fold, bool), Logger.error("fold must be boolean")
     self.fold = fold
Example #11
    def get_center_definition(self, center):
        """
        check and return the center definition.\n
 
        :Parameters:
            #. center (dictionary): The center definition
        
        :Returns:
            #. center (dictionary): The verified center definition
        """
        assert isinstance(center,
                          dict), Logger.error("center must be a dictionary")
        assert list(center.keys()) in (["fixed"], ["selection"]), Logger.error(
            "center can have one of two keys '%s'" % (["fixed", "selection"]))
        key = list(center.keys())[0]
        value = center[key]
        # fixed center
        if key == "fixed":
            assert isinstance(
                value, (list, tuple, set, np.ndarray)), Logger.error(
                    "fixed center value must be a list of three floats")
            value = list(value)
            assert len(value) == 3, Logger.error(
                "fixed center value must be a list of three floats")
            try:
                value = np.array([float(val) for val in value])
            except:
                raise Logger.error(
                    "fixed center value must be a list of three floats")
        # selection center
        elif key == "selection":
            assert isinstance(
                value,
                dict), Logger.error("selection value must be a dictionary")
            assert sorted(value.keys()) == (sorted([
                "indexes", "weighting"
            ])), Logger.error(
                "center selection value dictionary must have two keys '%s'" %
                (["indexes", "weighting"]))
            indexes = get_atoms_indexes(self.trajectory, value["indexes"])
            weighting = value["weighting"]
            assert is_element_property(weighting), Logger.error(
                "weighting '%s' doesn't exist in database" % weighting)
            elements = self.trajectory.elements
            weights = np.array([
                get_element_property(elements[idx], weighting)
                for idx in indexes
            ])
            value = {
                "indexes": indexes,
                "weighting": weighting,
                "weights": weights
            }
        else:
            raise Logger.error("center definition not valid")
        return {key: value}
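In the 'selection' branch, the verified definition carries per-atom weights so a weighted center can later be computed directly. A minimal numpy sketch of that computation, with made-up coordinates and weights:

import numpy as np

coordinates = np.array([[0., 0., 0.],
                        [1., 0., 0.],
                        [0., 2., 0.]])
weights = np.array([12., 1., 16.])  # e.g. atomic masses; values are illustrative

# weighted center: sum(w_i * r_i) / sum(w_i)
center = np.sum(coordinates * weights[:, None], axis=0) / np.sum(weights)
print(center)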
Example #12
 def __get_box_vectors__(self, lines):
     while lines:
         line = lines.pop(0)
         if "Defining vectors are:" in line:
             try:
                 ox = [float(it) for it in lines.pop(0).split()]
             except:
                 Logger.error("couldn't parse defining 'OX' vectors")
             else:
                 assert len(ox)==3, "OX vector must have three float entries"
             try:
                 oy = [float(it) for it in lines.pop(0).split()]
             except:
                 Logger.error("couldn't parse defining 'OY' vectors")
             else:
                 assert len(oy)==3, "OY vector must have three float entries"
             try:
                 oz = [float(it) for it in lines.pop(0).split()]
             except:
                 Logger.error("couldn't parse defining 'OZ' vectors")
             else:
                 assert len(oz)==3, "OZ vector must have three float entries"
             break
      if not lines:
          raise Logger.error("simulation box 'Defining vectors' not found.")
      return np.array([ox, oy, oz])
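This parser scans the input for a 'Defining vectors are:' marker and reads the next three lines as the box vectors. The same parse on a fabricated log excerpt:

import numpy as np

# fabricated log excerpt
lines = ["some header",
         " Defining vectors are:",
         " 30.0  0.0  0.0",
         "  0.0 30.0  0.0",
         "  0.0  0.0 30.0"]

vectors = None
while lines:
    line = lines.pop(0)
    if "Defining vectors are:" in line:
        # one row per line, three floats each
        vectors = np.array([[float(it) for it in lines.pop(0).split()]
                            for _ in range(3)])
        break
assert vectors is not None and vectors.shape == (3, 3)
print(vectors)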
Example #13
 def __init__(self,
              trajectory,
              configurationsIndexes,
              cylinderAtomsIndexes,
              targetAtomsIndexes,
              axis=None,
              weighting="equal",
              histBin=1,
              *args,
              **kwargs):
     # set trajectory
     super(MeanSquareDisplacementInCylinder,
           self).__init__(trajectory, *args, **kwargs)
     # set configurations indexes
     self.configurationsIndexes = self.get_trajectory_indexes(
         configurationsIndexes)
     # set atoms indexes
     self.targetAtomsIndexes = self.get_atoms_indexes(targetAtomsIndexes)
     self.cylinderAtomsIndexes = self.get_atoms_indexes(
         cylinderAtomsIndexes)
     # set steps indexes
     self.numberOfSteps = len(self.targetAtomsIndexes)
     # set weighting
      assert is_element_property(weighting), Logger.error(
          "weighting '%s' doesn't exist in database" % weighting)
     self.weighting = weighting
     # set residency time histogram bin
     try:
         self.histBin = float(histBin)
     except:
         raise Logger.error(
             "histBin must be number convertible. %s is given." % histBin)
      assert self.histBin % 1 == 0, Logger.error(
          "histBin must be integer. %s is given." % histBin)
      assert self.histBin > 0, Logger.error(
          "histBin must be positive. %s is given." % histBin)
      assert self.histBin < len(self.configurationsIndexes), Logger.error(
          "histBin must be smaller than numberOfConfigurations")
     # initialize variables
     self.__initialize_variables__(axis)
     # initialize results
     self.__initialize_results__()
     # get cylinder centers, matrices, radii, length
     Logger.info(
         "%s --> initializing cylinder parameters along all configurations"
         % self.__class__.__name__)
     self.cylCenters, self.cylMatrices, self.cylRadii, self.cylLengths = self.__get_cylinder_properties__(
     )
Example #14
def correlation(data1, data2=None):
    """
    Calculates the numerical correlation between two numpy.ndarray data.
    
    :Parameters:
        #. data1 (numpy.ndarray): the first numpy.ndarray. If multidimensional the correlation calculation is performed on the first dimension.
        #. data2 (None, numpy.ndarray): the second numpy.ndarray. If None the data1 autocorrelation is calculated.
    
    :Returns:
        #. correlation (numpy.ndarray): the result of the numerical correlation. 
    """
    # The signal must not be empty.
    assert isinstance(
        data1,
        np.ndarray), Logger.error("data1 must be a non zero numpy.ndarray")
    # The length of data1 is stored in data1Length
    data1Length = len(data1)
    assert data1Length > 0, Logger.error(
        "data1 must be a non zero numpy.ndarray")
    # extendedLength = 2*len(data1)
    extendedLength = 2 * data1Length
    # The FCA algorithm:
    # 1) computation of the FFT of data1 zero-padded until extendedLength
    # The computation is done along the 0-axis
    FFTData1 = FFT(data1, extendedLength, 0)
    if data2 is None:
        # Autocorrelation case
        FFTData2 = FFTData1
    else:
        # 2) computation of the FFT of data2 zero-padded until extendedLength
        # The computation is  done along the 0-axis
        assert isinstance(data2, np.ndarray), Logger.error(
            "if not None, data2 must be a numpy.ndarray")
        FFTData2 = FFT(data2, extendedLength, 0)
    # 3) Product between FFT(data1)* and FFT(data2)
    FFTData1 = np.conjugate(FFTData1) * FFTData2
    # 4) inverse FFT of the product
    # The computation is done along the 0-axis
    FFTData1 = iFFT(FFTData1, len(FFTData1), 0)
    # This refers to (1/(N-m))*Sab in the published algorithm.
    # This is the correlation function defined for positive indexes only.
    if len(FFTData1.shape) == 1:
        corr = FFTData1.real[:data1Length] / (data1Length -
                                              np.arange(data1Length))
    else:
        corr = np.add.reduce(FFTData1.real[:data1Length],
                             1) / (data1Length - np.arange(data1Length))
    return corr
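The FCA algorithm implemented above computes every correlation lag at once: zero-pad to 2N, multiply conjugate(FFT(data1)) by FFT(data2), inverse-transform, and normalize lag m by (N - m). A quick numpy check of the autocorrelation case against the direct O(N^2) sum, independent of the package's FFT wrappers:

import numpy as np

N = 64
data = np.random.random(N)

# FFT route, zero-padded to 2N to avoid circular wrap-around
ft = np.fft.fft(data, 2 * N)
corr_fft = np.fft.ifft(np.conjugate(ft) * ft).real[:N] / (N - np.arange(N))

# direct route: corr[m] = (1/(N-m)) * sum_t data[t]*data[t+m]
corr_direct = np.array([np.dot(data[:N - m], data[m:]) / (N - m)
                        for m in range(N)])

assert np.allclose(corr_fft, corr_direct)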
Example #15
 def get_reciprocal_vectors(self, index=-1):
     """
     returns the basis (3,3) array reciprocal vectors of the box at time index.
     For InfiniteBoundaries calling this method will raise an error.
     """
     raise Logger.error(
         "Infinite universe 'reciprocal vectors' definition is ambiguous")
Example #16
def get_atomic_form_factor(q, element, charge=0):
    """
        Calculates the Q dependent atomic form factor.\n
        :Parameters:
            #. q (list, tuple, numpy.ndarray): the q vector.
            #. element (str): the atomic element.
            #. charge (int): the expected charge of the element.
        
        :Returns:
            #. formFactor (numpy.ndarray): the calculated form factor. 
    """
    assert is_element(element), "%s is not an element in database" % element
    element = str(element).lower()
    assert charge in __atoms_database__[element]['atomicFormFactor'], Logger.error(
        "atomic form factor for element %s with charge %s is not defined in database"
        % (element, charge))
    ff = __atoms_database__[element]['atomicFormFactor'][charge]
    a1 = ff['a1']
    b1 = ff['b1']
    a2 = ff['a2']
    b2 = ff['b2']
    a3 = ff['a3']
    b3 = ff['b3']
    a4 = ff['a4']
    b4 = ff['b4']
    c = ff['c']
    q = np.array(q)
    qOver4piSquare = (q / (4. * np.pi))**2
    t1 = a1 * np.exp(-b1 * qOver4piSquare)
    t2 = a2 * np.exp(-b2 * qOver4piSquare)
    t3 = a3 * np.exp(-b3 * qOver4piSquare)
    t4 = a4 * np.exp(-b4 * qOver4piSquare)
    return t1 + t2 + t3 + t4 + c
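The expression evaluated above is the standard four-Gaussian (Cromer-Mann) parametrization f(q) = sum_i a_i*exp(-b_i*(q/4pi)^2) + c. A standalone sketch of the same arithmetic; the coefficients below are illustrative placeholders, not values taken from __atoms_database__:

import numpy as np

def atomic_form_factor(q, a, b, c):
    """Four-Gaussian form factor: sum_i a[i]*exp(-b[i]*(q/4pi)^2) + c."""
    q = np.asarray(q, dtype=float)
    s2 = (q / (4.0 * np.pi)) ** 2
    return sum(ai * np.exp(-bi * s2) for ai, bi in zip(a, b)) + c

# illustrative coefficients only; real values come from the database
a = [2.31, 1.02, 1.59, 0.87]
b = [20.84, 10.21, 0.57, 51.65]
c = 0.22
q = np.linspace(0.0, 10.0, 5)
print(atomic_form_factor(q, a, b, c))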
Example #17
 def get_box_volume(self, index=-1):
     """
     returns the volume of the box at time index.
     For InfiniteBoundaries calling this method will raise an error.
     """
     raise Logger.error(
         "Infinite universe 'box volume' definition is ambiguous")
Example #18
 def get_nanotube_indexes(self, indexes=None):
     """
     get the nanotube records indexes.
     
      :Parameters:
         #. indexes (None, list, tuple, set, numpy.ndarray): the records indexes.
            If None, records are calculated automatically where the nanotube residue_name is 'CNT'.
     """
     if indexes is None:
         self.nanotubeIndexes = get_records_indexes_in_attribute_values(
             self.pdb.indexes, self.pdb, "residue_name", "CNT")
         if not self.nanotubeIndexes:
             Logger.error("nanotube records indexes must be given.")
             raise
     else:
         self.nanotubeIndexes = indexes
Example #19
 def get_angles(self, index=-1):
     """
     Get the basis alpha(b,c), beta(c,a), gamma(a,b) angles in rad at time index.
     For InfiniteBoundaries calling this method will raise an error.
     """
     raise Logger.error(
         "Infinite universe 'angles definition' is ambiguous")
Example #20
 def get_vectors(self, index=-1):
     """
     Get the basis (3,3) array vectors of the box at time index.
     For InfiniteBoundaries calling this method will raise an error.
     """
     raise Logger.error(
         "Infinite universe 'vectors definition' is ambiguous")
Example #21
 def convert(self):
     if self.format in ('charmm', 'namd'):
         self.trajectory = self.__convert_charmm__()
     else:
         raise Logger.error("unsupported dcd format")
     self.trajectory._filePath = self.dcd
     return self.trajectory
Example #22
    def save(self, path, formats=None):
        """
        Used to export the analysis results stored in self.results dictionary.\n
        
        :Parameters:
            #. path (str): The saving path.
            #. formats (None, str, list): The export format(s). Supported formats are ascii, bin and datasheet.
        """
        if formats is None:
            formats = ["ascii"]
        elif isinstance(formats, str):
            formats = [formats]
        else:
            assert isinstance(formats, (list, tuple))
            formats = list(formats)

        for f in formats:
            if f == "ascii":
                self.__save_ascii__(str(path) + ".zip")
            elif f == "datasheet":
                self.__save_datasheet__(str(path) + ".xls")
            elif f == "bin":
                self.__save_binary__(str(path) + ".pkl")
            else:
                raise Logger.error(
                    "Unknown saving format %r. only %s formats are acceptable"
                    % (f, ["ascii", "bin", 'datasheet']))
        return self
Example #23
def get_number_of_residues(indexes, pdb):
    """
        Calculates the number of every residue type in a pdb file.\n
        residue_name, sequence_number and segment_identifier attributes in the pdb file must be correct.
          
        :Parameters:
            #. indexes (list, tuple, numpy.ndarray): the indexes of pdb.
            #. pdb (pdbParser, pdbTrajectory): the pdbParser or pdbTrajectory instance.

        :Returns:
            #. residues (dictionary): keys are the residue names and values are the numbers encountered.
    """
    if pdb.__class__.__name__ == "pdbTrajectory":
        pdb = pdb._structure
    else:
        assert pdb.__class__.__name__ == "pdbParser", Logger.error(
            "pdb must be pdbParser or pdbTrajectory instance")
    res = get_records_attribute_values(indexes, pdb, "residue_name")
    seq = get_records_attribute_values(indexes, pdb, "sequence_number")
    sid = get_records_attribute_values(indexes, pdb, "segment_identifier")
    # create residues dict
    residues = dict(zip(set(res), [0] * len(set(res))))
    currentSeq = False
    currentSid = False
    for idx in range(len(seq)):
        if seq[idx] != currentSeq or sid[idx] != currentSid:
            currentSeq = seq[idx]
            currentSid = sid[idx]
            residues[res[idx]] += 1
    return residues
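The loop above increments a residue's tally only when the (sequence_number, segment_identifier) pair changes, so consecutive atom records of one residue are counted once. The same logic on fabricated attribute lists:

# fabricated per-atom attributes for two waters and one ion
res = ["HOH", "HOH", "HOH", "HOH", "HOH", "HOH", "NA"]
seq = [1, 1, 1, 2, 2, 2, 3]
sid = ["A", "A", "A", "A", "A", "A", "A"]

residues = dict(zip(set(res), [0] * len(set(res))))
currentSeq, currentSid = None, None
for idx in range(len(seq)):
    if seq[idx] != currentSeq or sid[idx] != currentSid:
        currentSeq = seq[idx]
        currentSid = sid[idx]
        residues[res[idx]] += 1
print(residues)  # {'HOH': 2, 'NA': 1}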
Example #24
def get_model_range_by_attribute_value(pdb, keys, attribute, value):
    """
        Returns (pdb.models[key]["model_start"], pdb.models[key]["model_end"])
        at the first found pdb.models[key][attribute] = value
          
        :Parameters:
            #. pdb (pdbParser, pdbTrajectory): the pdbParser or pdbTrajectory instance.
            #. keys (list, tuple, set): list of keys
            #. attribute (string): record attribute name.
            #. value (object): the desired value
            
        :Returns:
            #. range (tuple): indexes range of model 
    """
    if pdb.__class__.__name__ == "pdbTrajectory":
        pdb = pdb._structure
    else:
        assert pdb.__class__.__name__ == "pdbParser", Logger.error(
            "pdb must be pdbParser or pdbTrajectory instance")
    for key in keys:
        if pdb.models[key][attribute] == value:
            return (pdb.models[key]["model_start"],
                    pdb.models[key]["model_end"])

    return None
Example #25
    def step(self, index):
        """
        analysis step of calculation method.\n
 
        :Parameters:
            #. index (int): the step index

        :Returns:
            #. stepData (object): object used in combine method
        """
        if not isinstance(self._trajectory._boundaryConditions,
                          PeriodicBoundaries):
            raise Logger.error(
                "rebuild cluster is not possible with infinite boundaries trajectory"
            )

        # get configuration index
        confIdx = self.configurationsIndexes[index]
        # get coordinates
        boxCoords = self._trajectory.get_configuration_coordinates(confIdx)
        boxCoords = self._trajectory._boundaryConditions.real_to_box_array(
            realArray=boxCoords, index=confIdx)
        # get box coordinates
        clusterBoxCoords = boxCoords[self.clusterIndexes, :]
        # initialize variables
        incrementalCenter = np.array([0., 0., 0.])
        centerNumberOfAtoms = 0.0
        # incrementally construct cluster
        for idx in range(clusterBoxCoords.shape[0]):
            if idx > 0:
                diff = clusterBoxCoords[idx, :] - (incrementalCenter /
                                                   centerNumberOfAtoms)
                # remove multiple box distances
                intDiff = diff.astype(int)
                clusterBoxCoords[idx, :] -= intDiff
                diff -= intDiff
                # remove half box distances
                clusterBoxCoords[idx, :] = np.where(
                    np.abs(diff) < 0.5, clusterBoxCoords[idx, :],
                    clusterBoxCoords[idx, :] - np.sign(diff))
            incrementalCenter += clusterBoxCoords[idx, :]
            centerNumberOfAtoms += 1.0
        # set cluster atoms new box positions
        boxCoords[self.clusterIndexes, :] = clusterBoxCoords
        # translate cluster in box center
        if self.clusterToBoxCenter:
            # calculate cluster center of mass
            center = np.sum(clusterBoxCoords, 0) / len(self.clusterIndexes)
            # translate cluster to center of box
            boxCoords += np.array([0.5, 0.5, 0.5]) - center
        # fold all but cluster atoms
        if self.fold:
            boxCoords[self.restOfAtomsIndexes, :] %= 1
        # convert to real coordinates
        coords = self._trajectory._boundaryConditions.box_to_real_array(
            boxArray=boxCoords, index=confIdx)
        # set new coordinates
        self._trajectory.set_configuration_coordinates(confIdx, coords)
        return index, None
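The rebuild above works in fractional (box) coordinates, where the box spans [0, 1) along each axis: each new atom is first shifted by whole box lengths, then by one more box length if it still lies more than half a box from the running cluster center. A one-dimensional numpy sketch of the same idea on made-up coordinates:

import numpy as np

# fractional coordinates of a 'cluster' split across the periodic boundary
coords = np.array([0.95, 0.05, 0.10, 0.90])

incrementalCenter = 0.0
nAtoms = 0.0
for idx in range(len(coords)):
    if idx > 0:
        diff = coords[idx] - incrementalCenter / nAtoms
        # remove whole-box offsets, then half-box offsets
        intDiff = int(diff)
        coords[idx] -= intDiff
        diff -= intDiff
        if abs(diff) >= 0.5:
            coords[idx] -= np.sign(diff)
    incrementalCenter += coords[idx]
    nAtoms += 1.0

print(coords)  # contiguous cluster: [0.95, 1.05, 1.10, 0.90]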
Example #26
    def get_center_rotationMatrix(self, coordinates):
        """
        return the center and the rotation matrix.\n
 
        :Parameters:
            #. coordinates (numpy.array): The atoms coordinates   
        
        :Returns:
            #. center (numpy.array): the center
            #. rotationMatrix (numpy.array): the (3X3) rotation matrix
        """
        # principal axis definition
        if list(self.definition.keys())[0] == "principal":
            indexes = list(self.definition.values())[0]
            center, _, _, _, vect1, vect2, vect3 = get_principal_axis(
                indexes, self.trajectory)
            rotationMatrix = np.linalg.inv(np.array([vect1, vect2, vect3]))
        # vector definition
        elif list(self.definition.keys())[0] == "vector":
            values = list(self.definition.values())[0]
            center = values["center"]
            rotationMatrix = values["rotationMatrix"]
        # selection definition
        elif list(self.definition.keys())[0] == "selection":
            selections = list(self.definition.values())[0]
            X0 = np.sum(coordinates[selections[0]], 0) / len(selections[0])
            X1 = np.sum(coordinates[selections[1]], 0) / len(selections[1])
            X = X1 - X0
            norm = np.linalg.norm(X)
            assert norm > 10**-6, Logger.error(
                "vector axis can't return 0 vector")
            X /= norm
            Y = np.array([1, 1, 1]) + np.random.random(3) * X
            Y = np.cross(X, Y)
            norm = np.linalg.norm(Y)
            Y /= norm
            assert abs(np.dot(X, Y)) < 10**-6, Logger.error(
                "vector axis can't return non orthogonal X and Y vectors")
            Z = np.cross(X, Y)
            norm = np.linalg.norm(Z)
            assert norm > 10**-6, Logger.error(
                "vector axis can't return 0 Z vector")
            Z /= norm
            center = 0.5 * (X1 + X0)
            rotationMatrix = np.linalg.inv(np.array([X, Y, Z]))
        return center, rotationMatrix
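The 'selection' branch builds an orthonormal frame whose X axis joins the two selection centers: crossing X with a random non-parallel seed gives Y, and X cross Y gives Z. A compact sketch of that construction, checked for orthonormality:

import numpy as np

X = np.array([1.0, 2.0, 0.5])
X /= np.linalg.norm(X)

# any seed vector not parallel to X works
seed = np.array([1.0, 1.0, 1.0]) + np.random.random(3)
Y = np.cross(X, seed)
Y /= np.linalg.norm(Y)
Z = np.cross(X, Y)  # already unit length since X and Y are orthonormal

frame = np.array([X, Y, Z])
# rows are orthonormal, so frame . frame^T is the identity
assert np.allclose(frame.dot(frame.T), np.eye(3))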
Example #27
 def __write_fnc_file__(self, path, name, bondsMap, bondsMapElementsKey, bonds):
     """
     writes .fnc
     
     :Parameters:
          #. path (str): The RMC++ output configuration file path.
          #. name (str): The title to be put at the beginning of the file.
          #. bondsMap (dict): Dictionary mapping bond element keys to bond indexes. A double dash '--' must separate keys. e.g. {'H2--O': 1, 'H1--O': 2}
          #. bondsMapElementsKey (list): The list of all atom keys in the pdb used to map atoms to bondsMap. e.g. ["H1","H2", ... , "H1","H2","O","O", ...]
          #. bonds (dict): The dictionary of bond indexes. e.g. {1:[100,101], 2:[102,103,104], ..., 999:[], 1000:[20,21], ...}
     """
     try:
         fd = open(path, 'w')
     except:
         Logger.error( "cannot open file %r for writing" %outputPath) 
         raise
     # write pdb name
     fd.write("     "+str(name)+"\n\n")
     # write limits
     bondsMapLUT = {}
     for b,v in bondsMap.items():
         bondsMapLUT[v]=b
     fd.write("     No. of possible rmin-rmax pairs:\n")
     fd.write("      "+str(len(bondsMap))+"\n") 
     # write minimum
     fd.write("0.90".rjust(10)*len(bondsMap)+"\n")        
     # write maximum
     fd.write("2.10".rjust(10)*len(bondsMap)+"\n") 
     constraints = "".join([bondsMapLUT[idx].rjust(10) for idx in sorted(bondsMapLUT.keys())])
     fd.write("! %s \n"%constraints[1:])
     # write number of records
     fd.write("            "+str(len(bonds))+"\n\n")        
     # write records and bonds
     for cr in sorted(bonds.keys()):
         ctList = bonds[cr]         
         fd.write(str(cr+1).rjust(12)+str(len(ctList)).rjust(5)+"\n")                   
         types = "  "
         for ct in ctList:
             fd.write(str(ct+1).rjust(12))
             setted = list(set([bondsMapElementsKey[cr], bondsMapElementsKey[ct]]))
             types += str( bondsMap[str(setted[0])+"--"+str(setted[1])] ).ljust(1)+" "
         fd.write("\n")
         fd.write(types)
         fd.write("\n")
     # close file           
     fd.close()  
Example #28
def get_normalized_weighting(numbers, weights):
    """
    Calculates the normalized weighting scheme for a set of elements.\n   
    :Parameters:
        #. numbers (dictionary): The numbers of elements dictionary. keys are the elements and values are the numbers of elements in the system
        #. weights (dictionary): the weight of every element. keys are the elements and values are the weights. numbers and weights must have the same keys.
        
    :Returns:
        #. normalizedWeights (dictionary): the normalized weighting scheme for every pair of elements. 
    """
    assert isinstance(numbers, dict), Logger.error(
        "numbers must be a dictionary where values are the number of elements")
    assert isinstance(weights, dict), Logger.error(
        "weights must be a dictionary where values are the weights of elements"
    )
    assert set(numbers.keys()) == set(weights.keys()), Logger.error(
        "numbers and weights must have the same dictionary keys. numbers:%s    weights:%s"
        % (numbers.keys(), weights.keys()))
    elements = list(weights.keys())
    nelements = [float(numbers[el]) for el in elements]
    totalNumberOfElements = sum(nelements)
    molarFraction = [n / totalNumberOfElements for n in nelements]
    # total weights
    totalWeight = sum([
        molarFraction[idx] * weights[elements[idx]]
        for idx in range(len(elements))
    ])**2
    # calculate weights
    normalizedWeights = {}
    for idx1 in range(len(elements)):
        el1 = elements[idx1]
        b1 = weights[el1]
        mf1 = molarFraction[idx1]
        for idx2 in range(len(elements)):
            el2 = elements[idx2]
            b2 = weights[el2]
            mf2 = molarFraction[idx2]
            # get pair elements key
            pair = el1 + '-' + el2
            if el2 + '-' + el1 in normalizedWeights:
                normalizedWeights[el2 + '-' +
                                  el1] += mf1 * mf2 * b1 * b2 / totalWeight
            else:
                normalizedWeights[pair] = mf1 * mf2 * b1 * b2 / totalWeight
    return normalizedWeights
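By construction, the pair weights returned above sum to one, since each term is mf1*mf2*b1*b2 divided by the squared mean weight. A quick check calling get_normalized_weighting with made-up composition numbers and weights (the values are illustrative only):

numbers = {"O": 1, "H": 2}           # stoichiometry of water, per molecule
weights = {"O": 5.803, "H": -3.739}  # e.g. coherent scattering lengths (fm); illustrative

w = get_normalized_weighting(numbers, weights)
print(w)
# partial weights over all pairs sum to one
assert abs(sum(w.values()) - 1.0) < 1e-10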
Example #29
 def next(self):
     data = self.__file.read(4)
     if not data:
         raise StopIteration
     reclen = struct.unpack(self.__byteOrder + 'i', data)[0]
     data = self.__file.read(reclen)
     reclen2 = struct.unpack(self.__byteOrder + 'i', self.__file.read(4))[0]
     assert reclen == reclen2, Logger.error("data format not respected")
     return data
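Each Fortran unformatted record is framed by a 4-byte length field on both sides; next() reads the leading length, the payload, then the trailing length, and requires the two lengths to agree. A self-contained sketch writing and re-reading one such record with struct:

import struct
from io import BytesIO

payload = b"CORD" + b"\x00" * 80
byteOrder = '<'  # little-endian

# write: leading length, payload, trailing length
buf = BytesIO()
buf.write(struct.pack(byteOrder + 'i', len(payload)))
buf.write(payload)
buf.write(struct.pack(byteOrder + 'i', len(payload)))
buf.seek(0)

# read it back the way FortranBinaryFile.next() does
reclen = struct.unpack(byteOrder + 'i', buf.read(4))[0]
data = buf.read(reclen)
reclen2 = struct.unpack(byteOrder + 'i', buf.read(4))[0]
assert reclen == reclen2 and data[:4] == b"CORD"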
Example #30
 def initialize_default_attributes(self):
     # self.pdb
     if not hasattr(self, "pdb"):
         object.__setattr__(self, "pdb", pdbParser())
     else: 
          assert isinstance(self.pdb, pdbParser), Logger.error("pdb must be a pdbParser instance")
     # self.filePath
     if not hasattr(self, "filePath"):
         object.__setattr__(self, "filePath", self.__defaults__["filePath"])
     elif self.filePath is not None: 
         try:
             fd = open(self.filePath, 'r')
         except:
             Logger.error("Cannot open %r for reading." %self.filePath)
             raise
         else:
             fd.close()
     # info
     self.info = {}