def listPDBCluster(pdb, ch, sqid=95):
    """Return the PDB sequence cluster that contains chain *ch* in structure
    *pdb* for sequence identity level *sqid*.  The cluster is returned as a
    list of tuples, e.g. ``[('1XXX', 'A'), ]``.  Note that the PDB clusters
    individual chains, so the same PDB identifier may appear twice in the
    same cluster if the corresponding chain is present in the structure
    twice.

    Before this function is used, :func:`fetchPDBClusters` needs to be
    called.  This function will load the PDB sequence clusters for *sqid*
    automatically using :func:`loadPDBClusters`."""

    assert isinstance(pdb, str) and len(pdb) == 4, \
        'pdb must be a 4 char long string'
    assert isinstance(ch, str) and len(ch) == 1, \
        'ch must be a one char long string'
    try:
        sqid = int(sqid)
    except (TypeError, ValueError):  # int() raises either, depending on input
        raise TypeError('sqid must be an integer')
    if not (30 <= sqid <= 100):
        raise ValueError('sqid must be between 30 and 100')
    # snap to the closest identity level for which clusters are available
    sqid = PDB_CLUSTERS_SQIDS[abs(PDB_CLUSTERS_SQIDS - sqid).argmin()]
    PDB_CLUSTERS_PATH = os.path.join(getPackagePath(), 'pdbclusters')
    clusters = PDB_CLUSTERS[sqid]
    if clusters is None:
        loadPDBClusters(sqid)
        clusters = PDB_CLUSTERS[sqid]
    pdb_ch = pdb.upper() + '_' + ch.upper()
    index = clusters.index(pdb_ch)
    maxlen = clusters.index('\n')
    end = clusters.find('\n', index)
    start = clusters.rfind('\n', index - maxlen, end) + 1
    cluster = clusters[start:end]
    return [tuple(item.split('_')) for item in cluster.split()]
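# A minimal usage sketch (the identifiers below are illustrative; assumes
# cluster data is on disk or can be fetched, see fetchPDBClusters and
# loadPDBClusters below):
#
#     cluster = listPDBCluster('1p38', 'A', sqid=90)
#     # e.g. [('1P38', 'A'), ('5UOJ', 'A'), ...]; the same PDB identifier
#     # can recur because clustering is per chain, not per structure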
def loadPDBClusters(sqid=None):
    """Load previously fetched PDB sequence clusters from disk to memory."""

    PDB_CLUSTERS_PATH = os.path.join(getPackagePath(), 'pdbclusters')
    if sqid is None:
        sqid_list = list(PDB_CLUSTERS)
        LOGGER.info('Loading all PDB sequence clusters.')
    else:
        assert isinstance(sqid, int), 'sqid must be an integer'
        if sqid not in PDB_CLUSTERS:
            raise ValueError('PDB cluster data is not available for sequence '
                             'identity {0}%, try one of {1}'
                             .format(sqid, PDB_CLUSTERS_SQID_STR))
        LOGGER.info('Loading PDB sequence clusters for sequence identity '
                    '{0}.'.format(sqid))
        sqid_list = [sqid]
    global PDB_CLUSTERS_UPDATE_WARNING
    for sqid in sqid_list:
        filename = os.path.join(PDB_CLUSTERS_PATH,
                                'bc-{0}.out.gz'.format(sqid))
        if not os.path.isfile(filename):
            fetchPDBClusters(sqid)
        if PDB_CLUSTERS_UPDATE_WARNING:
            import time
            # file age in weeks (604800 seconds per week)
            diff = (time.time() - os.path.getmtime(filename)) / 604800.
            if diff > 1.:
                LOGGER.warning('PDB sequence clusters are {0:.1f} week(s) '
                               'old, call `fetchPDBClusters` to receive '
                               'updates.'.format(diff))
            PDB_CLUSTERS_UPDATE_WARNING = False
        inp = openFile(filename)
        PDB_CLUSTERS[sqid] = inp.read()
        inp.close()
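# Sketch: preload clusters once so repeated listPDBCluster calls read from
# memory (assumes the bc-*.out.gz files were fetched previously; otherwise
# fetchPDBClusters is triggered on demand):
#
#     loadPDBClusters()      # load every available identity level
#     loadPDBClusters(95)    # or just the 95% level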
def fetchPDBClusters(sqid=None):
    """Retrieve PDB sequence clusters.  PDB sequence clusters are the
    results of the weekly clustering of protein chains in the PDB, generated
    by blastclust.  They are available at the FTP site:
    ftp://resources.rcsb.org/sequence/clusters/

    This function will download about 10 MB of data, compress it, and save
    it in your home directory under :file:`.prody/pdbclusters`.  Compressed
    files will be less than 4 MB in size.  Cluster data can be loaded using
    the :func:`loadPDBClusters` function and accessed using
    :func:`listPDBCluster`."""

    if sqid is not None:
        if isListLike(sqid):
            for s in sqid:
                if s not in PDB_CLUSTERS:
                    raise ValueError('sqid must be one or more of ' +
                                     PDB_CLUSTERS_SQID_STR)
            keys = list(sqid)
        else:
            if sqid not in PDB_CLUSTERS:
                raise ValueError('sqid must be one or more of ' +
                                 PDB_CLUSTERS_SQID_STR)
            keys = [sqid]
    else:
        keys = list(PDB_CLUSTERS)

    PDB_CLUSTERS_PATH = os.path.join(getPackagePath(), 'pdbclusters')
    if not os.path.isdir(PDB_CLUSTERS_PATH):
        os.mkdir(PDB_CLUSTERS_PATH)

    LOGGER.progress('Downloading sequence clusters', len(keys),
                    '_prody_fetchPDBClusters')
    count = 0
    for i, x in enumerate(keys):
        filename = 'bc-{0}.out'.format(x)
        url = 'ftp://resources.rcsb.org/sequence/clusters/' + filename
        try:
            inp = openURL(url)
        except IOError:
            LOGGER.warning('Clusters at {0}% sequence identity level could '
                           'not be downloaded.'.format(x))
            continue
        else:
            out = openFile(filename + '.gz', 'w', folder=PDB_CLUSTERS_PATH)
            out.write(inp.read())
            inp.close()
            out.close()
            count += 1
        LOGGER.update(i, label='_prody_fetchPDBClusters')
    LOGGER.finish()
    if len(keys) == count:
        LOGGER.info('All selected PDB clusters were downloaded successfully.')
    elif count == 0:
        LOGGER.warn('PDB clusters could not be downloaded.')
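# Sketch: fetch one or several identity levels; valid values are the keys
# of PDB_CLUSTERS (30, 40, 50, 70, 90, 95, and 100 in ProDy's defaults --
# an assumption, check PDB_CLUSTERS_SQID_STR on your install):
#
#     fetchPDBClusters(95)          # a single level
#     fetchPDBClusters([30, 90])    # list-like input fetches several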
def fetchPDBLigand(cci, filename=None):
    """Fetch PDB ligand data from PDB_ for chemical component *cci*.
    *cci* may be a 3-letter chemical component identifier or a valid XML
    filename.  If *filename* is given, the XML file will be saved with that
    name.

    If you query ligand data frequently, you may configure ProDy to save XML
    files on your computer.  Set the ``ligand_xml_save`` option **True**,
    i.e. ``confProDy(ligand_xml_save=True)``.  Compressed XML files will be
    saved to the ProDy package folder, e.g.
    :file:`/home/user/.prody/pdbligands`.  Each file is around 5Kb when
    compressed.

    This function is compatible with PDBx/PDBML v 4.0.

    Ligand data is returned in a dictionary.  Ligand coordinate atom data
    with *model* and *ideal* coordinate sets are also stored in this
    dictionary.  Note that this dictionary will contain the data present in
    the XML file, and not all Ligand Expo XML files contain every possible
    data field.  So it may be better to use :meth:`dict.get` instead of
    indexing the dictionary, e.g. to retrieve the formula weight (or
    relative molar mass) of the chemical component use
    ``data.get('formula_weight')`` instead of ``data['formula_weight']`` to
    avoid exceptions when this data field is not found in the XML file.  URL
    and/or path of the XML file are returned in the dictionary with keys
    ``url`` and ``path``, respectively.

    The following example downloads data for ligand STI (a.k.a. Gleevec and
    Imatinib) and calculates RMSD between model (X-ray structure 1IEP) and
    ideal (energy minimized) coordinate sets:

    .. ipython:: python

       from prody import *
       ligand_data = fetchPDBLigand('STI')
       ligand_data['model_coordinates_db_code']
       ligand_model = ligand_data['model']
       ligand_ideal = ligand_data['ideal']
       transformation = superpose(ligand_ideal.noh, ligand_model.noh)
       calcRMSD(ligand_ideal.noh, ligand_model.noh)"""

    if not isinstance(cci, str):
        raise TypeError('cci must be a string')

    if isfile(cci):
        inp = openFile(cci)
        xml = inp.read()
        inp.close()
        url = None
        path = cci
        cci = splitext(splitext(split(cci)[1])[0])[0].upper()
    elif len(cci) > 4 or not cci.isalnum():
        raise ValueError('cci must be 3-letters long and alphanumeric or '
                         'a valid filename')
    else:
        xml = None
        cci = cci.upper()
        if SETTINGS.get('ligand_xml_save'):
            folder = join(getPackagePath(), 'pdbligands')
            if not isdir(folder):
                makePath(folder)
            xmlgz = path = join(folder, cci + '.xml.gz')
            if isfile(xmlgz):
                with openFile(xmlgz) as inp:
                    xml = inp.read()
        else:
            # folder initialized here as well, so that saving with
            # *filename* below cannot raise a NameError
            folder = path = None
        #url = ('http://ligand-expo.rcsb.org/reports/{0[0]}/{0}/{0}'
        #       '.xml'.format(cci.upper()))
        url = 'http://files.rcsb.org/ligands/download/{0}.xml'.format(
            cci.upper())
        if not xml:
            #'http://www.pdb.org/pdb/files/ligand/{0}.xml'
            try:
                inp = openURL(url)
            except IOError:
                raise IOError('XML file for ligand {0} is not found online'
                              .format(cci))
            else:
                xml = inp.read()
                inp.close()
            if filename:
                out = openFile(filename, mode='w', folder=folder)
                out.write(xml)
                out.close()
            if SETTINGS.get('ligand_xml_save'):
                with openFile(xmlgz, 'w') as out:
                    out.write(xml)

    # cElementTree was removed in Python 3.9; ElementTree is equivalent
    import xml.etree.ElementTree as ET

    root = ET.XML(xml)
    if (root.get('{http://www.w3.org/2001/XMLSchema-instance}'
                 'schemaLocation') !=
            'http://pdbml.pdb.org/schema/pdbx-v40.xsd pdbx-v40.xsd'):
        LOGGER.warn('XML is not in PDBx/PDBML v 4.0 format, resulting '
                    'dictionary may not contain all data fields')
    ns = root.tag[:root.tag.rfind('}') + 1]
    len_ns = len(ns)
    dict_ = {'url': url, 'path': path}

    for child in list(root.find(ns + 'chem_compCategory')[0]):
        tag = child.tag[len_ns:]
        if tag.startswith('pdbx_'):
            tag = tag[5:]
        dict_[tag] = child.text
    weight = dict_.get('formula_weight')
    if weight is not None:  # guard: this field may be absent from the XML
        dict_['formula_weight'] = float(weight)

    identifiers_and_descriptors = []
    results = root.find(ns + 'pdbx_chem_comp_identifierCategory')
    if results:
        identifiers_and_descriptors.extend(results)
    results = root.find(ns + 'pdbx_chem_comp_descriptorCategory')
    if results:
        identifiers_and_descriptors.extend(results)
    for child in identifiers_and_descriptors:
        program = child.get('program').replace(' ', '_')
        type_ = child.get('type').replace(' ', '_')
        dict_[program + '_' + type_] = child[0].text
        dict_[program + '_version'] = child.get('program_version')
    dict_['audits'] = [(audit.get('action_type'), audit.get('date'))
                       for audit in
                       list(root.find(ns + 'pdbx_chem_comp_auditCategory'))]

    atoms = list(root.find(ns + 'chem_comp_atomCategory'))
    n_atoms = len(atoms)
    ideal_coords = np.zeros((n_atoms, 3))
    model_coords = np.zeros((n_atoms, 3))

    atomnames = np.zeros(n_atoms, dtype=ATOMIC_FIELDS['name'].dtype)
    elements = np.zeros(n_atoms, dtype=ATOMIC_FIELDS['element'].dtype)
    resnames = np.zeros(n_atoms, dtype=ATOMIC_FIELDS['resname'].dtype)
    charges = np.zeros(n_atoms, dtype=ATOMIC_FIELDS['charge'].dtype)
    # was ATOMIC_FIELDS['charge'].dtype, an apparent copy-paste slip
    resnums = np.ones(n_atoms, dtype=ATOMIC_FIELDS['resnum'].dtype)

    alternate_atomnames = np.zeros(n_atoms, dtype=ATOMIC_FIELDS['name'].dtype)
    # np.bool was removed in NumPy 1.24; the builtin bool is equivalent
    leaving_atom_flags = np.zeros(n_atoms, bool)
    aromatic_flags = np.zeros(n_atoms, bool)
    stereo_configs = np.zeros(n_atoms, bool)
    ordinals = np.zeros(n_atoms, int)

    name2index = {}

    for i, atom in enumerate(atoms):
        data = dict([(child.tag[len_ns:], child.text)
                     for child in list(atom)])

        name = data.get('pdbx_component_atom_id', 'X')
        name2index[name] = i
        atomnames[i] = name
        elements[i] = data.get('type_symbol', 'X')
        resnames[i] = data.get('pdbx_component_comp_id', 'UNK')
        charges[i] = float(data.get('charge', 0))

        alternate_atomnames[i] = data.get('alt_atom_id', 'X')
        leaving_atom_flags[i] = data.get('pdbx_leaving_atom_flag') == 'Y'
        # field name corrected from the 'pdbx_atomatic_flag' typo
        aromatic_flags[i] = data.get('pdbx_aromatic_flag') == 'Y'
        stereo_configs[i] = data.get('pdbx_stereo_config') == 'Y'
        ordinals[i] = int(data.get('pdbx_ordinal', 0))

        model_coords[i, 0] = float(data.get('model_Cartn_x', 0))
        model_coords[i, 1] = float(data.get('model_Cartn_y', 0))
        model_coords[i, 2] = float(data.get('model_Cartn_z', 0))
        ideal_coords[i, 0] = float(data.get('pdbx_model_Cartn_x_ideal', 0))
        ideal_coords[i, 1] = float(data.get('pdbx_model_Cartn_y_ideal', 0))
        ideal_coords[i, 2] = float(data.get('pdbx_model_Cartn_z_ideal', 0))

    pdbid = dict_.get('model_coordinates_db_code')
    if pdbid:
        model = AtomGroup(cci + ' model ({0})'.format(pdbid))
    else:
        model = AtomGroup(cci + ' model')
    model.setCoords(model_coords)
    model.setNames(atomnames)
    model.setResnames(resnames)
    model.setResnums(resnums)
    model.setElements(elements)
    model.setCharges(charges)
    model.setFlags('leaving_atom_flags', leaving_atom_flags)
    model.setFlags('aromatic_flags', aromatic_flags)
    model.setFlags('stereo_configs', stereo_configs)
    model.setData('ordinals', ordinals)
    model.setData('alternate_atomnames', alternate_atomnames)
    dict_['model'] = model
    ideal = model.copy()
    ideal.setTitle(cci + ' ideal')
    ideal.setCoords(ideal_coords)
    dict_['ideal'] = ideal

    bonds = []
    warned = set()
    for bond in list(root.find(ns + 'chem_comp_bondCategory') or bonds):
        name_1 = bond.get('atom_id_1')
        name_2 = bond.get('atom_id_2')
        try:
            bonds.append((name2index[name_1], name2index[name_2]))
        except KeyError:
            if name_1 not in warned and name_1 not in name2index:
                warned.add(name_1)
                LOGGER.warn('{0} specified in {1} bond category is not '
                            'a valid atom name.'.format(repr(name_1), cci))
            if name_2 not in warned and name_2 not in name2index:
                warned.add(name_2)
                LOGGER.warn('{0} specified in {1} bond category is not '
                            'a valid atom name.'.format(repr(name_2), cci))
    if bonds:
        bonds = np.array(bonds, int)
        model.setBonds(bonds)
        ideal.setBonds(bonds)
    return dict_
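# The docstring example as a plain sketch (network access required; the
# RMSD value depends on the current Ligand Expo data):
#
#     ligand_data = fetchPDBLigand('STI')
#     model, ideal = ligand_data['model'], ligand_data['ideal']
#     superpose(ideal.noh, model.noh)      # align ideal onto model
#     print(calcRMSD(ideal.noh, model.noh))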
def fetchBIRDviaFTP(**kwargs):
    """Retrieve the whole Biologically Interesting Molecule Reference
    Dictionary (BIRD) resource, which is updated every week.  This includes
    2 kinds of keys, which can be selected with the **keys** keyword
    argument.

    The chemical information is found in a gzipped CIF file at
    https://files.rcsb.org/pub/pdb/data/bird/prd/prd-all.cif.gz, which
    collects the individual PRD entries.  This data will be downloaded to
    :file:`.prody/bird`.

    Biological function information is likewise found in a gzipped CIF file
    at https://files.rcsb.org/pub/pdb/data/bird/family/family-all.cif.gz,
    which collects the individual family entries.  This data will also be
    downloaded to :file:`.prody/bird`.

    :arg keys: keys specifying which data to fetch out of ``'prd'``,
        ``'family'`` or ``'both'``; default is ``'both'``
    :type keys: str, tuple, list, :class:`~numpy.ndarray`

    The underlying data can be accessed using :func:`parseBIRD`."""

    BIRD_PATH = os.path.join(getPackagePath(), 'bird')

    keys = kwargs.get('keys', 'both')
    if isinstance(keys, str):
        if keys == 'both':
            keys = ['prd', 'family']
        elif keys[:3].lower() == 'prd':
            keys = ['prd']
        elif keys[:3].lower() == 'fam':
            keys = ['family']
        else:
            raise ValueError("keys should be 'both', 'prd' or 'fam'")
    elif isListLike(keys):
        keys = list(keys)
    else:
        raise TypeError("keys should be list-like or a string")

    ftp_divided = 'pdb/data/bird/'
    ftp_pdbext = '.cif.gz'
    ftp_prefix = ''

    if not os.path.isdir(BIRD_PATH):
        os.mkdir(BIRD_PATH)

    LOGGER.progress('Downloading BIRD', len(keys), '_prody_fetchBIRD')

    ftp_name, ftp_host, ftp_path = WWPDB_FTP_SERVERS[wwPDBServer() or 'us']
    LOGGER.debug('Connecting wwPDB FTP server {0}.'.format(ftp_name))

    from ftplib import FTP
    try:
        ftp = FTP(ftp_host)
    except Exception as error:
        raise type(error)('FTP connection problem, potential reason: '
                          'no internet connectivity')
    else:
        count = 0
        success = 0
        failure = 0
        filenames = []
        ftp.login('')
        for i, x in enumerate(keys):
            data = []
            ftp_fn = ftp_prefix + '{0}-all'.format(x) + ftp_pdbext
            try:
                ftp.cwd(ftp_path)
                ftp.cwd(ftp_divided)
                ftp.cwd(x)
                ftp.retrbinary('RETR ' + ftp_fn, data.append)
            except Exception as error:
                if ftp_fn in ftp.nlst():
                    LOGGER.warn('{0} download failed ({1}). It is '
                                'possible that you do not have rights to '
                                'download .gz files in the current network.'
                                .format(x, str(error)))
                else:
                    LOGGER.info('{0} download failed. {1} does not exist '
                                'on {2}.'.format(ftp_fn, x, ftp_host))
                failure += 1
                filenames.append(None)
            else:
                if len(data):
                    filename = BIRD_PATH + '/{0}-all.cif.gz'.format(x)
                    with open(filename, 'w+b') as outfile:
                        for block in data:
                            outfile.write(block)
                    success += 1
                else:
                    failure += 1
            count += 1
            LOGGER.update(i, label='_prody_fetchBIRD')
        LOGGER.finish()

    LOGGER.debug('BIRD download via FTP completed ({0} downloaded, '
                 '{1} failed).'.format(success, failure))
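# Sketch: download only the PRD chemical dictionary; the file lands in
# <package path>/bird/prd-all.cif.gz and is read back by parseBIRD below:
#
#     fetchBIRDviaFTP(keys='prd')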
def parseBIRD(*ids, **kwargs):
    """Parse data from the Biologically Interesting Molecule Reference
    Dictionary (BIRD) resource, which is updated every week.  This includes
    2 kinds of keys, which can be selected with the **key** keyword
    argument.

    The chemical information is found in a single CIF file at
    https://files.rcsb.org/pub/pdb/data/bird/prd/prd-all.cif.gz.
    This data will be downloaded to :file:`.prody/bird`.

    Biological function information is also found in a single CIF file at
    https://files.rcsb.org/pub/pdb/data/bird/family/family-all.cif.gz.
    This data will be downloaded to :file:`.prody/bird` as well.

    Individual compounds can be selected using **ids**.  If needed, BIRD
    files are downloaded using the :func:`.fetchBIRDviaFTP` function.  You
    can also provide arguments that you would like passed on to
    fetchBIRDviaFTP.

    :arg ids: one BIRD identifier (starting with PRD or FAM) or a list of
        them.  If **None** is provided then all of them are returned.
    :type ids: str, tuple, list, :class:`~numpy.ndarray`, **None**

    :arg key: key specifying which data to fetch out of ``'prd'`` or
        ``'family'``; default is ``'prd'``
    :type key: str

    Returns a :class:`.StarDataBlock` object or a list of them.
    """
    key = kwargs.get('key', 'prd')
    if not isinstance(key, str):
        raise TypeError("key should be a string")
    if key[:3].lower() == 'prd':
        key = 'prd'
    elif key[:3].lower() == 'fam':
        key = 'family'
    else:
        raise ValueError("key should be 'prd' or 'fam'")

    n_ids = len(ids)
    if n_ids == 1:
        if isListLike(ids[0]):
            ids = ids[0]
            n_ids = len(ids)

    if n_ids == 1:
        ids = list(ids)

    BIRD_PATH = os.path.join(getPackagePath(), 'bird')
    filename = BIRD_PATH + '/{0}-all.cif.gz'.format(key)
    if not os.path.isfile(filename):
        fetchBIRDviaFTP(keys=key, **kwargs)

    data = parseSTAR(filename, shlex=True)
    ret = []
    for id in ids:
        try:
            ret.append(data.search(id)[0])
        except ValueError:
            try:
                ret.append(data[id])
            except ValueError:
                LOGGER.warn('id {0} not found in {1} data '
                            'so appending None'.format(id, key))
                ret.append(None)

    if n_ids == 1:
        return ret[0]
    return ret
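# Sketch (identifiers are illustrative; missing files are fetched on
# demand via fetchBIRDviaFTP):
#
#     prd = parseBIRD('PRD_000001')                # one StarDataBlock
#     fam = parseBIRD('FAM_000001', key='family')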