class CifAtom(object):
    """Write "cifatom" files.

    These are partial cif files that contain all atoms, including those
    created by symmetry operations.  They differ from normal cif files in
    that they only contain the atom_site block, and this block differs
    from the one in standard cif files:

    1. The file contains all atoms, even those produced by rotations.
    2. All label_* and auth_* fields will be the same and will hold the
       values used to compute the unit id of the atom.
    3. Entity id is '?'.
    4. All *_esd, charge, *_esi, occupancy and such entries are '?'.
    5. There is a final extra field holding the component unit id for
       each atom.
    """

    def __init__(self, handle):
        # Wrap the output file handle in the cif Writer used by __call__.
        self.writer = Writer(handle)

    def atom_container(self, structure):
        """Build the atom_site DataCategory for *structure*.

        Collects the atoms of every residue (polymeric or not), sorts them
        by (symmetry, model, chain, component number, insertion code), and
        appends one row per atom.  Unknowable fields are written as '?',
        per the class docstring.
        """
        atoms = DataCategory('atom_site')
        # Column order here must match the per-atom `data` rows below.
        fields = ['group_PDB', 'id', 'type_symbol', 'label_atom_id',
                  'label_alt_id', 'label_comp_id', 'label_asym_id',
                  'label_entity_id', 'label_seq_id', 'pdbx_PDB_ins_code',
                  'Cartn_x', 'Cartn_y', 'Cartn_z', 'occupancy',
                  'B_iso_or_equiv', 'Cartn_x_esd', 'Cartn_y_esd',
                  'Cartn_z_esd', 'occupancy_ies', 'B_iso_or_equiv_esd',
                  'pdbx_formal_charge', 'auth_seq_id', 'auth_comp_id',
                  'auth_asym_id', 'auth_atom_id', 'pdbx_PDB_model_num',
                  'unit_id']
        for field in fields:
            atoms.appendAttribute(field)

        def key(atom):
            # Sort order of atoms in the written block.
            return (atom.symmetry, atom.model, atom.chain,
                    atom.component_number, atom.insertion_code)

        # Python 2 itertools.imap: lazily gather atoms from all residues.
        all_atoms = it.imap(lambda r: r.atoms(),
                            structure.residues(polymeric=None))
        all_atoms = it.chain.from_iterable(all_atoms)
        for index, atom in enumerate(sorted(all_atoms, key=key)):
            # Atoms without an explicit alternate id are written as '.'.
            alt_id = getattr(atom, 'alt_id', '.')
            data = [atom.group, index, atom.type, atom.name, alt_id,
                    atom.component_id, atom.chain, '?',
                    atom.component_number, atom.insertion_code,
                    atom.x, atom.y, atom.z,
                    '?', '?', '?', '?', '?', '?', '?', '.',
                    atom.component_number, atom.component_id, atom.chain,
                    atom.name, atom.model, atom.component_unit_id()]
            atoms.append(data)
        return atoms

    def __call__(self, structure):
        """Write the atom_site block for *structure* to the handle."""
        atoms = self.atom_container(structure)
        container = DataContainer(structure.pdb)
        container.append(atoms)
        self.writer.writeContainer(container)
def testWriteDataFile(self):
    """Test case - write data file.

    Builds a small pdbx_seqtool_mapping_ref category, prints it, and
    writes it to test-output.cif via PdbxWriter.
    """
    self.lfh.write("\nStarting %s %s\n" % (self.__class__.__name__,
                                           sys._getframe().f_code.co_name))
    try:
        # BUG FIX: myDataList was commented out but is passed to
        # pdbxW.write() below; the resulting NameError was swallowed by
        # the bare except and turned into an unconditional self.fail().
        myDataList = []
        ofh = open("test-output.cif", "w")
        curContainer = DataContainer("myblock")
        aCat = DataCategory("pdbx_seqtool_mapping_ref")
        aCat.appendAttribute("ordinal")
        aCat.appendAttribute("entity_id")
        aCat.appendAttribute("auth_mon_id")
        aCat.appendAttribute("auth_mon_num")
        aCat.appendAttribute("pdb_chain_id")
        aCat.appendAttribute("ref_mon_id")
        aCat.appendAttribute("ref_mon_num")
        aCat.append([1, 2, 3, 4, 5, 6, 7])
        aCat.append([1, 2, 3, 4, 5, 6, 7])
        aCat.append([1, 2, 3, 4, 5, 6, 7])
        aCat.append([1, 2, 3, 4, 5, 6, 7])
        aCat.append([7, 6, 5, 4, 3, 2, 1])
        aCat.printIt()
        curContainer.append(aCat)
        curContainer.printIt()
        # BUG FIX: the container must be collected before writing.
        myDataList.append(curContainer)
        pdbxW = PdbxWriter(ofh)
        pdbxW.write(myDataList)
        ofh.close()
    except:
        traceback.print_exc(file=self.lfh)
        self.fail()
def testWriteDataFile(self):
    """Test case - write data file.

    Builds a pdbx_seqtool_mapping_ref category with four identical rows
    and writes it to test-output.cif via PdbxWriter.
    """
    self.lfh.write(
        "\nStarting %s %s\n" %
        (self.__class__.__name__, sys._getframe().f_code.co_name))
    try:
        # BUG FIX: myDataList was commented out, yet appended to and
        # passed to pdbxW.write() below; the NameError was swallowed by
        # the bare except and turned into an unconditional self.fail().
        myDataList = []
        ofh = open("test-output.cif", "w")
        curContainer = DataContainer("myblock")
        aCat = DataCategory("pdbx_seqtool_mapping_ref")
        aCat.appendAttribute("ordinal")
        aCat.appendAttribute("entity_id")
        aCat.appendAttribute("auth_mon_id")
        aCat.appendAttribute("auth_mon_num")
        aCat.appendAttribute("pdb_chain_id")
        aCat.appendAttribute("ref_mon_id")
        aCat.appendAttribute("ref_mon_num")
        aCat.append((1, 2, 3, 4, 5, 6, 7))
        aCat.append((1, 2, 3, 4, 5, 6, 7))
        aCat.append((1, 2, 3, 4, 5, 6, 7))
        aCat.append((1, 2, 3, 4, 5, 6, 7))
        curContainer.append(aCat)
        myDataList.append(curContainer)
        pdbxW = PdbxWriter(ofh)
        pdbxW.write(myDataList)
        ofh.close()
    except:
        # Consistency fix: log to self.lfh like every other test in the
        # suite (was sys.stderr).
        traceback.print_exc(file=self.lfh)
        self.fail()
class CifAtom(object):
    """Write "cifatom" files.

    These are partial cif files that contain all atoms, including those
    created by symmetry operations.  They differ from normal cif files in
    that they contain only the atom_site block, and that block differs
    from the one in standard cif files:

    1. The file contains all atoms, even those produced by rotations.
    2. All label_* and auth_* fields will be the same and will hold the
       values used to compute the unit id of the atom.
    3. Entity id is '?'.
    4. All *_esd, charge, *_esi, occupancy and such entries are '?'.
    5. A final extra field holds the component unit id for each atom.
    """

    def __init__(self, handle):
        # Wrap the output file handle in the cif Writer used by __call__.
        self.writer = Writer(handle)

    def atom_container(self, structure):
        """Build the atom_site DataCategory for *structure*.

        Collects atoms from every residue (polymeric or not), sorts them
        by (symmetry, model, chain, component number, insertion code),
        and appends one row per atom; unknowable fields become '?'.
        """
        atoms = DataCategory('atom_site')
        # Column order must match the per-atom `data` rows below.
        fields = [
            'group_PDB', 'id', 'type_symbol', 'label_atom_id',
            'label_alt_id', 'label_comp_id', 'label_asym_id',
            'label_entity_id', 'label_seq_id', 'pdbx_PDB_ins_code',
            'Cartn_x', 'Cartn_y', 'Cartn_z', 'occupancy',
            'B_iso_or_equiv', 'Cartn_x_esd', 'Cartn_y_esd',
            'Cartn_z_esd', 'occupancy_ies', 'B_iso_or_equiv_esd',
            'pdbx_formal_charge', 'auth_seq_id', 'auth_comp_id',
            'auth_asym_id', 'auth_atom_id', 'pdbx_PDB_model_num',
            'unit_id'
        ]
        for field in fields:
            atoms.appendAttribute(field)

        def key(atom):
            # Sort order of atoms in the written block.
            return (atom.symmetry, atom.model, atom.chain,
                    atom.component_number, atom.insertion_code)

        # Python 2 itertools.imap: lazily gather atoms from all residues.
        all_atoms = it.imap(lambda r: r.atoms(),
                            structure.residues(polymeric=None))
        all_atoms = it.chain.from_iterable(all_atoms)
        for index, atom in enumerate(sorted(all_atoms, key=key)):
            # Atoms without an explicit alternate id are written as '.'.
            alt_id = getattr(atom, 'alt_id', '.')
            data = [
                atom.group, index, atom.type, atom.name, alt_id,
                atom.component_id, atom.chain, '?',
                atom.component_number, atom.insertion_code,
                atom.x, atom.y, atom.z,
                '?', '?', '?', '?', '?', '?', '?', '.',
                atom.component_number, atom.component_id, atom.chain,
                atom.name, atom.model, atom.component_unit_id()
            ]
            atoms.append(data)
        return atoms

    def __call__(self, structure):
        """Write the atom_site block for *structure* to the handle."""
        atoms = self.atom_container(structure)
        container = DataContainer(structure.pdb)
        container.append(atoms)
        self.writer.writeContainer(container)
def testSimpleInitialization(self):
    """Test case - Simple initialization of a data category and data block."""
    self.lfh.write(
        "\nStarting %s %s\n" %
        (self.__class__.__name__, sys._getframe().f_code.co_name))
    try:
        fn = "test-simple.cif"
        attributeNameList = ['aOne', 'aTwo', 'aThree', 'aFour', 'aFive',
                             'aSix', 'aSeven', 'aEight', 'aNine', 'aTen']
        # Ten identical rows of the values 1..10.
        rowList = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10] for _ in range(10)]
        nameCat = 'myCategory'

        # Build a single-category data block and dump it for inspection.
        curContainer = DataContainer("myblock")
        aCat = DataCategory(nameCat, attributeNameList, rowList)
        aCat.printIt()
        curContainer.append(aCat)
        curContainer.printIt()

        # Round-trip: write the container list out ...
        myContainerList = [curContainer]
        ofh = open(fn, "w")
        PdbxWriter(ofh).write(myContainerList)
        ofh.close()

        # ... then read it back into a fresh list.
        myContainerList = []
        ifh = open(fn, "r")
        PdbxReader(ifh).read(myContainerList)
        ifh.close()

        # Log what was recovered, category by category.
        for container in myContainerList:
            for objName in container.getObjNameList():
                name, aList, rList = container.getObj(objName).get()
                self.lfh.write("Recovered data category %s\n" % name)
                self.lfh.write("Attribute list %r\n" % repr(aList))
                self.lfh.write("Row list %r\n" % repr(rList))
    except:
        traceback.print_exc(file=self.lfh)
        self.fail()
def testUpdateDataFile(self):
    """Test case - update data file.

    Writes a small category to test-output-1.cif, reads it back, edits
    two columns of every row in memory, and writes the result to
    test-output-2.cif.
    """
    self.lfh.write(
        "\nStarting %s %s\n" %
        (self.__class__.__name__, sys._getframe().f_code.co_name))
    try:
        # Create a initial data file --
        #
        myDataList = []
        curContainer = DataContainer("myblock")
        aCat = DataCategory("pdbx_seqtool_mapping_ref")
        aCat.appendAttribute("ordinal")
        aCat.appendAttribute("entity_id")
        aCat.appendAttribute("auth_mon_id")
        aCat.appendAttribute("auth_mon_num")
        aCat.appendAttribute("pdb_chain_id")
        aCat.appendAttribute("ref_mon_id")
        aCat.appendAttribute("ref_mon_num")
        # Four rows, distinguished only by their ordinal (9..12).
        aCat.append([9, 2, 3, 4, 5, 6, 7])
        aCat.append([10, 2, 3, 4, 5, 6, 7])
        aCat.append([11, 2, 3, 4, 5, 6, 7])
        aCat.append([12, 2, 3, 4, 5, 6, 7])
        #self.lfh.write("Assigned data category state-----------------\n")
        #aCat.dumpIt(fh=self.lfh)
        curContainer.append(aCat)
        myDataList.append(curContainer)
        ofh = open("test-output-1.cif", "w")
        pdbxW = PdbxWriter(ofh)
        pdbxW.write(myDataList)
        ofh.close()
        #
        #
        # Read and update the data -
        #
        myDataList = []
        ifh = open("test-output-1.cif", "r")
        pRd = PdbxReader(ifh)
        pRd.read(myDataList)
        ifh.close()
        #
        myBlock = myDataList[0]
        myBlock.printIt()
        myCat = myBlock.getObj('pdbx_seqtool_mapping_ref')
        myCat.printIt()
        # Overwrite two columns of every row (Python 2 xrange).
        for iRow in xrange(0, myCat.getRowCount()):
            myCat.setValue('some value', 'ref_mon_id', iRow)
            myCat.setValue(100, 'ref_mon_num', iRow)
        ofh = open("test-output-2.cif", "w")
        pdbxW = PdbxWriter(ofh)
        pdbxW.write(myDataList)
        ofh.close()
        #
    except:
        traceback.print_exc(file=self.lfh)
        self.fail()
def testSimpleInitialization(self):
    """Test case - Simple initialization of a data category and data block."""
    self.lfh.write("\nStarting %s %s\n" % (self.__class__.__name__,
                                           sys._getframe().f_code.co_name))
    try:
        fn = "test-simple.cif"
        attributeNameList = ['aOne', 'aTwo', 'aThree', 'aFour', 'aFive',
                             'aSix', 'aSeven', 'aEight', 'aNine', 'aTen']
        # Ten identical rows holding the values 1..10.
        row = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        rowList = [list(row) for _ in range(10)]
        nameCat = 'myCategory'

        # Assemble one data block containing a single category.
        curContainer = DataContainer("myblock")
        aCat = DataCategory(nameCat, attributeNameList, rowList)
        aCat.printIt()
        curContainer.append(aCat)
        curContainer.printIt()

        # Write the block out to disk ...
        myContainerList = [curContainer]
        ofh = open(fn, "w")
        PdbxWriter(ofh).write(myContainerList)
        ofh.close()

        # ... and parse it back in.
        myContainerList = []
        ifh = open(fn, "r")
        PdbxReader(ifh).read(myContainerList)
        ifh.close()

        # Report everything that survived the round trip.
        for container in myContainerList:
            for objName in container.getObjNameList():
                name, aList, rList = container.getObj(objName).get()
                self.lfh.write("Recovered data category %s\n" % name)
                self.lfh.write("Attribute list %r\n" % repr(aList))
                self.lfh.write("Row list %r\n" % repr(rList))
    except:
        traceback.print_exc(file=self.lfh)
        self.fail()
def testReadWriteDataFile(self):
    """Test case - data file read write test."""
    self.lfh.write("\nStarting %s %s\n" % (self.__class__.__name__,
                                           sys._getframe().f_code.co_name))
    try:
        # Parse the reference data file into a list of containers,
        # then serialize that list straight back out.
        myDataList = []
        ifh = open(self.pathPdbxDataFile, "r")
        PdbxReader(ifh).read(myDataList)
        ifh.close()

        ofh = open(self.pathOutputFile, "w")
        PdbxWriter(ofh).write(myDataList)
        ofh.close()
    except:
        traceback.print_exc(file=self.lfh)
        self.fail()
def testReadWriteDataFile(self):
    """Test case - data file read write test.

    Round-trips self.pathPdbxDataFile through PdbxReader/PdbxWriter,
    writing the result to self.pathOutputFile.
    """
    self.lfh.write(
        "\nStarting %s %s\n" %
        (self.__class__.__name__, sys._getframe().f_code.co_name))
    try:
        myDataList = []
        ifh = open(self.pathPdbxDataFile, "r")
        pRd = PdbxReader(ifh)
        pRd.read(myDataList)
        ifh.close()

        ofh = open(self.pathOutputFile, "w")
        pWr = PdbxWriter(ofh)
        pWr.write(myDataList)
        ofh.close()
    except:
        traceback.print_exc(file=self.lfh)
        self.fail()
def testUpdateDataFile(self):
    """Test case - update data file.

    Writes a small category to test-output-1.cif, reads it back, updates
    two columns of every row, and writes test-output-2.cif.
    """
    self.lfh.write("\nStarting %s %s\n" % (self.__class__.__name__,
                                           sys._getframe().f_code.co_name))
    try:
        # Create a initial data file --
        #
        myDataList = []
        curContainer = DataContainer("myblock")
        aCat = DataCategory("pdbx_seqtool_mapping_ref")
        aCat.appendAttribute("ordinal")
        aCat.appendAttribute("entity_id")
        aCat.appendAttribute("auth_mon_id")
        aCat.appendAttribute("auth_mon_num")
        aCat.appendAttribute("pdb_chain_id")
        aCat.appendAttribute("ref_mon_id")
        aCat.appendAttribute("ref_mon_num")
        # Four rows, distinguished only by their ordinal (9..12).
        aCat.append([9, 2, 3, 4, 5, 6, 7])
        aCat.append([10, 2, 3, 4, 5, 6, 7])
        aCat.append([11, 2, 3, 4, 5, 6, 7])
        aCat.append([12, 2, 3, 4, 5, 6, 7])
        #self.lfh.write("Assigned data category state-----------------\n")
        #aCat.dumpIt(fh=self.lfh)
        curContainer.append(aCat)
        myDataList.append(curContainer)
        ofh = open("test-output-1.cif", "w")
        pdbxW = PdbxWriter(ofh)
        pdbxW.write(myDataList)
        ofh.close()
        #
        #
        # Read and update the data -
        #
        myDataList = []
        ifh = open("test-output-1.cif", "r")
        pRd = PdbxReader(ifh)
        pRd.read(myDataList)
        ifh.close()
        #
        myBlock = myDataList[0]
        myBlock.printIt()
        myCat = myBlock.getObj('pdbx_seqtool_mapping_ref')
        myCat.printIt()
        # Overwrite two columns in every row (Python 2 xrange).
        for iRow in xrange(0, myCat.getRowCount()):
            myCat.setValue('some value', 'ref_mon_id', iRow)
            myCat.setValue(100, 'ref_mon_num', iRow)
        ofh = open("test-output-2.cif", "w")
        pdbxW = PdbxWriter(ofh)
        pdbxW.write(myDataList)
        ofh.close()
        #
    except:
        traceback.print_exc(file=self.lfh)
        self.fail()
def vis(argv):
    """dip-c "vis" subcommand: convert a .3dg structure to mmCIF on stdout.

    argv follows the C convention: argv[0] is the subcommand name,
    argv[1:] holds the options and the input file.  Returns 1 on usage
    error, 0 on success.
    """
    # default parameters
    color_file_name = None
    missing_value = -1.0
    discard_missing = False

    # read arguments
    try:
        opts, args = getopt.getopt(argv[1:], "c:m:M")
    except getopt.GetoptError as err:
        sys.stderr.write("[E::" + __name__ + "] unknown command\n")
        return 1
    if len(args) == 0:
        # No input file: print usage and the output format description.
        sys.stderr.write("Usage: dip-c vis [options] <in.3dg>\n")
        sys.stderr.write("Options:\n")
        sys.stderr.write(
            " -c <color.txt> color by a list of locus-color pairs (tab-delimited: homolog, locus, color)\n"
        )
        sys.stderr.write(
            " -m FLOAT color for particles that are missing from the color scheme ["
            + str(missing_value) + "]\n")
        sys.stderr.write(
            " -M discard particles that are missing from the color scheme\n\n"
        )
        sys.stderr.write("Output mmCIF format:\n")
        sys.stderr.write(
            " label_asym_id homolog name (e.g. \"1(mat)\")\n")
        sys.stderr.write(
            " label_comp_id locus // 1 Mb, 3 digits with leading zeros\n")
        sys.stderr.write(" label_seq_id 1\n")
        sys.stderr.write(
            " label_atom_id locus % 1 Mb // 1 kb, 3 digits with leading zeros\n"
        )
        sys.stderr.write(" B_iso_or_equiv scalar color\n")
        sys.stderr.write(" covale backbone bond\n")
        return 1
    # NOTE(review): num_color_schemes is never used after this point.
    num_color_schemes = 0
    for o, a in opts:
        if o == "-m":
            missing_value = float(a)
        elif o == "-c":
            color_file_name = a
        elif o == "-M":
            discard_missing = True

    # read 3DG file
    g3d_data = file_to_g3d_data(open(args[0], "rb"))
    g3d_data.sort_g3d_particles()
    g3d_resolution = g3d_data.resolution()
    sys.stderr.write(
        "[M::" + __name__ + "] read a 3D structure with "
        + str(g3d_data.num_g3d_particles()) + " particles at "
        + ("N.A." if g3d_resolution is None else str(g3d_resolution))
        + " bp resolution\n")

    # read color file: tab-delimited (homolog, locus, color) triples
    # keyed by (homolog name, reference locus).
    color_data = {}
    if not color_file_name is None:
        color_file = open(color_file_name, "rb")
        for color_file_line in color_file:
            hom_name, ref_locus, color = color_file_line.strip().split("\t")
            ref_locus = int(ref_locus)
            color = float(color)
            color_data[(hom_name, ref_locus)] = color

    # open mmCIF file to write: one atom_site row per particle and one
    # struct_conn row per backbone bond.
    myDataList = []
    curContainer = DataContainer("myblock")
    aCat = DataCategory("atom_site")
    aCat.appendAttribute("group_PDB")
    aCat.appendAttribute("type_symbol")
    aCat.appendAttribute("id")
    aCat.appendAttribute("label_asym_id")
    aCat.appendAttribute("label_comp_id")
    aCat.appendAttribute("label_seq_id")
    aCat.appendAttribute("label_atom_id")
    aCat.appendAttribute("Cartn_x")
    aCat.appendAttribute("Cartn_y")
    aCat.appendAttribute("Cartn_z")
    aCat.appendAttribute("B_iso_or_equiv")
    sCat = DataCategory("struct_conn")
    sCat.appendAttribute("id")
    sCat.appendAttribute("conn_type_id")
    sCat.appendAttribute("ptnr1_label_asym_id")
    sCat.appendAttribute("ptnr1_label_comp_id")
    sCat.appendAttribute("ptnr1_label_seq_id")
    sCat.appendAttribute("ptnr1_label_atom_id")
    sCat.appendAttribute("ptnr2_label_asym_id")
    sCat.appendAttribute("ptnr2_label_comp_id")
    sCat.appendAttribute("ptnr2_label_seq_id")
    sCat.appendAttribute("ptnr2_label_atom_id")

    # write atoms; particles absent from the color scheme either get
    # missing_value as their color or are dropped entirely (-M).
    atom_id = 0
    for g3d_particle in g3d_data.get_g3d_particles():
        atom_id += 1
        try:
            color = color_data[(g3d_particle.get_hom_name(),
                                g3d_particle.get_ref_locus())]
        except KeyError:
            if discard_missing:
                continue
            color = missing_value
        aCat.append(g3d_particle_to_atom_data(g3d_particle, atom_id, color))

    # write backbond bonds between particles adjacent at the structure's
    # resolution.
    conn_id = 0
    for g3d_particle_tuple in g3d_data.get_adjacent_g3d_particle_tuples(
            g3d_resolution):
        conn_id += 1
        sCat.append(
            g3d_particle_tuple_to_conn_data(g3d_particle_tuple, conn_id))

    # write output
    curContainer.append(sCat)
    curContainer.append(aCat)
    myDataList.append(curContainer)
    pdbxW = PdbxWriter(sys.stdout)
    pdbxW.write(myDataList)

    return 0
# NOTE(review): fragment — the per-line statements below appear to be the
# body of a loop over parsed PDB lines (inputPdbLineData, inputDipChr,
# previousChr, etc. are defined by the enclosing, not-visible code), with
# the final container/writer statements running after that loop — confirm
# against the full script before relying on this structure.
# Left-pad the position so it splits into fixed-width locus fields.
inputPosString = inputPdbLineData[1].rjust(9, '0')
inputPosValue = int(inputPdbLineData[1])
inputPosList = [inputPosString[0:3], 1, inputPosString[3:6]]
# for using position as B factor
#inputBFactor = float(inputPdbLineData[1])/chrLengths[inputDipChr - 1]
# for using external bedgraph file as B factor
inputBFactor = 0
if (inputDipChr, inputPosValue) in bFactorData:
    inputBFactor = bFactorData[(inputDipChr, inputPosValue)]
aCat.append(('HETATM', atomId, inputChrName, inputPosList[0],
             inputPosList[1], inputPosList[2], inputPdbLineData[2],
             inputPdbLineData[3], inputPdbLineData[4], inputBFactor))
atomId += 1
# Connect consecutive loci on the same chromosome with a covalent bond.
if inputChr == previousChr and inputPosValue - previousPosValue == resolution:
    sCat.append(('covale', inputChrName, previousPosList[0],
                 previousPosList[1], previousPosList[2], inputChrName,
                 inputPosList[0], inputPosList[1], inputPosList[2]))
previousChr = inputChr
previousPosValue = inputPosValue
previousPosList = copy.copy(inputPosList)
# After the loop: assemble the container and write the mmCIF output.
curContainer.append(sCat)
curContainer.append(aCat)
myDataList.append(curContainer)
pdbxW = PdbxWriter(ofh)
pdbxW.write(myDataList)
ofh.close()
def __init__(self, handle):
    # Wrap the output file handle in the cif Writer used for all writes.
    self.writer = Writer(handle)
# NOTE(review): fragment — aCat, curContainer and myDataList are created
# by earlier, not-visible code; confirm against the full script.
# struct_conn category: declared for output symmetry but never filled here.
sCat = DataCategory("struct_conn")
sCat.appendAttribute("id")
sCat.appendAttribute("conn_type_id")
sCat.appendAttribute("ptnr1_label_asym_id")
sCat.appendAttribute("ptnr1_label_comp_id")
sCat.appendAttribute("ptnr1_label_seq_id")
sCat.appendAttribute("ptnr1_label_atom_id")
sCat.appendAttribute("ptnr2_label_asym_id")
sCat.appendAttribute("ptnr2_label_comp_id")
sCat.appendAttribute("ptnr2_label_seq_id")
sCat.appendAttribute("ptnr2_label_atom_id")
# write atoms
atom_id = 0
for cen_tel_file_line in open(sys.argv[1], "rb"):
    # NOTE(review): atom_id is incremented before the "None" skip below,
    # so skipped lines still consume ids — presumably intentional to keep
    # ids aligned with input line numbers; confirm.
    atom_id += 1
    if cen_tel_file_line.endswith("None\n"):
        continue
    cen_tel_file_line_data = cen_tel_file_line.strip().split("\t")
    chain_id = cen_tel_file_line_data[0]  # column 1: name -> chain_id
    b_factor = float(cen_tel_file_line_data[1])  # column 2: color -> b_factor
    x, y, z = map(float, cen_tel_file_line_data[2:5])  # columns 3-5: x, y, z
    aCat.append(("HETATM", ".", atom_id, chain_id, 1, 1, 1, x, y, z,
                 b_factor))
# write output
curContainer.append(sCat)
curContainer.append(aCat)
myDataList.append(curContainer)
pdbxW = PdbxWriter(sys.stdout)
pdbxW.write(myDataList)
class CifAtom(object):
    """Write "cifatom" files.

    These are partial cif files that contain all atoms, including those
    created by symmetry operations.  They differ from normal cif files in
    that they only contain the atom_site block and this block differs
    from that in the standard cif files.

    1. The file contains all atoms, even those produced by rotations.
    2. All label_* and auth_* fields will be the same and will contain
       the values used to compute the unit id of the atom.
    3. The entity id is '?'.
    4. All *_esd, charge, *_esi, occupancy and such entries are '?'.
    5. An additional final field contains the component unit id for each
       atom (only when unit_ids is True).
    """

    def __init__(self, handle, unit_ids=True, protect_lists_of_lists=False):
        # handle: writable file-like object for the cif output.
        # unit_ids: when True, append a trailing unit_id column.
        # protect_lists_of_lists: when True, append a sentinel dummy row
        # (see atom_container) to work around single-row handling bugs.
        self.writer = Writer(handle)
        self.unit_ids = unit_ids
        self.protect_lists_of_lists = protect_lists_of_lists

    def atom_container(self, structure, protect_lists_of_lists):
        """Build the atom_site DataCategory for *structure*.

        Atoms that cannot produce a component unit id (e.g. hydrogens
        added by infer_hydrogens) are skipped entirely.
        """
        atoms = DataCategory('atom_site')
        # Column order must match the per-atom `data` rows below.
        fields = ['group_PDB', 'id', 'type_symbol', 'label_atom_id',
                  'label_alt_id', 'label_comp_id', 'label_asym_id',
                  'label_entity_id', 'label_seq_id', 'pdbx_PDB_ins_code',
                  'Cartn_x', 'Cartn_y', 'Cartn_z', 'occupancy',
                  'B_iso_or_equiv', 'Cartn_x_esd', 'Cartn_y_esd',
                  'Cartn_z_esd', 'occupancy_ies', 'B_iso_or_equiv_esd',
                  'pdbx_formal_charge', 'auth_seq_id', 'auth_comp_id',
                  'auth_asym_id', 'auth_atom_id', 'pdbx_PDB_model_num']
        if self.unit_ids:
            fields.append('unit_id')
        for field in fields:
            atoms.appendAttribute(field)

        def key(atom):
            # Sort order of atoms in the written block.
            return (atom.symmetry, atom.model, atom.chain,
                    atom.component_number, atom.insertion_code)

        # Python 2 itertools.imap: lazily gather atoms from all residues.
        all_atoms = it.imap(lambda r: r.atoms(),
                            structure.residues(polymeric=None))
        all_atoms = it.chain.from_iterable(all_atoms)
        for index, atom in enumerate(sorted(all_atoms, key=key)):
            # Atoms without an explicit alternate id are written as '.'.
            alt_id = getattr(atom, 'alt_id', '.')
            data = [atom.group, index, atom.type, atom.name, alt_id,
                    atom.component_id, atom.chain, '?',
                    atom.component_number, atom.insertion_code,
                    atom.x, atom.y, atom.z,
                    '?', '?', '?', '?', '?', '?', '?', '.',
                    atom.component_number, atom.component_id, atom.chain,
                    atom.name, atom.model]
            # Atoms added by infer_hydrogens don't have the information
            # to make the unit_id and should not be written out anyway,
            # so skip the atom when the computation fails.
            # BUG FIX: only the unit-id computation is guarded now; the
            # original wrapped the appends too (silently dropping atoms
            # on unrelated append errors) and called component_unit_id()
            # a second, redundant time when building the row.
            try:
                unit_id = atom.component_unit_id()
            except Exception:
                continue
            if self.unit_ids:
                data.append(unit_id)
            atoms.append(data)

        if protect_lists_of_lists is True:
            # Kludge fix for single atom residues.
            # Handles cases where single atom residues are not handled
            # correctly as lists of lists, but as simple lists instead,
            # which has downstream implications in units.coordinates.
            #
            # Here, we force a fix, using the line-skipping logic from
            # units.coordinates to keep the kludge line out of the output
            # data.
            dummy = [
                'loop_foo', 'foo', 'foo', 'foo', 'foo', 'foo', 'foo',
                '?', 'foo', 'foo', 'foo', 'foo', 'foo',
                '?', '?', '?', '?', '?', '?', '?', '.',
                'foo', 'foo', 'foo', 'foo', 'foo']
            atoms.append(dummy)

        return atoms

    def __call__(self, structure):
        """Write the atom_site block for *structure* to the handle."""
        atoms = self.atom_container(structure, self.protect_lists_of_lists)
        container = DataContainer(structure.pdb)
        container.append(atoms)
        self.writer.writeContainer(container)
def __init__(self, handle, unit_ids=True, protect_lists_of_lists=False):
    # handle: writable file-like object for the cif output.
    # unit_ids: when True, a trailing unit_id column is written.
    # protect_lists_of_lists: when True, a sentinel dummy row is appended
    # to guard against single-row list-of-lists handling bugs downstream.
    self.writer = Writer(handle)
    self.unit_ids = unit_ids
    self.protect_lists_of_lists = protect_lists_of_lists
def write_cif(container, ocif):
    """Write a single pdbx data *container* to the file path *ocif*.

    BUG FIX: the original wrote `curContainer`, a name that does not
    exist in this scope (the parameter is `container`), so every call
    raised NameError; it also leaked the file handle on error.  The
    `with` block closes the file even if writeContainer raises.
    """
    with open(ocif, 'w') as ofh:
        pdbxW = PdbxWriter(ofh)
        pdbxW.writeContainer(container)