class SceneFile:

    def __init__(self, filename, mode='r', scale=1., delete=0):
        if mode == 'r':
            raise TypeError('Not yet implemented.')
        self.file = TextFile(filename, 'w')
        self.memo = {}
        self.delete = delete
        self.scale = scale
        self.filename = filename
        self.writeString('proc mmtk_graphics {} {\n')
        self.writeString('mol new graphics {MMTK Graphics}\n')

    def __del__(self):
        self.close()

    def writeString(self, data):
        self.file.write(data)

    def writeVector(self, v):
        self.writeString(" {%g %g %g}" % tuple(v))

    def close(self):
        if self.file is not None:
            self.writeString('}\nmmtk_graphics\n')
            self.writeString('display resetview\n')
            if self.delete:
                self.writeString('file delete ' + self.filename)
            self.file.close()
            self.file = None

    def write(self, object):
        object.writeToFile(self)

class SceneFile:

    def __init__(self, filename, mode='r'):
        if mode == 'r':
            raise TypeError('Not yet implemented.')
        self.file = TextFile(filename, 'w')
        self.file.write('#VRML V1.0 ascii\n')
        self.file.write('Separator {\n')
        self.memo = {}
        self.name_counter = 0

    def __del__(self):
        self.close()

    def writeString(self, data):
        self.file.write(data)

    def close(self):
        if self.file is not None:
            self.file.write('}\n')
            self.file.close()
            self.file = None

    def write(self, object):
        object.writeToFile(self)

    def uniqueName(self):
        self.name_counter = self.name_counter + 1
        return 'i' + `self.name_counter`

class SceneFile:

    def __init__(self, filename, mode='r', scale=1., delete=0):
        if mode == 'r':
            raise TypeError('Not yet implemented.')
        self.file = TextFile(filename, 'w')
        self.memo = {}
        self.delete = delete
        self.scale = scale
        self.filename = filename
        self.writeString('proc mara_graphics {} {\n')
        self.writeString('mol new\n')
        self.writeString('mol rename top mara\n')

    def __del__(self):
        self.close()

    def writeString(self, data):
        self.file.write(data)

    def writeVector(self, v):
        self.writeString(" {%g %g %g}" % tuple(v))

    def close(self):
        if self.file is not None:
            self.writeString('}\nmara_graphics\n')
            self.writeString('display resetview\n')
            if self.delete:
                self.writeString('file delete ' + self.filename)
            self.file.close()
            self.file = None

    def write(self, object):
        object.writeToFile(self)

class SceneFile(object):

    def __init__(self, filename, mode='r', scale=1.):
        if mode == 'r':
            raise TypeError('Not yet implemented.')
        self.file = TextFile(filename, 'w')
        self.filename = filename
        self._init(scale)

    def _init(self, scale):
        self.memo = {}
        self.scale = scale

    def __del__(self):
        self.close()

    def writeString(self, data):
        self.file.write(data)

    def writeVector(self, v):
        self.writeString(" %g %g %g " % tuple(v))

    def close(self):
        self.file.close()

    def write(self, object):
        object.writeToFile(self)

def saveText(master=None, data=None, filename=None, title=None):
    if not filename:
        if master:
            from nMoldyn.gui import FileDialog
            fd = FileDialog.SaveFileDialog(master)
            filename = fd.go(key='SaveText', pattern='*.txt')
        else:
            # string exceptions are not valid; raise a standard exception
            # with the original message instead
            raise RuntimeError('Undetectable_System_Error: '
                               'No master for a slave')
    if data is None:
        raise ValueError("no data to be saved")
    if title is None:
        title = "pMoldyn data"
    if filename:
        file = TextFile(filename, 'w')
        line = '#\n# ' + title + '\n#\n'
        file.write(line)
        for i in range(len(data)):
            line = ''
            for ia in range(len(data[i])):
                line = line + str(data[i][ia]) + ' '
            line = line + '\n'
            file.write(line)
        file.close()
    return filename

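# Minimal usage sketch for saveText(), assuming it is called without a GUI
# master widget; the data rows, filename and title below are placeholders
# chosen for illustration only.
rows = [[0.0, 1.0], [0.1, 0.8], [0.2, 0.3]]
written = saveText(data=rows, filename='example.txt', title='example data')
print "saved text data to", written
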
class SceneFile:

    def __init__(self, filename, mode='r'):
        if mode == 'r':
            raise TypeError('Not implemented.')
        self.file = TextFile(filename, 'w')
        self.file.write('#VRML V2.0 utf8\n')
        self.file.write('Transform { children [\n')
        self.memo = {}
        self.name_counter = 0

    def __del__(self):
        self.close()

    def writeString(self, data):
        self.file.write(data)

    def close(self):
        if self.file is not None:
            self.file.write(']}\n')
            self.file.close()
            self.file = None

    def write(self, object):
        object.writeToFile(self)

    def uniqueName(self):
        self.name_counter = self.name_counter + 1
        return 'i' + `self.name_counter`

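# Minimal usage sketch shared by the SceneFile variants above, assuming a
# graphics object that implements writeToFile(scene_file), which is what
# SceneFile.write() calls; the placeholder class and filename below are
# illustrative only.
class _Placeholder:                     # stand-in for a real graphics object
    def writeToFile(self, scene_file):
        scene_file.writeString('# nothing to draw\n')

scene = SceneFile('scene.wrl', 'w')
scene.write(_Placeholder())   # calls _Placeholder().writeToFile(scene)
scene.close()                 # writes the closing bracket and closes the file
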
def writeDataSets(datasets, filename, separator=''):
    """
    Write multiple datasets to a text file.

    @param datasets: a sequence of datasets describing a curve to be
        plotted. Each dataset is either a 1d-array (list of values) or a
        2d-array of shape N x 2 (list of (x, y) pairs). Nested lists can
        be used instead of arrays.
    @param filename: the name of the output file
    @type filename: C{str}
    @param separator: the contents of the line that is written between
        two datasets
    @type separator: C{str}
    """
    file = TextFile(filename, 'w')
    nsets = len(datasets)
    for i in range(nsets):
        d = Numeric.array(list(datasets[i]))
        if len(d.shape) == 1:
            d = d[:, Numeric.NewAxis]
        for point in d:
            for number in point:
                file.write(`number` + ' ')
            file.write('\n')
        if (i < nsets-1):
            file.write(separator + '\n')
    file.close()

def writeArray(array, filename, mode='w'):
    """Write array |array| to file |filename|.

    |mode| can be 'w' (new file) or 'a' (append).
    """
    file = TextFile(filename, mode)
    if len(array.shape) == 1:
        array = array[:, Numeric.NewAxis]
    for line in array:
        for element in line:
            file.write(`element` + ' ')
        file.write('\n')
    file.close()

def writeArray(array, filename, mode='w'):
    """
    Write a text representation of an array to a file.

    @param array: the array to be written
    @type array: C{Numeric.array}
    @param filename: the name of the output file
    @type filename: C{str}
    @param mode: the file access mode, 'w' (new file) or 'a' (append)
    @type mode: C{str}
    """
    file = TextFile(filename, mode)
    if len(array.shape) == 1:
        array = array[:, Numeric.NewAxis]
    for line in array:
        for element in line:
            file.write(`element` + ' ')
        file.write('\n')
    file.close()

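# Minimal usage sketch for writeArray(); the array contents and filename are
# illustrative, and Numeric is assumed to be imported as in the code above.
a = Numeric.array([[1., 2.], [3., 4.]])
writeArray(a, 'array.txt')          # create a new file
writeArray(a, 'array.txt', 'a')     # append the same data to it
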
def writeDataSets(datasets, filename, separator=''):
    """Write each of the items in the sequence |datasets| to the file
    |filename|, separating the datasets by a line containing |separator|.

    The items in the data sets can be one- or two-dimensional arrays or
    equivalent nested sequences. The output file format is understood by
    many plot programs.
    """
    file = TextFile(filename, 'w')
    nsets = len(datasets)
    for i in range(nsets):
        d = Numeric.array(datasets[i])
        if len(d.shape) == 1:
            d = d[:, Numeric.NewAxis]
        for point in d:
            for number in point:
                file.write(`number` + ' ')
            file.write('\n')
        if (i < nsets-1):
            file.write(separator + '\n')
    file.close()

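# Minimal usage sketch for writeDataSets(); the two datasets are illustrative
# nested sequences, one of (x, y) pairs and one of plain values. The '&'
# separator is only an example of a dataset delimiter accepted by some plot
# programs.
curve = [(0., 0.), (1., 1.), (2., 4.)]
values = [3., 2., 1.]
writeDataSets([curve, values], 'plots.txt', separator='&')
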
class PDBFile:

    """PDB file with access at the record level

    Constructor: PDBFile(|filename|, |mode|='"r"'), where |filename| is the
    file name and |mode| is '"r"' for reading and '"w"' for writing. The
    low-level file access is handled by the module Scientific.IO.TextFile,
    therefore compressed files and URLs (for reading) can be used as well.
    """

    def __init__(self, filename, mode='r', subformat=None):
        self.file = TextFile(filename, mode)
        self.output = string.lower(mode[0]) == 'w'
        self.export_filter = None
        if subformat is not None:
            export = export_filters.get(subformat, None)
            if export is not None:
                self.export_filter = export()
        self.open = 1
        if self.output:
            self.data = {'serial_number': 0,
                         'residue_number': 0,
                         'chain_id': '',
                         'segment_id': ''}
            self.het_flag = 0
            self.chain_number = -1

    def readLine(self):
        """Returns the contents of the next non-blank line (= record).

        The return value is a tuple whose first element (a string) contains
        the record type. For supported record types (HEADER, ATOM, HETATM,
        ANISOU, TER, MODEL, CONECT), the items from the remaining fields are
        put into a dictionary which is returned as the second tuple element.
        Most dictionary elements are strings or numbers; atom positions are
        returned as a vector, and anisotropic temperature factors are
        returned as a rank-2 tensor, already multiplied by 1.e-4. White
        space is stripped from all strings except for atom names, whose
        correct interpretation can depend on an initial space. For
        unsupported record types, the second tuple element is a string
        containing the remaining part of the record.
        """
        while 1:
            line = self.file.readline()
            if not line:
                return ('END', '')
            if line[-1] == '\n':
                line = line[:-1]
            line = string.strip(line)
            if line:
                break
        line = string.ljust(line, 80)
        type = string.strip(line[:6])
        if type == 'ATOM' or type == 'HETATM':
            line = FortranLine(line, atom_format)
            data = {'serial_number': line[1],
                    'name': line[2],
                    'alternate': string.strip(line[3]),
                    'residue_name': string.strip(line[4]),
                    'chain_id': string.strip(line[5]),
                    'residue_number': line[6],
                    'insertion_code': string.strip(line[7]),
                    'position': Vector(line[8:11]),
                    'occupancy': line[11],
                    'temperature_factor': line[12],
                    'segment_id': string.strip(line[13]),
                    'element': string.strip(line[14]),
                    'charge': string.strip(line[15])}
            return type, data
        elif type == 'ANISOU':
            line = FortranLine(line, anisou_format)
            data = {'serial_number': line[1],
                    'name': line[2],
                    'alternate': string.strip(line[3]),
                    'residue_name': string.strip(line[4]),
                    'chain_id': string.strip(line[5]),
                    'residue_number': line[6],
                    'insertion_code': string.strip(line[7]),
                    'u': 1.e-4*Tensor([[line[8], line[11], line[12]],
                                       [line[11], line[9], line[13]],
                                       [line[12], line[13], line[10]]]),
                    'segment_id': string.strip(line[14]),
                    'element': string.strip(line[15]),
                    'charge': string.strip(line[16])}
            return type, data
        elif type == 'TER':
            line = FortranLine(line, ter_format)
            data = {'serial_number': line[1],
                    'residue_name': string.strip(line[2]),
                    'chain_id': string.strip(line[3]),
                    'residue_number': line[4],
                    'insertion_code': string.strip(line[5])}
            return type, data
        elif type == 'CONECT':
            line = FortranLine(line, conect_format)
            data = {'serial_number': line[1],
                    'bonded': filter(lambda i: i > 0, line[2:6]),
                    'hydrogen_bonded': filter(lambda i: i > 0, line[6:10]),
                    'salt_bridged': filter(lambda i: i > 0, line[10:12])}
            return type, data
        elif type == 'MODEL':
            line = FortranLine(line, model_format)
            data = {'serial_number': line[1]}
            return type, data
        elif type == 'HEADER':
            line = FortranLine(line, header_format)
            data = {'compound': line[1],
                    'date': line[2],
                    'pdb_code': line[3]}
            return type, data
        else:
            return type, line[6:]

    def writeLine(self, type, data):
        """Writes a line using record type and data dictionary in the same
        format as returned by readLine(). Default values are provided for
        non-essential information, so the data dictionary need not contain
        all entries.
        """
        if self.export_filter is not None:
            type, data = self.export_filter.processLine(type, data)
            if type is None:
                return
        line = [type]
        if type == 'ATOM' or type == 'HETATM':
            format = atom_format
            position = data['position']
            line = line + [data.get('serial_number', 1),
                           data.get('name'),
                           data.get('alternate', ''),
                           string.rjust(data.get('residue_name', ''), 3),
                           data.get('chain_id', ''),
                           data.get('residue_number', 1),
                           data.get('insertion_code', ''),
                           position[0], position[1], position[2],
                           data.get('occupancy', 0.),
                           data.get('temperature_factor', 0.),
                           data.get('segment_id', ''),
                           string.rjust(data.get('element', ''), 2),
                           data.get('charge', '')]
        elif type == 'ANISOU':
            format = anisou_format
            u = 1.e4*data['u']
            u = [int(u[0, 0]), int(u[1, 1]), int(u[2, 2]),
                 int(u[0, 1]), int(u[0, 2]), int(u[1, 2])]
            line = line + [data.get('serial_number', 1),
                           data.get('name'),
                           data.get('alternate', ''),
                           string.rjust(data.get('residue_name'), 3),
                           data.get('chain_id', ''),
                           data.get('residue_number', 1),
                           data.get('insertion_code', '')] \
                        + u \
                        + [data.get('segment_id', ''),
                           string.rjust(data.get('element', ''), 2),
                           data.get('charge', '')]
        elif type == 'TER':
            format = ter_format
            line = line + [data.get('serial_number', 1),
                           string.rjust(data.get('residue_name'), 3),
                           data.get('chain_id', ''),
                           data.get('residue_number', 1),
                           data.get('insertion_code', '')]
        elif type == 'CONECT':
            format = conect_format
            line = line + [data.get('serial_number')]
            line = line + (data.get('bonded', [])+4*[None])[:4]
            line = line + (data.get('hydrogen_bonded', [])+4*[None])[:4]
            line = line + (data.get('salt_bridged', [])+2*[None])[:2]
        elif type == 'MODEL':
            format = model_format
            line = line + [data.get('serial_number')]
        elif type == 'HEADER':
            format = header_format
            line = line + [data.get('compound', ''),
                           data.get('date', ''),
                           data.get('pdb_code')]
        else:
            format = generic_format
            line = line + [data]
        self.file.write(str(FortranLine(line, format)) + '\n')

    def writeComment(self, text):
        """Writes |text| into one or several comment lines.

        Each line of the text is prefixed with 'REMARK' and written to
        the file.
        """
        while text:
            eol = string.find(text, '\n')
            if eol == -1:
                eol = len(text)
            self.file.write('REMARK %s \n' % text[:eol])
            text = text[eol+1:]

    def writeAtom(self, name, position, occupancy=0.0,
                  temperature_factor=0.0, element=''):
        """Writes an ATOM or HETATM record using the |name|, |occupancy|,
        |temperature_factor| and |element| information supplied. The residue
        and chain information is taken from the last calls to the methods
        nextResidue() and nextChain().
        """
        if self.het_flag:
            type = 'HETATM'
        else:
            type = 'ATOM'
        name = string.upper(name)
        if element != '' and len(element) == 1 and name and name[0] == element:
            name = ' ' + name
        self.data['name'] = name
        self.data['position'] = position
        self.data['serial_number'] = (self.data['serial_number'] + 1) % 100000
        self.data['occupancy'] = occupancy
        self.data['temperature_factor'] = temperature_factor
        self.data['element'] = element
        self.writeLine(type, self.data)

    def nextResidue(self, name, number=None, terminus=None):
        """Signals the beginning of a new residue, starting with the next
        call to writeAtom().

        The residue name is |name|, and a |number| can be supplied
        optionally; by default residues in a chain will be numbered
        sequentially starting from 1. The value of |terminus| can be
        'None', '"C"', or '"N"'; it is passed to export filters that can
        use this information in order to use different atom or residue
        names in terminal residues.
        """
        name = string.upper(name)
        if self.export_filter is not None:
            name, number = self.export_filter.processResidue(name, number,
                                                             terminus)
        self.het_flag = not (name in amino_acids or name in nucleic_acids)
        self.data['residue_name'] = name
        self.data['residue_number'] = (self.data['residue_number'] + 1) % 10000
        self.data['insertion_code'] = ''
        if number is not None:
            if type(number) is type(0):
                self.data['residue_number'] = number % 10000
            else:
                self.data['residue_number'] = number.number % 10000
                self.data['insertion_code'] = number.insertion_code

    def nextChain(self, chain_id=None, segment_id=''):
        """Signals the beginning of a new chain.

        A chain identifier (string of length one) can be supplied as
        |chain_id|; by default consecutive letters from the alphabet are
        used. The equally optional |segment_id| defaults to an empty
        string.
        """
        if chain_id is None:
            self.chain_number = (self.chain_number + 1) % len(self._chain_ids)
            chain_id = self._chain_ids[self.chain_number]
        if self.export_filter is not None:
            chain_id, segment_id = \
                self.export_filter.processChain(chain_id, segment_id)
        self.data['chain_id'] = (chain_id+' ')[:1]
        self.data['segment_id'] = (segment_id+' ')[:4]
        self.data['residue_number'] = 0

    _chain_ids = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'

    def terminateChain(self):
        "Signals the end of a chain."
        if self.export_filter is not None:
            self.export_filter.terminateChain()
        self.data['serial_number'] = (self.data['serial_number'] + 1) % 100000
        self.writeLine('TER', self.data)
        self.data['chain_id'] = ''
        self.data['segment_id'] = ''

    def close(self):
        """Closes the file.

        This method *must* be called for write mode because otherwise the
        file will be incomplete.
        """
        if self.open:
            if self.output:
                self.file.write('END\n')
            self.file.close()
            self.open = 0

    def __del__(self):
        self.close()

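# Minimal usage sketch for PDBFile; the file names are placeholders and
# Vector is the same class used by readLine() above.
#
# Reading record by record:
pdb = PDBFile('input.pdb')
while 1:
    type, data = pdb.readLine()
    if type == 'END':
        break
    if type == 'ATOM' or type == 'HETATM':
        pass  # data['name'], data['position'], etc. are available here
pdb.close()

# Writing a one-residue chain:
out = PDBFile('output.pdb', 'w')
out.nextChain()
out.nextResidue('ALA')
out.writeAtom('CA', Vector(0., 0., 0.), occupancy=1.0, element='C')
out.terminateChain()
out.close()
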
def __init__(self, file, modifications=[]):
    if isinstance(file, basestring):
        file = TextFile(file)
    title = file.readline()[:-1]
    self.atom_types = DictWithDefault(None)
    self._readAtomTypes(file)
    format = FortranFormat('20(A2,2X)')
    done = False
    while not done:
        l = FortranLine(file.readline()[:-1], format)
        for entry in l:
            name = _normalizeName(entry)
            if len(name) == 0:
                done = True
                break
            try:  # ignore errors for now
                self.atom_types[name].hydrophylic = True
            except:
                pass
    self.bonds = {}
    self._readBondParameters(file)
    self.bond_angles = {}
    self._readAngleParameters(file)
    self.dihedrals = {}
    self.dihedrals_2 = {}
    self._readDihedralParameters(file)
    self.impropers = {}
    self.impropers_1 = {}
    self.impropers_2 = {}
    self._readImproperParameters(file)
    self.hbonds = {}
    self._readHbondParameters(file)
    self.lj_equivalent = {}
    format = FortranFormat('20(A2,2X)')
    while True:
        l = FortranLine(file.readline()[:-1], format)
        if l.isBlank():
            break
        name1 = _normalizeName(l[0])
        for s in l[1:]:
            name2 = _normalizeName(s)
            self.lj_equivalent[name2] = name1
    self.ljpar_sets = {}
    while True:
        l = FortranLine(file.readline()[:-1], 'A4,6X,A2')
        if l[0] == 'END ':
            break
        set_name = _normalizeName(l[0])
        ljpar_set = AmberLJParameterSet(set_name, l[1])
        self.ljpar_sets[set_name] = ljpar_set
        self._readLJParameters(file, ljpar_set)
    file.close()
    for mod, ljname in modifications:
        if isinstance(mod, basestring):
            file = TextFile(mod)
        else:
            file = mod
        title = file.readline()[:-1]
        blank = file.readline()[:-1]
        while True:
            keyword = file.readline()
            if not keyword:
                break
            keyword = keyword.strip()[:4]
            if keyword == 'MASS':
                self._readAtomTypes(file)
            elif keyword == 'BOND':
                self._readBondParameters(file)
            elif keyword == 'ANGL':
                self._readAngleParameters(file)
            elif keyword == 'DIHE':
                self._readDihedralParameters(file)
            elif keyword == 'IMPR':
                self._readImproperParameters(file)
            elif keyword == 'HBON':
                self._readHbondParameters(file)
            elif keyword == 'NONB':
                self._readLJParameters(file, self.ljpar_sets[ljname])

def __init__(self, file, modifications=[]):
    if isinstance(file, str):
        file = TextFile(file)
    title = file.readline()[:-1]
    self.atom_types = DictWithDefault(None)
    self._readAtomTypes(file)
    format = FortranFormat('20(A2,2X)')
    done = 0
    while not done:
        l = FortranLine(file.readline()[:-1], format)
        for entry in l:
            name = _normalizeName(entry)
            if len(name) == 0:
                done = 1
                break
            try:  # ignore errors for now
                self.atom_types[name].hydrophylic = 1
            except:
                pass
    self.bonds = {}
    self._readBondParameters(file)
    self.bond_angles = {}
    self._readAngleParameters(file)
    self.dihedrals = {}
    self.dihedrals_2 = {}
    self._readDihedralParameters(file)
    self.impropers = {}
    self.impropers_1 = {}
    self.impropers_2 = {}
    self._readImproperParameters(file)
    self.hbonds = {}
    self._readHbondParameters(file)
    self.lj_equivalent = {}
    format = FortranFormat('20(A2,2X)')
    while 1:
        l = FortranLine(file.readline()[:-1], format)
        if l.isBlank():
            break
        name1 = _normalizeName(l[0])
        for s in l[1:]:
            name2 = _normalizeName(s)
            self.lj_equivalent[name2] = name1
    self.ljpar_sets = {}
    while 1:
        l = FortranLine(file.readline()[:-1], 'A4,6X,A2')
        if l[0] == 'END ':
            break
        set_name = _normalizeName(l[0])
        ljpar_set = AmberLJParameterSet(set_name, l[1])
        self.ljpar_sets[set_name] = ljpar_set
        self._readLJParameters(file, ljpar_set)
    file.close()
    for mod, ljname in modifications:
        if isinstance(mod, str):
            file = TextFile(mod)
        else:
            file = mod
        title = file.readline()[:-1]
        blank = file.readline()[:-1]
        while 1:
            keyword = file.readline()
            if not keyword:
                break
            keyword = string.strip(keyword)[:4]
            if keyword == 'MASS':
                self._readAtomTypes(file)
            elif keyword == 'BOND':
                self._readBondParameters(file)
            elif keyword == 'ANGL':
                self._readAngleParameters(file)
            elif keyword == 'DIHE':
                self._readDihedralParameters(file)
            elif keyword == 'IMPR':
                self._readImproperParameters(file)
            elif keyword == 'HBON':
                self._readHbondParameters(file)
            elif keyword == 'NONB':
                self._readLJParameters(file, self.ljpar_sets[ljname])

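# Minimal usage sketch for the parameter-file reader whose __init__ is shown
# above. The enclosing class name (AmberParameters here), the file names and
# the LJ set name are assumptions for illustration; only the constructor
# arguments (file, modifications) and the attributes it fills in come from
# the code.
params = AmberParameters('parm.dat',
                         modifications=[('frcmod.example', 'MOD4')])
bond_parameters = params.bonds          # filled by _readBondParameters()
lj_sets = params.ljpar_sets.keys()      # Lennard-Jones parameter sets read
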
if (cage_type == "Ar(Small)"): #Convert coordinates to vectors for the Small Cages small_cages.append(Geo.Vector(x,y,z)) elif (cage_type == "Kr(Large)"): #Convert coordinates to vectors for the Large Cages large_cages.append(Geo.Vector(x,y,z)) else: raise CorrelationError("Can't parse file. Encountered strange cage centre type: " + cage_type + " Only 'Ar(Small)' and 'Kr(Large)' accepted.") xyz_file.close() print "Read in Cage Centre locations. There are %d small and %d large cages." % (len(small_cages), len(large_cages)) ############################################################################### #Generate 27 duplicates of the unit cell to make a 3X3 cube of unit cells # - Do this so that all of the cages in the central unit cell are fully formed ############################################################################### Hyd_cell = [] Oxy_cell = [] for i in range(-1, 2): for j in range(-1, 2): for k in range(-1, 2): #print "###################################################################################"