def OpenRepository(repositoryLocation):
    # cElementTree was removed in Python 3.9; plain ElementTree carries the C
    # accelerator automatically on modern Pythons.
    from xml.etree.ElementTree import parse as xmlparse
    global _packages
    # Check repository for latest primary.xml
    with urlopen(repositoryLocation + 'repodata/repomd.xml') as metadata:
        doctree = xmlparse(metadata)
    xmlns = 'http://linux.duke.edu/metadata/repo'
    for element in doctree.findall('{%s}data' % xmlns):
        if element.get('type') == 'primary':
            primaryUrl = element.find('{%s}location' % xmlns).get('href')
    # Make sure all the cache directories exist
    for dir in (_packageCacheDirectory, _repositoryCacheDirectory,
                _extractedCacheDirectory):
        try:
            os.makedirs(dir)
        except OSError:
            pass
    # Download repository metadata (only if not already in cache)
    primaryFilename = os.path.join(
        _repositoryCacheDirectory,
        os.path.splitext(os.path.basename(primaryUrl))[0])
    if not os.path.exists(primaryFilename):
        warning('Downloading repository data')
        with urlopen(repositoryLocation + primaryUrl) as primaryGzFile:
            import io, gzip
            primaryGzString = io.BytesIO(primaryGzFile.read())  # 3.2: use gzip.decompress
            with gzip.GzipFile(fileobj=primaryGzString) as primaryGzipFile:
                with open(primaryFilename, 'wb') as primaryFile:
                    primaryFile.writelines(primaryGzipFile)
    elements = xmlparse(primaryFilename)
    # Parse package list from XML
    xmlns = 'http://linux.duke.edu/metadata/common'
    rpmns = 'http://linux.duke.edu/metadata/rpm'
    _packages = [{
        'name': p.find('{%s}name' % xmlns).text,
        'arch': p.find('{%s}arch' % xmlns).text,
        'buildtime': int(p.find('{%s}time' % xmlns).get('build')),
        'url': repositoryLocation + p.find('{%s}location' % xmlns).get('href'),
        'filename': os.path.basename(p.find('{%s}location' % xmlns).get('href')),
        'provides': {provides.attrib['name']
                     for provides in p.findall('{%s}format/{%s}provides/{%s}entry'
                                               % (xmlns, rpmns, rpmns))},
        'requires': {req.attrib['name']
                     for req in p.findall('{%s}format/{%s}requires/{%s}entry'
                                          % (xmlns, rpmns, rpmns))},
    } for p in elements.findall('{%s}package' % xmlns)]
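# A minimal, self-contained sketch of the namespaced-findall pattern used
# above; the inline repomd.xml fragment is a stand-in for the real download,
# so nothing here reflects an actual repository:
from io import StringIO
from xml.etree.ElementTree import parse as xmlparse

xmlns = 'http://linux.duke.edu/metadata/repo'
repomd = StringIO(
    '<repomd xmlns="%s">'
    '<data type="primary"><location href="repodata/primary.xml.gz"/></data>'
    '</repomd>' % xmlns)
doctree = xmlparse(repomd)
for element in doctree.findall('{%s}data' % xmlns):
    if element.get('type') == 'primary':
        # attributes without a prefix are not namespaced, so plain 'type'
        # and 'href' work here
        print(element.find('{%s}location' % xmlns).get('href'))  # repodata/primary.xml.gz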
def svn(self, *cmd):
    # Run the svn command assembled from self.pre, the given arguments and
    # self.root, and parse its XML output (Popen/PIPE, StringIO and xmlparse
    # are module-level imports in the original source).
    pipe = Popen(self.pre + list(cmd) + [self.root], stdout=PIPE)
    try:
        data = pipe.communicate()[0]
    except IOError:
        data = ""
    return xmlparse(StringIO(data))
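# The same Popen-output-to-XML pattern as a self-contained sketch; the
# 'python -c' command line merely stands in for an svn invocation, and on
# Python 3 communicate() returns bytes, hence BytesIO rather than StringIO:
import sys
from subprocess import Popen, PIPE
from io import BytesIO
from xml.etree.ElementTree import parse as xmlparse

pipe = Popen([sys.executable, '-c',
              'print("<log><entry revision=\'1\'/></log>")'],
             stdout=PIPE)
data = pipe.communicate()[0]            # bytes
tree = xmlparse(BytesIO(data))
print(tree.getroot().find('entry').get('revision'))  # 1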
def parse_preprocessed_xml(fileh):
    # Wrap every <pair> element under the document root in a Pair object
    # (Pair is defined elsewhere in the original project).
    etree = xmlparse(fileh)
    return [Pair(pair) for pair in etree.iterfind('pair')]
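# Usage sketch: the Pair class consumed above lives elsewhere in the original
# project, so a minimal stand-in is defined here purely for illustration.
from io import StringIO
from xml.etree.ElementTree import parse as xmlparse

class Pair(object):
    def __init__(self, element):
        self.left = element.get('left')
        self.right = element.get('right')

doc = StringIO('<pairs><pair left="a" right="b"/><pair left="c" right="d"/></pairs>')
print([(p.left, p.right) for p in parse_preprocessed_xml(doc)])
# [('a', 'b'), ('c', 'd')]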
def OpenRepository(repositoryLocation, arch=u'noarch'):
    from xml.etree.cElementTree import parse as xmlparse
    global _packages
    # Check repository for latest primary.xml
    myurl = repositoryLocation + u'repodata/repomd.xml'
    metadata = urlopen(myurl)
    doctree = xmlparse(metadata)
    xmlns = u'http://linux.duke.edu/metadata/repo'
    for element in doctree.findall(u'{%s}data' % xmlns):
        if element.get(u'type') == u'primary':
            primaryUrl = element.find(u'{%s}location' % xmlns).get(u'href')
    # Make sure all the cache directories exist
    for dir in (_packageCacheDirectory, _repositoryCacheDirectory,
                _extractedCacheDirectory):
        try:
            os.makedirs(dir)
        except OSError:
            pass
    # Download repository metadata (only if not already in cache)
    primaryFilename = os.path.join(
        _repositoryCacheDirectory,
        os.path.splitext(os.path.basename(primaryUrl))[0])
    if not os.path.exists(primaryFilename):
        warning(u'Downloading repository data')
        mypriurl = repositoryLocation + primaryUrl
        primaryGzFile = urlopen(mypriurl)
        if primaryGzFile:
            import io, gzip
            primaryGzString = io.BytesIO(primaryGzFile.read())  # 3.2: use gzip.decompress
            with gzip.GzipFile(fileobj=primaryGzString) as primaryGzipFile:
                with open(primaryFilename, u'wb') as primaryFile:
                    primaryFile.writelines(primaryGzipFile)
    elements = xmlparse(primaryFilename)
    # Parse package list from XML, keeping only packages for the requested arch
    xmlns = u'http://linux.duke.edu/metadata/common'
    rpmns = u'http://linux.duke.edu/metadata/rpm'
    _packages = [{
        u'name': p.find(u'{%s}name' % xmlns).text,
        u'buildtime': int(p.find(u'{%s}time' % xmlns).get(u'build')),
        u'url': repositoryLocation + p.find(u'{%s}location' % xmlns).get(u'href'),
        u'filename': os.path.basename(p.find(u'{%s}location' % xmlns).get(u'href')),
        u'provides': set(provides.attrib[u'name']
                         for provides in p.findall(u'{%s}format/{%s}provides/{%s}entry'
                                                   % (xmlns, rpmns, rpmns))),
        u'requires': set(req.attrib[u'name']
                         for req in p.findall(u'{%s}format/{%s}requires/{%s}entry'
                                              % (xmlns, rpmns, rpmns))),
    } for p in elements.findall(u'{%s}package' % xmlns)
      if p.find(u'{%s}arch' % xmlns).text == arch]
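# Usage sketch (hedged: the URL is hypothetical, and the module-level cache
# directories must already be configured before calling):
# OpenRepository(u'https://example.org/repo/', arch=u'x86_64')
# names = set(p[u'name'] for p in _packages)  # only x86_64 packages remain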
def emit(header, data, out):
    # Write a clrmamepro DAT file: one game block per CRC in the crc -> game
    # map, listing the roms that match that CRC (Python 2: dict.iteritems).
    out.write('clrmamepro (\n')
    out.write(' name "{}"\n'.format(header['name']))
    out.write(' version {}\n'.format(header['version']))
    out.write(')\n\n')
    for crc, game in data.iteritems():
        out.write('game (\n')
        out.write(u' name "{}"\n'.format(game['description']))
        if 'year' in game:
            out.write(' year "{}"\n'.format(game['year']))
        if 'manufacturer' in game:
            out.write(u' developer "{}"\n'.format(game['manufacturer']))
        for rom in filter(lambda r: r['crc'] == crc, game['roms']):
            if 'sha1' in rom:
                out.write(' rom ( name {name} size {size} crc {crc} sha1 {sha1} )\n'.format(**rom))
            else:
                out.write(' rom ( name {name} size {size} crc {crc} )\n'.format(**rom))
        out.write(')\n\n')

data = xmlparse(sys.argv[1]).getroot()
with codecs.open(sys.argv[2], 'w', 'utf-8') as out:
    emit(header(data), crcmap(machines(data)), out)
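# Self-contained usage sketch (Python 2, matching the dict.iteritems call
# above); the inline dicts stand in for the header(), machines() and crcmap()
# helpers defined earlier in the original script:
import sys

emit({'name': 'Example DAT', 'version': '1.0'},
     {'abcd1234': {'description': 'Example Game',
                   'roms': [{'name': 'example.rom', 'size': 1024,
                             'crc': 'abcd1234'}]}},
     sys.stdout)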