def StartWork(self): StatusManager.StatusManager.pushServer=self req_GetClientId = ConnectStrings.GetString_ServerHeader()+'?'+ConnectStrings.GetString_GetClientId() print req_GetClientId getClientIdHandler=GetClientIdHandler.GetClientIdHandler() parser=make_parser() parser.setContentHandler(getClientIdHandler) parser.parse(req_GetClientId) print StatusManager.StatusManager.clientId req_AddStream=ConnectStrings.GetString_ServerHeader()+'?'+ConnectStrings.GetString_AddToStream() print req_AddStream addToStreamHandler=AddToStreamHandler.AddToStreamHandler() parser2=make_parser() parser2.setContentHandler(addToStreamHandler) url=req_AddStream StatusManager.StatusManager.stream=urllib2.urlopen(url) tagList=[] str='' while 1: char=StatusManager.StatusManager.stream.read(1) if char=='<': nextChar=StatusManager.StatusManager.stream.read(1) while nextChar!='>': str+=nextChar nextChar=StatusManager.StatusManager.stream.read(1) XmlProcess.ProcessNode(str) str=''
def tree_from_stream(stream, norm_sp=1, ext_ges=0, ext_pes=0, include_comment=1, encoding='UTF-8', html=0): """ create internal tree from xml stream (open file or IOString) if norm_sp = 1, normalize space and new line """ from xml.sax import make_parser, SAXNotRecognizedException from xml.sax.handler import feature_namespaces, feature_external_ges, \ feature_external_pes, property_lexical_handler from parser import SaxHandler handler = SaxHandler(norm_sp, include_comment, encoding) if html: parser = make_parser(["xml.sax.drivers2.drv_sgmlop_html"]) else: parser = make_parser() # do not perform Namespace processing parser.setFeature(feature_namespaces, 0) # do not include any external entities try: parser.setFeature(feature_external_ges, ext_ges) #xml.sax._exceptions. except SAXNotRecognizedException: print 'Unable to set feature external ges' try: parser.setFeature(feature_external_pes, ext_pes) #xml.sax._exceptions. except SAXNotRecognizedException: print 'Unable to set feature external pes' # add lexical handler for comments, entities, dtd and cdata parser.setProperty(property_lexical_handler, handler) parser.setContentHandler(handler) parser.parse(stream) return handler.get_tree()
def upload_annotations_file(request, project_id=None):
    """Import annotations for *project_id* from an uploaded .xml or .zip.

    Superuser only.  XML files are fed to the SAX parser directly; zip
    archives are unpacked in memory and every member (except MacOS
    resource-fork entries) is parsed individually.  Returns the project
    edit view, with an error message when validation fails.
    """
    if not request.user.is_superuser:
        return HttpResponseForbidden("Error: You must be an administrator to use this form")
    if 'fileToUpload' not in request.FILES or not request.FILES['fileToUpload']:
        return projectEdit(request, project_id=project_id, error="Please select a file to upload (supported formats are .zip and .xml).")
    f = request.FILES['fileToUpload']
    if (not f.name.endswith('.xml')) and (not f.name.endswith('.zip')):
        return projectEdit(request, project_id=project_id, error="Please select a file to upload (supported formats are .zip and .xml).")
    if fnmatch.fnmatch(f.name, '*.xml'):
        parser = make_parser()
        curHandler = XmlImportHandler(project_id)
        parser.setContentHandler(curHandler)
        # join chunks once instead of quadratic string concatenation
        data = "".join(f.chunks())
        parser.feed(data)
        parser.close()  # signal end of document (was missing)
    if fnmatch.fnmatch(f.name, '*.zip'):
        zipdata = "".join(f.chunks())
        archive = zipfile.ZipFile(StringIO.StringIO(zipdata))
        for member in archive.namelist():
            # skip MacOS resource-fork entries
            if member.find("__MACOSX/") != -1:
                continue
            parser = make_parser()
            curHandler = XmlImportHandler(project_id)
            parser.setContentHandler(curHandler)
            parser.feed(archive.read(member))
            parser.close()
        archive.close()
    return projectEdit(request, project_id=project_id)
def get_objects_recursive(self, objtype, ids=None, recursive=False):
    """Fetch OSM objects plus everything they reference.

    objtype is 'node', 'way' or 'relation'; ids is an iterable of
    object ids (defaults to empty).  With recursive=False only the
    directly referenced members of relations are fetched; with
    recursive=True relations are followed hierarchically (up to 100
    levels).  Returns the concatenated node, way and relation data.
    """
    if ids is None:  # avoid the shared mutable-default pitfall
        ids = []
    relationids = set([])
    wayids = set([])
    nodeids = set([])
    relationdata, waydata, nodedata = '', '', ''
    if objtype == 'node':
        nodeids = set(ids)
    elif objtype == 'way':
        wayids = set(ids)
    elif objtype == 'relation':
        relationids = set(ids)
    else:
        return ""
    if recursive:
        recursions = 100  # maximum recursion level
    else:
        recursions = 1  # only get all direct members
    loaded_relationids = set([])
    while relationids:
        r_data = self.get_objects('relation', relationids)
        relationdata += '\n' + r_data
        if not recursions:
            break
        else:
            recursions -= 1
        # parseString drives the handler itself; the old unused
        # make_parser()/setContentHandler pair was dead code.
        osm_handler = SubobjectHandler()
        parseString(OSMHEAD + r_data + OSMTAIL, osm_handler)
        nodeids |= osm_handler.nodes
        wayids |= osm_handler.ways
        loaded_relationids |= relationids
        relationids = osm_handler.relations - loaded_relationids
    if wayids:
        waydata = self.get_objects('way', wayids)
        osm_handler = SubobjectHandler()
        parseString(OSMHEAD + waydata + OSMTAIL, osm_handler)
        nodeids |= osm_handler.nodes
    if nodeids:
        nodedata = self.get_objects('node', nodeids)
    return nodedata + waydata + relationdata
def __init__(self):
    """Create a new Definition object.

    Reads the amino-acid and nucleic-acid definition files into
    self.map, then reads the patch file and applies each patch whose
    'applyto' regexp matches residues in the map.

    Raises PDBInternalError when a required data file cannot be found.
    """
    self.map = {}
    self.patches = {}
    handler = DefinitionHandler()
    # (a stray sax.make_parser() call was removed here -- its result
    # was never used; sax.parseString builds its own parser)
    for path in [AAPATH, NAPATH]:
        defpath = getDatFile(path)
        if defpath == "":
            raise PDBInternalError("%s not found!" % path)
        acidFile = open(defpath)
        try:
            sax.parseString(acidFile.read(), handler)
        finally:
            acidFile.close()
        self.map.update(handler.map)
    # Now handle patches
    defpath = getDatFile(PATCHPATH)
    if defpath == "":
        raise PDBInternalError("%s not found!" % PATCHPATH)
    handler.map = {}
    patchFile = open(defpath)
    try:
        sax.parseString(patchFile.read(), handler)
    finally:
        patchFile.close()
    # Apply specific patches to the reference object, allowing users
    # to specify protonation states in the PDB file
    for patch in handler.patches:
        if patch.newname != "":
            # Find all residues matching applyto
            resnames = self.map.keys()
            for name in resnames:
                regexp = re.compile(patch.applyto).match(name)
                if not regexp:
                    continue
                newname = patch.newname.replace("*", name)
                self.addPatch(patch, name, newname)
        # Either way, make sure the main patch name is available
        self.addPatch(patch, patch.applyto, patch.name)
def parse(filename):
    """
    Interprets the given string as a filename, URL or XML data string,
    parses it and returns a Python object which represents the given
    document.

    Raises ``ValueError`` if the argument is None / empty string.

    Raises ``xml.sax.SAXParseException`` if something goes wrong
    during parsing.
    """
    # The original guard read ``(is_string(filename) and
    # filename.strip()) == ''`` -- the comparison belongs inside the
    # ``and`` so the intent is explicit (the net result is the same).
    if (filename is None
            or (is_string(filename) and filename.strip() == '')):
        raise ValueError('parse() takes a filename, URL or XML string')
    parser = make_parser()
    sax_handler = Handler()
    parser.setContentHandler(sax_handler)
    if is_string(filename) and (os.path.exists(filename) or is_url(filename)):
        # existing file path or URL: let the parser open it
        parser.parse(filename)
    elif hasattr(filename, 'read'):
        # already a file-like object
        parser.parse(filename)
    else:
        # raw XML data string
        parser.parse(StringIO(filename))
    return sax_handler.root
def seed(fileobj):
    """Parse *fileobj* with namespace support and return the Seeder's
    accumulated result."""
    handler = Seeder()
    sax_parser = make_parser()
    sax_parser.setFeature(feature_namespaces, 1)
    sax_parser.setContentHandler(handler)
    sax_parser.parse(fileobj)
    return handler.result
def scan(self):
    """Parse the *.qrc* files"""
    if not has_xml:
        Logs.error('no xml support was found, the rcc dependencies will be incomplete!')
        return ([], [])

    handler = XMLHandler()
    qrc_parser = make_parser()
    qrc_parser.setContentHandler(handler)

    fi = open(self.inputs[0].abspath(), 'r')
    try:
        qrc_parser.parse(fi)
    finally:
        fi.close()

    # resolve every referenced file against the qrc's directory;
    # unresolved entries are reported by name only
    found, missing = [], []
    parent_dir = self.inputs[0].parent
    for path in handler.files:
        node = parent_dir.find_resource(path)
        if node:
            found.append(node)
        else:
            missing.append(path)
    return (found, missing)
def readDotGraphStyles(self):
    """Scan the current directory for roll-*.xml files and collect the
    dot-graph styling (edge color, node color, node shape) per roll.

    Returns a dict mapping roll name -> rocks.util.Struct.
    """
    dot_parser = make_parser()
    handler = rocks.profile.RollHandler()
    styles = {}  # renamed from 'map' -- don't shadow the builtin
    for filename in os.listdir('.'):
        # splitext always returns a 2-tuple, so no length check needed
        name, ext = os.path.splitext(filename)
        tokens = string.split(name, '-')
        if len(tokens) < 2:
            continue
        prefix = tokens[0]
        if prefix == 'roll' and ext == '.xml' and os.path.isfile(filename):
            fin = open(filename, 'r')
            try:
                dot_parser.setContentHandler(handler)
                dot_parser.parse(fin)
            finally:
                fin.close()
            r = handler.getRollName()
            styles[r] = rocks.util.Struct()
            styles[r].edgeColor = handler.getEdgeColor()
            styles[r].nodeColor = handler.getNodeColor()
            styles[r].nodeShape = handler.getNodeShape()
    return styles
def analyze (request,recurso): theParser = make_parser() theHandler = myContentHandler() theParser.setContentHandler(theHandler) file = urllib2.urlopen( 'http://barrapunto.com/index.rss') theParser.parse(file) theHandler.htmlFile.close() print "Parse complete" if request.method == 'GET': index = open("index.html","r") html=index.read() try: record = Table.objects.get(resource=recurso) return HttpResponse(record.name+"<ul>"+html.decode('utf-8')+"</ul>") except Table.DoesNotExist: return HttpResponseNotFound('Page not found:') elif request.method == 'PUT': record = Table(resource= recurso,name =request.body) record.save() return HttpResponse('<h1>Actualizando.../h1>'+ request.body)
def start(self):
    """Parse self.fname, then append a catch-all "OTHER" VLAN holding
    every port not already assigned to a vlan (the switch requires
    complete coverage), and write the config out via spit()."""
    cfg_parser = make_parser()
    cfg_parser.setFeature(feature_namespaces, 0)
    cfg_parser.setContentHandler(self)
    cfg_parser.parse(self.fname)

    # mark every port that appears in some vlan
    assigned = {}
    for portid in range(1, hp_ports + 1):
        assigned[portid] = 0
    for vlan in self.vlans:
        for p in vlan[1]:
            assigned[port2portid(p)] = 1

    # collect the leftovers into one port tuple
    leftover = ()
    for portid in range(1, hp_ports + 1):
        if not assigned[portid]:
            leftover = leftover + (portid2port(portid),)

    self.vlans = self.vlans + \
        (((("OTHER", "0.0.0.0", "0.0.0.0"),) + (leftover,)),)
    self.spit()
def __init__(self, filename): """ Just initialize everything to zero. """ self.__tables = {} self.__dependencies = {} self.intable = None self.buffer = '' self.column = 0 self.column_name = 0 self.column_type = 0 self.invalue = 0 self.intable = 0 self.table_name = 0 self.comment = 0 self.tableid = 0 self.dep_name = 0 self.indep =0 self.system = None parser = make_parser() parser.setContentHandler(self) try: parser.parse(open(filename)) except: print '=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=' print 'Failure in XML parsing!' print 'Did you remember to save the Dia file without compression...?' print '=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=' traceback.print_exc()
def load_costs(self, dumpfile, iteration, weight): # load costs from dumpfile and update memory according to weight and iteration if weight <= 0: sys.stderr.write("Skipped loading of costs because the weight was %s but should have been > 0\n" % weight) return assert(weight > 0) if self.iteration == None and iteration != 0: print "Warning: continuing with empty memory" # update memory weights. memory is a weighted average across all runs self.memory_factor = self.memory_weight / (self.memory_weight + weight) self.memory_weight += weight self.iteration = iteration self.errors = [] # mark all edges as unseen for edges in self.intervals.itervalues(): for edgeMemory in edges.itervalues(): edgeMemory.seen = False # parse costs self.num_loaded = 0 parser = make_parser() parser.setContentHandler(self) parser.parse(dumpfile) # decay costs of unseen edges self.num_decayed = 0 for edges in self.intervals.itervalues(): for edgeMemory in edges.itervalues(): if edgeMemory.decay_unseen(self.memory_factor): self.num_decayed += 1
def __init__(self):
    """Set up the SAX parser with a ResourceHandler and reset state."""
    PluginObject.__init__(self, self.__class__.__name__)
    self.curHandler = ResourceHandler()
    self.parsedXML = ""
    self.totalResources = []
    self.parser = make_parser()
    self.parser.setContentHandler(self.curHandler)
def import_data(self, metadata, output):
    "Imports places data from OpenStreetMap"
    # Compare the previously stored ETag with the current one so a
    # full re-import could be skipped when the OSM dump is unchanged.
    old_etag = metadata.get('etag', '')
    request = AnyMethodRequest(self._url, method='HEAD')
    response = urllib2.urlopen(request)
    new_etag = response.headers['ETag'][1:-1]  # strip surrounding quotes
    self.output = output

    # NOTE(review): the 'False and' short-circuit disables the ETag
    # check, forcing an import on every run -- presumably left in for
    # debugging; confirm before relying on it.
    if False and new_etag == old_etag:
        output.write('OSM data not updated. Not updating.\n')
        return

    # Run the download/extraction pipeline and stream its stdout
    # straight into the SAX parser.
    p = subprocess.Popen([self.SHELL_CMD % self._url], shell=True,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         close_fds=True)
    parser = make_parser()
    parser.setContentHandler(
        OSMHandler(self._get_source(),
                   self._get_entity_types(),
                   # type_list=None falls back to self._osm_tags
                   lambda tags, type_list=None: self._find_types(
                       tags,
                       self._osm_tags if type_list is None else type_list),
                   output,
                   self._lat_north, self._lat_south,
                   self._lon_west, self._lon_east))
    parser.parse(p.stdout)

    # Disambiguate titles once per configured language.
    for lang_code, lang_name in settings.LANGUAGES:
        with override(lang_code):
            self.disambiguate_titles(self._get_source())

    return {
        'etag': new_etag,
    }
def create(self, request, vendor, name, version):
    """Attach the tags posted in 'tags_xml' to the catalogue resource
    identified by (vendor, name, version) on behalf of request.user,
    then return the tag listing in the requested format."""
    format = request.POST.get('format', 'default')

    # Pull the tag XML out of the request and parse it.
    tags_xml = request.POST.get('tags_xml')
    tags_xml = tags_xml.encode("utf-8")
    sax_parser = make_parser()
    handler = TagsXMLHandler()
    sax_parser.setContentHandler(handler)
    source = InputSource()
    source.setByteStream(StringIO(tags_xml))
    sax_parser.parse(source)

    # Look up the resource the tags belong to.
    resource = get_object_or_404(CatalogueResource, short_name=name,
                                 vendor=vendor, version=version)

    # Store each parsed tag for this user/resource pair.
    for tag in handler._tags:
        tag_resource(request.user, tag, resource)

    return get_tag_response(resource, request.user, format)
def readficheroconfig():
    """Parse the configuration file (FICHEROCONFIG) with the uaserver
    DTD handler and return the extracted tag dictionary."""
    parser = make_parser()
    ParserDTD = uaserver.ParserDTD()
    parser.setContentHandler(ParserDTD)
    # close the config file once parsed (it was left open before)
    config_file = open(FICHEROCONFIG)
    try:
        parser.parse(config_file)
    finally:
        config_file.close()
    return ParserDTD.get_tags()
def parse_file_sax(infile):
    """expects an open file as 'infile'
    this function takes care of closing the handle
    """
    from xml.sax import make_parser

    handler = SFLDXMLHandler()
    sax_parser = make_parser()
    sax_parser.setContentHandler(handler)
    sax_parser.parse(infile)
    infile.close()

    local, fetch = [], []
    struct = handler.getStructure()
    if struct[1].strip():
        # a URL was supplied: download the PDB and keep the local copy
        loc = DBPuppet.getURL(struct[1], "%s.pdb" % struct[0])
        local.append(loc)
    else:
        # no URL: let Chimera fetch the structure by id
        fetch.append("%s" % str(struct[0]))
    cmds = handler.getCmds()
    openInChimera(local, fetch, cmds)
def parseSelect(inFileName): infile = file(inFileName, 'r') topElementName = None parser = make_parser() documentHandler = SaxSelectorHandler() parser.setContentHandler(documentHandler) try: try: parser.parse(infile) except StopIteration: topElementName = documentHandler.getTopElementName() if topElementName is None: raise RuntimeError, 'no top level element' topElementName = topElementName.replace('-', '_').replace(':', '_') if topElementName not in globals(): raise RuntimeError, 'no class for top element: %s' % topElementName topElement = globals()[topElementName] infile.seek(0) doc = minidom.parse(infile) finally: infile.close() rootNode = doc.childNodes[0] rootObj = topElement.factory() rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None sys.stdout.write('<?xml version="1.0" ?>\n') rootObj.export(sys.stdout, 0) return rootObj
def proxyKickstart(self): try: fin = open('nodes.xml', 'r') except IOError: raise KickstartError, 'cannot kickstart external hosts' parser = make_parser() handler = NodesHandler() parser.setContentHandler(handler) parser.parse(fin) fin.close() try: server, client, path = \ handler.getServer(self.clientName) except TypeError: raise KickstartError, \ "unknown host (not found in nodes.xml)", \ self.clientName if not path: path = 'install' url = 'http://%s/%s/kickstart.cgi?client=%s' % (server, path, client) cmd = 'wget -qO- %s' % url for line in os.popen(cmd).readlines(): self.report.append(line[:-1]) return
def filter_capabilities(content, role_id, wms, wms_url, headers, proxies):
    """Return *content* (a WMS/WFS capabilities document) with every
    layer the role may not see filtered out, as a unicode string."""
    if proxies:  # pragma: no cover
        enable_proxies(proxies)

    wms_structure = _wms_structure(wms_url, headers.get("Host", None))

    # blacklist = all private layers minus those explicitly granted to
    # the role, expanded with their WMS sub-layers
    tmp_private_layers = list(get_private_layers())
    for name in get_protected_layers(role_id):
        tmp_private_layers.remove(name)
    private_layers = set()
    for layer in tmp_private_layers:
        private_layers.add(layer)
        if layer in wms_structure:
            private_layers.update(wms_structure[layer])

    sax_parser = sax.make_parser()
    result = StringIO()
    downstream_handler = XMLGenerator(result, "utf-8")
    filter_handler = _CapabilitiesFilter(
        sax_parser, downstream_handler,
        u"Layer" if wms else u"FeatureType",
        layers_blacklist=private_layers
    )
    # skip inclusion of DTDs
    sax_parser.setFeature(sax.handler.feature_external_ges, False)
    sax_parser.setFeature(sax.handler.feature_external_pes, False)
    filter_handler.parse(StringIO(content))
    return unicode(result.getvalue(), "utf-8")
def get(self, pFilename, pDictionary=None):
    """ Return a dictionary (or all dictionaries) stored in a certain
    XML file.

    @type pFilename: string
    @param pFilename: the file name where to get the dictionary (or
    all dictionaries).
    @type pDictionary: list
    @param pDictionary: the dictionary to be returned (if None, then
    all existing dictionaries stored in the XML file will be returned).
    @return: status, dictionaries
    @rtype: int (status), list (single dictionary) or dictionary
    (two or more dictionaries).
    """
    try:
        handler = _XMLHandler()
        xml_parser = make_parser()
        xml_parser.setContentHandler(handler)
        xml_parser.parse(str(pFilename))
        dictionary = handler.getDictionary()
        if pDictionary is None:
            # a single stored dictionary is unwrapped; several are
            # returned together
            if len(dictionary) == 1:
                return 0, dictionary[dictionary.keys()[0]]
            return 0, dictionary
        key = str(pDictionary)
        if dictionary.has_key(key):
            return 0, dictionary[key]
        return 0, []
    except Exception:
        return - 1, []
def limit_featurecollection(content, limit=200):
    """
    Parse a WFS FeatureCollection XML string and produce a similar
    string containing at most *limit* features (200 by default).
    """
    sax_parser = make_parser()

    source = BytesIO(content)
    input_source = InputSource()
    input_source.setByteStream(source)

    output = StringIO()
    generator = XMLGenerator(output, 'utf-8')

    limiter = _XMLFilterLimit(sax_parser, generator, limit=limit)
    limiter.parse(input_source)
    result = output.getvalue()

    source.close()
    output.close()
    return result
def run(self, params, args):
    """Read a tile-layout XML file (first positional arg), parse the
    display grid and insert one row per display into the tiles table.

    Aborts when no input file is given, the file cannot be opened, or
    tiles are already defined in the database.
    """
    if len(args):
        filename = args[0]
        try:
            layout_file = open(filename, 'r')
        except IOError:
            self.abort('cannot open file', filename)
        xml = string.join(layout_file.readlines())
        layout_file.close()  # was leaked before
    else:
        self.abort('no input')
    parser = make_parser()
    handler = LayoutHandler()
    parser.setContentHandler(handler)
    parser.feed(xml)
    parser.close()  # finish the document (close() was missing)
    displays = handler.getDisplays()
    (maxY, maxX) = handler.getGeometry()
    if self.db.execute('select * from tiles') > 0:
        self.abort('tiles already defined')
    # displays are stored column-major with y counted from the bottom;
    # re-map to screen coordinates (origin top-left) when inserting
    for x in range(0, maxX):
        for y in range(maxY - 1, -1, -1):
            i = (x * maxY) + y
            self.insertDisplay(displays[i], x, maxY - y - 1)
def read(self, filename): self.publications = [] self.authors = [] self.author_idx = {} self.min_year = None self.max_year = None handler = DocumentHandler(self) parser = make_parser() parser.setContentHandler(handler) infile = open(filename, "r") valid = True try: parser.parse(infile) except SAXException as e: valid = False print "Error reading file (" + e.getMessage() + ")" infile.close() for p in self.publications: if self.min_year == None or p.year < self.min_year: self.min_year = p.year if self.max_year == None or p.year > self.max_year: self.max_year = p.year self.authors_graph = self._build_authors_graph() return valid
def test_5027_1(self):
    # The xml prefix (as in xml:lang below) is reserved and bound by
    # definition to http://www.w3.org/XML/1998/namespace.  XMLGenerator
    # used to raise KeyError because that namespace was missing from an
    # internal dictionary; round-tripping this document exercises it.
    doc = StringIO(
        '<?xml version="1.0"?>'
        '<a:g1 xmlns:a="http://example.com/ns">'
        '<a:g2 xml:lang="en">Hello</a:g2>'
        '</a:g1>')

    parser = make_parser()
    parser.setFeature(feature_namespaces, True)

    result = StringIO()
    gen = XMLGenerator(result)
    parser.setContentHandler(gen)
    parser.parse(doc)

    expected = start + (
        '<a:g1 xmlns:a="http://example.com/ns">'
        '<a:g2 xml:lang="en">Hello</a:g2>'
        '</a:g1>')
    self.assertEqual(result.getvalue(), expected)
def __init__(self, arquivo_mapa): parser = sax.make_parser() self.tmxhandler = TMXHandler(arquivo_mapa) parser.setContentHandler(self.tmxhandler) print 'Carregando o mapa %s ...' % (arquivo_mapa,) parser.parse(arquivo_mapa) print 'Pronto !'
def parse(opts):
    """
    Entry point for XML Schema parsing into an OME Model.
    """
    # The following two statements are required to "prime" the
    # generateDS code and ensure we have reasonable namespace support.
    filenames = opts.args
    namespace = opts.namespace
    logging.debug("Namespace: %s" % namespace)
    set_type_constants(namespace)
    generateDS.generateDS.XsdNameSpace = namespace
    logging.debug("Type map: %s" % opts.lang.type_map)

    handler = XschemaHandler()
    schema_parser = sax.make_parser()
    schema_parser.setContentHandler(handler)
    for schema_file in filenames:
        schema_parser.parse(schema_file)

    root = handler.getRoot()
    if root is None:
        raise ModelProcessingError(
            "No model objects found, have you set the correct namespace?")
    root.annotate()
    return OMEModel.process(handler, opts)
def readTable(path, name, callback):
    """Parse <path>/<name>.xml through an SXTableHandler bound to
    *name* and *callback*."""
    table_parser = make_parser()
    table_parser.setContentHandler(SXTableHandler(name, callback))
    table_parser.parse(os.path.join(path, "%s.xml" % name))
def __init__(self, parent, config):
    """Build the SAX machinery: this object is its own content
    handler, with a SaxInput source and a dedicated ErrorHandler."""
    C3Object.__init__(self, parent, config)
    self.inputSource = SaxInput()
    self.errorHandler = ErrorHandler()
    self.parser = make_parser()
    self.parser.setErrorHandler(self.errorHandler)
    self.parser.setContentHandler(self)
def create_parser(self):
    """Return a fresh SAX parser wired to use this object as its
    content handler."""
    sax_parser = make_parser()
    sax_parser.setContentHandler(self)
    return sax_parser
def __init__(self, session, showSteps=True, showStepSlider=True, showList=True, showConfig=True):
    """Wizard screen: parses the wizard XML definition file(s) into
    self.wizard and wires up the step UI, config list and key actions.
    """
    Screen.__init__(self, session)

    self.isLastWizard = False  # can be used to skip a "goodbye"-screen in a wizard
    self.stepHistory = []

    # Parse every wizard XML file into the step dictionary.
    self.wizard = {}
    parser = make_parser()
    if not isinstance(self.xmlfile, list):
        self.xmlfile = [self.xmlfile]
    print("[Wizard] Reading ", self.xmlfile)
    wizardHandler = self.parseWizard(self.wizard)
    parser.setContentHandler(wizardHandler)
    for xmlfile in self.xmlfile:
        # relative paths are resolved against the enigma2 data dir
        if xmlfile[0] != '/':
            parser.parse(eEnv.resolve('${datadir}/enigma2/') + xmlfile)
        else:
            parser.parse(xmlfile)

    self.showSteps = showSteps
    self.showStepSlider = showStepSlider
    self.showList = showList
    self.showConfig = showConfig

    self.numSteps = len(self.wizard)
    self.currStep = self.getStepWithID("start") + 1

    self.timeoutTimer = eTimer()
    self.timeoutTimer.callback.append(self.timeoutCounterFired)

    self["text"] = Label()

    if showConfig:
        self["config"] = ConfigList([], session=session)

    if self.showSteps:
        self["step"] = Label()

    if self.showStepSlider:
        self["stepslider"] = Slider(1, self.numSteps)

    if self.showList:
        self.list = []
        self["list"] = List(self.list, enableWrapAround=True)
        self["list"].onSelectionChanged.append(self.selChanged)
        #self["list"] = MenuList(self.list, enableWrapAround = True)

    self.onShown.append(self.updateValues)

    self.configInstance = None
    self.currentConfigIndex = None

    Wizard.instance = self
    self.lcdCallbacks = []

    self.disableKeys = False

    # Global key bindings for the wizard.
    self["actions"] = NumberActionMap(
        ["WizardActions", "NumberActions", "ColorActions", "SetupActions", "InputAsciiActions", "KeyboardInputActions"],
        {
            "gotAsciiCode": self.keyGotAscii,
            "ok": self.ok,
            "back": self.back,
            "left": self.left,
            "right": self.right,
            "up": self.up,
            "down": self.down,
            "red": self.red,
            "green": self.green,
            "yellow": self.yellow,
            "blue": self.blue,
            "deleteBackward": self.deleteBackward,
            "deleteForward": self.deleteForward,
            "1": self.keyNumberGlobal,
            "2": self.keyNumberGlobal,
            "3": self.keyNumberGlobal,
            "4": self.keyNumberGlobal,
            "5": self.keyNumberGlobal,
            "6": self.keyNumberGlobal,
            "7": self.keyNumberGlobal,
            "8": self.keyNumberGlobal,
            "9": self.keyNumberGlobal,
            "0": self.keyNumberGlobal
        }, -1)

    # Virtual-keyboard action map, disabled until explicitly enabled.
    self["VirtualKB"] = NumberActionMap(
        ["VirtualKeyboardActions"],
        {
            "showVirtualKeyboard": self.KeyText,
        }, -2)

    self["VirtualKB"].setEnabled(False)
def parserDblpXml(source, result):
    """Run the DBLP SAX handler (writing to *result*) over *source*."""
    dblp_parser = make_parser()
    dblp_parser.setContentHandler(mHandler(result))
    dblp_parser.parse(source)
        self.last_comment = None

    def comment(self, comment):
        # remember only comments addressed to translators; the next
        # translatable attribute picks it up
        if comment.find("TRANSLATORS:") != -1:
            self.last_comment = comment

    def startElement(self, name, attrs):
        # harvest translatable attribute values, pairing each with the
        # most recent TRANSLATORS comment (consumed on first use)
        for x in ["text", "title", "value", "caption"]:
            try:
                attrlist.add((attrs[x], self.last_comment))
                self.last_comment = None
            except KeyError:
                pass

parser = make_parser()
attrlist = set()
contentHandler = parseXML(attrlist)
parser.setContentHandler(contentHandler)
# the lexical handler is only needed when comments are being collected
if not no_comments:
    parser.setProperty(property_lexical_handler, contentHandler)
# parse every argument; directories are scanned for *.xml files
for arg in sys.argv[1:]:
    if os.path.isdir(arg):
        for file in os.listdir(arg):
            if (file.endswith(".xml")):
                parser.parse(os.path.join(arg, file))
    else:
        parser.parse(arg)
def test_make_parser3(self): # Testing that make_parser can handle different types of # iterables. make_parser(['module']) make_parser(('module', )) make_parser({'module'}) make_parser(frozenset({'module'})) make_parser({'module': None}) make_parser(iter(['module']))
    fatalError("Failed to start Inkscape shell process")

# capture Inkscape's stderr on a background thread so the pipe never
# fills up and blocks the child process
inkscape_stderr = inkscape.stderr
inkscape_stderr_thread = Thread(target=stderr_reader,
                                args=(inkscape, inkscape_stderr))
inkscape_stdin_buf = []
inkscape_instances.append([
    inkscape, inkscape_stderr, inkscape_stderr_thread, inkscape_stdin_buf
])

# initialise results before actually attempting to parse the SVG file
svgBounds = SVGRect(0, 0, 0, 0)
rectList = []

# Try to parse the svg file
xmlParser = make_parser()
xmlParser.setFeature(feature_namespaces, 0)

# setup XML Parser with an SVGLayerHandler class as a callback parser ####
svgLayerHandler = SVGLayerHandler()
xmlParser.setContentHandler(svgLayerHandler)
try:
    xmlParser.parse(svgFilename)
except SAXParseException, e:
    fatalError(
        "Error parsing SVG file '%s': line %d,col %d: %s. If you're seeing this within inkscape, it probably indicates a bug that should be reported."
        % (svgFilename, e.getLineNumber(), e.getColumnNumber(),
           e.getMessage()))

# verify that the svg file actually contained some rectangles.
if len(svgLayerHandler.svg_rects) == 0:
        elif name == 'end':
            # dump the text collected for the closing element
            print " end: " + self.theContent + "."
        # leaving the element: stop collecting and clear the buffer
        if self.inContent:
            self.inContent = 0
            self.theContent = ""

    def characters(self, chars):
        # accumulate character data only while inside an element of
        # interest
        if self.inContent:
            self.theContent = self.theContent + chars

# --- Main prog
if len(sys.argv) < 2:
    print "Usage: python xml-parser-jokes.py <document>"
    print
    print " <document>: file name of the document to parse"
    sys.exit(1)

# Load parser and driver
JokeParser = make_parser()
JokeHandler = CounterHandler()
JokeParser.setContentHandler(JokeHandler)

# Ready, set, go!
xmlFile = open(sys.argv[1], "r")
JokeParser.parse(xmlFile)

print "Parse complete"
def __init__(self, topologyFile):
    """Read the residue topology from the open file *topologyFile*.

    The content is parsed with a TopologyHandler; the handler's residue
    list becomes self.residues.
    """
    handler = TopologyHandler()
    # (a stray sax.make_parser() call was removed -- its result was
    # discarded; sax.parseString constructs its own parser)
    sax.parseString(topologyFile.read(), handler)
    self.residues = handler.residues
def run(self):
    """Worker loop: pull audit-file paths off taskQueue, tail each file
    through ``praudit -x`` and feed the XML records to a SAX parser.

    Files are requeued when tail/praudit exits abnormally; a file whose
    tail ends because the file was rotated away is considered done.
    """
    global hostname
    global current_files
    try:
        while True:
            file = taskQueue.get()
            logging.info(f'{self} Processing file {file}')
            current_files.add(file)
            # block signals around the fork so the children do not
            # inherit our handlers
            mask = block_signals()
            # Open command pipeline
            p1 = subprocess.Popen(
                ['/usr/gnu/bin/tail', '-n', '0', '--follow=name', file],
                stdout=subprocess.PIPE,
                stdin=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
                bufsize=128000)
            time.sleep(1)
            if p1.poll() is not None:
                logging.error(f'Tail failed on {file}')
                taskQueue.task_done()
                continue
            p2 = subprocess.Popen(['/usr/sbin/praudit', '-x'],
                                  stdin=p1.stdout,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.DEVNULL,
                                  bufsize=128000)
            restore_signals(mask)
            parser = make_parser()
            handler = RecordHandler()
            parser.setContentHandler(handler)
            # Incrementally process the xml output from praudit
            for line in iter(p2.stdout.readline, b''):
                # logging.info(line)
                try:
                    parser.feed(line)
                except Exception:
                    print(traceback.format_exc())
                    raise
            r1 = p1.poll()
            p2.poll()
            current_files.remove(file)
            # In the normal case, the file would have been renamed by the audit rotation.
            # That will cause the tail to exit, and that will cause the praudit to exit.
            if r1 is None:
                # The tail is still running. Either praudit went bang or it has been deliberately
                # closed down. Either way, kill the tail and requeue the current file
                p1.kill()
                p1.wait()
                logging.info(
                    f'{self} Abnormal praudit exit on {file}, requeuing')
                taskQueue.put(file)
            else:
                # Check if the file still exists.
                check = Path(file)
                if check.is_file():
                    # Still here, so the tail went bang or was deliberately closed down.
                    logging.info(
                        f'{self} Abnormal tail exit on {file}, requeuing')
                    taskQueue.put(file)
                else:
                    logging.info(f'{self} Finished Processing file {file}')
            # Signal that we have finished processing the file
            taskQueue.task_done()
    except Exception as e:
        # Catch all
        logging.error(f'Task thread exiting because of exception : {e}')
        taskQueue.task_done()  # Just in case
# 元素开始调用 def startElement(self, tag, attributes): self.CurrentData = tag if tag == 'movie': print('***movie') title = attributes['title'] print('Title:' + title) def endElement(self, tag): if self.CurrentData == 'type': print('Type:' + self.type) def characters(self, content): if self.type == 'type': self.type = content if __name__ == '__main__': # 创建一个XMLReader parser = sax.make_parser() # 关闭命名空间 parser.setFeature(sax.handler.feature_namespaces, 0) # 重写ContextHandler handler = MovieHandler() parser.setContentHandler(handler) parser.parse('movies.xml')
def test_make_parser(self): # Creating a parser should succeed - it should fall back # to the expatreader p = make_parser(['xml.parsers.no_such_parser'])
def main(infile, outfile, offset):
    """Parse *infile*, writing offset-shifted routes to *outfile* via
    RouteReader."""
    out = open(outfile, "w")
    route_parser = make_parser()
    route_parser.setContentHandler(RouteReader(offset, out))
    route_parser.parse(infile)
def test_make_parser5(self): # Testing that make_parser can handle iterables with more than # one item. make_parser(['module1', 'module2']) make_parser(('module1', 'module2')) make_parser({'module1', 'module2'}) make_parser(frozenset({'module1', 'module2'})) make_parser({'module1': None, 'module2': None}) make_parser(iter(['module1', 'module2']))
def main(options):
    """Generate a SUMO trip file (<tripdefs>) from an O-D matrix.

    Python 2 code.  Reads the network, districts and matrix named in
    *options*, builds (or imports) an O-D connection table, then writes one
    <trip .../> line per generated vehicle to options.tripfile.
    Returns (odConnTable, vehIDtoODMap).
    """
    parser = make_parser()
    isBZ2 = False
    dataDir = options.datadir
    districts = os.path.join(dataDir, options.districtfile)
    matrix = os.path.join(dataDir, options.mtxfile)
    netfile = os.path.join(dataDir, options.netfile)
    print 'generate Trip file for:', netfile
    # Transparently handle bz2-compressed networks.
    if "bz2" in netfile:
        netfile = bz2.BZ2File(netfile)
        isBZ2 = True
    matrixSum = 0.
    tripList = []
    net = Net()
    odConnTable = {}
    vehIDtoODMap = {}
    sumolib.net.readNet(options.netfile, net=net)
    # NOTE(review): the net file is parsed here before any ContentHandler has
    # been installed on `parser` — this pass appears to be a no-op; confirm
    # whether a handler was meant to be set first.
    if isBZ2:
        parser.parse(StringIO.StringIO(netfile.read()))
        netfile.close()
    else:
        parser.parse(netfile)
    parser.setContentHandler(DistrictsReader(net))
    parser.parse(districts)
    matrixPshort, startVertices, endVertices, currentMatrixSum, begin, period = getMatrix(
        net, options.debug, matrix, matrixSum)[:6]
    for edge in net.getEdges():
        edge.helpacttime = 0.
    if options.debug:
        print len(net._edges), "edges read"
        print len(net._startVertices), "start vertices read"
        print len(net._endVertices), "target vertices read"
        print 'currentMatrixSum:', currentMatrixSum
    if options.getconns:
        # Build the O-D connection table with shortest-path searches from
        # every source connection node of every start district.
        if options.debug:
            print 'generate odConnTable'
        for start, startVertex in enumerate(startVertices):
            if startVertex._id not in odConnTable:
                odConnTable[startVertex._id] = {}
            for source in startVertex.sourceConnNodes:
                targets = net.getTargets()
                D, P = dijkstraPlain(source, targets)
                for end, endVertex in enumerate(endVertices):
                    if startVertex._id != endVertex._id and matrixPshort[
                            start][end] > 0.:
                        if endVertex._id not in odConnTable[startVertex._id]:
                            odConnTable[startVertex._id][endVertex._id] = []
                        net.checkRoute(startVertex, endVertex, start, end,
                                       P, odConnTable, source, options)
    else:
        # Reuse a previously generated table from the data directory.
        if options.debug:
            print 'import and use the given odConnTable'
        sys.path.append(options.datadir)
        from odConnTables import odConnTable
    # output trips
    if options.verbose:
        print 'output the trip file'
    vehID = 0
    subVehID = 0
    # Fixed seed so runs are reproducible.
    random.seed(42)
    matrixSum = 0.
    fouttrips = file(options.tripfile, 'w')
    fouttrips.write('<?xml version="1.0"?>\n')
    print >> fouttrips, """<!-- generated on %s by $Id: generateTripsXml.py 16005 2014-03-24 12:46:02Z cschmidt87 $ -->
""" % datetime.datetime.now()
    fouttrips.write("<tripdefs>\n")
    # Optionally scale the whole demand matrix.
    if options.demandscale != 1.:
        print 'demand scale %s is used.' % options.demandscale
        for start in range(len(startVertices)):
            for end in range(len(endVertices)):
                matrixPshort[start][end] *= options.demandscale
    # Generate vehicles for every O-D pair with positive demand.
    for start, startVertex in enumerate(startVertices):
        for end, endVertex in enumerate(endVertices):
            if startVertex._id != endVertex._id and matrixPshort[start][
                    end] > 0.:
                counts = 0.
                if options.odestimation:
                    if matrixPshort[start][end] < 1.:
                        counts, vehID, tripList, vehIDtoODMap = addVeh(
                            counts, vehID, begin, period, odConnTable,
                            startVertex, endVertex, tripList, vehIDtoODMap)
                    else:
                        matrixSum += matrixPshort[start][end]
                        while (counts < float(
                                math.ceil(matrixPshort[start][end])) and
                                (matrixPshort[start][end] - counts) > 0.5 and
                                float(subVehID) < matrixSum
                               ) or float(subVehID) < matrixSum:
                            counts, vehID, tripList, vehIDtoODMap = addVeh(
                                counts, vehID, begin, period, odConnTable,
                                startVertex, endVertex, tripList, vehIDtoODMap)
                            subVehID += 1
                else:
                    matrixSum += matrixPshort[start][end]
                    while (counts < float(math.ceil(matrixPshort[start][end])) and
                            (matrixPshort[start][end] - counts) > 0.5 and
                            float(vehID) < matrixSum
                           ) or float(vehID) < matrixSum:
                        counts, vehID, tripList, vehIDtoODMap = addVeh(
                            counts, vehID, begin, period, odConnTable,
                            startVertex, endVertex, tripList, vehIDtoODMap)
    if options.debug:
        print 'total demand:', matrixSum
        print vehID, 'trips generated'
    # Emit trips sorted by departure time.
    tripList.sort(key=operator.attrgetter('depart'))
    departpos = "free"
    # NOTE(review): a __main__ check inside a function is unusual — departpos
    # only honours options.departpos when the module is run as a script.
    if __name__ == "__main__":
        departpos = options.departpos
    for trip in tripList:
        fouttrips.write(' <trip id="%s" depart="%s" from="%s" to="%s" fromtaz="%s" totaz="%s" departlane="free" departpos="%s" departspeed="max"/>\n' \
            % (trip.label, trip.depart, trip.sourceEdge, trip.sinkEdge,
               trip.sourceDistrict, trip.sinkDistrict, departpos))
    fouttrips.write("</tripdefs>")
    fouttrips.close()
    return odConnTable, vehIDtoODMap
# regression test for SAX 2.0 -*- coding: utf-8 -*- # $Id$ from xml.sax import make_parser, ContentHandler, \ SAXException, SAXReaderNotAvailable, SAXParseException try: make_parser() except SAXReaderNotAvailable: # don't try to test this module if we cannot create a parser raise ImportError("no XML parsers available") from xml.sax.saxutils import XMLGenerator, escape, unescape, quoteattr, \ XMLFilterBase, prepare_input_source from xml.sax.expatreader import create_parser from xml.sax.handler import feature_namespaces from xml.sax.xmlreader import InputSource, AttributesImpl, AttributesNSImpl from cStringIO import StringIO import io import gc import os.path import shutil import test.test_support as support from test.test_support import findfile, run_unittest, TESTFN import unittest TEST_XMLFILE = findfile("test.xml", subdir="xmltestdata") TEST_XMLFILE_OUT = findfile("test.xml.out", subdir="xmltestdata") supports_unicode_filenames = True if not os.path.supports_unicode_filenames: try: support.TESTFN_UNICODE.encode(support.TESTFN_ENCODING)
def test_sf_1513611(self):
    """Regression test for http://www.python.org/sf/1513611: parsing a
    malformed document must raise SAXParseException, not crash."""
    from xml.sax import SAXParseException
    bad_document = StringIO("invalid")
    reader = make_parser()
    self.assertRaises(SAXParseException, reader.parse, bad_document)
def __init__(self, filename):
    """Initialise the entry list, then SAX-parse *filename* with this
    object installed as the content handler (callbacks fill self.entries)."""
    self.entries = []
    sax_reader = make_parser()
    sax_reader.setContentHandler(self)
    sax_reader.parse(filename)
def readDump(file, toCollect):
    """Parse *file* with a DumpReader configured for *toCollect* and return
    the populated DumpReader handler."""
    handler = DumpReader(toCollect)
    xml_reader = make_parser()
    xml_reader.setContentHandler(handler)
    xml_reader.parse(file)
    return handler
if __name__ == '__main__':
    # Python 2 demo driver: pretty-print the document named on the command
    # line, then re-parse it with three different SAX parsers under every
    # combination of namespace features and lexical/declaration handlers.
    import sys
    from xml.sax import make_parser
    from xml.dom.ext.reader import Sax2
    from xml.dom.ext import PrettyPrint
    from xml.sax.handler import feature_namespaces,\
        feature_namespace_prefixes, property_lexical_handler,\
        property_declaration_handler
    # Short aliases for the feature/property names used below.
    f1 = feature_namespaces
    f2 = feature_namespace_prefixes
    p1 = property_lexical_handler
    p2 = property_declaration_handler
    # NOTE(review): `file` shadows the Python 2 builtin.
    file = sys.argv[1]
    r = Sax2.Reader()
    f = open(file)
    doc = r.fromStream(f)
    print 'Initial document', doc, doc.__class__
    PrettyPrint(doc)
    # Try every feature/property on-off combination against each parser:
    # the DOM-driven Dom2SaxParser gets the parsed doc, the two driver-based
    # parsers get a (re-opened) file stream.
    for (val1, val2, val3, val4) in ((0, 0, 0, 0), (0, 1, 1, 1),
                                     (1, 0, 0, 0), (1, 1, 1, 1)):
        for p, d in ((Dom2SaxParser(), doc),
                     (make_parser(['xml.sax.drivers2.drv_pyexpat']), f),
                     (make_parser(['xml.sax.drivers2.drv_xmlproc']), f)):
            # Re-open the file for stream parsers (the old handle is at EOF).
            # (`not d is doc` is equivalent to `d is not doc`.)
            if not d is doc:
                d = open(file)
            _parse(p, d, ((f1, val1), (f2, val2)), ((p1, val3), (p2, val4)))
    f.close()
def loadXml(self, pathName):
    """Populate this object from the XML file at *pathName*, dispatching
    SAX events to a MangHandler bound to self."""
    reader = sax.make_parser()
    reader.setContentHandler(MangHandler(self))
    reader.parse(pathName)
def __init__(self):
    """Create a namespace-unaware SAX parser that reports its events to
    this object, and keep it on self.parser."""
    sax_parser = make_parser()
    # Namespace processing off: element names arrive as plain strings.
    sax_parser.setFeature(feature_namespaces, 0)
    sax_parser.setContentHandler(self)
    self.parser = sax_parser
def parsefile(filename):
    """Run *filename* through a SAX parser driven by a fresh tempHandler."""
    reader = make_parser()
    reader.setContentHandler(tempHandler())
    reader.parse(filename)
def test_make_parser4(self): # Testing that make_parser can handle empty iterables. make_parser([]) make_parser(tuple()) make_parser(set()) make_parser(frozenset()) make_parser({}) make_parser(iter([]))
def run(self, params, args):
    """Render the configuration graph for each requested host as dot output.

    For every hostname in *args*: look up its distribution and graph name in
    the database, parse all graph XML files with a GraphHandler primed with
    the host's (escaped) attributes, then emit the generated dot text via
    addOutput.
    """
    # In the future we should store the ARCH in the database
    # and allow the cgi/url to override the default setting.
    # When this happens we can do a db lookup instead of using
    # a flag and defaulting to the host architecture.
    (arch, basedir, landscape, size) = self.fillParams([
        ('arch', self.arch),
        ('basedir', ),
        ('landscape', 'n'),
        ('size', '100,100'),
    ])
    self.beginOutput()
    # Drawing options consumed later by createDotGraph.
    self.drawOrder = 0
    self.drawKey = 1
    self.drawLandscape = self.str2bool(landscape)
    self.drawSize = size
    for host in self.getHostnames(args):
        # NOTE(review): host is interpolated directly into the SQL string —
        # SQL-injection risk if hostnames are not trusted; consider a
        # parameterized query.
        self.db.execute("""select d.name, a.graph from nodes n, memberships m, distributions d, appliances a where n.membership=m.id and m.appliance=a.id and m.distribution=d.id and n.name='%s'""" % host)
        (dist, graph) = self.db.fetchone()
        distrodir = self.command('report.distro')
        self.basedir = os.path.join(distrodir.strip(), dist, arch, 'build')
        # An explicit basedir parameter overrides the computed one.
        if basedir:
            if not os.path.exists(basedir):
                self.abort('cannot read directory "%s"' % basedir)
            self.basedir = basedir
        graphdir = os.path.join(self.basedir, 'graphs', graph)
        if not os.path.exists(graphdir):
            self.abort('cannot read directory "%s"' % graphdir)
        parser = make_parser()
        attrs = self.db.getHostAttrs(host)
        for key in attrs:
            # escape attribute in case they have some weird char
            attrs[key] = rocks.util.escapeAttr(attrs[key])
        handler = rocks.profile.GraphHandler(attrs, {}, prune=False)
        # Feed every .xml file in the graph directory through one handler,
        # accumulating the full graph.
        for file in os.listdir(graphdir):
            root, ext = os.path.splitext(file)
            if ext == '.xml':
                path = os.path.join(graphdir, file)
                if not os.path.isfile(path):
                    continue
                fin = open(path, 'r')
                parser.setContentHandler(handler)
                parser.parse(fin)
                fin.close()
        # createDotGraph appears to expect the build dir as cwd; restore
        # the original cwd afterwards.
        cwd = os.getcwd()
        os.chdir(self.basedir)
        dot = self.createDotGraph(handler, self.readDotGraphStyles())
        os.chdir(cwd)
        for line in dot:
            self.addOutput(host, line)
    self.endOutput(padChar='')
def action_update_cpe():
    """Download the CPE 2.2 dictionary and synchronise it into CPE_VULNERS.

    Skips the update when the source's Last-Modified header matches the
    stored INFO timestamp.  Returns a summary dict with keys `items`,
    `time_delta` and `message`.

    BUG FIX: the update branch previously wrote `cpe_selected.title =
    item_title,` (and likewise for refs/cpe22) — the trailing commas stored
    1-tuples such as ('title',) instead of scalar values.  The bare
    `except:` around the download is also narrowed to `except Exception:`.
    """
    database.connect()
    INFO.create_table()
    CPE_VULNERS.create_table()
    start_time = time.time()
    parsed_items = []
    parser = make_parser()
    cpe_handler = CPEHandler()
    parser.setContentHandler(cpe_handler)
    source = SOURCES["cpe22"]
    try:
        data, response = get_file(getfile=source)
    except Exception:
        # Download failed: close the DB and report an empty result.
        print('Update Database CPE: Cant download file: {}'.format(source))
        database.close()
        return dict(
            items=0,
            time_delta=0,
            message='Update Database CPE: Cant download file: {}'.format(source)
        )
    last_modified = parse_datetime(response.headers['last-modified'], ignoretz=True)
    info, created = INFO.get_or_create(name="cpe")
    # Work out the timestamp of the last successful import; fall back to
    # "now" when the INFO row is new or empty (forces a comparison miss).
    if not created:
        if info.last_modified != "":
            info_last_modified = datetime.strptime(info.last_modified, '%Y-%m-%d %H:%M:%S')
        else:
            info_last_modified = datetime.strptime(
                datetime.now().strftime('%Y-%m-%d %H:%M:%S'), '%Y-%m-%d %H:%M:%S')
    else:
        info_last_modified = datetime.strptime(
            datetime.now().strftime('%Y-%m-%d %H:%M:%S'), '%Y-%m-%d %H:%M:%S')
    if info_last_modified != last_modified:
        info.last_modified = last_modified
        info.save()
        parser.parse(data)
        # Normalise the parsed entries: formatted CPE 2.3 id, scalar title,
        # rename `name` -> `cpe_2_2`, drop empty reference lists.
        for cpe in cpe_handler.cpe:
            cpe["id"] = to_string_formatted_cpe(cpe["name"])
            cpe['title'] = cpe['title'][0]
            cpe['cpe_2_2'] = cpe.pop('name')
            if not cpe['references']:
                cpe.pop('references')
            parsed_items.append(cpe)
        for item in progressbar(parsed_items, prefix="Update Database CPE: "):
            item_id = item["id"]
            item_title = item.get("title", "")
            item_refs = item.get("references", [])
            item_cpe22 = item.get("cpe_2_2", "")
            item_cpe23 = item_id
            cpe_selected = CPE_VULNERS.get_or_none(CPE_VULNERS.item == item_id)
            if cpe_selected is None:
                # New entry: insert.
                cpe_created = CPE_VULNERS(
                    item=item_id,
                    title=item_title,
                    refs=item_refs,
                    cpe22=item_cpe22,
                    cpe23=item_cpe23
                )
                cpe_created.save()
            elif (cpe_selected.title == item_title and
                    cpe_selected.refs == item_refs and
                    cpe_selected.cpe22 == item_cpe22 and
                    cpe_selected.cpe23 == item_cpe23):
                # Unchanged: nothing to do.
                pass
            else:
                # Changed: update in place (no trailing commas — see docstring).
                cpe_selected.title = item_title
                cpe_selected.refs = item_refs
                cpe_selected.cpe22 = item_cpe22
                cpe_selected.cpe23 = item_cpe23
                cpe_selected.save()
        stop_time = time.time()
        database.close()
        return dict(
            items=len(parsed_items),
            time_delta=stop_time - start_time,
            message="Update Database CPE: Complete."
        )
    database.close()
    return dict(
        items=0,
        time_delta=0,
        message="Update Database CPE: Not modified"
    )
def main():
    """Run the stochastic user-equilibrium (SUE) traffic assignment.

    Reads the network, districts and O-D matrices named in the module-level
    `options`, runs either the incremental or the clogit/lohse assignment per
    matrix, and writes routes (routes.rou.xml), a log file and statistics.

    FIX: the XML header written to foutroute had mangled quoting (the
    opening triple quote of the string literal was missing, a syntax
    error); restored as a proper triple-quoted format string.  A log string
    that had been split mid-sentence is rejoined.  No other behaviour
    changed.
    """
    # for measuring the required time for reading input files
    inputreaderstart = datetime.datetime.now()
    foutlog = open('%s_log.txt' % options.type, 'w')
    foutlog.write(
        'The stochastic user equilibrium traffic assignment will be executed with the %s model.\n' % options.type)
    foutlog.write(
        'All vehicular releasing times are determined randomly(uniform).\n')
    matrices = options.mtxpsfile.split(",")
    parser = make_parser()
    if options.verbose:
        print("Reading net")
        print('net file:', options.netfile)
    net = Net()
    sumolib.net.readNet(options.netfile, net=net)
    parser.setContentHandler(DistrictsReader(net))
    parser.parse(options.confile)
    if options.sigfile:
        parser.setContentHandler(ExtraSignalInformationReader(net))
        parser.parse(options.sigfile)
    foutlog.write('- Reading network: done.\n')
    foutlog.write('number of total startVertices:%s\n' % net.getstartCounts())
    foutlog.write('number of total endVertices:%s\n' % net.getendCounts())
    if options.verbose:
        print(net.getfullEdgeCounts(), "edges read (internal edges included)")
    if options.curvefile:
        updateCurveTable(options.curvefile)
    # Cap the assignment period at 16 hours for full-day matrices.
    if options.hours == 24.:
        assignHours = 16.
    else:
        assignHours = options.hours
    # Scale per-edge capacities to the assignment period.
    for edge in net.getEdges():
        if edge._lanes:
            edge.getCapacity()
            edge.getAdjustedCapacity(net)
            edge.estcapacity *= assignHours
            edge.getConflictLink()
    if options.dijkstra == 'boost':
        net.createBoostGraph()
    if options.verbose:
        print("after link reduction:", net.getfullEdgeCounts(), "edges read")
    # calculate link travel time for all district connectors
    getConnectionTravelTime(net._startVertices, net._endVertices)
    foutlog.write('- Initial calculation of link parameters : done.\n')
    # the required time for reading the network
    timeForInput(inputreaderstart)
    if options.debug:
        outputNetwork(net)
    # initialize the map for recording the number of the assigned vehicles
    AssignedVeh = {}
    # initialize the map for recording the number of the assigned trips
    AssignedTrip = {}
    smallDemand = []
    linkChoiceMap = {}
    odPairsMap = {}
    for start, startVertex in enumerate(net._startVertices):
        AssignedVeh[startVertex] = {}
        AssignedTrip[startVertex] = {}
        smallDemand.append([])
        for end, endVertex in enumerate(net._endVertices):
            AssignedVeh[startVertex][endVertex] = 0
            AssignedTrip[startVertex][endVertex] = 0.
            smallDemand[-1].append(0.)
    # initialization
    vehID = 0
    matrixSum = 0.0
    lohse = (options.type == "lohse")
    incremental = (options.type == "incremental")
    checkKPaths = False
    if not incremental and options.kPaths > 1:
        checkKPaths = True
    if not incremental:
        net.initialPathSet()
    starttime = datetime.datetime.now()
    # initialize the file for recording the routes
    if options.odestimation:
        net.getDetectedEdges(options.outputdir)
    else:
        foutroute = open('routes.rou.xml', 'w')
        print("""<?xml version="1.0"?>\n<!-- generated on %s by $Id$ -->\n<routes>""" % starttime, file=foutroute)  # noqa
    # for counter in range (0, len(matrices)):
    for counter, matrix in enumerate(matrices):
        # delete all vehicle information related to the last matrix for saving
        # the disk space
        vehicles = []
        iterInterval = 0
        matrixPshort, startVertices, endVertices, CurrentMatrixSum, begintime, assignPeriod, Pshort_EffCells, \
            matrixSum, smallDemandRatio = getMatrix(
                net, options.verbose, matrix, matrixSum, options.demandscale)
        options.hours = float(assignPeriod)
        smallDemandPortion = math.ceil(
            float(options.maxiteration) / 2. * smallDemandRatio)
        if float(smallDemandPortion) != 0.:
            iterInterval = math.ceil(
                float(options.maxiteration) / float(smallDemandPortion))
        departtime = begintime * 3600
        if options.verbose:
            print('the analyzed matrices:', counter)
            print('Begintime:', begintime, "O'Clock")
            print('departtime', departtime)
            print('Matrix und OD Zone already read for Interval', counter)
            print('CurrentMatrixSum:', CurrentMatrixSum)
        foutlog.write('Reading matrix and O-D zones: done.\n')
        foutlog.write(
            'Matrix und OD Zone already read for Interval:%s\n' % counter)
        foutlog.write('CurrentMatrixSum:%s\n' % CurrentMatrixSum)
        foutlog.write('number of current startVertices:%s\n' % len(startVertices))
        foutlog.write('number of current endVertices:%s\n' % len(endVertices))
        if options.odestimation:
            linkChoiceMap.clear()
            odPairsMap.clear()
            linkChoiceMap = initLinkChoiceMap(
                net, startVertices, endVertices, matrixPshort, linkChoiceMap,
                odPairsMap)
        # Reset all per-edge flow/time bookkeeping before this matrix.
        for edge in net.getEdges():
            edge.flow = 0.
            edge.helpflow = 0.
            edge.actualtime = edge.freeflowtime
            edge.helpacttime = edge.freeflowtime
            edge.fTT = 0.
            edge.TT = 0.
            edge.delta = 0.
            edge.helpacttimeEx = 0.
        # the number of origins, the umber of destinations and the number of
        # the OD pairs
        len(startVertices)
        len(endVertices)
        # output the origin and destination zones and the number of effective
        # OD pairs
        if options.debug:
            # matrixCounter)
            outputODZone(startVertices, endVertices, Pshort_EffCells, counter)
        if incremental:
            print('begin the incremental assignment!')
            iter = 0
            options.lamda = 0.
            while iter < options.maxiteration:
                foutlog.write(
                    '- Current iteration(not executed yet):%s\n' % iter)
                iter += 1
                # Assign the sub-unit ("small") demand only every
                # iterInterval-th iteration.
                if iterInterval != 0 and operator.mod(iter, iterInterval) == 0:
                    assignSmallDemand = True
                else:
                    assignSmallDemand = False
                for start, startVertex in enumerate(startVertices):
                    targets = set()
                    for end, endVertex in enumerate(endVertices):
                        if assignSmallDemand and matrixPshort[start][end] > 0. \
                                and matrixPshort[start][end] < 1.:
                            smallDemand[start][end] = matrixPshort[
                                start][end] / float(smallDemandPortion)
                        if matrixPshort[start][end] > 1. or (assignSmallDemand and smallDemand[start][end] > 0.):
                            targets.add(endVertex)
                    if len(targets) > 0:
                        if options.dijkstra == 'boost':
                            D, P = dijkstraBoost(
                                net._boostGraph, startVertex.boost)
                        elif options.dijkstra == 'plain':
                            D, P = dijkstraPlain(startVertex, targets)
                        elif options.dijkstra == 'extend':
                            D, P = dijkstra(startVertex, targets)
                        vehID, smallDemand, linkChoiceMap = doIncAssign(
                            net, vehicles, options.verbose,
                            options.maxiteration, options.odestimation,
                            endVertices, start, startVertex, matrixPshort,
                            smallDemand, D, P, AssignedVeh, AssignedTrip,
                            vehID, assignSmallDemand, linkChoiceMap, odPairsMap)
                if options.dijkstra != 'extend':
                    linkMap = net._fullEdges
                else:
                    linkMap = net._edges
                # Recompute link travel times after this iteration's loads.
                for edge in linkMap.itervalues():
                    edge.getActualTravelTime(options, False)
                    if options.dijkstra == 'boost':
                        edge.boost.weight = edge.helpacttime
        else:
            print('begin the', options.type, " assignment!")
            # initialization for the clogit and the lohse assignment model
            iter_outside = 1
            newRoutes = 1
            stable = False
            first = True
            # begin the traffic Assignment
            while newRoutes > 0:
                foutlog.write('- SUE iteration:%s\n' % iter_outside)
                # Generate the effective routes als intital path solutions,
                # when considering k shortest paths (k is defined by the user.)
                if checkKPaths:
                    checkPathStart = datetime.datetime.now()
                    newRoutes = net.calcKPaths(
                        options.verbose, options.kPaths, newRoutes,
                        startVertices, endVertices, matrixPshort,
                        options.gamma)
                    checkPathEnd = datetime.datetime.now() - checkPathStart
                    foutlog.write(
                        '- Time for finding the k-shortest paths: %s\n' % checkPathEnd)
                    foutlog.write(
                        '- Finding the k-shortest paths for each OD pair: done.\n')
                    if options.verbose:
                        print('iter_outside:', iter_outside)
                        print('number of k shortest paths:', options.kPaths)
                        print('number of new routes:', newRoutes)
                elif not checkKPaths and iter_outside == 1 and counter == 0:
                    print('search for the new path')
                    newRoutes = net.findNewPath(
                        startVertices, endVertices, newRoutes, matrixPshort,
                        options.gamma, lohse, options.dijkstra)
                checkKPaths = False
                if options.verbose:
                    print('iter_outside:', iter_outside)
                    print('number of new routes:', newRoutes)
                stable = False
                iter_inside = 1
                # Inner loop: iterate the SUE assignment until stable.
                while not stable:
                    if options.verbose:
                        print('iter_inside:', iter_inside)
                    stable = doSUEAssign(
                        net, options, startVertices, endVertices,
                        matrixPshort, iter_inside, lohse, first)
                    # The matrixPlong and the matrixTruck should be added when
                    # considering the long-distance trips and the truck trips.
                    if lohse:
                        stable = doLohseStopCheck(
                            net, options, stable, iter_inside,
                            options.maxiteration, foutlog)
                    iter_inside += 1
                    if options.verbose:
                        print('stable:', stable)
                newRoutes = net.findNewPath(
                    startVertices, endVertices, newRoutes, matrixPshort,
                    options.gamma, lohse, options.dijkstra)
                first = False
                iter_outside += 1
                if newRoutes < 3 and iter_outside > int((options.maxiteration) / 2):
                    newRoutes = 0
                if iter_outside > options.maxiteration:
                    print('The max. number of iterations is reached!')
                    foutlog.write(
                        'The max. number of iterations is reached!\n')
                    foutlog.write(
                        'The number of new routes and the parameter stable will be set to zero and ' +
                        'True respectively.\n')
                    print('newRoutes:', newRoutes)
                    stable = True
                    newRoutes = 0
            # update the path choice probability and the path flows as well as
            # generate vehicle data
            vehID = doSUEVehAssign(net, vehicles, options, counter,
                                   matrixPshort, startVertices, endVertices,
                                   AssignedVeh, AssignedTrip, vehID, lohse)
        # output the generated vehicular releasing times and routes, based on
        # the current matrix
        print('done with the assignment')
        # debug
        if options.odestimation:
            linkChoicesOutput(net, startVertices, endVertices, matrixPshort,
                              linkChoiceMap, odPairsMap, options.outputdir,
                              starttime)
        else:
            sortedVehOutput(vehicles, departtime, options, foutroute)
    if not options.odestimation:
        foutroute.write('</routes>\n')
        foutroute.close()
    # output the global performance indices
    assigntime = outputStatistics(net, starttime, len(matrices))
    foutlog.write(
        '- Assignment is completed and all required information is generated. ')
    foutlog.close()
    if options.verbose:
        print('Duration for traffic assignment:', assigntime)
        print('Total assigned vehicles:', vehID)
        print('Total number of the assigned trips:', matrixSum)
import xml.sax as _SAX from MyContentHandler import MyContentHandler parser = _SAX.make_parser() myHandler = MyContentHandler() parser.setContentHandler(myHandler) parser.parse('../../xml_files_windows/standard.xml') print myHandler.getElementContent("data")
# NOTE(review): the first statements below are the tail of an element-end
# handler method whose `def` line lies outside this chunk; indentation is
# reconstructed on that assumption — confirm against the full file.
        # Emit one HTML link line for the item just finished, then reset
        # the collection state.
        linea = "<a href='" + self.link + "'>" + self.title + "</a><br>" + "\n"
        self.htmlFile.write(linea)
        self.inContent = False
        self.theContent = ""

    def characters(self, chars):
        # Accumulate character data while inside an element of interest.
        if self.inContent:
            self.theContent = self.theContent + chars

# --- Main prog
# Python 2 command-line driver: require a document argument.
if len(sys.argv) < 2:
    print "Usage: python xml-parser-barrapunto.py <document>"
    print
    print " <document>: file name of the document to parse"
    sys.exit(1)
# Load parser and driver
theParser = make_parser()
theHandler = myContentHandler()
theParser.setContentHandler(theHandler)
# Ready, set, go!
xmlFile = open(sys.argv[1], "r")
theParser.parse(xmlFile)
print "Parse complete"
def __init__(self):
    """Initialise the base class, then attach a fresh SAX parser as
    self.parser for later use."""
    super().__init__()
    sax_parser = make_parser()
    self.parser = sax_parser
def parse(self, input_file):
    """SAX-parse *input_file* (filename or stream), dispatching all events
    to this handler instance."""
    reader = make_parser()
    reader.setContentHandler(self)
    reader.parse(input_file)