Example #1
 def _xmlcorewrite(self,xmltree,root):
     #xml prolog: always use.*********************************
     #standalone, DOCTYPE, processing instructions: only possible in python >= 2.7 or if encoding is utf-8/ascii
     if sys.version >= '2.7.0' or self.ta_info['charset'] in ['us-ascii','utf-8'] or ET.VERSION >= '1.3.0':
         if self.ta_info['indented']:
             indentstring = '\n'
         else:
             indentstring = ''
         if self.ta_info['standalone']:
             standalonestring = 'standalone="%s" '%(self.ta_info['standalone'])
         else:
             standalonestring = ''
         processing_instruction = ET.ProcessingInstruction('xml', 'version="%s" encoding="%s" %s'%(self.ta_info['version'],self.ta_info['charset'], standalonestring))
         self._outstream.write(ET.tostring(processing_instruction) + indentstring) #do not use encoding here. gives double xml prolog; possibly because ET.ElementTree.write is used again by write()
         #doctype /DTD **************************************
         if self.ta_info['DOCTYPE']:
             self._outstream.write('<!DOCTYPE %s>'%(self.ta_info['DOCTYPE']) + indentstring)
         #processing instructions (other than prolog) ************
         if self.ta_info['processing_instructions']:
             for eachpi in self.ta_info['processing_instructions']:
                 processing_instruction = ET.ProcessingInstruction(eachpi[0], eachpi[1])
                 self._outstream.write(ET.tostring(processing_instruction) + indentstring) #do not use encoding here. gives double xml prolog; possibly because ET.ElementTree.write is used again by write()
     #indent the xml elements
     if self.ta_info['indented']:
         self._botsindent(root)
     #write tree to file; this is different for different python/elementtree versions
     if sys.version < '2.7.0' and ET.VERSION < '1.3.0':
         xmltree.write(self._outstream,encoding=self.ta_info['charset'])
     else:
         xmltree.write(self._outstream,encoding=self.ta_info['charset'],xml_declaration=False)
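Note: The example above hand-writes the XML prolog as a ProcessingInstruction serialized with ET.tostring, then suppresses the declaration that ElementTree.write would otherwise emit, so the prolog can carry standalone="...". A minimal, self-contained sketch of the same pattern on Python 3 (the settings below are placeholders, not the Bots ta_info values):

    import io
    import xml.etree.ElementTree as ET

    charset = 'utf-8'        # placeholder for ta_info['charset']
    standalone = 'yes'       # placeholder for ta_info['standalone']

    root = ET.Element('root')
    ET.SubElement(root, 'child').text = 'hello'
    tree = ET.ElementTree(root)

    out = io.BytesIO()
    # Write the prolog by hand so standalone="..." can be included.
    pi = ET.ProcessingInstruction(
        'xml', 'version="1.0" encoding="%s" standalone="%s"' % (charset, standalone))
    out.write(ET.tostring(pi) + b'\n')
    # Suppress the library's own declaration to avoid a double prolog.
    tree.write(out, encoding=charset, xml_declaration=False)
    print(out.getvalue().decode(charset))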
Example #2
 def _xmlcorewrite(self,xmltree,root):
     #xml prolog: always use.*********************************
     #standalone, DOCTYPE, processing instructions: only possible in python >= 2.7 or if encoding is utf-8/ascii
     if sys.version >= '2.7.0' or self.ta_info['charset'] in ['us-ascii','utf-8'] or ET.VERSION >= '1.3.0':
         if self.ta_info['indented']:
             indentstring = '\n'
         else:
             indentstring = ''
         if self.ta_info['standalone']:
             standalonestring = 'standalone="%s" '%(self.ta_info['standalone'])
         else:
             standalonestring = ''
         PI = ET.ProcessingInstruction('xml', 'version="%s" encoding="%s" %s'%(self.ta_info['version'],self.ta_info['charset'], standalonestring))        
         self._outstream.write(ET.tostring(PI) + indentstring) #do not use encoding here. gives double xml prolog; possibly because ET.ElementTree.write is used again by write()
         #doctype /DTD **************************************
         if self.ta_info['DOCTYPE']:
             self._outstream.write('<!DOCTYPE %s>'%(self.ta_info['DOCTYPE']) + indentstring)
         #processing instructions (other than prolog) ************
         if self.ta_info['processing_instructions']:
             for pi in self.ta_info['processing_instructions']:
                 PI = ET.ProcessingInstruction(pi[0], pi[1])
                 self._outstream.write(ET.tostring(PI) + indentstring) #do not use encoding here. gives double xml prolog; possibly because ET.ElementTree.write is used again by write()
     #indent the xml elements
     if self.ta_info['indented']:
         self.botsindent(root)
     #write tree to file; this is different for different python/elementtree versions
     if sys.version < '2.7.0' and ET.VERSION < '1.3.0':
         xmltree.write(self._outstream,encoding=self.ta_info['charset'])
     else:
         xmltree.write(self._outstream,encoding=self.ta_info['charset'],xml_declaration=False)
Example #3
 def test_cet():
     """cElementTree"""
     _table = cet.Element("table")
     for row in table:
         tr = cet.SubElement(_table, "tr")
         for c in row.values():
             cet.SubElement(tr, "td").text = str(c)
     cet.tostring(_table)
Example #4
 def test_cet():
     """cElementTree"""
     _table = cet.Element("table")
     for row in table:
         tr = cet.SubElement(_table, "tr")
         for c in row.values():
             cet.SubElement(tr, "td").text = str(c)
     cet.tostring(_table)
Example #5
 def test_cet(): 
     """cElementTree"""
     _table = cet.Element('table')
     for row in table:
         tr = cet.SubElement(_table, 'tr')
         for c in row.values():
             cet.SubElement(tr, 'td').text=str(c)
     cet.tostring(_table)
Example #6
 def test_cet():
     """cElementTree"""
     _table = cet.Element('table')
     for row in table:
         tr = cet.SubElement(_table, 'tr')
         for c in row.values():
             cet.SubElement(tr, 'td').text = str(c)
     cet.tostring(_table)
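Note: Examples 3 through 6 are the same cElementTree benchmark snippet; they assume a module-level `table` (an iterable of row dicts) and `cet` bound to the cElementTree module, neither of which is shown. A self-contained sketch that runs on modern Python, where cElementTree has been merged into xml.etree.ElementTree (the sample `table` data is made up for illustration):

    import xml.etree.ElementTree as cet  # cElementTree is part of ElementTree on Python 3

    # Sample rows standing in for the benchmark's module-level `table`.
    table = [
        {'id': 1, 'name': 'alpha'},
        {'id': 2, 'name': 'beta'},
    ]

    def test_cet():
        """cElementTree"""
        _table = cet.Element('table')
        for row in table:
            tr = cet.SubElement(_table, 'tr')
            for c in row.values():
                cet.SubElement(tr, 'td').text = str(c)
        return cet.tostring(_table)

    print(test_cet())  # b'<table><tr><td>1</td><td>alpha</td></tr><tr><td>2</td><td>beta</td></tr></table>'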
Example #7
    def build_header(self, message, queue_name, CMQC, now):

        if self.needs_mcd:
            self.folders["mcd"] = _mcd
            mcd = self._pad_folder(etree.tostring(self.folders["mcd"]))
            mcd_len = len(mcd)
        else:
            mcd_len = 0

        self.add_jms(message, queue_name, now)
        self.add_usr(message)

        jms = self._pad_folder(etree.tostring(self.folders["jms"]))

        if "usr" in self.folders:
            usr = self._pad_folder(etree.tostring(self.folders["usr"]))
            usr_len = len(usr)
        else:
            usr_len = 0

        jms_len = len(jms)

        total_header_length = 0
        total_header_length += MQRFH2JMS.FIXED_PART_LENGTH

        # Each folder has a 4-byte header describing its length,
        # hence the "len(self.folders) * 4" below.
        variable_part_length = len(
            self.folders) * 4 + mcd_len + jms_len + usr_len

        total_header_length += variable_part_length

        buff = StringIO()

        buff.write(CMQC.MQRFH_STRUC_ID)
        buff.write(_WMQ_MQRFH_VERSION_2)
        buff.write(pack("!l", total_header_length))
        buff.write(_WMQ_DEFAULT_ENCODING_WIRE_FORMAT)
        buff.write(_WMQ_DEFAULT_CCSID_WIRE_FORMAT)
        buff.write(CMQC.MQFMT_STRING)
        buff.write(_WMQ_MQRFH_NO_FLAGS_WIRE_FORMAT)
        buff.write(_WMQ_DEFAULT_CCSID_WIRE_FORMAT)

        if self.needs_mcd:
            buff.write(pack("!l", mcd_len))
            buff.write(mcd)

        buff.write(pack("!l", jms_len))
        buff.write(jms)

        if "usr" in self.folders:
            buff.write(pack("!l", usr_len))
            buff.write(usr)

        value = buff.getvalue()
        buff.close()

        return value
Example #8
    def build_header(self, message, queue_name, CMQC, now):
        
        if self.needs_mcd:
            self.folders["mcd"] = _mcd
            mcd = self._pad_folder(etree.tostring(self.folders["mcd"]))
            mcd_len = len(mcd)
        else:
            mcd_len = 0
            
        self.add_jms(message, queue_name, now)
        self.add_usr(message)

        jms = self._pad_folder(etree.tostring(self.folders["jms"]))

        if "usr" in self.folders:
            usr = self._pad_folder(etree.tostring(self.folders["usr"]))
            usr_len = len(usr)
        else:
            usr_len = 0

        jms_len = len(jms)

        total_header_length = 0
        total_header_length += MQRFH2JMS.FIXED_PART_LENGTH

        # Each folder has a 4-byte header describing its length,
        # hence the "len(self.folders) * 4" below.
        variable_part_length = len(self.folders) * 4 + mcd_len + jms_len + usr_len

        total_header_length += variable_part_length

        buff = StringIO()

        buff.write(CMQC.MQRFH_STRUC_ID)
        buff.write(_WMQ_MQRFH_VERSION_2)
        buff.write(pack("!l", total_header_length))
        buff.write(_WMQ_DEFAULT_ENCODING_WIRE_FORMAT)
        buff.write(_WMQ_DEFAULT_CCSID_WIRE_FORMAT)
        buff.write(CMQC.MQFMT_STRING)
        buff.write(_WMQ_MQRFH_NO_FLAGS_WIRE_FORMAT)
        buff.write(_WMQ_DEFAULT_CCSID_WIRE_FORMAT)
        
        if self.needs_mcd:
            buff.write(pack("!l", mcd_len))
            buff.write(mcd)
            
        buff.write(pack("!l", jms_len))
        buff.write(jms)

        if "usr" in self.folders:
            buff.write(pack("!l", usr_len))
            buff.write(usr)

        value = buff.getvalue()
        buff.close()

        return value
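Note: In both build_header variants, each folder ("mcd", "jms", "usr") is serialized with etree.tostring, padded, and written to the buffer behind a 4-byte big-endian length produced by pack("!l", ...). A small sketch of just that length-prefixing step, independent of the MQRFH2 constants (the folder content and the pad-to-4-bytes rule below are illustrative assumptions, since _pad_folder itself is not shown):

    from io import BytesIO
    from struct import pack
    import xml.etree.ElementTree as etree

    def pad_folder(folder, boundary=4):
        # Assumed padding rule: extend the serialized folder to a multiple of `boundary` bytes.
        if len(folder) % boundary:
            folder += b' ' * (boundary - len(folder) % boundary)
        return folder

    jms = etree.Element('jms')
    etree.SubElement(jms, 'Dst').text = 'queue:///TEST.QUEUE'

    buff = BytesIO()
    payload = pad_folder(etree.tostring(jms))
    buff.write(pack('!l', len(payload)))  # 4-byte big-endian length, as in build_header
    buff.write(payload)
    print(buff.getvalue())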
Example #9
    def _write(self, node):
        ''' write normal XML messages (no envelope)'''
        xmltree = ET.ElementTree(self._node2xml(node))
        f = botslib.opendata(self.ta_info['filename'], "wb")
        if self.ta_info['indented']:
            indentstring = '\n'
        else:
            indentstring = ''

        #xml prolog: always use.*********************************
        #syntax parameter controls if stand-alone is used within prolog.
        #in ET 1.3.0: if standalone is to be used: should suppress ET-generated prolog - explicit parameter
        #in ET 1.2.6: always generates prolog if encoding != utf-8/ascii. SO: can not use standalone for encoding !=utf-8,ascii
        if ET.VERSION not in ['1.2.6', '1.0.6'] or self.ta_info['charset'] in [
                'us-ascii', 'utf-8'
        ]:
            if self.ta_info['standalone']:
                standalonestring = 'standalone="%s" ' % (
                    self.ta_info['standalone'])
            else:
                standalonestring = ''
            PI = ET.ProcessingInstruction(
                'xml', 'version="%s" encoding="%s" %s' %
                (self.ta_info['version'], self.ta_info['charset'],
                 standalonestring))
            f.write(
                ET.tostring(PI) + indentstring
            )  #do not use encoding here. gives double xml prolog; possibly because ET.ElementTree.write is used again by write()

        #doctype /DTD **************************************
        if self.ta_info['DOCTYPE']:
            f.write('<!DOCTYPE %s>' % (self.ta_info['DOCTYPE']) + indentstring)

        #processing instructions (other than prolog) ************
        if self.ta_info['processing_instructions']:
            for pi in self.ta_info['processing_instructions']:
                PI = ET.ProcessingInstruction(pi[0], pi[1])
                f.write(
                    ET.tostring(PI) + indentstring
                )  #do not use encoding here. gives double xml prolog; possibly because ET.ElementTree.write is used again by write()

        #indent the xml elements
        if self.ta_info['indented']:
            root = xmltree.getroot()
            self.botsindent(root)

        if ET.VERSION <= '1.2.6':
            xmltree.write(f, encoding=self.ta_info['charset'])
        else:
            xmltree.write(f,
                          encoding=self.ta_info['charset'],
                          xml_declaration=False)
Example #10
def goInsert(doc):
    
    results=""
    tableIdDesc=""
    xmlDocResults=""
    
    if(dbSchemaChecker(doc)==1):
        if(debugProg["flag"]==1):
            debug_Info= "The XML document is " + ElementTree.tostring(doc)+"\n"
            if(debugProg["print"]==1): print debug_Info
            writefile(debug_Info,debugProg["file"])
            debug_Info=""
        tableIdDesc,results,dbtable= finder(doc)

        # Specific Errors which are returned to the client
        if(tableIdDesc=="-" and results=="-" and dbtable=="-"):
            xmlDocResults="<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
            xmlDocResults=xmlDocResults + "<RELATIONS name=\"ERROR\">\n\t<REL name=\"ERROR_1\">\n\t\t<ATT name=\"error_message\">Column or Type mismatch in XML and Schema file."
            xmlDocResults=xmlDocResults + "</ATT>\n\t</REL>\n</RELATIONS>\n"
        else:
            xmlDocResults = makeXMLReply(tableIdDesc,results,dbtable)
    else:
        # Specific Errors which are returned to the client
        xmlDocResults="<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
        xmlDocResults=xmlDocResults + "<RELATIONS name=\"ERROR\">\n\t<REL name=\"ERROR_1\">\n\t\t<ATT name=\"error_message\">Datatype mismatch in files."
        xmlDocResults=xmlDocResults + "</ATT>\n\t</REL>\n</RELATIONS>\n"
	
    return xmlDocResults
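Note: goInsert assembles its error reply by string concatenation. The same RELATIONS/REL/ATT document could also be produced with ElementTree and tostring, in line with the other examples here; a sketch of that alternative (not the original code):

    import xml.etree.ElementTree as ElementTree

    relations = ElementTree.Element('RELATIONS', name='ERROR')
    rel = ElementTree.SubElement(relations, 'REL', name='ERROR_1')
    att = ElementTree.SubElement(rel, 'ATT', name='error_message')
    att.text = 'Column or Type mismatch in XML and Schema file.'
    # Passing encoding='UTF-8' makes tostring emit an XML declaration as well.
    print(ElementTree.tostring(relations, encoding='UTF-8'))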
Example #11
class HtmlDom:
    
    def __init__(self, url):
        try:
            f = file(url)
            data = f.read()
            f.close()
        except IOError, e:
            try:
                result = fetch(url, agent=MOZILLA_AGENT)
                data = result['data']
            except:
                raise IOError, 'invalid URL'
        
        # create parser
        parser = tidy.TreeBuilder()
        parser.feed(data)
        xmlText = _etree.tostring(parser.close())
        
        #create the DOM
        reader = PyExpat.Reader()
        self.dom = reader.fromString(xmlText)
        
        self.nss = {u'html': XHTML_NAMESPACE}
        self.context = xml.xpath.Context.Context(self.dom, processorNss=self.nss)
Example #12
def msg_from_iterable(elems):
    """
    create full valid xml message for solr (adding data)
    """
    message = ElementTree.Element("add")
    map(message.append, map(_to_document, elems))
    return ElementTree.tostring(message)
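Note: msg_from_iterable relies on a _to_document helper that is not shown, and on map() for its side effect of appending; on Python 3, map() is lazy, so the appends would only happen once the iterator is consumed. A self-contained sketch with an explicit loop and an illustrative _to_document that emits Solr-style <field name="..."> children (the field layout is an assumption, not taken from the original module):

    import xml.etree.ElementTree as ElementTree

    def _to_document(elem):
        # Illustrative stand-in: turn a dict into a Solr <doc> with one <field> per key.
        doc = ElementTree.Element("doc")
        for name, value in elem.items():
            ElementTree.SubElement(doc, "field", name=name).text = str(value)
        return doc

    def msg_from_iterable(elems):
        """create full valid xml message for solr (adding data)"""
        message = ElementTree.Element("add")
        for elem in elems:          # explicit loop instead of map(), which is lazy on Python 3
            message.append(_to_document(elem))
        return ElementTree.tostring(message)

    print(msg_from_iterable([{"id": 1, "title": "hello"}]))
    # b'<add><doc><field name="id">1</field><field name="title">hello</field></doc></add>'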
Example #13
    def processor(self, content):
        if len(dstations) == 0:
            root = ElementTree.XML(content)
            stations = root.findall('.//station')
            for station in stations:
                if station.find('.//country').text == 'NL':
                    code = station.find('.//code').text
                    if code not in dstations:
                        dstations[code] = {'alias': [], 'defaultname': '', 'locationX': '', 'locationY': ''}
                    if station.find('.//alias').text == 'true':
                        dstations[code]['alias'].append(station.find('.//name').text)
                    else:
                        dstations[code]['defaultname'] = station.find('.//name').text
                        dstations[code]['locationX'] = station.find('.//lat').text
                        dstations[code]['locationY'] = station.find('.//long').text
                        dstations[code]['alias'].append(station.find('.//name').text)

        root = ElementTree.Element('stations')
        root.attrib['timestamp'] = str(int(time.time()))

        for station in dstations.values():
            for alias in station['alias']:
                sub = ElementTree.SubElement(root, 'station')
                sub.attrib['locationX'] = station['locationX']
                sub.attrib['locationY'] = station['locationY']
                sub.attrib['defaultname'] = station['defaultname']
                sub.text = alias

        web.header('Content-Type', 'application/xml')
        return ElementTree.tostring(root)
Example #14
 def write(self, element):
     self._flush()
     indent(element, self.indentLevel)
     element.tail = element.tail[:-self.indentLevel * 2]
     self.out.write(self.indentLevel * "  " +
                    ElementTree.tostring(element, "utf-8"))
     self.lastElement = None
Example #15
 def write(self, element):
     self._flush()
     indent(element, self.indentLevel)
     if element.tail != None:
         element.tail = element.tail[:-self.indentLevel * 2]
     self.out.write(self.indentLevel * "  " + ElementTree.tostring(element, "utf-8"))
     self.lastElement = None
Example #16
def dict2xml(d):
    ''' convert python dictionary to xml.
    '''
    def makenode(tag, content):
        node = ET.Element(tag)
        if not content:
            pass  #empty element
        elif isinstance(content, basestring):
            node.text = content
        elif isinstance(content, list):
            node.tag = tag + 's'  #change node tag
            for element in content:
                node.append(makenode(tag, element))
        elif isinstance(content, dict):
            for key, value in content.items():
                node.append(makenode(key, value))
        else:
            node.text = repr(content)
        return node

    assert isinstance(d, dict) and len(d) == 1
    for key, value in d.items():
        node = makenode(key, value)
    botslib.indent_xml(node)
    return ET.tostring(node)
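Note: dict2xml expects a dictionary with exactly one key (the root tag); string values become text, lists become repeated children under a pluralized wrapper tag, and nested dicts recurse. The original targets Python 2 (basestring) and relies on botslib.indent_xml. A rough Python 3 sketch of the same idea, with the indentation step omitted, plus a usage example:

    import xml.etree.ElementTree as ET

    def dict2xml_py3(d):
        """Python 3 sketch of dict2xml: str instead of basestring, no botslib indenting."""
        def makenode(tag, content):
            node = ET.Element(tag)
            if not content:
                pass                            # empty element
            elif isinstance(content, str):
                node.text = content
            elif isinstance(content, list):
                node.tag = tag + 's'            # pluralize the wrapper tag
                for element in content:
                    node.append(makenode(tag, element))
            elif isinstance(content, dict):
                for key, value in content.items():
                    node.append(makenode(key, value))
            else:
                node.text = repr(content)
            return node

        assert isinstance(d, dict) and len(d) == 1
        (key, value), = d.items()
        return ET.tostring(makenode(key, value))

    print(dict2xml_py3({'order': {'number': '42', 'line': ['first', 'second']}}))
    # b'<order><number>42</number><lines><line>first</line><line>second</line></lines></order>'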
Example #17
def update_cix_file(mgr, path):
    log.info("convert `%s' to pretty CIX 2.0", path)
    cix = open(path, 'r').read()
    tree = tree_from_cix(cix) # converts to CIX 2.0
    tree = pretty_tree_from_tree(tree)
    new_cix = ET.tostring(tree)

    _run("p4 edit %s" % path)
    open(path, 'w').write(new_cix)
Example #18
def update_cix_file(mgr, path):
    log.info("convert `%s' to pretty CIX 2.0", path)
    cix = open(path, 'r').read()
    tree = tree_from_cix(cix)  # converts to CIX 2.0
    tree = pretty_tree_from_tree(tree)
    new_cix = ET.tostring(tree)

    _run("p4 edit %s" % path)
    open(path, 'w').write(new_cix)
Example #19
    def _make_printable(self, tutdir, up_to_root=2):
        endpath = tutdir
        tutdir = os.path.join(self.srcdirs[0], tutdir)
        import cElementTree as elementtree
        masterdoc = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<?python import printable ?>
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:py="http://purl.org/kid/ns#" py:extends="printable">
<head>
 <meta content="text/html; charset=UTF-8" http-equiv="content-type" />
 <link rel="stylesheet" type="text/css" href="../../default.css" py:attrs="href=root+'default.css'"/>
 <link type="text/css" rel="stylesheet" href="../../sh/SyntaxHighlighter.css" py:attrs="href=root+'sh/SyntaxHighlighter.css'"></link> 
 <title>TurboGears: 20 Minute Wiki Tutorial</title>
</head>
<body>
"""
        docs = os.listdir(tutdir)
        docs.sort()
        for doc in docs:
            if not doc.endswith(".html"):
                continue
            log.info("combining %s" % doc)
            tree = elementtree.parse(os.path.join(tutdir, doc))
            body = tree.find("{http://www.w3.org/1999/xhtml}body")
            map(body.remove, body.findall("{http://www.w3.org/1999/xhtml}script"))
            bodytext = elementtree.tostring(body)
            bodytext = bodytext.replace("</html:body>", "")
            bodytext = bodytext.replace('<html:body xmlns:html="http://www.w3.org/1999/xhtml">', "")
            masterdoc += bodytext
            
        masterdoc += """<script src="../../sh/shCore.js" py:attrs="src=root+'sh/shCore.js'"></script>
<script src="../../sh/shBrushPython.js" py:attrs="src=root+'sh/shBrushPython.js'"></script>
<script src="../../sh/shBrushXml.js" py:attrs="src=root+'sh/shBrushXml.js'"></script>
<script src="../../sh/shBrushJScript.js" py:attrs="src=root+'sh/shBrushJScript.js'"></script>
<script language="javascript">
	dp.SyntaxHighlighter.HighlightAll('code');
</script>
</body></html>"""
        masterdoc = masterdoc.replace("html:", "")
        template = kid.Template(source=masterdoc, root="../" * up_to_root)
        template.serializer = self.serializer
        
        destend = os.path.join(self.destdir, endpath)
        if not os.path.exists(destend):
            os.makedirs(destend)
        outfn = os.path.join(destend, "printable.html")
        print "combined output: %s" % outfn
        outfile = open(outfn, "w")
        masterdoc = template.serialize(encoding=self.encoding)
        masterdoc = masterdoc.replace("$${", "${")
        outfile.write(masterdoc)
        outfile.close()
        self.currentfiles.add(outfn)
        
Example #20
def getSettings():
    global debugProg
    global db_Settings
    global Soap_Server_Settings

    if not debugProg:
        debugProg={}
        debugProg["flag"]=0
        debugProg["file"]=""
        debugProg["print"]=0
        db_Settings={}
        Soap_Server_Settings={}
        
    ##    aXMLfile='XSM-configuration.xml'
        aXMLfile=CONF_FILE
        
        doc = ElementTree.parse(aXMLfile).getroot()
    
    # Go through the CONF and SET for the settings
        for node in doc.findall('CONF'):
            for nodeSet in node.findall('SET'): 
                if(nodeSet.get('name')=="debugProg" and nodeSet.get('value')=="true" and nodeSet.get('file')!=""):
                    debugProg["flag"]=1
                    debugProg["file"]=nodeSet.get('file')
                    if(nodeSet.get('print_out')=="true"):
                        debugProg["print"]=1
                    else:
                        debugProg["print"]=0
                    
                if(nodeSet.get('name')=="debugProg" and nodeSet.get('value')=="false"):
                    debugProg["flag"]=0
                    debugProg["file"]=""
                    debugProg["print"]=0
                    
                if(nodeSet.get('name')=="dbip" and nodeSet.get('value')!=""): db_Settings["dbip"]=nodeSet.get('value')
                if(nodeSet.get('name')=="dbnm" and nodeSet.get('value')!=""): db_Settings["dbnm"]=nodeSet.get('value')
                if(nodeSet.get('name')=="dbuser" and nodeSet.get('value')!=""): db_Settings["dbuser"]=nodeSet.get('value')
                if(nodeSet.get('name')=="dbpass" and nodeSet.get('value')!=""): db_Settings["dbpass"]=nodeSet.get('value')
                if(nodeSet.get('name')=="dbencod" and nodeSet.get('value')!=""): db_Settings["dbencod"]=nodeSet.get('value')
                if(nodeSet.get('name')=="dbunicod" and nodeSet.get('value')!=""): db_Settings["dbunicod"]=nodeSet.get('value')
                
                if(nodeSet.get('name')=="databaseSchema" and nodeSet.get('value')!=""): db_Settings["databaseSchema"]=nodeSet.get('value')
    
                if(nodeSet.get('name')=="Soap_Server_IP" and nodeSet.get('value')!=""): Soap_Server_Settings["Soap_Server_IP"]=nodeSet.get('value')
                if(nodeSet.get('name')=="Soap_Server_Port" and nodeSet.get('value')!=""): Soap_Server_Settings["Soap_Server_Port"]=nodeSet.get('value')
    
        if(debugProg["flag"]==1):
            debug_Info= "Getting settings from configuration file... [%s]\n" %(aXMLfile) 
            debug_Info= debug_Info + "The settings document is " + ElementTree.tostring(doc) + "\n"
            debug_Info= debug_Info +  "Settings accepted -\n\tSoap Server: " + str(Soap_Server_Settings) + "\n\t Database setting: " + str(db_Settings) + " \n\tDebug Flag: " + str(debugProg) + "\n"
            if(debugProg["print"]==1): print debug_Info
            writefile(debug_Info,debugProg["file"])
            debug_Info=""
Example #21
 def _write(self,node):
     ''' write normal XML messages (no envelope)'''
     xmltree = ET.ElementTree(self._node2xml(node))
     f = botslib.opendata(self.ta_info['filename'],"wb")
     if self.ta_info['indented']:
         indentstring = '\n'
     else:
         indentstring = ''
     
     #xml prolog: always use.*********************************
     #syntax parameter controls if stand-alone is used within prolog.
     #in ET 1.3.0: if standalone is to be used: should suppress ET-generated prolog - explicit parameter
     #in ET 1.2.6: always generates prolog if encoding != utf-8/ascii. SO: can not use standalone for encoding !=utf-8,ascii
     if ET.VERSION not in ['1.2.6','1.0.6'] or self.ta_info['charset'] in ['us-ascii','utf-8']:
         if self.ta_info['standalone']:
             standalonestring = 'standalone="%s" '%(self.ta_info['standalone'])
         else:
             standalonestring = ''
         PI = ET.ProcessingInstruction('xml', 'version="%s" encoding="%s" %s'%(self.ta_info['version'],self.ta_info['charset'], standalonestring))        
         f.write(ET.tostring(PI) + indentstring) #do not use encoding here. gives double xml prolog; possibly because ET.ElementTree.write is used again by write()
         
     #doctype /DTD **************************************
     if self.ta_info['DOCTYPE']:
         f.write('<!DOCTYPE %s>'%(self.ta_info['DOCTYPE']) + indentstring)
     
     #processing instructions (other than prolog) ************
     if self.ta_info['processing_instructions']:
         for pi in self.ta_info['processing_instructions']:
             PI = ET.ProcessingInstruction(pi[0], pi[1])
             f.write(ET.tostring(PI) + indentstring) #do not use encoding here. gives double xml prolog; possibly because ET.ElementTree.write is used again by write()
     
     #indent the xml elements
     if self.ta_info['indented']:
         root = xmltree.getroot()
         self.botsindent(root)
     
     if ET.VERSION <= '1.2.6':
         xmltree.write(f,encoding=self.ta_info['charset'])
     else:
         xmltree.write(f,encoding=self.ta_info['charset'],xml_declaration=False)
Example #22
def goJoin(doc):
    #global myDoc
    myDoc=''
    global tableDB
    tableDB=''

    global resultsIdCounter
    global fieldsDB
    fieldsDB=''
    global rowWhere
    rowWhere=''
    global tableFKeys
    tableFKeys=[]
    global tableFieldsG
    tableFieldsG=[]

    global fkTable
    fkTable=[]
    

    resultsIdCounter=0


    if(debugProg["flag"]==1):
        debug_Info= "The XML document is " + ElementTree.tostring(doc)+"\n"
        if(debugProg["print"]==1): print debug_Info
        writefile(debug_Info,debugProg["file"])
        debug_Info=""

    myDoc="<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
    myDoc=myDoc+"<RELATIONS command=\"JOIN_RESULTS\">\n"

    myDoc=myDoc+"<REL name=\'RESULTS_ID\' value=\'" + str(resultsIdCounter) + "\'>\n"    

    finderJoin(doc,'0','none')
    resultsIdCounter+=1
    #if (replacedOp!="none"): makeTheSelect(dbTable,rowWhere)
    #print " " + rowWhere + " =-=-=- " +fieldsDB + " *** " + str(tableDB) + "^^^^^^^^" + str(tableFKeys) + str(tableFieldsG)

    for n in range(len(tableFKeys)):
        rowWhere=valueConcat(rowWhere," AND ", tableFKeys[n])
        print rowWhere +  tableFKeys[n]
    myDoc=makeTheJoin(tableDB,fieldsDB,rowWhere,myDoc)
    
    myDoc=myDoc+"</REL>\n"
    myDoc=myDoc+"<REL name=\"TOTAL_RESULTS\">" + str(resultsIdCounter)
    myDoc=myDoc+"</REL>\n"

    myDoc=myDoc+"</RELATIONS>\n"

    return myDoc        
Example #23
    def _send_update(self, *args, **kwargs):
        """Send an update request to Solr.

        Solr commits are made only on deletion.

        Takes a single argument: the AMQP message that was received.
        """
        try:
            log.info("Processing update request")
            msg = args[0]
            updates = json.loads(msg.body)
            solr = SolrConnection(self.solr_uri)
            if updates["type"] == "updated":
                add = ET.Element("add")
                for update in updates["data"]:
                    doc = ET.SubElement(add, "doc")
                    for fields in update:
                        # There should only be one pair
                        # FIXME: move to a dictionary structure
                        for k, v in fields.items():
                            SolrUpdater.xml_field(doc, solr.escapeKey(k), solr.escapeVal(v))
                log.debug("Sending update to Solr: " + ET.tostring(add))
                solr.doUpdateXML(ET.tostring(add))
            elif updates["type"] == "deleted":
                for id in updates["data"]:
                    log.debug("Deleting document with id '%s'" % id)
                    solr.delete(id)
                solr.commit()
            elif updates["type"] == "deleted_db":
                db_name = updates["data"]
                log.info("Deleting indexes for database '%s'" % db_name)
                solr.deleteByQuery("_db:%s" % db_name)
                solr.commit()
            else:
                log.warning("Unrecognized update type: '%s'" % updates["type"])
        except Exception:
            log.exception("Unexpected exception")
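Note: The update handler builds an <add><doc>...</doc></add> payload and serializes it with ET.tostring before posting it to Solr; SolrUpdater.xml_field is not shown, but from the call site it presumably appends one <field name="..."> child per key/value pair. A hedged sketch of such a helper and the resulting document (the helper here is inferred, not the original):

    import xml.etree.ElementTree as ET

    def xml_field(doc, name, value):
        # Inferred from the call site: one <field name="..."> child per key/value pair.
        field = ET.SubElement(doc, "field", name=name)
        field.text = value
        return field

    add = ET.Element("add")
    doc = ET.SubElement(add, "doc")
    xml_field(doc, "id", "42")
    xml_field(doc, "_db", "example_db")
    print(ET.tostring(add))
    # b'<add><doc><field name="id">42</field><field name="_db">example_db</field></doc></add>'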
Example #24
def goSelect(doc):
    #global myDoc
    myDoc=''

    if(debugProg["flag"]==1):
        debug_Info= "The XML document is " + ElementTree.tostring(doc)+"\n"
        if(debugProg["print"]==1): print debug_Info
        writefile(debug_Info,debugProg["file"])
        debug_Info=""

    myDoc="<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
    myDoc=myDoc+"<RELATIONS command=\"SELECT_RESULTS\">\n"
    myDoc = finderSelect(doc,myDoc)
    myDoc=myDoc+"</RELATIONS>\n"

    return myDoc        
Example #25
    def view(self, id, format='html'):
        try:
            vote = self._get_vote(id, format, check_owner=False)
            
            if format == 'json':
                response.content_type = "text/javascript"
                return vote.toJSON()
            elif format == 'xml':
                response.content_type = "text/xml"
                xml = vote.toXML()
                return etree.tostring(xml, encoding='UTF-8')

            c.vote = vote
            return render("vote.mako")
        except StatusException, se:
            return se.message
Example #26
    def __PrettyPrintXml(self, doc, level=0):
        """Return a pretty-printed version of the XML document.

    Args:
      doc: str XML document.
      level: int Level of prettiness, defaults to 0. If -1, remove prettiness.

    Returns:
      str Pretty-printed version of the XML document.
    """
        # Make sure we have a valid doc to work with.
        if Utils.IsHtml(doc):
            return doc

        try:
            if self.__xml_parser == PYXML:
                dom = minidom.parseString(doc)
                pretty_doc = dom.toprettyxml(indent=' ', encoding='UTF-8')
            elif self.__xml_parser == ETREE:
                tree = etree.fromstring(doc)
                self.__Indent(tree)
                pretty_doc = etree.tostring(tree, 'UTF-8')
        except (ExpatError, SyntaxError):
            # If there was a problem with loading XML message into a DOM, return
            # original XML message.
            return doc

        # Adjust prettiness of data values in the XML document.
        #
        # Before:  <operations>
        #            0
        #          </operations>
        #
        # After:   <operations>0</operations>
        pattern = re.compile('\n\s+\n')
        pretty_doc = pattern.sub('\n', pretty_doc)
        groups = re.findall('>(\n\s+(.*?)\n\s+)</', pretty_doc, re.M)
        for old, new in groups:
            if old and new and (new.find('<') > -1 or new.find('>') > -1):
                continue
            pretty_doc = pretty_doc.replace(old, new)

        if level == -1:
            pattern = re.compile('>\s+<')
            pretty_doc = pattern.sub('><', pretty_doc)
        return pretty_doc.strip('\n')
Example #27
  def __PrettyPrintXml(self, doc, level=0):
    """Return a pretty-printed version of the XML document.

    Args:
      doc: str XML document.
      level: int Level of prettiness, defaults to 0. If -1, remove prettiness.

    Returns:
      str Pretty-printed version of the XML document.
    """
    # Make sure we have a valid doc to work with.
    if Utils.IsHtml(doc):
      return doc

    try:
      if self.__xml_parser == PYXML:
        dom = minidom.parseString(doc)
        pretty_doc = dom.toprettyxml(indent=' ', encoding='UTF-8')
      elif self.__xml_parser == ETREE:
        tree = etree.fromstring(doc)
        self.__Indent(tree)
        pretty_doc = etree.tostring(tree, 'UTF-8')
    except (ExpatError, SyntaxError):
      # If there was a problem with loading XML message into a DOM, return
      # original XML message.
      return doc

    # Adjust prettiness of data values in the XML document.
    #
    # Before:  <operations>
    #            0
    #          </operations>
    #
    # After:   <operations>0</operations>
    pattern = re.compile('\n\s+\n')
    pretty_doc = pattern.sub('\n', pretty_doc)
    groups = re.findall('>(\n\s+(.*?)\n\s+)</', pretty_doc, re.M)
    for old, new in groups:
      if old and new and (new.find('<') > -1 or new.find('>') > -1):
        continue
      pretty_doc = pretty_doc.replace(old, new)

    if level == -1:
      pattern = re.compile('>\s+<')
      pretty_doc = pattern.sub('><', pretty_doc)
    return pretty_doc.strip('\n')
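Note: Both __PrettyPrintXml variants call a private self.__Indent(tree) before etree.tostring(tree, 'UTF-8'). That helper is not shown; the widely used ElementTree indentation recipe below does the same job by inserting whitespace into text and tail attributes in place (Python 3.9+ ships ET.indent for this), so it is a plausible equivalent rather than the original:

    import xml.etree.ElementTree as etree

    def indent(elem, level=0):
        """Classic ElementTree pretty-print recipe: set whitespace text/tails in place."""
        i = "\n" + level * "  "
        if len(elem):
            if not elem.text or not elem.text.strip():
                elem.text = i + "  "
            for child in elem:
                indent(child, level + 1)
                if not child.tail or not child.tail.strip():
                    child.tail = i + "  "
            if not child.tail or not child.tail.strip():
                child.tail = i          # last child closes back to the parent's level
        else:
            if level and (not elem.tail or not elem.tail.strip()):
                elem.tail = i

    tree = etree.fromstring('<operations><op>0</op></operations>')
    indent(tree)
    print(etree.tostring(tree, encoding='unicode'))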
Example #28
def fstatus(message, format='html', status='success', http_code=200):
    if http_code not in [200, 302] and status == 'success':
        status = 'error'
    response.status_int = http_code
    if format == 'xml' or format == 'atom' or format == 'rss':
        xml = etree.Element('status')
        etree.SubElement(xml, 'status').text = status
        etree.SubElement(xml, 'message').text = message
        return etree.tostring(xml, encoding='UTF-8')
    elif format == 'json':
        return json.dumps({
            'status': status,
            'message': message
        })
    else:
        c.status = status
        c.message = message
        return render('status.mako')
Example #29
def dict2xml(d):
    ''' convert python dictionary to xml.
    '''
    def makenode(tag,content):
        node = ET.Element(tag)
        if not content:
            pass    #empty element
        elif isinstance(content, basestring):
            node.text = content
        elif isinstance(content, list):
            node.tag = tag + 's'    #change node tag
            for element in content:
                node.append(makenode(tag, element))
        elif isinstance(content, dict):
            for key,value in content.items():
                node.append(makenode(key, value))
        else: 
            node.text = repr(content)
        return node
    assert isinstance(d, dict) and len(d) == 1
    for key,value in d.items():
        node = makenode(key,value)
    botslib.indent_xml(node)
    return ET.tostring(node)
Example #30
def _testOneInputFile(self, fpath, tags=None):
    _debug = False  # Set to true to dump status info for each test run.

    infile = os.path.join(gInputsDir, fpath) # input
    outfile = os.path.join(gOutputsDir, fpath+'.cix') # expected output
    tmpfile = os.path.join(gTmpDir, fpath+'.cix') # actual output
    if not os.path.exists(os.path.dirname(tmpfile)):
        os.makedirs(os.path.dirname(tmpfile))
    errfile = os.path.join(gOutputsDir, fpath+'.error')  # expected error
    # An options file is a set of kwargs for the buf.scan()
    # method call. One key-value pair per-line like this:
    #   key=value
    # Whitespace is stripped off the value.
    optsfile = os.path.join(gInputsDir, fpath+'.options') # input options
    
    if _debug:
        print()
        print("*"*50, "codeintel '%s'" % fpath)

    # Set standard options:
    opts = {"mtime": "42"}

    # Determine input options to use, if any.
    #XXX Not used. Drop it.
    if os.path.exists(optsfile):
        for line in open(optsfile, 'r').read().splitlines(0):
            name, value = line.split('=', 1)
            value = value.strip()
            try: # allow value to be a type other than string
                value = eval(value)
            except Exception:
                pass
            opts[name] = value
        if _debug:
            print("*"*50, "options")
            pprint.pprint(opts)

    # Scan the file, capturing stdout and stderr and any possible
    # error.
    # - To allow testing from different dirs (resulting in changing
    #   path strings, we normalize the <file path="..."> value and any
    #   <scope ilk="blob" src="..."> attributes).
    oldStdout = sys.stdout
    oldStderr = sys.stderr
    sys.stdout = StringIO()
    sys.stderr = StringIO()
    try:
        try:
            lang = None
            if tags and "python3" in tags:
                lang = "Python3"
            buf = self.mgr.buf_from_path(infile, lang=lang)
            buf.scan(**opts)
            tree = buf.tree

            # Normalize paths.
            relnorm_infile = infile[len(dirname(gInputsDir))+1:]
            absnorm_infile = infile
            relnorm_infile = relnorm_infile.replace('\\', '/')
            absnorm_infile = absnorm_infile.replace('\\', '/')
            for file_elem in tree:
                file_elem.set("path", relnorm_infile)
                for blob_elem in file_elem:
                    if blob_elem.get("ilk") != "blob": continue
                    norm_src = normpath(blob_elem.get("src"))
                    norm_src = norm_src.replace('\\', '/')
                    if norm_src in (relnorm_infile, absnorm_infile):
                        blob_elem.set("src", relnorm_infile)

            tree = pretty_tree_from_tree(tree)
            # Due to the dynamic nature of the ciler errors (which often
            # includes the source code line numbers), it's difficult to check
            # that the errors are identical, so we work around this by just
            # taking the first 30 characters of the error.
            cile_error = tree[0].get("error")
            if cile_error and fpath.endswith(".js"):
                tree[0].set("error", len(cile_error) < 30 and cile_error or (cile_error[:30] + "..."))
            cix = ET.tostring(tree)

        except CodeIntelError as ex:
            error = traceback.format_exc()
        else:
            error = None
            if isinstance(cix, six.text_type):
                with io.open(tmpfile, mode="wt", encoding="utf-8") as fout:
                    fout.write(cix)
            else:
                with open(tmpfile, mode="wb") as fout:
                    fout.write(cix)
    finally:
        stdout = sys.stdout.getvalue()
        stderr = sys.stderr.getvalue()
        sys.stdout = oldStdout
        sys.stderr = oldStderr
    if _debug:
        print("*"*50, "stdout")
        print(stdout)
        print("*"*50, "stderr")
        print(stderr)
        print("*"*50, "error")
        print(str(error))
        print("*" * 50)

    generateMissing = False
    if not os.path.exists(outfile) and generateMissing:
        with io.open(outfile, mode='wt', encoding='utf-8') as fout:
            with io.open(tmpfile, mode='rt', encoding='utf-8') as ftmp:
                fout.write(ftmp.read())

    # Verify that the results are as expected.
    if os.path.exists(outfile) and error:
        self.fail("scanning '%s' raised an error but success was "
                  "expected:\n%s" % (_encode_for_stdout(fpath), indent(error)))
    elif os.path.exists(outfile):
        # Convert the <file path="..."/> to the native directory separator.
        def to_native_sep(match):
            path = match.group(2).replace("\\", os.sep).replace("/", os.sep)
            return match.group(1)+path+match.group(3)
        path_pat = re.compile(r'(<file .*?path=")(.*?)(".*?>)', re.S)

        # Note that we don't really care about line endings here, so we read
        # both files in universal newlines mode (i.e. translate to \n)
        # and normalize '&#10;', '&#13;' and '&apos;'
        with io.open(outfile, mode='rt', encoding='utf-8') as fout:
            expected = path_pat.sub(to_native_sep, fout.read())
            expected = expected.replace('&#xA;', '&#10;').replace('&#xD;', '&#13;').replace('&apos;', '\'')
        with io.open(tmpfile, mode='rt', encoding='utf-8') as ftmp:
            actual = path_pat.sub(to_native_sep, ftmp.read())
            actual = actual.replace('&#xA;', '&#10;').replace('&#xD;', '&#13;').replace('&apos;', '\'')
        
        if expected != actual:
            do_fail = True
            # Useful temporary thing while XML output format is changing.
            #if os.stat("../support/xmldiff.py"):
            #    rc = os.system('python ../support/xmldiff.py "%s" "%s"' % (outfile, tmpfile))
            #    if rc == 0:
            #        do_fail = False
            if do_fail:
                diff = list(difflib.ndiff(expected.splitlines(1),
                                          actual.splitlines(1)))
                diff = _diffContext(diff, 2)
                if diff:
                    error_str = "%r != %r:\n --- %s\n +++ %s\n%s" \
                                % (outfile, tmpfile, outfile, tmpfile,
                                   ''.join(diff))
                    if gMaxDiffOutput > 0 and gMaxNumLines > 0:
                        if len(error_str) > gMaxDiffOutput:
                            error_lines = error_str.split("\n")
                            if len(error_lines) > gMaxNumLines:
                                error_lines = error_lines[:gMaxNumLines] + ["..."]
                            if gMaxLineLength > 0:
                                error_str = "\n".join([len(x) > gMaxLineLength and x[:gMaxLineLength] or x
                                                   for x in error_lines])
                            else:
                                error_str = "\n".join(error_lines)
                    self.fail(_encode_for_stdout(error_str))
    elif os.path.exists(errfile):
        # There is no reference output file. This means that processing
        # this file is expected to fail.
        expectedError = open(errfile, 'r').read()
        actualError = str(error)
        self.failUnlessEqual(actualError.strip(), expectedError.strip())
    else:
        self.fail("No reference output file or error file for '%s'." % infile)

    # Ensure next test file gets a clean codeintel.
    toDelete = []
    for modname in sys.modules:
        if modname == "codeintel" or modname.startswith("codeintel."):
            toDelete.append(modname)
    for modname in toDelete:
        del sys.modules[modname]
Example #31
def _testOneInputFile(self, fpath, tags=None):
    _debug = False  # Set to true to dump status info for each test run.

    infile = os.path.join(gInputsDir, fpath)  # input
    outfile = os.path.join(gOutputsDir, fpath + '.cix')  # expected output
    tmpfile = os.path.join(gTmpDir, fpath + '.cix')  # actual output
    if not os.path.exists(os.path.dirname(tmpfile)):
        os.makedirs(os.path.dirname(tmpfile))
    errfile = os.path.join(gOutputsDir, fpath + '.error')  # expected error
    # An options file is a set of kwargs for the buf.scan()
    # method call. One key-value pair per-line like this:
    #   key=value
    # Whitespace is stripped off the value.
    optsfile = os.path.join(gInputsDir, fpath + '.options')  # input options

    if _debug:
        print
        print "*" * 50, "codeintel '%s'" % fpath

    # Set standard options:
    opts = {"mtime": "42"}

    # Determine input options to use, if any.
    # XXX Not used. Drop it.
    if os.path.exists(optsfile):
        for line in open(optsfile, 'r').read().splitlines(0):
            name, value = line.split('=', 1)
            value = value.strip()
            try:  # allow value to be a type other than string
                value = eval(value)
            except Exception:
                pass
            opts[name] = value
        if _debug:
            print "*" * 50, "options"
            pprint.pprint(opts)

    # Scan the file, capturing stdout and stderr and any possible
    # error.
    # - To allow testing from different dirs (resulting in changing
    #   path strings, we normalize the <file path="..."> value and any
    #   <scope ilk="blob" src="..."> attributes).
    oldStdout = sys.stdout
    oldStderr = sys.stderr
    sys.stdout = StringIO.StringIO()
    sys.stderr = StringIO.StringIO()
    try:
        try:
            lang = None
            if tags and "python3" in tags:
                lang = "Python3"
            buf = self.mgr.buf_from_path(infile, lang=lang)
            buf.scan(**opts)
            tree = buf.tree

            # Normalize paths.
            relnorm_infile = infile[len(dirname(gInputsDir)) + 1:]
            absnorm_infile = infile
            relnorm_infile = relnorm_infile.replace('\\', '/')
            absnorm_infile = absnorm_infile.replace('\\', '/')
            for file_elem in tree:
                file_elem.set("path", relnorm_infile)
                for blob_elem in file_elem:
                    if blob_elem.get("ilk") != "blob":
                        continue
                    norm_src = normpath(blob_elem.get("src"))
                    norm_src = norm_src.replace('\\', '/')
                    if norm_src in (relnorm_infile, absnorm_infile):
                        blob_elem.set("src", relnorm_infile)

            tree = pretty_tree_from_tree(tree)
            # Due to the dynamic nature of the ciler errors (which often
            # includes the source code line numbers), it's difficult to check
            # that the errors are identical, so we work around this by just
            # taking the first 30 characters of the error.
            cile_error = tree[0].get("error")
            if cile_error and fpath.endswith(".js"):
                tree[0].set(
                    "error",
                    len(cile_error) < 30 and cile_error
                    or (cile_error[:30] + "..."))
            cix = ET.tostring(tree)

        except CodeIntelError, ex:
            error = traceback.format_exc()
        else:
Example #32
def _testOneInputFile(self, fpath, tags=None):
    _debug = False  # Set to true to dump status info for each test run.

    infile = os.path.join(gInputsDir, fpath)  # input
    outfile = os.path.join(gOutputsDir, fpath+'.cix')  # expected output
    tmpfile = os.path.join(gTmpDir, fpath+'.cix')  # actual output
    if not os.path.exists(os.path.dirname(tmpfile)):
        os.makedirs(os.path.dirname(tmpfile))
    errfile = os.path.join(gOutputsDir, fpath+'.error')  # expected error
    # An options file is a set of kwargs for the buf.scan()
    # method call. One key-value pair per-line like this:
    #   key=value
    # Whitespace is stripped off the value.
    optsfile = os.path.join(gInputsDir, fpath+'.options')  # input options

    if _debug:
        print
        print "*"*50, "codeintel '%s'" % fpath

    # Set standard options:
    opts = {"mtime": "42"}

    # Determine input options to use, if any.
    # XXX Not used. Drop it.
    if os.path.exists(optsfile):
        for line in open(optsfile, 'r').read().splitlines(0):
            name, value = line.split('=', 1)
            value = value.strip()
            try:  # allow value to be a type other than string
                value = eval(value)
            except Exception:
                pass
            opts[name] = value
        if _debug:
            print "*"*50, "options"
            pprint.pprint(opts)

    # Scan the file, capturing stdout and stderr and any possible
    # error.
    # - To allow testing from different dirs (resulting in changing
    #   path strings, we normalize the <file path="..."> value and any
    #   <scope ilk="blob" src="..."> attributes).
    oldStdout = sys.stdout
    oldStderr = sys.stderr
    sys.stdout = StringIO.StringIO()
    sys.stderr = StringIO.StringIO()
    try:
        try:
            lang = None
            if tags and "python3" in tags:
                lang = "Python3"
            buf = self.mgr.buf_from_path(infile, lang=lang)
            buf.scan(**opts)
            tree = buf.tree

            # Normalize paths.
            relnorm_infile = infile[len(dirname(gInputsDir))+1:]
            absnorm_infile = infile
            relnorm_infile = relnorm_infile.replace('\\', '/')
            absnorm_infile = absnorm_infile.replace('\\', '/')
            for file_elem in tree:
                file_elem.set("path", relnorm_infile)
                for blob_elem in file_elem:
                    if blob_elem.get("ilk") != "blob":
                        continue
                    norm_src = normpath(blob_elem.get("src"))
                    norm_src = norm_src.replace('\\', '/')
                    if norm_src in (relnorm_infile, absnorm_infile):
                        blob_elem.set("src", relnorm_infile)

            tree = pretty_tree_from_tree(tree)
            # Due to the dynamic nature of the ciler errors (which often
            # includes the source code line numbers), it's difficult to check
            # that the errors are identical, so we work around this by just
            # taking the first 30 characters of the error.
            cile_error = tree[0].get("error")
            if cile_error and fpath.endswith(".js"):
                tree[0].set("error", len(
                    cile_error) < 30 and cile_error or (cile_error[:30] + "..."))
            cix = ET.tostring(tree)

        except CodeIntelError, ex:
            error = traceback.format_exc()
        else:
Example #33
def _testOneInputFile(self, fpath, tags=None):
    _debug = False  # Set to true to dump status info for each test run.

    infile = os.path.join(gInputsDir, fpath)  # input
    outfile = os.path.join(gOutputsDir, fpath + '.cix')  # expected output
    tmpfile = os.path.join(gTmpDir, fpath + '.cix')  # actual output
    if not os.path.exists(os.path.dirname(tmpfile)):
        os.makedirs(os.path.dirname(tmpfile))
    errfile = os.path.join(gOutputsDir, fpath + '.error')  # expected error
    # An options file is a set of kwargs for the buf.scan()
    # method call. One key-value pair per-line like this:
    #   key=value
    # Whitespace is stripped off the value.
    optsfile = os.path.join(gInputsDir, fpath + '.options')  # input options

    if _debug:
        print()
        print("*" * 50, "codeintel '%s'" % fpath)

    # Set standard options:
    opts = {"mtime": "42"}

    # Determine input options to use, if any.
    #XXX Not used. Drop it.
    if os.path.exists(optsfile):
        for line in open(optsfile, 'r').read().splitlines(0):
            name, value = line.split('=', 1)
            value = value.strip()
            try:  # allow value to be a type other than string
                value = eval(value)
            except Exception:
                pass
            opts[name] = value
        if _debug:
            print("*" * 50, "options")
            pprint.pprint(opts)

    # Scan the file, capturing stdout and stderr and any possible
    # error.
    # - To allow testing from different dirs (resulting in changing
    #   path strings, we normalize the <file path="..."> value and any
    #   <scope ilk="blob" src="..."> attributes).
    oldStdout = sys.stdout
    oldStderr = sys.stderr
    sys.stdout = StringIO()
    sys.stderr = StringIO()
    try:
        try:
            lang = None
            if tags and "python3" in tags:
                lang = "Python3"
            buf = self.mgr.buf_from_path(infile, lang=lang)
            buf.scan(**opts)
            tree = buf.tree

            # Normalize paths.
            relnorm_infile = infile[len(dirname(gInputsDir)) + 1:]
            absnorm_infile = infile
            relnorm_infile = relnorm_infile.replace('\\', '/')
            absnorm_infile = absnorm_infile.replace('\\', '/')
            for file_elem in tree:
                file_elem.set("path", relnorm_infile)
                for blob_elem in file_elem:
                    if blob_elem.get("ilk") != "blob": continue
                    norm_src = normpath(blob_elem.get("src"))
                    norm_src = norm_src.replace('\\', '/')
                    if norm_src in (relnorm_infile, absnorm_infile):
                        blob_elem.set("src", relnorm_infile)

            tree = pretty_tree_from_tree(tree)
            # Due to the dynamic nature of the ciler errors (which often
            # includes the source code line numbers), it's difficult to check
            # that the errors are identical, so we work around this by just
            # taking the first 30 characters of the error.
            cile_error = tree[0].get("error")
            if cile_error and fpath.endswith(".js"):
                tree[0].set(
                    "error",
                    len(cile_error) < 30 and cile_error
                    or (cile_error[:30] + "..."))
            cix = ET.tostring(tree)

        except CodeIntelError as ex:
            error = traceback.format_exc()
        else:
            error = None
            if isinstance(cix, six.text_type):
                with io.open(tmpfile, mode="wt", encoding="utf-8") as fout:
                    fout.write(cix)
            else:
                with open(tmpfile, mode="wb") as fout:
                    fout.write(cix)
    finally:
        stdout = sys.stdout.getvalue()
        stderr = sys.stderr.getvalue()
        sys.stdout = oldStdout
        sys.stderr = oldStderr
    if _debug:
        print("*" * 50, "stdout")
        print(stdout)
        print("*" * 50, "stderr")
        print(stderr)
        print("*" * 50, "error")
        print(str(error))
        print("*" * 50)

    generateMissing = False
    if not os.path.exists(outfile) and generateMissing:
        with io.open(outfile, mode='wt', encoding='utf-8') as fout:
            with io.open(tmpfile, mode='rt', encoding='utf-8') as ftmp:
                fout.write(ftmp.read())

    # Verify that the results are as expected.
    if os.path.exists(outfile) and error:
        self.fail("scanning '%s' raised an error but success was "
                  "expected:\n%s" % (_encode_for_stdout(fpath), indent(error)))
    elif os.path.exists(outfile):
        # Convert the <file path="..."/> to the native directory separator.
        def to_native_sep(match):
            path = match.group(2).replace("\\", os.sep).replace("/", os.sep)
            return match.group(1) + path + match.group(3)

        path_pat = re.compile(r'(<file .*?path=")(.*?)(".*?>)', re.S)

        # Note that we don't really care about line endings here, so we read
        # both files in universal newlines mode (i.e. translate to \n)
        # and normalize '&#10;', '&#13;' and '&apos;'
        with io.open(outfile, mode='rt', encoding='utf-8') as fout:
            expected = path_pat.sub(to_native_sep, fout.read())
            expected = expected.replace('&#xA;', '&#10;').replace(
                '&#xD;', '&#13;').replace('&apos;', '\'')
        with io.open(tmpfile, mode='rt', encoding='utf-8') as ftmp:
            actual = path_pat.sub(to_native_sep, ftmp.read())
            actual = actual.replace('&#xA;', '&#10;').replace(
                '&#xD;', '&#13;').replace('&apos;', '\'')

        if expected != actual:
            do_fail = True
            # Useful temporary thing while XML output format is changing.
            #if os.stat("../support/xmldiff.py"):
            #    rc = os.system('python ../support/xmldiff.py "%s" "%s"' % (outfile, tmpfile))
            #    if rc == 0:
            #        do_fail = False
            if do_fail:
                diff = list(
                    difflib.ndiff(expected.splitlines(1),
                                  actual.splitlines(1)))
                diff = _diffContext(diff, 2)
                if diff:
                    error_str = "%r != %r:\n --- %s\n +++ %s\n%s" \
                                % (outfile, tmpfile, outfile, tmpfile,
                                   ''.join(diff))
                    if gMaxDiffOutput > 0 and gMaxNumLines > 0:
                        if len(error_str) > gMaxDiffOutput:
                            error_lines = error_str.split("\n")
                            if len(error_lines) > gMaxNumLines:
                                error_lines = error_lines[:gMaxNumLines] + [
                                    "..."
                                ]
                            if gMaxLineLength > 0:
                                error_str = "\n".join([
                                    len(x) > gMaxLineLength
                                    and x[:gMaxLineLength] or x
                                    for x in error_lines
                                ])
                            else:
                                error_str = "\n".join(error_lines)
                    self.fail(_encode_for_stdout(error_str))
    elif os.path.exists(errfile):
        # There is no reference output file. This means that processing
        # this file is expected to fail.
        expectedError = open(errfile, 'r').read()
        actualError = str(error)
        self.failUnlessEqual(actualError.strip(), expectedError.strip())
    else:
        self.fail("No reference output file or error file for '%s'." % infile)

    # Ensure next test file gets a clean codeintel.
    toDelete = []
    for modname in sys.modules:
        if modname == "codeintel" or modname.startswith("codeintel."):
            toDelete.append(modname)
    for modname in toDelete:
        del sys.modules[modname]
Example #34
def convert_data_from_dict_to_xml(data):
    '''
    Convert data fields from dictionary to XML when performing a studio submission on the Basic Template tab.
    The output will be updated to value of Advanced Editor.

        1. problem description
        2. Image url
        3. variables (name, min_value, max_value, type, decimal_places)
        4. answer_template_string

    :param data -- a dictionary of fields supported for raw editor
    :return:
    <problem>
        <description>Given a = [a] and b = [b]. Calculate the [sum], [difference] of a and b. </description>
        <images>
            <image_url link="http://example.com/image1">Image 1</image_url>
            <image_url link="http://example.com/image2">Image 2</image_url>
        </images>
        <variables>
            <variable name="a" min="1" max="200" type="integer"/>
            <variable name="b" min="1.0" max="20.5" type="float" decimal_places="2"/>
        </variables>
        <answer_templates>
            <answer sum = "[a] + [b]" difference = "[a] - [b]">Teacher's answer</answer>
        </answer_templates>
    </problem>
    '''
    print("## CALLING FUNCTION convert_data_from_dict_to_xml() ##")
    # print("Input data type: {}".format(type(data)))
    print("Input data: {}".format(data))

    xml_string = ''
    # init the root element: problem
    problem = ET.Element('problem')

    # convert question template
    field_question_template = data['question_template']
    description = ET.SubElement(problem, 'description')
    description.text = field_question_template

    # Convert the answer template string to a dictionary,
    # then build the xml string for the raw edit fields
    field_answer_template = data['answer_template']
    # Check for empty input
    if not field_answer_template:
        raise JsonHandlerError(400, "Answer template must not be empty")

    # Handle answer template
    # Parse and convert answer template string to dictionary first
    answer_template_dict = {}
    answer_template_list = field_answer_template.split('\n')

    for answer in answer_template_list:
        # only process if not empty, ignore empty answer template
        if answer:
            # The answer template must contain a '=' character
            if answer.find('=') != -1:
                # Split on the first '=' only, so the value may itself contain '='
                answer_attrib_key, answer_attrib_value = answer.split('=', 1)
                # print("answer_attrib_key = {}".format(answer_attrib_key))
                # print("answer_attrib_value = {}".format(answer_attrib_value))

                # Remove unexpected leading/trailing whitespace
                answer_attrib_key = answer_attrib_key.strip()
                answer_attrib_value = answer_attrib_value.strip()
                # print("stripped answer_attrib_key = {}".format(answer_attrib_key))
                # print("stripped answer_attrib_value = {}".format(answer_attrib_value))

                # Add answer attribute to dict
                answer_template_dict[answer_attrib_key] = answer_attrib_value
            else:
                raise JsonHandlerError(
                    400,
                    "Unsupported answer format. Answer template must contain a '=' character: {}"
                    .format(answer))
    # print("Resulting answer_template_dict: {}".format(answer_template_dict))
    # Answer template xml elements
    answer_templates = ET.SubElement(problem, 'answer_templates')
    answer = ET.SubElement(answer_templates, 'answer')
    answer.text = "Teacher's answer"
    # Add the converted dict data as attributes of the answer element
    for attrib_key, attrib_value in answer_template_dict.iteritems():
        answer.set(attrib_key, attrib_value)

    # Convert numeric variables
    field_variables = data['variables']
    # xml elements
    variables_elem = ET.SubElement(problem, 'variables')
    for var_name, attributes in field_variables.iteritems():
        variable_elem = ET.SubElement(variables_elem, 'variable')
        for attribute, value in attributes.iteritems():
            # Set each attribute on the variable element
            variable_elem.set(attribute, value)

    # Convert string variables dictionary to xml string
    field_string_variables = data['string_variables']
    # xml elements
    string_variables_elem = string_variables_dict_to_xml_element(
        field_string_variables)
    # Append the string_variables element to the end of the problem element's subelements.
    problem.append(string_variables_elem)

    # convert image
    field_image_url = data['image_url']
    # xml elements
    images = ET.SubElement(problem, 'images')
    image_url = ET.SubElement(images, 'image_url')
    # Set attribute
    image_url.set('link', field_image_url)

    # print "before indent, Problem elem dum = ", ET.dump(problem)
    indented_problem = indent(problem)
    # print "after indent ,Problem elem dum = ", ET.dump(indented_problem)

    xml_string = ET.tostring(indented_problem)
    # print "Output xml string = ", xml_string
    print("## End FUNCTION convert_data_from_dict_to_xml() ##")

    return xml_string
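A standalone sketch of the answer-template parsing step inside convert_data_from_dict_to_xml(): every non-empty line of the template string is split on its first '=' into an attribute name and value. The sample template below is invented for illustration.

# Hypothetical answer template, one "name = expression" pair per line.
sample_template = "sum = [a] + [b]\ndifference = [a] - [b]"

answer_template_dict = {}
for line in sample_template.split('\n'):
    if line and '=' in line:
        key, _, value = line.partition('=')  # split on the first '=' only
        answer_template_dict[key.strip()] = value.strip()

print(answer_template_dict)
# {'sum': '[a] + [b]', 'difference': '[a] - [b]'}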
Exemplo n.º 35
0
def test_string_variables_dict_to_xml_element(string_variables_dict):
    elem = string_variables_dict_to_xml_element(string_variables_dict)
    xml_string = ET.tostring(indent(elem))
    print xml_string
Exemplo n.º 36
0
    def processor(self, content):
        # XSLT was invented for this kind of transformation
        # TODO: RitNummer

        dtreinen = []
        root = ElementTree.XML(content)
        treinen = root.findall('.//VertrekkendeTrein')
        for trein in treinen:
            result = {}
            result['vertrektijd'] = trein.find('.//VertrekTijd').text
            spoor = trein.find('.//VertrekSpoor')
            result['spoor'] = spoor.text
            result['spoorwijziging'] = spoor.attrib['wijziging']
            result['station'] = trein.find('.//EindBestemming').text

            vehicle = trein.find('.//TreinSoort')
            if vehicle is not None:
                result['type'] = vehicle.text

            vertraging = trein.find('.//VertrekVertragingTekst')
            # Compare against None explicitly: an Element with no children is falsy
            if vertraging is not None:
                result['vertraging'] = vertraging.text

            ritnummer = trein.find('.//RitNummer')
            if ritnummer is not None:
                result['ritnummer'] = ritnummer.text

            dtreinen.append(result)

        root = ElementTree.Element('liveboard')
        root.attrib['version'] = "1.0"
        root.attrib['timestamp'] = str(int(time.time()))

        self.renderStation(root, self.station)

        departures = ElementTree.SubElement(root, 'departures')
        departures.attrib['number'] = str(len(dtreinen))

        for trein in dtreinen:
            departure = ElementTree.SubElement(departures, 'departure')
            if 'vertraging' in trein:
                departure.attrib['delay'] = trein['vertraging']

            self.renderStation(departure, self.station)

            if 'type' in trein:
                sub = ElementTree.SubElement(departure, 'vehicle')
                sub.text = 'NL.NS.'+trein['type']
            
            if 'vertrektijd' in trein:
                sub = ElementTree.SubElement(departure, 'time')
                sub.attrib['formatted'] = trein['vertrektijd']
                sub.text = str(int(time.mktime(iso8601.parse_date(trein['vertrektijd']).timetuple())))

            if 'spoor' in trein:
                sub = ElementTree.SubElement(departure, 'platform')
                sub.text = trein['spoor']

                if 'spoorwijziging' in trein:
                    sub.attrib['change'] = trein['spoorwijziging']

        return ElementTree.tostring(root)
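A self-contained sketch of the departure extraction above, run against a small hand-written fragment in the style of the NS "VertrekkendeTrein" feed; the element names mirror those used in processor(), while the sample values are invented.

from xml.etree import ElementTree

sample = """<ActueleVertrekTijden>
  <VertrekkendeTrein>
    <VertrekTijd>2013-05-04T18:36:00+0200</VertrekTijd>
    <EindBestemming>Amsterdam Centraal</EindBestemming>
    <TreinSoort>Intercity</TreinSoort>
    <VertrekSpoor wijziging="false">5</VertrekSpoor>
  </VertrekkendeTrein>
</ActueleVertrekTijden>"""

root = ElementTree.XML(sample)
for trein in root.findall('.//VertrekkendeTrein'):
    spoor = trein.find('.//VertrekSpoor')
    print("%s | %s | spoor %s (wijziging=%s)" % (
        trein.find('.//VertrekTijd').text,
        trein.find('.//EindBestemming').text,
        spoor.text,
        spoor.attrib['wijziging']))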
Exemplo n.º 37
0
 def write(self, element):
     self._flush()
     indent(element, self.indentLevel)
     self.out.write(ElementTree.tostring(element, "utf-8"))
     self.lastElement = None
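The indent() helper called by several of these examples is not shown; the sketch below is a common variant of the well-known ElementTree pretty-printing recipe, adjusted to return the element so that the ET.tostring(indent(elem)) style used above also works. Treat it as an assumption about the missing helper, not its actual definition.

from xml.etree import ElementTree

def indent(elem, level=0):
    # Recursively add newline/whitespace text and tail values in place.
    i = "\n" + level * "  "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + "  "
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        for child in elem:
            indent(child, level + 1)
        if not child.tail or not child.tail.strip():
            child.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i
    return elem

# Tiny usage example with an invented element.
root = ElementTree.Element('liveboard')
ElementTree.SubElement(root, 'departures')
print(ElementTree.tostring(indent(root)))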
Exemplo n.º 38
0
def wordpress(xml, instance, author):
    """Imports from an wordpress export xml file directly into everything.
    Uses cElementTree (http://effbot.org/zone/celementtree.htm) for XML access."""

    xml = ET.parse(StringIO(xml["content"]))
    root = xml.getroot().find("channel")

    for i in root.findall("item"):
        slug = i.findtext("{http://wordpress.org/export/1.0/}post_name")
        # Check if this Entry already exists
        try:
            e = Entry.objects.get(slug=slug)
        except:
            # Create a new everything entry!
            tags = []
            try:
                for cat in i.findall("category"):
                    tags.append(cat.text)
                tagstring = ", ".join(tags)
            except:
                tagstring = ""

            if i.findtext("{http://wordpress.org/export/1.0/}status") == "publish":
                status = "public"
            else:
                status = "draft"
            date = dateutil.parser.parse(i.findtext("{http://wordpress.org/export/1.0/}post_date"))
            content = i.findtext("{http://purl.org/rss/1.0/modules/content/}encoded")

            e = Entry(
                title=i.findtext("title"),
                content=content,
                tags=tagstring,
                slug=slug,
                status=status,
                app=instance,
                author_id=int(author),
            )
            e.save()
            e.created = date
            e.save()
            # print "Imported Entry " + e.title

        # Import the comments, too!
        for c in i.findall("{http://wordpress.org/export/1.0/}comment"):
            ref = "%s.entry.%s" % (instance, e.id)

            content = c.find("{http://wordpress.org/export/1.0/}comment_content")
            content = ET.tostring(content)
            thecontent = content.replace(
                '<ns0:comment_content xmlns:ns0="http://wordpress.org/export/1.0/">', ""
            ).replace("</ns0:comment_content>", "")
            themail = c.findtext("{http://wordpress.org/export/1.0/}comment_author_email")

            # Check if this Comment already exists
            try:
                v = Comment.objects.get(ref=ref, content=thecontent, mail=themail)
            except:
                if str(c.findtext("{http://wordpress.org/export/1.0/}comment_approved")) == "1":
                    status = "ok"
                else:
                    status = "unsure"

                v = Comment(
                    name=c.findtext("{http://wordpress.org/export/1.0/}comment_author"),
                    mail=themail,
                    url=c.findtext("{http://wordpress.org/export/1.0/}comment_author_url"),
                    ip=c.findtext("{http://wordpress.org/export/1.0/}comment_author_IP"),
                    content=thecontent,
                    date=dateutil.parser.parse(c.findtext("{http://wordpress.org/export/1.0/}comment_date")),
                    status=status,
                    ref=ref,
                )
                v.save()
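A minimal sketch of the namespaced lookups used above: elements from a WordPress export (WXR) file live in the http://wordpress.org/export/1.0/ namespace, so findtext() needs the {namespace}tag form. The item fragment below is made up.

from xml.etree import ElementTree as ET

item_xml = """<item xmlns:wp="http://wordpress.org/export/1.0/">
  <title>Hello world</title>
  <wp:post_name>hello-world</wp:post_name>
  <wp:status>publish</wp:status>
</item>"""

item = ET.XML(item_xml)
print(item.findtext("title"))                                          # Hello world
print(item.findtext("{http://wordpress.org/export/1.0/}post_name"))   # hello-world
print(item.findtext("{http://wordpress.org/export/1.0/}status"))      # publish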
Exemplo n.º 39
0
 def write(self, element):
     self._flush()
     indent(element, self.indentLevel)
     self.out.write(ElementTree.tostring(element, "utf-8"))
     self.lastElement = None