def _fix_gnuplot_svg_size(self, image, size=None):
        """
        Mathematica SVGs do not have height/width attributes in the correct
        place. Set them to the actual plot size, which is sometimes hidden in
        the XML.

        Parameters
        ----------
        image : str
            SVG data.
        size : tuple of int
            Image width, height.

        """
        doc = minidom.parseString(image)
        (svg,) = doc.getElementsByTagName('svg')
        try:
            (rect,) = doc.getElementsByTagName('image')
        except ValueError:
            # No single <image> element; fall back to the second <rect>.
            rect = doc.getElementsByTagName('rect')[1]

        w = rect.getAttribute('width')
        h = rect.getAttribute('height')

        if size is not None:
            width, height = size
        else:
            width, height = int(w), int(h)

        svg.setAttribute('width', '%dpx' % width)
        svg.setAttribute('height', '%dpx' % height)
        return svg.toxml()
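A minimal standalone sketch of the same resizing technique, using a made-up SVG string instead of real Mathematica/gnuplot output:

from xml.dom import minidom

svg_data = '<svg><rect width="640" height="480"/></svg>'  # hypothetical sample
doc = minidom.parseString(svg_data)
(svg,) = doc.getElementsByTagName('svg')
rect = doc.getElementsByTagName('rect')[0]
# Copy the plot rectangle's size onto the <svg> root, as the method above does.
svg.setAttribute('width', '%dpx' % int(rect.getAttribute('width')))
svg.setAttribute('height', '%dpx' % int(rect.getAttribute('height')))
print(svg.toxml())  # <svg width="640px" height="480px"><rect .../></svg>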
Example #2
File: tests.py Project: gongfacun/django
 def _validate_output(serial_str):
     try:
         minidom.parseString(serial_str)
     except Exception:
         return False
     else:
         return True
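The same well-formedness check as a self-contained helper; minidom.parseString raises xml.parsers.expat.ExpatError on malformed input, so a broad except covers it:

from xml.dom import minidom

def is_well_formed(serial_str):
    try:
        minidom.parseString(serial_str)
    except Exception:
        return False
    return True

print(is_well_formed('<a><b/></a>'))  # True
print(is_well_formed('<a><b></a>'))   # False (mismatched tag)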
Example #3
    def test_create(self):
        serializer = common.MetadataTemplate()
        fixture = {
            'metadata': {
                'key9': 'value9',
                'key2': 'value2',
                'key1': 'value1',
            },
        }
        output = serializer.serialize(fixture)
        root = etree.XML(output)
        xmlutil.validate_schema(root, 'metadata')
        metadata_dict = fixture['metadata']
        metadata_elems = root.findall('{0}meta'.format(NS))
        self.assertEqual(len(metadata_elems), 3)
        for i, metadata_elem in enumerate(metadata_elems):
            (meta_key, meta_value) = metadata_dict.items()[i]
            self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
            self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
        actual = minidom.parseString(output.replace("  ", ""))

        expected = minidom.parseString("""
            <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
                <meta key="key2">value2</meta>
                <meta key="key9">value9</meta>
                <meta key="key1">value1</meta>
            </metadata>
        """.replace("  ", "").replace("\n", ""))

        self.assertEqual(expected.toxml(), actual.toxml())
Example #4
File: __init__.py Project: idkfa/dexml
    def render(self,encoding=None,fragment=False,pretty=False,nsmap=None):
        """Produce XML from this model's instance data.

        A unicode string will be returned if any of the objects contain
        unicode values; specifying the 'encoding' argument forces generation
        of a bytestring.

        By default a complete XML document is produced, including the
        leading "<?xml>" declaration.  To generate an XML fragment set
        the 'fragment' argument to True.
        """
        if nsmap is None:
            nsmap = {}
        data = []
        if not fragment:
            if encoding:
                s = '<?xml version="1.0" encoding="%s" ?>' % (encoding,)
                data.append(s)
            else:
                data.append('<?xml version="1.0" ?>')
        data.extend(self._render(nsmap))
        xml = "".join(data)
        if pretty and encoding:
            xml = minidom.parseString(xml).toprettyxml(encoding=encoding)
        else:
            if pretty:
                xml = minidom.parseString(xml).toprettyxml()
            if encoding:
                xml = xml.encode(encoding)
        return xml
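A standalone sketch of the pretty-printing branch above: minidom re-parses the serialized document and toprettyxml() re-emits it with indentation, returning bytes when an encoding is given. The sample document is made up.

from xml.dom import minidom

xml_str = '<?xml version="1.0" ?><person><name>Alice</name></person>'  # made-up sample
print(minidom.parseString(xml_str).toprettyxml(indent="  "))
# With an encoding, toprettyxml() returns a bytestring rather than text.
pretty_bytes = minidom.parseString(xml_str).toprettyxml(indent="  ", encoding="utf-8")
print(pretty_bytes.decode("utf-8"))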
Example #5
	def handle_create_space(self):
		def unescape(s):
			s = s.replace("&lt;", "<")
			s = s.replace("&gt;", ">")
			# this has to be last:
			s = s.replace("&amp;", "&")
			return s

		headers = self.soapheaders('http://schemas.microsoft.com/sharepoint/soap/dws/CreateDws')

		space = self.ask('name', '')
		soapbody = """<?xml version="1.0"?>
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/">
<s:Body>
<CreateDws xmlns="http://schemas.microsoft.com/sharepoint/soap/dws/">
<name/>
<users/>
<title>%s</title>
<documents/>
</CreateDws>
</s:Body>
</s:Envelope>""" % space
		# no space in the url, we're creating a new one!
		response = self.urlopen("%s/_vti_bin/dws.asmx" % self.path, soapbody, headers)
		if response.code != 200:
			raise Exception("failed to create space, http error %s" % response.code)
		ret = response.read()
		try:
			xml = minidom.parseString(ret)
			inner = unescape(xml.getElementsByTagName('CreateDwsResult')[0].firstChild.toxml())
			xml = minidom.parseString(inner)
			url = xml.getElementsByTagName('Url')[0].firstChild.toxml()
			print 'created space at %s' % url
		except Exception:
			print "response is invalid xml: '%s'" % ret
Example #6
	def testMergeBest(self):
		master_xml = do_merge(header + """\n
  <group>
    <implementation id='sha1=123' version='1'/>
  </group>
  <group>
    <requires interface='http://foo'/>
    <implementation id='sha1=002' version='2'/>
  </group>""" + footer, local_file_req)
		master = parse(master_xml)
		assert master.url == 'http://test/hello.xml', master
		assert len(master.implementations) == 3
		deps = master.implementations['sha1=003'].requires
		assert len(deps) == 1
		assert deps[0].interface == 'http://foo', deps[0]

		assert len(minidom.parseString(master_xml).documentElement.getElementsByTagNameNS(XMLNS_IFACE, 'group')) == 2

		# Again, but with the groups the other way around
		master_xml = do_merge(header + """\n
  <group>
    <requires interface='http://foo'/>
    <implementation id='sha1=002' version='2'/>
  </group>
  <group>
    <implementation id='sha1=123' version='1'/>
  </group>""" + footer, local_file_req)
		master = parse(master_xml)
		assert master.url == 'http://test/hello.xml', master
		assert len(master.implementations) == 3
		deps = master.implementations['sha1=003'].requires
		assert len(deps) == 1
		assert deps[0].interface == 'http://foo', deps[0]

		assert len(minidom.parseString(master_xml).documentElement.getElementsByTagNameNS(XMLNS_IFACE, 'group')) == 2
Example #7
def check_merge(master, new, expected):
	master_doc = minidom.parseString(header + master + footer)
	new_doc = minidom.parseString(header + new + footer)
	merge.merge(master_doc, new_doc)

	expected_doc = minidom.parseString(header + expected + footer)

	def remove_boring(doc):
		for node in list(doc.documentElement.childNodes):
			if node.localName in ('name', 'summary', 'description'):
				doc.documentElement.removeChild(node)
	remove_boring(master_doc)
	remove_boring(expected_doc)

	formatting.format_node(master_doc.documentElement, "\n")
	formatting.format_node(expected_doc.documentElement, "\n")

	master_doc.normalize()
	expected_doc.normalize()

	if xmltools.nodes_equal(master_doc.documentElement, expected_doc.documentElement):
		return

	actual = master_doc.documentElement.toxml()
	expected = expected_doc.documentElement.toxml()

	assert actual != expected

	raise Exception("Failed.\n\nExpected:\n{}\nActual:\n{}".format(expected, actual))
Example #8
    def test_application_wadl(self):
        "the 'application.wadl' method"

        if self.host.endswith('query'):
            appmethod = '%sapplication.wadl' % self.host[:-len('query')]
        else:
            pass

        req = urllib2.Request(appmethod)
        try:
            u = urllib2.urlopen(req)
            buffer = u.read()
        except:
            msg = 'Error calling the "application.wadl" method'
            self.assertTrue(False, msg)

        msg = 'The "application.wadl" method returned an empty string'
        self.assertGreater(len(buffer), 0, msg)
        msg = 'The file returned by "application.wadl" does not contain a "<"'
        self.assertIn('<', buffer, msg)

        # Check that the returned value is a valid xml file
        msg = 'Error "application.wadl" method does not return a valid xml file'
        try:
            parseString(buffer)
        except:
            self.assertTrue(False, msg)
Example #9
    def test_geofeed_rss(self):
        "Tests geographic feeds using GeoRSS over RSSv2."
        # Uses `GEOSGeometry` in `item_geometry`
        doc1 = minidom.parseString(self.client.get('/feeds/rss1/').content)
        # Uses a 2-tuple in `item_geometry`
        doc2 = minidom.parseString(self.client.get('/feeds/rss2/').content)
        feed1, feed2 = doc1.firstChild, doc2.firstChild

        # Making sure the box got added to the second GeoRSS feed.
        self.assertChildNodes(feed2.getElementsByTagName('channel')[0],
                              ['title', 'link', 'description', 'language',
                               'lastBuildDate', 'item', 'georss:box', 'atom:link']
                              )

        # Incrementing through the feeds.
        for feed in [feed1, feed2]:
            # Ensuring the georss namespace was added to the <rss> element.
            self.assertEqual(feed.getAttribute(u'xmlns:georss'),  u'http://www.georss.org/georss')
            chan = feed.getElementsByTagName('channel')[0]
            items = chan.getElementsByTagName('item')
            self.assertEqual(len(items), City.objects.count())

            # Ensuring the georss element was added to each item in the feed.
            for item in items:
                self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'georss:point'])
Example #10
    def handle_post(self, params, path_info, host, post_data, request_method):
        import xml.dom.minidom as m

        actions = []

        id = self.get_id_from_path_info(path_info)
        if id:
            action = Action()
            action.method = "update"
            action.id = id

            doc = m.parseString(post_data)
            entry = doc.getElementsByTagName("Placemark")[0]
            feature = self.entry_to_feature(entry)
            action.feature = feature
            actions.append(action)

        else:
            doc = m.parseString(post_data)
            entries = doc.getElementsByTagName("Placemark")
            entries.reverse()
            for entry in entries:
                action = Action()
                action.method = "create"
                feature_obj = self.entry_to_feature(entry)
                action.feature = feature_obj
                actions.append(action)

        return actions
Example #11
def extract_project_deps(project_filepath, log):
    try:
        with open(project_filepath, 'r') as file:
            contents = file.read()
    except:
        # log.warning("failed to acquire {0}.".format(project_filepath))
        return False, set()

    deps = set()
    directory = os.path.split(project_filepath)[0]

    for node in xml.parseString(contents).getElementsByTagName('parameter'):
        if node.getAttribute('name') == 'filename':
            filepath = node.getAttribute('value')
            filepath = convert_path_to_local(filepath)
            filepath = os.path.join(directory, filepath)
            deps.add(filepath)

    for node in xml.parseString(contents).getElementsByTagName('parameters'):
        if node.getAttribute('name') == 'filename':
            for child in node.childNodes:
                if child.nodeType == xml.Node.ELEMENT_NODE:
                    filepath = child.getAttribute('value')
                    filepath = convert_path_to_local(filepath)
                    filepath = os.path.join(directory, filepath)
                    deps.add(filepath)

    return True, deps
Example #12
    def run(self, *args, **kwargs):

        if not args:
            self.parser.error('One or more systems must be specified')
        dryrun = kwargs.get('dryrun')
        xml = kwargs.get('xml')
        prettyxml = kwargs.get('prettyxml')
        wait = kwargs.get('wait')
        self.set_hub(**kwargs)
        requests_session = self.requests_session()
        submitted_jobs = []
        failed = False
        for fqdn in args:
            res = requests_session.post('jobs/+inventory',
                                        json={'fqdn':fqdn,
                                              'dryrun':dryrun})
            try:
                res.raise_for_status()
            except HTTPError, e:
                sys.stderr.write('HTTP error: %s, %s\n' % (fqdn, e))
                content_type, _ = cgi.parse_header(e.response.headers.get(
                    'Content-Type', ''))
                if content_type == 'text/plain':
                    sys.stderr.write('\t' +
                                     e.response.content.rstrip('\n') +
                                     '\n')
                failed = True
            else:
                res_data = res.json()
                if xml:
                    print res_data['job_xml']
                if prettyxml:
                    print parseString(res_data['job_xml']).toprettyxml(encoding='utf8')
                if not dryrun:
                    submitted_jobs.append(res_data['job_id'])
Example #13
    def test_geofeed_rss(self):
        "Tests geographic feeds using GeoRSS over RSSv2."
        # Uses `GEOSGeometry` in `item_geometry`
        doc1 = minidom.parseString(self.client.get("/feeds/rss1/").content)
        # Uses a 2-tuple in `item_geometry`
        doc2 = minidom.parseString(self.client.get("/feeds/rss2/").content)
        feed1, feed2 = doc1.firstChild, doc2.firstChild

        # Making sure the box got added to the second GeoRSS feed.
        self.assertChildNodes(
            feed2.getElementsByTagName("channel")[0],
            ["title", "link", "description", "language", "lastBuildDate", "item", "georss:box", "atom:link"],
        )

        # Incrementing through the feeds.
        for feed in [feed1, feed2]:
            # Ensuring the georss namespace was added to the <rss> element.
            self.assertEqual(feed.getAttribute("xmlns:georss"), "http://www.georss.org/georss")
            chan = feed.getElementsByTagName("channel")[0]
            items = chan.getElementsByTagName("item")
            self.assertEqual(len(items), City.objects.count())

            # Ensuring the georss element was added to each item in the feed.
            for item in items:
                self.assertChildNodes(item, ["title", "link", "description", "guid", "georss:point"])
Example #14
def get_story_url_from_epub_html(inputio,_is_good_url=None):
    # print("get_story_url_from_epub_html called")
    epub = ZipFile(inputio, 'r') # works equally well with inputio as a path or a blob

    ## Find the .opf file.
    container = epub.read("META-INF/container.xml")
    containerdom = parseString(container)
    rootfilenodelist = containerdom.getElementsByTagName("rootfile")
    rootfilename = rootfilenodelist[0].getAttribute("full-path")

    contentdom = parseString(epub.read(rootfilename))
    #firstmetadom = contentdom.getElementsByTagName("metadata")[0]

    ## Save the path to the .opf file--hrefs inside it are relative to it.
    relpath = get_path_part(rootfilename)

    # spin through the manifest--only place there are item tags.
    for item in contentdom.getElementsByTagName("item"):
        if( item.getAttribute("media-type") == "application/xhtml+xml" ):
            filehref=relpath+item.getAttribute("href")
            soup = make_soup(epub.read(filehref).decode("utf-8"))
            for link in soup.findAll('a',href=re.compile(r'^http.*')):
                ahref=link['href']
                # print("href:(%s)"%ahref)
                # hack for bad ficsaver ffnet URLs.
                m = re.match(r"^http://www.fanfiction.net/s(?P<id>\d+)//$",ahref)
                if m != None:
                    ahref="http://www.fanfiction.net/s/%s/1/"%m.group('id')
                if _is_good_url == None or _is_good_url(ahref):
                    return ahref
    return None
Example #15
    def test_generate_report_dictionary_from_dom(self):
        """Test generate_report_dictionary_from_dom function."""
        self.mock_the_dialog(test_entire_mode=False)
        self.impact_merge_dialog.prepare_input()
        self.impact_merge_dialog.validate_all_layers()

        # Create the DOM
        first_postprocessing_report = \
            self.impact_merge_dialog.first_impact['postprocessing_report']
        second_postprocessing_report = \
            self.impact_merge_dialog.second_impact['postprocessing_report']
        first_report = (
            '<body>' +
            first_postprocessing_report +
            '</body>')
        second_report = (
            '<body>' +
            second_postprocessing_report +
            '</body>')

        # Now create a dom document for each
        first_document = minidom.parseString(get_string(first_report))
        second_document = minidom.parseString(get_string(second_report))
        tables = first_document.getElementsByTagName('table')
        tables += second_document.getElementsByTagName('table')

        report_dict = \
            self.impact_merge_dialog.generate_report_dictionary_from_dom(
                tables)
        # There should be 4 keys in that dict
        # (3 for each aggregation unit and 1 for total in aggregation unit)
        expected_number_of_keys = 4
        self.assertEqual(len(report_dict), expected_number_of_keys)
Example #16
File: Soap.py Project: AnnaHomolka/PyWPS
    def getWPSContent(self):
        """Get the specific WPS XML content inside the SOAP request. The
        element position may change if there is a SOAP header or if it was
        sent as a message inside the Body content. The script will check for
        a standard WPS request or an ExecuteProcess_ one."""

        reqWPS = self.root.xpath("//*[local-name() = 'GetCapabilities' or local-name()='DescribeProcess' or local-name()='Execute' or contains(local-name(),'ExecuteProcess_') or contains(local-name(),'ExecuteProcessAsync_')] ")
        if bool(reqWPS):
            # General WPS:
            # print reqWPS[0].tag  # getting the element's name
            if "ExecuteProcess" in reqWPS[0].tag:
                XMLStr = SOAPtoWPS(reqWPS[0])
                XMLDoc = minidom.parseString(XMLStr)
                return getFirstChildNode(XMLDoc)
            else:
                # GetCapabilities/DescribeProcess or Execute
                # getCapabilities=root.xpath("//*[local-name() = 'GetCapabilities' or local-name()='DescribeProcess']")
                # Normal WPS
                reqWPS = doFixTavernaBug(reqWPS[0])
                XMLDoc = minidom.parseString(etree.tostring(reqWPS))
                return getFirstChildNode(XMLDoc)
        else:  # if bool(reqWPS)
            raise pywps.NoApplicableCode("Could not determine the WPS request type from SOAP envelope. Couldn't determine GetCapabilities/DescribeProcess/Execute/ExecuteProcess_ from XML content")
Example #17
    def test_sortable_columns(self):
        """Make columns sortable:
        - All columns
        - Only specific columns"""
        generator = component.getUtility(ITableGenerator, 'ftw.tablegenerator')
        employees = [
            {'name': 'some name', 'date': 'somedate'},
        ]
        columns = ('name', 'date')
        parsed = parseString(
            generator.generate(employees, columns, sortable=True))
        # Sortable=True adds a class sortable to all table headers
        self.assertEqual(
            parsed.getElementsByTagName('th')[0]._attrs['class'].nodeValue,
            'sortable')
        self.assertEqual(
            parsed.getElementsByTagName('th')[1]._attrs['class'].nodeValue,
            'sortable')

        # Add the sortable class only on column 'name';
        # all others get a nosort class
        columns = ('name', 'date')
        sortable = ('name', )
        parsed = parseString(
            generator.generate(employees, columns, sortable=sortable))
        self.assertEqual(
            parsed.getElementsByTagName('th')[0]._attrs['class'].nodeValue,
            'sortable')
        self.assertEqual(
            parsed.getElementsByTagName('th')[1]._attrs['class'].nodeValue,
            u'nosort')
Example #18
def processDocumentNode( c ):
    '''this executes the stylesheet node against the current node'''
    try:
        if not styleNodeSelected( c ): return
        proc = Processor()
        stylenode = stylenodes[ c ]
        pos = c.p
        c.selectPosition( stylenode )
        sIO = getString( c )
        mdom1 = minidom.parseString( sIO )
        sIO = str( mdom1.toxml() )
        hstring = str( stylenode.h )
        if hstring == "": hstring = "no headline"
        stylesource = InputSource.DefaultFactory.fromString( sIO, uri = hstring)
        proc.appendStylesheet( stylesource )
        c.selectPosition( pos )
        xmlnode = pos.v
        xIO = getString( c )
        mdom2 = minidom.parseString( xIO )
        xIO = str( mdom2.toxml())
        xhead = str( xmlnode.headString )
        if xhead == "": xhead = "no headline"
        xmlsource = InputSource.DefaultFactory.fromString( xIO, uri = xhead )
        result = proc.run( xmlsource )
        nhline = "xsl:transform of " + str( xmlnode.headString )
        p2 = pos.insertAfter() # tnode )
        p2.setBodyString(result)
        p2.setHeadString(nhline)
        c.redraw()

    except Exception as x:
        g.es( 'exception ' + str( x ))
    c.redraw()
Example #19
 def __make_file_provider(self, dirname, filename, recurse, removeself):
     """Change parsed FileKey to action provider"""
     regex = ''
     if recurse:
         search = 'walk.files'
         path = dirname
         if filename.startswith('*.'):
             filename = filename.replace('*.', '.')
         if '.*' == filename:
             if removeself:
                 search = 'walk.all'
         else:
             regex = ' regex="%s" ' % (re.escape(filename) + '$')
     else:
         search = 'glob'
         path = os.path.join(dirname, filename)
         if -1 == path.find('*'):
             search = 'file'
     action_str = '<option command="delete" search="%s" path="%s" %s/>' % \
         (search, xml_escape(path), regex)
     yield Delete(parseString(action_str).childNodes[0])
     if removeself:
         action_str = '<option command="delete" search="file" path="%s"/>' % xml_escape(
             dirname)
         yield Delete(parseString(action_str).childNodes[0])
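A tiny sketch of the pattern used above (not from BleachBit itself): build an XML snippet as a string, parse it with minidom, and hand the resulting element to a consumer.

from xml.dom.minidom import parseString

action_str = '<option command="delete" search="file" path="/tmp/example.txt"/>'  # hypothetical
option_elem = parseString(action_str).childNodes[0]
print(option_elem.getAttribute('search'), option_elem.getAttribute('path'))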
Example #20
    def search(self, item, t, langs):

        fulllang = xbmc.convertLanguage(item['preferredlanguage'], xbmc.ENGLISH_NAME)
        if fulllang == "Persian": fulllang = "Farsi/Persian"
        #xbmc.executebuiltin("Notification(Title," + item['mansearchstr'] + ")")
        QueryString = self.filename

        if item['mansearch']:
            QueryString = item['mansearchstr']

        addon = xbmcaddon.Addon()

        if len(QueryString) < 6:
            xbmc.executebuiltin("Notification(" + addon.getLocalizedString(32003) + "," + addon.getLocalizedString(32002) + ")")
            return

        url = "http://www.subtitool.com/api/?query=" + QueryString + "&Lang=" + langs
        subs = urllib.urlopen(url).read()
        DOMTree = minidom.parseString(subs)
        if DOMTree.getElementsByTagName('Subtitle').length == 0:
            try:
                url = "http://www.subtitool.com/api/?query=" + QueryString + "&Lang=" + langs + "&OR=1"
                subs = urllib.urlopen(url).read()
                DOMTree = minidom.parseString(subs)
            except Exception, e:
                log("Subtitool", "Not Found OR")

            try:
                url = "http://www.subtitool.com/api/?query=" + QueryString + "&Lang=" + langs
                subs = urllib.urlopen(url).read()
                DOMTree = minidom.parseString(subs)
            except Exception, e:
                log("Subtitool", "Not Found")
Example #21
 def _generate_addons_xml_file( self ):
     # addons.xml heading block
     addons_xml = u"<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<addons>\n"
     # list of only folders, skip special .svn folder
     folders = [ f for f in os.listdir( os.curdir )
                if ( os.path.isdir( f ) and f != ".svn" ) ]
     # loop thru and add each addons addon.xml to the final addons.xml file
     for folder in folders:
         try:
             # new addon.xml text holder
             addon_xml = u""
             # create full path to an addon.xml file
             _path = os.path.join( folder, "addon.xml" )
             # split lines for stripping
             with open( _path, "r" ) as addon_file:
                 # loop thru cleaning each line
                 for line in addon_file:
                     # skip heading block as we already have one
                     if ( line.find( "<?xml" ) >= 0 ): continue
                     # add line
                     addon_xml += unicode( line.rstrip() + "\n", "UTF-8" )
             # check for a properly formatted xml file
             parseString( addon_xml.encode( "UTF-8" ) )
         except Exception as e:
             # missing or malformed addon.xml
             print "* Excluding {path} for {error}".format( path=_path, error=e )
         else:
             # we succeeded so add to our final addons.xml text
             addons_xml += addon_xml.rstrip() + "\n\n"
     # clean and add closing tag
     addons_xml = addons_xml.strip() + u"\n</addons>\n"
     # save file and return result
     return self._save_file( data=addons_xml.encode( "UTF-8" ), file="addons.xml" )
Example #22
 def testT04ProcessesLengthDescribeProcess(self):
     """Test, if any processes are listed in the DescribeProcess document
     """
     self._setFromEnv()
     getpywps = pywps.Pywps(pywps.METHOD_GET)
     getpywps.parseRequest(self.getdescribeprocessrequest)
     getpywps.performRequest()
     xmldom = minidom.parseString(getpywps.response)
     self.assertTrue(len(xmldom.getElementsByTagName("ProcessDescription"))>0)
     self.assertEquals(len(xmldom.getElementsByTagName("ProcessDescription")),
             len(getpywps.inputs["identifier"]))
    
     getpywps = pywps.Pywps(pywps.METHOD_GET)
     getpywps.parseRequest(self.getdescribeprocessrequestall)
     getpywps.performRequest()
     xmldom = minidom.parseString(getpywps.response)
     self.assertEquals(len(xmldom.getElementsByTagName("ProcessDescription")),len(getpywps.request.processes))
     
     postpywps = pywps.Pywps(pywps.METHOD_POST)
     describeRequestFile = open(os.path.join(pywpsPath,"tests","requests","wps_describeprocess_request_dummyprocess.xml"))
     postinputs = postpywps.parseRequest(describeRequestFile)
     postpywps.performRequest(postinputs)
     xmldom = minidom.parseString(postpywps.response)
     self.assertTrue(len(xmldom.getElementsByTagName("ProcessDescription"))>0)
     self.assertEquals(len(xmldom.getElementsByTagName("ProcessDescription")),
             len(postpywps.inputs["identifier"]))
     
     postpywps = pywps.Pywps(pywps.METHOD_POST)
     describeRequestFile = open(os.path.join(pywpsPath,"tests","requests","wps_describeprocess_request_all.xml"))
     postinputs = postpywps.parseRequest(describeRequestFile)
     postpywps.performRequest(postinputs)
     xmldom = minidom.parseString(postpywps.response)
     self.assertEquals(len(xmldom.getElementsByTagName("ProcessDescription")),len(postpywps.request.processes))
Example #23
File: common.py Project: goschtl/zope
    def _compareDOM( self, found_text, expected_text, debug=False ):

        found_lines = [ x.strip() for x in found_text.splitlines() ]
        found_text = '\n'.join( filter( None, found_lines ) )

        expected_lines = [ x.strip() for x in expected_text.splitlines() ]
        expected_text = '\n'.join( filter( None, expected_lines ) )

        from xml.dom.minidom import parseString
        found = parseString( found_text )
        expected = parseString( expected_text )
        fxml = found.toxml()
        exml = expected.toxml()

        if fxml != exml:

            if debug:
                zipped = zip( fxml, exml )
                diff = [ ( i, zipped[i][0], zipped[i][1] )
                        for i in range( len( zipped ) )
                        if zipped[i][0] != zipped[i][1]
                    ]
                import pdb; pdb.set_trace()

            print 'Found:'
            print fxml
            print
            print 'Expected:'
            print exml
            print

        self.assertEqual( found.toxml(), expected.toxml() )
Example #24
 def scan_browse_result(self, result, level, output_format='plain'):
     if output_format == 'plain':
         s = ""
         xml_root = minidom.parseString(result['Result'].encode('utf-8'))
         container_list = xml_root.getElementsByTagName("container")
         for container in container_list:
             dict = DidlInfo.extract_from_node(container, True)
             npath = dict["idPath"]
             adds = "C " + npath + " * " + dict["title"] + "\n"
             s += adds
             if int(level) > 0:
                  self.browse_recursive_children(npath, int(level) - 1, output_format)
         item_list = xml_root.getElementsByTagName("item")
         for item in item_list:
             dict = DidlInfo.extract_from_node(item, True)
             npath = dict["idPath"]
             s += "+ " + npath + " * " + dict["title"] + "\n"
         return s
     else:
         s = "["
         xml_root = minidom.parseString(result['Result'])
         container_list = xml_root.getElementsByTagName("container")
         for container in container_list:
             dict = DidlInfo.extract_from_node(container, True)
             s += json.dumps(dict)
             s += ","
         item_list = xml_root.getElementsByTagName("item")
         for item in item_list:
             dict = DidlInfo.extract_from_node(item, True)
             s += json.dumps(dict)
             s += ","
         if len(s) > 2:
             s = s[:-1]
         s += "]"
         return s
Example #25
def main(argv):
    if len(sys.argv) < 3:
        usage(sys.argv)
        sys.exit(2)

    host = sys.argv[1]
    uc = UpnpCommand(host)
    operation = sys.argv[2]
    result = None
    if operation == 'play':
        result = uc.play()
    elif operation == 'stop':
        result = uc.stop()
    elif operation == 'getv':
        result = uc.get_volume()
    elif operation == 'getfilter':
        result = uc.get_filter()
    elif operation == 'setv':
        result = uc.set_volume(sys.argv[3])
    elif operation == 'seek':
        result = uc.seek(sys.argv[3])
    elif operation == 'prev':
        result = uc.previous()
    elif operation == 'next':
        result = uc.next()
    elif operation == 'position':
        result = uc.get_position_info()
    elif operation == 'transport':
        result = uc.get_transport_setting()
    elif operation == 'getstatevar':
        result = uc.get_state_var()
    elif operation == 'getsetting':
        result = uc.get_setting(sys.argv[3])
    elif operation == 'media':
        result = uc.get_media_info()
        result += uc.get_position_info()
    elif operation == 'allinfo':
        result = uc.get_volume()
        result += uc.get_position_info()
        result += uc.get_transport_setting()
        result += uc.get_media_info()
    elif operation == 'cap':
        result = uc.get_browse_capabilites()
    elif operation == 'browse':
        result = uc.browse(argv[3])
        xml_root = minidom.parseString(result['Result'])
        print(xml_root.toprettyxml(indent="\t"))
    elif operation == 'browsechildren':
        if argv[3].endswith('/*'):
            result = uc.browse_recursive_children(argv[3][:-2])
            print(result)
        else:
            result = uc.browsechildren(argv[3])
            xml_root = minidom.parseString(result['Result'])
            print(xml_root.toprettyxml(indent="\t"))
        return

    else:
        usage(sys.argv)
    print(result)
Example #26
File: lib.py Project: huxuan/WikiMirs
def xml2terms(xml):
    """docstring for xml2terms"""
    try:
        root = parseString(xml).documentElement
    except:
        xml = xml.join(['<mrow>', '</mrow>'])
        root = parseString(xml).documentElement
    stack = [root, ]

    while stack:
        if stack[-1].firstChild and \
            stack[-1].firstChild.nodeType != Node.TEXT_NODE:
            term_raw = stack[-1].toxml()
            term_gen = re.sub('>[^<]+?<', '><', term_raw);
            # print term_raw, term_gen, len(stack)
            term_raw = term_compress(term_raw)
            term_gen = term_compress(term_gen)
            # print term_raw, term_gen, len(stack)
            yield term_raw, term_gen, len(stack)
        if stack[-1].firstChild and \
            stack[-1].firstChild.nodeType != Node.TEXT_NODE:
            stack.append(stack[-1].firstChild)
        elif stack[-1].nextSibling:
            stack[-1] = stack[-1].nextSibling
        else:
            stack.pop()
            while stack and not stack[-1].nextSibling:
                stack.pop()
            if stack:
                stack[-1] = stack[-1].nextSibling
Example #27
def compare_xml(want, got):
    """
    Try to do a 'xml-comparison' of want and got. Plain string comparison
    doesn't always work because, for example, attribute ordering should not be
    important. Ignore comment nodes and leading and trailing whitespace.

    Based on https://github.com/lxml/lxml/blob/master/src/lxml/doctestcompare.py
    """
    _norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')

    def norm_whitespace(v):
        return _norm_whitespace_re.sub(' ', v)

    def child_text(element):
        return ''.join(c.data for c in element.childNodes
                       if c.nodeType == Node.TEXT_NODE)

    def children(element):
        return [c for c in element.childNodes
                if c.nodeType == Node.ELEMENT_NODE]

    def norm_child_text(element):
        return norm_whitespace(child_text(element))

    def attrs_dict(element):
        return dict(element.attributes.items())

    def check_element(want_element, got_element):
        if want_element.tagName != got_element.tagName:
            return False
        if norm_child_text(want_element) != norm_child_text(got_element):
            return False
        if attrs_dict(want_element) != attrs_dict(got_element):
            return False
        want_children = children(want_element)
        got_children = children(got_element)
        if len(want_children) != len(got_children):
            return False
        return all(check_element(want, got) for want, got in zip(want_children, got_children))

    def first_node(document):
        for node in document.childNodes:
            if node.nodeType != Node.COMMENT_NODE:
                return node

    want = want.strip().replace('\\n', '\n')
    got = got.strip().replace('\\n', '\n')

    # If the string is not a complete xml document, we may need to add a
    # root element. This allow us to compare fragments, like "<foo/><bar/>"
    if not want.startswith('<?xml'):
        wrapper = '<root>%s</root>'
        want = wrapper % want
        got = wrapper % got

    # Parse the want and got strings, and compare the parsings.
    want_root = first_node(parseString(want))
    got_root = first_node(parseString(got))

    return check_element(want_root, got_root)
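A brief usage sketch (not from the original project), assuming compare_xml and its imports are in scope: parsing both strings with minidom makes the comparison insensitive to attribute order, and fragments without an XML declaration are wrapped in a synthetic root element.

print(compare_xml('<foo a="1" b="2">x</foo>', '<foo b="2" a="1">x</foo>'))  # True
print(compare_xml('<foo>1</foo><bar/>', '<foo>2</foo><bar/>'))              # False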
Example #28
 def pprint(self):
     if self.xml is not None:
         mxml = minidom.parseString(ElementTree.tostring(self.xml))
         mxml = minidom.parseString(
             ElementTree.tostring(self.xml.find('./cputune')))
         print mxml.toprettyxml()
     self.res.pprint()
Example #29
    def test_product_delete_byname(self):
        """Delete a product by name"""
        response = self.c.post(reverse('api.views.product_delete'),
                               {'product': self.products[0].name})
        xmldoc = minidom.parseString(response.content)

        msg = xmldoc.getElementsByTagName('success')
        self.assertEqual(len(msg), 1, 'Delete successful')

        all_products = Product.objects.all()
        self.assertEquals(len(all_products), len(self.products) - 1,
                          'product was deleted')

        response = self.c.post(reverse('api.views.product_delete'),
                               {'product': self.products[0].name})
        xmldoc = minidom.parseString(response.content)

        msg = xmldoc.getElementsByTagName('error')
        errno = msg[0].getAttribute('number')
        self.assertEqual(len(msg), 1, 'Delete must only be successful once')
        self.assertEqual(int(errno), 102,
                         'must return product not found error')

        all_products = Product.objects.all()
        self.assertEquals(len(all_products), len(self.products) - 1,
                          'product was deleted only once')
Example #30
    def testT06ParseExecuteLiteralInput(self):
        """Test if Execute with LiteralInput and Output is executed"""
        
        #Note, bool input should be checked for False, if there is something like this in the code: bool("False")
        #Then the output will be True and the test will fail
        
        getpywps = pywps.Pywps(pywps.METHOD_GET)
        postpywps = pywps.Pywps(pywps.METHOD_POST)
        getinputs = getpywps.parseRequest("service=wps&version=1.0.0&request=execute&identifier=literalprocess&datainputs=[int=1;string=spam%40foo.com;float=1.1;zeroset=0.0;bool=False]")
        executeRequestFile = open(os.path.join(pywpsPath,"tests","requests","wps_execute_request-literalinput.xml"))
        postinputs = postpywps.parseRequest(executeRequestFile)

        getpywps.performRequest(getinputs)
        postpywps.performRequest(postinputs)
        getxmldom = minidom.parseString(getpywps.response)
        postxmldom = minidom.parseString(postpywps.response)

        getliteraldata = getxmldom.getElementsByTagNameNS(self.wpsns,"LiteralData")
        postliteraldata = postxmldom.getElementsByTagNameNS(self.wpsns,"LiteralData")
        self.assertEquals(len(getliteraldata),4)
        self.assertEquals(len(postliteraldata),4)

        self.assertEquals(getliteraldata[0].firstChild.nodeValue,
                postliteraldata[0].firstChild.nodeValue)
        self.assertEquals(getliteraldata[1].firstChild.nodeValue,
                postliteraldata[1].firstChild.nodeValue)
        self.assertEquals(getliteraldata[2].firstChild.nodeValue,
                postliteraldata[2].firstChild.nodeValue)
        self.assertEquals(getliteraldata[3].firstChild.nodeValue,
                postliteraldata[3].firstChild.nodeValue)
        #1,1.1,False,spam
        self.assertEquals(getliteraldata[0].firstChild.nodeValue, "1")
        self.assertEquals(getliteraldata[1].firstChild.nodeValue, "1.1")
        self.assertEquals(getliteraldata[2].firstChild.nodeValue, "False")
        self.assertEquals(getliteraldata[3].firstChild.nodeValue, "spam@foo.com")
Example #31
def test_PythonSuperquadricSource():
    src = PythonSuperquadricSource()
    src.Update()

    npts = src.GetOutputDataObject(0).GetNumberOfPoints()
    assert npts > 0

    src.SetThetaResolution(50)
    src.SetPhiResolution(50)
    src.Update()
    assert src.GetOutputDataObject(0).GetNumberOfPoints() > npts


def test_PythonCSVReader(fname):
    reader = PythonCSVReader()
    reader.SetFileName(fname)
    reader.Update()
    assert reader.GetOutputDataObject(0).GetNumberOfRows() > 0


if __name__ == "__main__":
    #test_PythonSuperquadricSource()
    #test_PythonCSVReader("/tmp/data.csv")

    from paraview.detail.pythonalgorithm import get_plugin_xmls
    from xml.dom.minidom import parseString
    for xml in get_plugin_xmls(globals()):
        dom = parseString(xml)
        print(dom.toprettyxml(" ", "\n"))
Example #32
 def parse_xmlfile(self, path):
     element = parseString(file(path).read())
     return MachineDatabaseParser(element.firstChild)
Example #33
if __name__ == '__main__':
    from os.path import join
    from paella.profile.base import PaellaConfig, PaellaConnection
    cfg = PaellaConfig()
    conn = PaellaConnection(cfg)
    from xmlgen import MachineDatabaseElement
    from paella.installer.base import CurrentEnvironment
    ev = CurrentEnvironment(conn, 'bard')
    
    xfile = file(join(cfg['db_bkup_path'], 'concord', 'machine_database.xml'))
    mdata = xfile.read()
    xfile.close()
    
    element = parseString(mdata)

    me = MachineDatabaseElement(conn)
    md = MachineDatabaseParser(element.firstChild)
    mh = MachineHandler(conn)

    def quick_wipe(conn):
        cursor = StatementCursor(conn)
        cursor.delete(table='machines')
        cursor.delete(table='partition_workspace')
        cursor.delete(table='partitions')
        cursor.delete(table='filesystem_mounts')
        cursor.delete(table='filesystem_disks')
        cursor.delete(table='partition_mounts')
        cursor.delete(table='machine_disks')
        cursor.delete(table='machine_types')
Example #34
def nlp_totrtale2(input_dict, widget):
    '''
    Calls the totrtale web service.

    The function splits huge documents into smaller pieces and sends them separately to the totrtale web service. If there are multiple smaller documents, it groups them and sends them together.
    '''
    import multiprocessing
    from xml.dom.minidom import parseString
    import time
    import math
    import copy

    progress_accumulator = 0  #progress for progress bar
    widget.progress = progress_accumulator
    widget.save()

    processes = 4  #number of processes for multiprocessing
    DOCUMENTS_SIZE = 3 * int(
        1e6)  #size of a group of documents in MB per process
    SINGLE_DOC_SIZE = 1 * int(1e6)  #size of a single document per process

    corpus = parseString(input_dict['corpus'])
    language = input_dict['lang'],
    postprocess = input_dict['postprocess'] == "true"
    xml = input_dict['xml'] == "true"

    params = {"language": language, "postprocess": postprocess, "xml": xml}

    tei_corpus = corpus.getElementsByTagName('teiCorpus')
    if tei_corpus:
        tei_head = '<?xml version="1.0" encoding="utf-8"?>\n' + \
                   '<teiCorpus xmlns="http://www.tei-c.org/ns/1.0">\n'
        tei_header = corpus.getElementsByTagName('teiHeader')[0].toxml() + "\n"
        tei_tail = '</teiCorpus>'

    pool = multiprocessing.Pool(processes=processes)
    documents = corpus.getElementsByTagName('TEI')
    documents_size, document_num, process_num = 0, 0, 1

    results, docs, single_docs = [], [], []
    for i, document in enumerate(documents):
        doc_len = len(
            document.getElementsByTagName('body')[0].getElementsByTagName('p')
            [0].childNodes[0].nodeValue)
        doc_title = document.getElementsByTagName(
            'title')[0].firstChild.nodeValue
        print doc_title
        if doc_len > SINGLE_DOC_SIZE:
            #split single huge document

            predhead = '<TEI xmlns="http://www.tei-c.org/ns/1.0">\n'
            title = '<title>' + doc_title + '</title>\n'
            head = '<text>\n<body>\n<p>\n'
            header = document.getElementsByTagName(
                'teiHeader')[0].toxml() + "\n"
            tail = '\n</p>\n</body>\n</text>\n</TEI>'

            document_text = document.getElementsByTagName(
                'body')[0].getElementsByTagName(
                    'p')[0].childNodes[0].nodeValue.strip().replace(
                        "&", "&amp;").replace("<", "&lt;").replace(
                            ">", "&gt;").replace("\"", "&quot;")

            prev_j, curr_j = 0, SINGLE_DOC_SIZE
            while (curr_j + 2) < len(document_text):
                while (curr_j + 2
                       ) < len(document_text) and document_text[curr_j:curr_j +
                                                                2] != ". ":
                    curr_j += 1
                sub_params = copy.deepcopy(params)
                if prev_j == 0:
                    sub_params[
                        "text"] = predhead + title + head + document_text[
                            prev_j:curr_j + 2] + tail
                else:
                    sub_params["text"] = predhead + head + document_text[
                        prev_j:curr_j + 2] + tail
                sub_params["doc_id"] = str(len(results))
                results.append(
                    pool.apply_async(totrtale_request, args=[sub_params]))
                if prev_j == 0:
                    single_docs.append(0)
                else:
                    single_docs.append(1)
                prev_j = curr_j + 2
                curr_j += SINGLE_DOC_SIZE
                document_num += 1
                process_num += 1

                if curr_j > doc_len:
                    sub_params = copy.deepcopy(params)
                    sub_params["text"] = predhead + head + document_text[
                        prev_j:] + tail
                    sub_params["doc_id"] = str(len(results))
                    results.append(
                        pool.apply_async(totrtale_request, args=[sub_params]))
                    document_num += 1
                    process_num += 1
                    single_docs.append(2)
            print "document was split", doc_title, len(single_docs)
        else:
            #group multiple smaller documents.
            docs.append(document.toxml())
            document_num += 1
            documents_size += doc_len

            if documents_size > DOCUMENTS_SIZE or (
                    document_num) % 10 == 0 or i == len(documents) - 1:
                documents_size = 0
                document_num = 0
                sub_params = copy.deepcopy(params)
                sub_params["text"] = "\n".join(docs)
                sub_params["doc_id"] = str(len(results))
                print "whole document was added", len(docs)
                results.append(
                    pool.apply_async(totrtale_request, args=[sub_params]))
                process_num += 1
                docs = []
                single_docs.append(-1)
    pool.close()

    #we need to join the results of totrtale processing back together. The function also updates the progress bar.
    response = ["" for i in results]
    progress = [True]
    while any(progress):
        time.sleep(1)
        progress = [not result.ready() for result in results]
        print progress
        for i, prog in enumerate(progress):
            if not prog and response[i] == "":
                try:
                    resp = json.loads(results[i].get(
                    ).content)[u'runToTrTaLeResponse'][u'runToTrTaLeResult']
                except Exception as e:
                    raise Exception(
                        "There was a problem processing your file.")

                if resp["error"] != "":
                    progress = [False]
                    raise Exception(resp["error"])
                if xml:
                    #results are in xml
                    if single_docs[i] == 0:
                        print "remove back", i
                        pos1 = resp["resp"].find("<s>")
                        pos2 = resp["resp"].find("</p>")
                        response[i] = predhead + header + head + resp["resp"][
                            pos1:pos2]
                    elif single_docs[i] == 2:
                        print "remove front", i
                        pos1 = resp["resp"].find("<s>")
                        response[i] = resp["resp"][pos1:]
                    elif single_docs[i] == 1:
                        print "remove both", i
                        pos1 = resp["resp"].find("<s>")
                        pos2 = resp["resp"].find("</p>")
                        response[i] = resp["resp"][pos1:pos2]
                    else:
                        print "nothing to remove"
                        response[i] = resp["resp"]
                else:
                    #results are tab separated
                    if single_docs[i] in [0, 1]:
                        pos2 = resp["resp"].find("</TEXT>")
                        response[i] = resp["resp"][:pos2]
                    else:
                        print "nothing to remove"
                        response[i] = resp["resp"]

                progress_accumulator += 1 / float(len(results)) * 100
                print progress_accumulator
                widget.progress = math.floor(progress_accumulator)

                widget.save()
    pool.join()

    #return output only if all processes are completed.
    if not any(progress):
        widget.progress = 100
        widget.save()
        response = "".join(response)

        if tei_corpus and xml:
            response = tei_head + tei_header + response + tei_tail
        return {'annotations': response}
Example #35
def pretify(string, pretty_print=True):
    parsed = minidom.parseString(string)
    # See:http://www.hoboes.com/Mimsy/hacks/geektool-taskpaper-and-xml/
    fix = re.compile(r'((?<=>)(\n[\t]*)(?=[^<\t]))|(?<=[^>\t])(\n[\t]*)(?=<)')
    return re.sub(fix, '', parsed.toprettyxml())
Example #36
#!/usr/bin/python

import urllib
import xml.dom.minidom as minidom

u = urllib.urlopen("http://www.livejournal.com/users/crschmidt/data/rss")
xmldata = u.read()
u.close()

xmldoc = minidom.parseString(xmldata)

metadata = {}

for i in xmldoc.getElementsByTagName("item"):
    link = ""
    musictext = ""
    moodtext = ""
    date = ""
    link = i.getElementsByTagName("link")[0].firstChild.nodeValue
    date = i.getElementsByTagName("pubDate")[0].firstChild.nodeValue
    music = i.getElementsByTagName("lj:music")
    mood = i.getElementsByTagName("lj:mood")
    if len(music):
        musictext = music[0].firstChild.nodeValue
    if len(mood):
        moodtext = mood[0].firstChild.nodeValue
    metadata[link] = {'music': musictext, 'mood': moodtext, 'date': date}

print metadata
Example #37
def prettify(elem):
    rough_string = ElementTree.tostring(elem, 'utf-8')
    reparsed = minidom.parseString(rough_string)
    return reparsed.toprettyxml(indent="  ")
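A short usage sketch, assuming prettify from the snippet above and its xml.etree.ElementTree / xml.dom.minidom imports are in scope:

from xml.etree import ElementTree

root = ElementTree.Element("config")            # build a small tree
ElementTree.SubElement(root, "item", name="a")
print(prettify(root))                           # indented XML text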
Example #38
def compare_xml(want, got):
    """Tries to do a 'xml-comparison' of want and got.  Plain string
    comparison doesn't always work because, for example, attribute
    ordering should not be important. Comment nodes are not considered in the
    comparison.

    Based on http://codespeak.net/svn/lxml/trunk/src/lxml/doctestcompare.py
    """
    _norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')
    def norm_whitespace(v):
        return _norm_whitespace_re.sub(' ', v)

    def child_text(element):
        return ''.join([c.data for c in element.childNodes
                        if c.nodeType == Node.TEXT_NODE])

    def children(element):
        return [c for c in element.childNodes
                if c.nodeType == Node.ELEMENT_NODE]

    def norm_child_text(element):
        return norm_whitespace(child_text(element))

    def attrs_dict(element):
        return dict(element.attributes.items())

    def check_element(want_element, got_element):
        if want_element.tagName != got_element.tagName:
            return False
        if norm_child_text(want_element) != norm_child_text(got_element):
            return False
        if attrs_dict(want_element) != attrs_dict(got_element):
            return False
        want_children = children(want_element)
        got_children = children(got_element)
        if len(want_children) != len(got_children):
            return False
        for want, got in zip(want_children, got_children):
            if not check_element(want, got):
                return False
        return True

    def first_node(document):
        for node in document.childNodes:
            if node.nodeType != Node.COMMENT_NODE:
                return node

    want, got = strip_quotes(want, got)
    want = want.replace('\\n','\n')
    got = got.replace('\\n','\n')

    # If the string is not a complete xml document, we may need to add a
    # root element. This allow us to compare fragments, like "<foo/><bar/>"
    if not want.startswith('<?xml'):
        wrapper = '<root>%s</root>'
        want = wrapper % want
        got = wrapper % got

    # Parse the want and got strings, and compare the parsings.
    want_root = first_node(parseString(want))
    got_root = first_node(parseString(got))

    return check_element(want_root, got_root)
Example #39
    def saveToXML(self, filename):
        self.psychopyVersion = psychopy.__version__  # make sure is current
        # create the dom object
        self.xmlRoot = xml.Element("PsychoPy2experiment")
        self.xmlRoot.set('version', __version__)
        self.xmlRoot.set('encoding', 'utf-8')
        # store settings
        settingsNode = xml.SubElement(self.xmlRoot, 'Settings')
        for settingName in sorted(self.settings.params):
            setting = self.settings.params[settingName]
            self._setXMLparam(
                parent=settingsNode, param=setting, name=settingName)
        # store routines
        routinesNode = xml.SubElement(self.xmlRoot, 'Routines')
        # routines is a dict of routines
        for routineName, routine in self.routines.items():
            routineNode = self._setXMLparam(
                parent=routinesNode, param=routine, name=routineName)
            # a routine is based on a list of components
            for component in routine:
                componentNode = self._setXMLparam(
                    parent=routineNode, param=component,
                    name=component.params['name'].val)
                for paramName in sorted(component.params):
                    param = component.params[paramName]
                    self._setXMLparam(
                        parent=componentNode, param=param, name=paramName)
        # implement flow
        flowNode = xml.SubElement(self.xmlRoot, 'Flow')
        # a list of elements(routines and loopInit/Terms)
        for element in self.flow:
            elementNode = xml.SubElement(flowNode, element.getType())
            if element.getType() == 'LoopInitiator':
                loop = element.loop
                name = loop.params['name'].val
                elementNode.set('loopType', loop.getType())
                elementNode.set('name', name)
                for paramName in sorted(loop.params):
                    param = loop.params[paramName]
                    paramNode = self._setXMLparam(
                        parent=elementNode, param=param, name=paramName)
                    # override val with repr(val)
                    if paramName == 'conditions':
                        paramNode.set('val', repr(param.val))
            elif element.getType() == 'LoopTerminator':
                elementNode.set('name', element.loop.params['name'].val)
            elif element.getType() == 'Routine':
                elementNode.set('name', '%s' % element.params['name'])
        # convert to a pretty string
        # update our document to use the new root
        self._doc._setroot(self.xmlRoot)
        simpleString = xml.tostring(self.xmlRoot, 'utf-8')
        pretty = minidom.parseString(simpleString).toprettyxml(indent="  ")
        # then write to file
        if not filename.endswith(".psyexp"):
            filename += ".psyexp"

        with codecs.open(filename, 'wb', encoding='utf-8-sig') as f:
            f.write(pretty)

        self.filename = filename
        return filename  # this may have been updated to include an extension
Example #40

# returns latitude and longitude from the api_url
def get_latlon(loc_ip):
    #returns an xml file 
    api_url = 'http://ip-api.com/xml/'
    #use loc_ip below for GAE local because localhost 127.0.0.1 will not show a latitude and longitude
    #comment out loc_ip when pushing the code to GAE Cloud (external Google App Engine)
    loc_ip = '76.181.140.45' #Use this line for GAE local testing
    try:
        xml = urllib2.urlopen(api_url + loc_ip).read()
    except urllib2.URLError, e:
        return "Unable to get geolocator service: " + str(e)
    
    if xml:
        pxml = minidom.parseString(xml)
        if pxml:
            p1 = pxml.getElementsByTagName('lat')
            lat = p1[0].childNodes[0].nodeValue
            p2 = pxml.getElementsByTagName('lon')
            lon =  p2[0].childNodes[0].nodeValue
            return lat,lon
        else:
            lat,lon = '0','0'
        return lat,lon
    return 0,0
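A hedged offline sketch of the same parsing step, using a canned response instead of calling ip-api.com:

from xml.dom import minidom

sample = '<query><lat>40.0</lat><lon>-83.0</lon></query>'  # made-up response body
pxml = minidom.parseString(sample)
lat = pxml.getElementsByTagName('lat')[0].childNodes[0].nodeValue
lon = pxml.getElementsByTagName('lon')[0].childNodes[0].nodeValue
print(lat, lon)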

# returns a google map
def gmaps_img(latd, longt):
    #streetview api "http://maps.googleapis.com/maps/api/streetview?size=200x200&location=40.720032,%20-73.988354&heading=235&sensor=false">
    maps_api ='http://maps.googleapis.com/maps/api/staticmap?size=420x300&sensor=false&' # minimal google map api
Example #41
def write_epilog():
    parsed = minidom.parseString(ET.tostring(root, 'utf-8'))
    print(parsed.toprettyxml(indent="  "),file=output_fd)
Example #42
times = open(pre+'/config/tracks.ini','r')  # path
for line in times:
	if len(line) > 0 and line[0] >= '0' and line[0] <= '9':
		tr = r.split(line)
		trk = tr[1]
		#print(tr)
		tim = tr[19]
		map[trk] = tim
		#print(trk + " " + tim)
times.close()
	
for t in trks:
	if t != '.git':
		# get road stats
		file = open(tdir+'/'+t+'/road.xml','r')
		data = file.read()
		file.close()
		
		dom = parseString(data)
		xTag = dom.getElementsByTagName('stats')[0].toxml()
		#print(xTag)
		xNew = xTag.replace('<stats','<s n="'+t+'" t="'+map.get(t,'0')+'"').replace('yaw="0"','').replace('pitch="0" ','').replace('roll="0" ','')
		xNew = xNew.replace('height=','h=').replace('length=','l=').replace('width=','w=').replace('bnkAvg=','ba=').replace('bnkMax=','bm=')
		xNew = xNew.replace('onPipe=','op=').replace('onTer=','ot=').replace('pipes=','p=')
		#print(xNew)
		print(t)
		stats.write(xNew+'\n');

stats.write('</roadstats>');
stats.close();
Example #43
def main():

    import json, localization, base64
    from xml.dom.minidom import parseString
    from class_macro import Macros
    from class_plugins import Plugins
    from class_timer import Timer
    from class_custom_event import CustomEvent
    from class_xml_macro import XMLMacros
    from class_xml_timer import XMLTimer
    from class_xml_custom_event import XMLCustomEvent
    from class_xml_plugin import XMLPlugin
    from class_xml_plugin_db import XMLPluginDB
    from class_xml_resource import XMLResource
    from utils.uuid import uuid4
    from widget_localization import LocalizationWidget
    from widget_plugins import WidgetPlugins
    from VEE_resources import create_plugin_dir, ResourceFolderManager
    from VEE_sqlite3 import DatabaseManager

    lang = localization.get_lang()

    if "formbutton_save_plugin" in request.arguments:
        plugin = Plugins()
        plugin.guid = str(uuid4())
        plugin.name = request.arguments.get("formtext_name", "")
        plugin.author = request.arguments.get("formtext_author", "")
        plugin.description = request.arguments.get("formtextarea_description",
                                                   "")
        plugin.version = request.arguments.get("formtext_version", "")
        plugin.zindex = "1"

        if "uploader" in request.arguments:
            file = request.arguments.get("uploader", "", castto=Attachment)
            if file:
                plugin.picture = str(uuid4())
                application.storage.write(plugin.picture, file.handler.read())

        plugin.save()
        create_plugin_dir(plugin.guid)

    if "formbutton_upload_plugin" in request.arguments:
        if request.arguments.get("uploader", "", castto=Attachment):
            try:
                file = request.arguments.get("uploader", "", castto=Attachment)
                xml_data = file.handler.read()

                dom = parseString(xml_data)
                node = XMLPlugin(dom)

                if not Plugins.get_by_guid(node.guid):
                    plugin = Plugins()
                    plugin.name = node.name
                    plugin.description = node.description
                    plugin.guid = node.guid
                    plugin.version = node.version
                    plugin.author = node.author
                    plugin.protected = node.protected

                    plugin_picture_name = ""
                    plugin.picture = ""
                    if node.picture:
                        plugin.picture = plugin_picture_name = str(uuid4())
                        application.storage.write(
                            plugin_picture_name,
                            base64.b64decode(node.picture))
                    plugin.save()
                    create_plugin_dir(plugin.guid)

                    for child in node.childs:

                        if child.tag == "timer":
                            child = XMLTimer(child)

                            if child.name:
                                timer = Timer()
                                timer.fill_from_xml(child, node.guid)
                        elif child.tag == "custom_event":
                            child = XMLCustomEvent(child)

                            if child.name:
                                custom_event = CustomEvent()
                                custom_event.fill_from_xml(child, node.guid)
                        elif child.tag == "macro":
                            child = XMLMacros(child)
                            if child.name and child.source:
                                macros = Macros()
                                macros.fill_from_xml(child, node.guid)
                        elif child.tag == "database":
                            child = XMLPluginDB(child)
                            if child.name:
                                DatabaseManager(plugin.guid).import_db(
                                    child.name,
                                    base64.b64decode(child.db_source))
                        elif child.tag == "resource":
                            child = XMLResource(child)

                            if child.name:
                                ResourceFolderManager(plugin.guid).import_res(
                                    child.name,
                                    base64.b64decode(child.res_source))
                        else:
                            raise Exception(
                                lang.get("xml_not_correctr_error",
                                         "xml_not_correctr_error"))
                else:
                    raise Exception(lang.get("plugin_exist", "plugin_exist"))

            except Exception, ex:
                self.growl.title = lang["error"]
                self.growl.text = ex
                self.growl.show = "1"
Example #44
                        help="the hypervisor for which to restrict the search")
    parser.add_argument("--id", dest="id", help="the template ID")
    parser.add_argument("--keyword", dest="keyword", help="List by keyword")
    parser.add_argument("--name", dest="name", help="the template name")
    parser.add_argument("--zoneid",
                        dest="zoneid",
                        help="list templates by zoneId")

    args = parser.parse_args()

    #Transform args to key=value list
    options = [
        "%s=%s" % (key, value) for key, value in vars(args).items()
        if value is not None
    ]

    command = "listTemplates"

    formatted_url = hu.format_url(command=command,
                                  option_list=options,
                                  url=url,
                                  apikey=apikey,
                                  secretkey=secretkey)

    try:
        response = urllib2.urlopen(formatted_url).read()
        xmldoc = minidom.parseString(response)
        print xmldoc.toprettyxml()
    except Exception, e:
        print 'Error !', e
Example #45
def replace_imports(paths):
    dic = {
        'from openerp.osv import fields, osv': 'from osv import fields, osv',
        'from openerp.tools.translate import _':
        'from tools.translate import _',
        'from openerp import pooler, tools': 'import pooler, tools',
        'osv.Model': 'osv.osv'
    }

    for path in paths.get('py', []):
        #~ f1 = open(path, 'r')
        #~ text = f1.read()
        #~ f1.close()
        #~ fname = sys.argv[1]
        dict_to_write = {}
        with open(path) as fin:
            parsed = ast.parse(fin.read())
        data = []
        with open(path, 'r') as file:
            data = file.readlines()
            data_ori = list(data)
        for node in ast.walk(parsed):
            # Checks for imports with openerp
            if isinstance(node, ast.ImportFrom):
                print node.__dict__, 'FFFFFFFFFFFFF'
                if node.__dict__.get(
                        'module') and 'openerp' in node.__dict__.get('module'):
                    #Check from openerp import algo replace for import algo
                    if 'openerp' == node.__dict__.get('module'):
                        from_import_replace = "import {}\n".format(", ".join([
                            name.name for name in node.__dict__.get('names')
                        ]))
                        data[node.__dict__.get("lineno") -
                             1] = from_import_replace
                        print from_import_replace, 'impt_line_replaceimpt_line_replaceimpt_line_replaceimpt_line_replaceimpt_line_replace'
                    #Check import openerp.algo1, openerp.algo2 and remove openerp
                    else:
                        import_replace = "from {} import {}\n".format(
                            node.__dict__.get('module').replace(
                                "openerp.", ''), ", ".join([
                                    name.name
                                    for name in node.__dict__.get('names')
                                ]))
                        data[node.__dict__.get("lineno") - 1] = import_replace
                        print import_replace, 'OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO'
            if hasattr(node, 'body') and isinstance(node.body, list):
                assigns = [x for x in node.body if type(x) == ast.Assign]
                clases = [x for x in node.body if type(x) == ast.ClassDef]
                print path, 'CLASSSSSSSSSSSSSSSSSSSSSSSSSS', clases
                #to check attribute Model in class
                for clase in clases:
                    for att in clase.__dict__.get("bases"):
                        print att.__dict__, '$$$$$$$$$$$$$$$'
                        if att.__dict__.get("attr") and att.__dict__.get(
                                "attr") == "Model":
                            class_attr = "class {}(osv.{}):\n".format(
                                clase.__dict__.get("name"),
                                att.__dict__.get("attr").replace(
                                    "Model", "osv"))
                            print class_attr, clase.__dict__.get(
                                "lineno") - 1, '#####################'
                            data[clase.__dict__.get("lineno") - 1] = class_attr
                    if len(clases) == 1:
                        term_class = "{}()\n".format(
                            clase.__dict__.get("name"))
                        data.append(term_class)
                    else:
                        if clases.index(clase) < len(clases) - 1:
                            before_line = clases[clases.index(clase) +
                                                 1].__dict__.get('lineno') - 2
                            term_class = "{}()\n".format(
                                clase.__dict__.get("name"))
                            data.insert(before_line, term_class)
                            print data[
                                before_line - 1], data[before_line], data[
                                    before_line +
                                    1], "&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&"
                        elif clases.index(clase) == len(clases) - 1:
                            term_class = "{}()\n".format(
                                clase.__dict__.get("name"))
                            data.append(term_class)
                            print data[
                                len(data) -
                                1], "&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&"
                for assign in assigns:
                    vars_node = [
                        name_var.id for name_var in assign.targets
                        if type(name_var) == ast.Name
                    ]
                    print vars_node
                    #check to _name var with "_"
                    if '_name' in vars_node:
                        print assign.__dict__, 'FFFFFFFFFFFFFFFFFFFFf', assign.targets[
                            0].id
                        dict_test = assign.__dict__.get('value').__dict__
                        name_of_class = "{}{} = '{}'\n".format(
                            " " * assign.__dict__.get("col_offset"),
                            assign.targets[0].id,
                            dict_test.get("s").replace('_', '.'))
                        data[assign.__dict__.get("lineno") - 1] = name_of_class
                        print data[
                            assign.__dict__.get("lineno") -
                            1], "DATAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
                    if '_columns' in vars_node:
                        dict_test = assign.__dict__.get('value').__dict__
                        if dict_test.get('keys', False) and dict_test.get(
                                'values', []):
                            for field in dict_test.get('values', []):
                                if field.__dict__.get("func", False):
                                    if field.__dict__.get("func").__dict__.get(
                                            "attr", False) == "char":
                                        args_field = [
                                            hola.arg
                                            for hola in field.__dict__.get(
                                                "keywords", [])
                                        ]
                                        if not 'size' in args_field:
                                            list_args = []
                                            list_args = [
                                                "'{}'".format(
                                                    arg.__dict__.get("s"))
                                                for arg in field.__dict__.get(
                                                    "args")
                                            ]
                                            for keyword in field.__dict__.get(
                                                    "keywords"):
                                                arg_str = ""
                                                dict_types = {
                                                    ast.Str: 's',
                                                    ast.Num: "n",
                                                    ast.Name: "id"
                                                }
                                                if type(
                                                        keyword.__dict__.get(
                                                            "value")
                                                ) == ast.Str:
                                                    #~ print "{} = '{}'".format(keyword.__dict__.get("arg"), keyword.__dict__.get("value").__dict__.get(dict_types.get(type(keyword.__dict__.get("value")))))
                                                    arg_str = "{} = '{}'".format(
                                                        keyword.__dict__.get(
                                                            "arg"),
                                                        keyword.__dict__.get(
                                                            "value").__dict__.
                                                        get(
                                                            dict_types.get(
                                                                type(
                                                                    keyword.
                                                                    __dict__.
                                                                    get("value"
                                                                        )))))
                                                else:
                                                    arg_str = "{} = {}".format(
                                                        keyword.__dict__.get(
                                                            "arg"),
                                                        keyword.__dict__.get(
                                                            "value").__dict__.
                                                        get(
                                                            dict_types.get(
                                                                type(
                                                                    keyword.
                                                                    __dict__.
                                                                    get("value"
                                                                        )))))
                                                list_args.append(arg_str)
                                            list_args.append("size=64")
                                            pre_format_field = "'{}': fields.char({}),".format(
                                                dict_test.get('keys')[
                                                    dict_test.get(
                                                        'values',
                                                        []).index(field)].
                                                __dict__.get("s"),
                                                ", ".join(list_args))
                                            print dict_test.get(
                                                'keys'
                                            )[dict_test.get(
                                                'values', []
                                            ).index(
                                                field
                                            )].__dict__, 'YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY'
                                            data[
                                                field.__dict__.get("lineno") -
                                                1] = "{}{}\n".format(
                                                    " " * dict_test.get(
                                                        'keys')[dict_test.get(
                                                            'values',
                                                            []).index(field)].
                                                    __dict__.get("col_offset"),
                                                    pre_format_field)
                                            #~ print data[field.__dict__.get("lineno")-1], 'HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH'
                                            #~ print path, field.__dict__, 'DICTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT'
        if data != data_ori:
            with open(path, 'w') as file:
                file.writelines(data)
    for path in paths.get('xml', []):
        f = open(path, 'r')
        parser = etree.XMLParser(remove_blank_text=True)
        tree = ET.parse(f, parser)
        f.close()
        print tree.getroot()
        read = tree.findall("./data/record")
        print read, 'READDDDDDDDDDDDDDDDDD'
        for record in read:
            types = record.findall('field')
            list_fields = [data.get('name') for data in types]
            print record.get('id', False), 'EEEEEEEEEEEEEEEEE'
            if not 'type' in list_fields and 'model' in list_fields:
                if 'arch' in list_fields:
                    pos = list_fields.index('arch')
                    x = etree.Element('field')
                    if types[pos].findall('./form'):
                        form_node = types[pos].findall('./form')
                        x.text = "form"
                        form_node[0].attrib.pop('version')
                        if form_node[0].findall('./sheet'):
                            sheet_nodes = list(
                                form_node[0].findall('./sheet')[0])
                            form_node[0].remove(
                                form_node[0].findall('./sheet')[0])
                            for node in sheet_nodes:
                                print node, 'NODEEEEEEEEEEEEEEEEE'
                                form_node[0].insert(0, node)
                                #~ form_node.append(node)
                            print "DESPUESSSSSSSSSSSSs", form_node[0].items()
                    elif types[pos].findall('./tree'):
                        x.text = "tree"
                    elif types[pos].findall('./search'):
                        x.text = "search"
                    else:
                        continue
                    x.set('name', 'type')
                    record.insert(0, x)
        #~ print minidom.parseString(ET.tostring(tree.getroot())).toprettyxml(indent="    ").encode('ascii', 'xmlcharrefreplace')
        with open(path, 'w') as out:
            doc_xml = minidom.parseString(ET.tostring(
                tree.getroot())).toprettyxml(indent="    ").encode(
                    'ascii', 'xmlcharrefreplace')
            out.write(doc_xml)
Example #46
from xml.dom.minidom import parseString

def Count_Tags(filename_1='test.xml', tagname_1='out'):
    file = open(filename_1, 'r')
    data = file.read()
    file.close()
    dom = parseString(data)
    return len(dom.getElementsByTagName(tagname_1))
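
# Hedged usage sketch (not part of the original snippet): count <out> elements in a
# tiny file; the filename and tag name are illustrative only.
if __name__ == '__main__':
    with open('test.xml', 'w') as sample:
        sample.write('<root><out/><out/><other/></root>')
    print(Count_Tags('test.xml', 'out'))  # -> 2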
#!/usr/bin/python
# -*- coding: utf-8 -*-

import urllib2
from xml.dom import minidom

gid = '103582791434443354'
file = '/home/gmod/ttt_three/garrysmod/data/gmn/whitelist'

try:
	response = urllib2.urlopen('http://steamcommunity.com/gid/' + gid + '/memberslistxml/?xml=1')

	xmldoc = minidom.parseString(response.read())
	users = xmldoc.getElementsByTagName('steamID64')

	f = open(file, 'w')  # write the SteamIDs to the whitelist path defined above
	
	for user in users :
		SteamID = int(user.firstChild.data)
		f.write( str(SteamID) + '\n')
		#print steamID
		
	f.close()
	#print "Wrote " + str(len(users)) + " SteamIDs"
	
except urllib2.HTTPError, e:
	#print e.code
	pass
except urllib2.URLError, e:
	#print e.args
	pass
Example #48
    if "formbutton_import" in request.arguments:
        if request.arguments.get("uploader", "", castto=Attachment):
            file = request.arguments.get("uploader", "", castto=Attachment)
            xml_data = file.handler.read()
            dom = parseString(xml_data)
            node = XMLMacros(dom)

            for child in node.childs:
                child = XMLMacros(child)
                if child.name and child.source:
                    macros = Macros()
                    macros.name = child.name
                    macros.code = child.source
                    macros.class_name = child.class_name
                    macros.is_button_macros = child.is_button
                    macros.on_board = child.on_board

                    picture_name = ""
                    macros.macros_picture = ""
                    if child.macros_picture:
Example #49
def parser_thinged(xml):
    """Parse the response from the ThingISBN service."""
    dom = parseString(xml)
    nodes = dom.getElementsByTagName("idlist")[0].getElementsByTagName("isbn")
    return [EAN13(_get_text(isbn)) for isbn in nodes]
Example #50
def prettifyXML(doc):
    uglyString = ElementTree.tostring(doc, 'utf-8')
    reparsed = minidom.parseString(uglyString)
    return reparsed.toprettyxml(indent='\t', encoding='utf-8')
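
# Hedged usage sketch (not part of the original snippet): round-trip a small element
# tree through prettifyXML(); the ElementTree/minidom imports are omitted in the excerpt.
if __name__ == '__main__':
    from xml.etree import ElementTree
    from xml.dom import minidom
    demo = ElementTree.Element('config')
    ElementTree.SubElement(demo, 'item', name='example')
    print(prettifyXML(demo))  # tab-indented, UTF-8 encoded output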
Example #51
 def __str__(self):
     return minidom.parseString(ET.tostring(self.getroot())).toprettyxml(indent=" "*3)
Example #52
import os
import sys

from time import sleep
from xml.dom.minidom import parseString

sys.path.append(os.getcwd())  # trick to allow importing python files from the current directory
try:
    import OSC
except:
    pqt.QMessageBox.critical(0, "Error", "Could not load OSC.py")

d = q.loadDocument("GoOSC.csd")

widgets = parseString(q.getSelectedWidgetsText())
widgetList = widgets.getElementsByTagName("bsbObject")


def getText(nodelist):
    rc = []
    for node in nodelist:
        if node.nodeType == node.TEXT_NODE:
            rc.append(node.data)
    return ''.join(rc)


address = pqt.QInputDialog.getText(0, "IP Address", 'IP address',
                                   pqt.QLineEdit.Normal, '192.168.0.5:9001')
send_address = (address[:address.index(':')],
                int(address[address.index(':') + 1:]))
Example #53
                50,  #this can default to a high number per user
                "hauth": 1,
                "q": term
            }
            searchURL = providerurl + "search/?%s" % urllib.urlencode(params)
            try:
                data = getNewzbinURL(searchURL)
            except exceptions.NewzbinAPIThrottled:
                #try again if we were throttled
                data = getNewzbinURL(searchURL)
            items = []
            if data:
                logger.info(u'Parsing results from <a href="%s">%s</a>' %
                            (searchURL, providerurl))

                try:
                    d = minidom.parseString(data)
                    node = d.documentElement
                    items = d.getElementsByTagName("item")
                except ExpatError:
                    logger.info(
                        'Unable to get the NEWZBIN feed. Check that your settings are correct - post a bug if they are'
                    )
                    items = []

            if len(items):

                for item in items:

                    sizenode = item.getElementsByTagName(
                        "report:size")[0].childNodes
                    titlenode = item.getElementsByTagName(
Example #54
    input_file = sys.argv[1]
    output_file = sys.argv[2]
    neverallow_only = (sys.argv[3] == "neverallow_only=t")
    policy = SELinuxPolicy()
    policy.from_file_name(input_file)  #load data from file

    # expand rules into 4-tuples for SELinux.h checkAccess() check
    xml_root = Element('SELinux_AVC_Rules')
    if not neverallow_only:
        count = 1
        for a in policy.allow_rules:
            expanded_xml = SELinux_CTS.expand_avc_rule_to_xml(
                policy, a, str(count), 'allow')
            if len(expanded_xml):
                xml_root.append(expanded_xml)
                count += 1
    count = 1
    for n in policy.neverallow_rules:
        expanded_xml = SELinux_CTS.expand_avc_rule_to_xml(
            policy, n, str(count), 'neverallow')
        if len(expanded_xml):
            xml_root.append(expanded_xml)
            count += 1

    #print out the xml file
    s = tostring(xml_root)
    s_parsed = minidom.parseString(s)
    output = s_parsed.toprettyxml(indent="    ")
    with open(output_file, 'w') as out_file:
        out_file.write(output)
Example #55
 def _extract_metadata_container(self, datastring):
     dom = minidom.parseString(datastring)
     metadata_node = self.find_first_child_named(dom, "metadata")
     metadata = self.extract_metadata(metadata_node)
     return {'body': {'metadata': metadata}}
Example #56
# <xbar.image>http://i.imgur.com/Q1PuzbN.png</xbar.image>
# <xbar.dependencies>python</xbar.dependencies>
# <xbar.abouturl>https://github.com/parvez/bitbar-plugins</xbar.abouturl>
#
# by Parvez

import urllib2
rateRequest = urllib2.Request(
    url='http://wu-converter.apple.com/dgw?apptype=finance',
    data=
    '<?xml version="1.0" encoding="utf8"?><request app="YGoAppleCurrencyWidget" appver="1.0" api="finance" apiver="1.0.0"><query id="0" type="convertcurrency"><from/><to/><amount/></query></request>',
    headers={'Content-Type': 'text/xml'})
rateResponse = urllib2.urlopen(rateRequest).read()

from xml.dom import minidom
xmlResponse = minidom.parseString(rateResponse)
xmlConversionList = xmlResponse.getElementsByTagName('conversion')

list = []
for item in xmlConversionList:
    nValue1 = item.childNodes[1].firstChild.nodeValue
    nValue2 = str(round(float(item.childNodes[3].firstChild.nodeValue), 2))
    if nValue1 == "INR":
        primaryValue = 'Rs ' + nValue2
    elif nValue1 == "USD":
        discard = ''
    else:
        list.append(nValue1 + ': ' + nValue2)

print primaryValue
print("---")
Example #57
 def deserialize(self, text):
     dom = minidom.parseString(text)
     metadata_node = self.find_first_child_named(dom, "metadata")
     metadata = self.extract_metadata(metadata_node)
     return {'body': {'metadata': metadata}}
Example #58
 def update(self, datastring):
     dom = minidom.parseString(datastring)
     metadata_item = self.extract_metadata(dom)
     return {'body': {'meta': metadata_item}}
Example #59
def read_azure_ovf(contents):
    try:
        dom = minidom.parseString(contents)
    except Exception as e:
        raise BrokenAzureDataSource("invalid xml: %s" % e)

    results = find_child(dom.documentElement,
                         lambda n: n.localName == "ProvisioningSection")

    if len(results) == 0:
        raise NonAzureDataSource("No ProvisioningSection")
    if len(results) > 1:
        raise BrokenAzureDataSource("found '%d' ProvisioningSection items" %
                                    len(results))
    provSection = results[0]

    lpcs_nodes = find_child(
        provSection,
        lambda n: n.localName == "LinuxProvisioningConfigurationSet")

    if len(lpcs_nodes) == 0:
        raise NonAzureDataSource("No LinuxProvisioningConfigurationSet")
    if len(lpcs_nodes) > 1:
        raise BrokenAzureDataSource(
            "found '%d' %ss" %
            ("LinuxProvisioningConfigurationSet", len(lpcs_nodes)))
    lpcs = lpcs_nodes[0]

    if not lpcs.hasChildNodes():
        raise BrokenAzureDataSource("no child nodes of configuration set")

    md_props = 'seedfrom'
    md = {'azure_data': {}}
    cfg = {}
    ud = ""
    password = None
    username = None

    for child in lpcs.childNodes:
        if child.nodeType == dom.TEXT_NODE or not child.localName:
            continue

        name = child.localName.lower()

        simple = False
        value = ""
        if (len(child.childNodes) == 1
                and child.childNodes[0].nodeType == dom.TEXT_NODE):
            simple = True
            value = child.childNodes[0].wholeText

        attrs = dict([(k, v) for k, v in child.attributes.items()])

        # we accept either UserData or CustomData.  If both are present
        # then behavior is undefined.
        if name == "userdata" or name == "customdata":
            if attrs.get('encoding') in (None, "base64"):
                ud = base64.b64decode(''.join(value.split()))
            else:
                ud = value
        elif name == "username":
            username = value
        elif name == "userpassword":
            password = value
        elif name == "hostname":
            md['local-hostname'] = value
        elif name == "dscfg":
            if attrs.get('encoding') in (None, "base64"):
                dscfg = base64.b64decode(''.join(value.split()))
            else:
                dscfg = value
            cfg['datasource'] = {DS_NAME: util.load_yaml(dscfg, default={})}
        elif name == "ssh":
            cfg['_pubkeys'] = load_azure_ovf_pubkeys(child)
        elif name == "disablesshpasswordauthentication":
            cfg['ssh_pwauth'] = util.is_false(value)
        elif simple:
            if name in md_props:
                md[name] = value
            else:
                md['azure_data'][name] = value

    defuser = {}
    if username:
        defuser['name'] = username
    if password and DEF_PASSWD_REDACTION != password:
        defuser['passwd'] = encrypt_pass(password)
        defuser['lock_passwd'] = False

    if defuser:
        cfg['system_info'] = {'default_user': defuser}

    if 'ssh_pwauth' not in cfg and password:
        cfg['ssh_pwauth'] = True

    return (md, ud, cfg)
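
# Hedged usage sketch (not part of the original module): a minimal document that
# read_azure_ovf() accepts. find_child below is a stand-in with the behavior the code
# above relies on (return the direct children matching a predicate); the real helper
# and the minidom/base64 imports live elsewhere in cloud-init.
if __name__ == '__main__':
    import base64
    from xml.dom import minidom

    def find_child(node, filter_func):
        # stand-in: direct element children matching the predicate
        return [c for c in node.childNodes
                if c.nodeType == c.ELEMENT_NODE and filter_func(c)]

    sample_ovf = ('<Environment><ProvisioningSection>'
                  '<LinuxProvisioningConfigurationSet>'
                  '<HostName>azure-vm-01</HostName>'
                  '<CustomData encoding="base64">I2Nsb3VkLWNvbmZpZwo=</CustomData>'
                  '</LinuxProvisioningConfigurationSet>'
                  '</ProvisioningSection></Environment>')
    md, ud, cfg = read_azure_ovf(sample_ovf)
    print(md['local-hostname'])  # azure-vm-01
    print(ud)                    # decoded CustomData: '#cloud-config\n'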
Example #60
 def deserialize(self, text):
     dom = minidom.parseString(text)
     metadata_item = self.extract_metadata(dom)
     return {'body': {'meta': metadata_item}}