Ejemplo n.º 1
0
def applyAtomText(node, text):
    """Populate an Atom text construct on `node` (RFC 4287 Section 3.1;
    the "RFC4248" in the original docstring appears to be a typo).

    `text` is either a plain string (escaped and stored as node text) or a
    dict with key 'text' and optional 'type' ('text', 'html' or 'xhtml').
    For 'xhtml' the content is wrapped in an XHTML <div> and appended as a
    child element; unparseable XHTML falls back to a placeholder div.
    """
    if isinstance(text, str):
        # NOTE(review): cgi.escape is deprecated (removed in Python 3.8);
        # html.escape is the modern equivalent.
        node.text = cgi.escape(text)
    elif isinstance(text, dict):
        # NOTE(review): `content` is assigned but never used.
        content = text['text']
        # `type` shadows the builtin; kept as-is in this doc-only pass.
        type = text.get('type', 'text')
        assert type in ('text', 'html', 'xhtml')
        node.attrib['type'] = type
        if type == 'xhtml':
            body = '<div xmlns="http://www.w3.org/1999/xhtml">%s</div>'
            body = body % text['text']
            # TODO: try catch around, and if not xml fallback to html
            try:
                body = fromstring(body.encode('utf-8'))
            except Exception, e:
                # Parse failed: dump diagnostics to stdout and substitute
                # a fixed placeholder div instead of the broken content.
                print 80 * '-'
                print body
                print e
                body = '<div xmlns="http://www.w3.org/1999/xhtml">%s</div>'
                body = body % 'Invalid XHTML Content. Could not render.'
                body = fromstring(body)
            node.append(body)
        else:
            node.text = cgi.escape(text['text'])
Ejemplo n.º 2
0
 def test_connect(self, username="******", password="******"):
     """Smoke test: fetch Story assets from a VersionOne server and check
     that the response root element is <Assets>.

     NOTE(review): the credential defaults are scrubbed placeholders.
     """
     server = V1Server(username=username, password=password)
     code, body = server.get("/rest-1.v1/Data/Story")
     print "\n\nCode: ", code
     print "Body: ", body
     elem = fromstring(body)
     self.assertThat(elem.tag, Equals("Assets"))
Ejemplo n.º 3
0
def httpcallswrapper(url=None, method=None, body=None, headers=None):
    """HTTP request wrapper with Fibonacci back-off retries.

    Makes up to len(fibonic) attempts, sleeping fibonic[i] seconds after a
    non-200 status, an eBay ACK failure, or a transport error.  Returns
    [responseheaders, rawbody] on success, or None (implicitly) when every
    attempt fails.
    """
    fibonic = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
    http = Http()
    for i in range(0, len(fibonic)):
        try:
            responseheaders, rawbody = http.request(uri=url, method=method, body=body, headers=headers)
            if str(responseheaders['status']) != '200':
                if ETL_DEBUG_PRINT:
                    print "failed " + str(url)
                    #log_critical_message("failed " + str(url))
                time.sleep(fibonic[i])
            else:
                # NOTE(review): `isxml` is never set to True, so the
                # `if isxml == False` fallthrough below returns the body even
                # right after an ACK failure slept for a retry — the ACK retry
                # appears to be bypassed; confirm intended behavior.
                isxml = False
                try:
                    xml = fromstring(rawbody) 
                    namespaces = ['http://www.ebay.com/marketplace/search/v1/services', 'urn:ebay:apis:eBLBaseComponents']
                    for namespace in namespaces:
                        remove_namespace(xml, namespace)
                    # eBay replies use 'ack' or 'Ack' depending on the API.
                    if xml.findtext('ack') in ["Failure", "Error"] or xml.findtext('Ack') in ["Failure", "Error"]:
                        print "ACK failure retrying status code " +str(responseheaders)+" on url "+ str(url)
                        #log_critical_message("ACK failure retrying status code " +str(responseheaders)+" on url "+ str(url))
                        print str(rawbody)
                        #log_critical_message(str(rawbody))
                        time.sleep(fibonic[i])
                    else:
                        return [responseheaders, rawbody]
                except Exception, e:
                    print e
                if isxml == False:
                    return [responseheaders, rawbody]
                
        except Exception, e:
            log_exception_message(str(e))
            time.sleep(fibonic[i])
Ejemplo n.º 4
0
 def test_connect(self, username='******', password='******'):
   """Smoke test against the hosted v1sdktesting instance: fetch Story
   names and check the response root element is <Assets>."""
   server = V1Server(address='www14.v1host.com', username=username, password=password,instance='v1sdktesting')
   code, body = server.fetch('/rest-1.v1/Data/Story?sel=Name')
   print "\n\nCode: ", code
   print "Body: ", body
   elem = fromstring(body)
   self.assertThat(elem.tag, Equals('Assets'))
Ejemplo n.º 5
0
    def test_get_multiple_items_detail(self):
        """
            Test for valid response for officeshoes for get item detail data

            Fetches item 130531566351, strips the eBay namespace, then checks:
            Ack == Success, the string-typed detail fields, at least one item
            specific, and at least one variation with SKU/quantity data and a
            per-variation name/value list.
            NOTE(review): StringType comes from the Python 2 `types` module.
        """
        xml = get_item_detail(itemid=130531566351)
        xml = fromstring(xml)
        namespace = 'urn:ebay:apis:eBLBaseComponents'
        remove_namespace(xml, namespace)
        self.assertTrue(xml.findtext('Ack') == "Success", "Ack node got failure")

        self.assertTrue(type(xml.findtext('Item/ItemID')) is StringType, "Response node Item/ItemID should be string")
        self.assertTrue(type(xml.findtext('Item/PictureDetails/GalleryURL')) is StringType, "Response node Item/PictureDetails/GalleryURL should be string")
        self.assertTrue(type(xml.findtext('Item/PictureDetails/PhotoDisplay')) is StringType, "Response node Item/PictureDetails/PhotoDisplay should be string")
        
        itemspecifics = xml.findall('Item/ItemSpecifics/NameValueList')
        self.assertTrue(len(itemspecifics) > 0, "There should be item specifics but got none")        
        for tt in itemspecifics:
            self.assertTrue(tt.findtext('Name'), "item specific node has no Name node")
            self.assertTrue(tt.findtext('Value'), "item specific node has no Value node")
        variations = xml.findall('Item/Variations/Variation')
        self.assertTrue(len(variations) > 0, "There should be variations but got none")
        for variation in variations:
            self.assertTrue(variation.findtext('SKU'), "item variation node has no Name node")
            self.assertTrue(variation.findtext('Quantity'), "item variation node has no Quantity node")
            self.assertTrue(variation.findtext('SellingStatus/QuantitySold'), "item variation node has no SellingStatus/QuantitySold node")
            variationnamevalue = variation.findall('VariationSpecifics/NameValueList')
            self.assertTrue(len(variationnamevalue) > 0, "There should be Name value list for each variation but got none")
            for subSpe in variationnamevalue:
                self.assertTrue(subSpe.findtext('Name'), "item specific node has no Name node")
                self.assertTrue(subSpe.findtext('Value'), "item specific node has no Value node")
Ejemplo n.º 6
0
 def test_get_multiple_items_analytics(self):
     """
         Test for valid response for officeshoes for bestmatch data

         Fetches best-match analytics for one item (locale GB) using a
         hardcoded auth-token fixture, strips the service namespace, and
         asserts every item carries the expected analytics sub-nodes.
         NOTE(review): the token is a captured fixture and is presumably
         long expired — confirm before relying on this test live.
     """
     token = 'AgAAAA**AQAAAA**aAAAAA**5XalTQ**nY+sHZ2PrBmdj6wVnY+sEZ2PrA2dj6wBkISmCpKGpQ2dj6x9nY+seQ**e8IAAA**AAMAAA**gGyOR7+2j3LDKDyi71k34bfoFk6AQgIZ5LVIsPuUFnqfBua1/sb4VDLXeovINnWEeg6jEJgjnD7akKId38j23A54A4vfkg3nggilz+tJHNZIx8ZV6s8UZvZqgA696Lbwqz61eiImZ7KZeFjeS5BsvHam38acaSqbMj8fu3UHdcd+F15NNqXwEdXOLCO34I+TkAontsGLDo/VZM87adUDLU80t7m7dCzkLqsmQbN9a7XVQTn0bNdM/I58tyVnaQ8wvRGd4EF2Kkd38Ksm61mLsY4u67vcFeTjIUBuiI3foyfit64T0Zy2RbgVgTq3q1tK479fabdBHvWeDEW7yKhfJ60hLkJ4b0THnkz2xN6cXkp84YYi4xvQfruyRBsuR7156eLMfpqd6vlgYdXrds01PyUJWpTX1gNwu4UCTg1gHXCpNMoOUmQNhhSefApk1HeOA6ziYOfq0LwuW5kQEDNxcOuh1FCHnCwd4VEj9YqBBYQVsAz7utFO/05Qsxd2WbqJGlFh0b4JioVZgSbcwBFvdd5fSbLUDwGcw8rl+SPPpfCenk2HQsLKdFPI3QC9y4rFW6LUW5dM+fjA4tHKGzbQpIM1ofF2VmnfEpmPCwxAvF0N6BX5SqSDdO1gQf7GWATjy3i3fz2NViE79f7xEEdA+ShgjQ6pZVExo7WwF+R1tEo3rupwYCZ6eNPfV0C3OW3LtHLpOyqY3foZegV1CnGY04IiGgd1UxS6BJC/oYtEU7iJhArEatLD8soYHddMz0Q0'
     locale = 'GB'
     xml = get_items_bestmatch_data(locale=locale, token=token, itemid='<itemId>130531566351</itemId>')
     xml = fromstring(xml)
     namespace = 'http://www.ebay.com/marketplace/search/v1/services'
     remove_namespace(xml, namespace)
     self.assertTrue(xml.findtext('ack') == "Success", "Ack node got failure")
     results = xml.findall('item')
     # NOTE(review): findall returns a list, so this type check is always
     # about the container, never the contents.
     self.assertEqual(type(results), list, "Response is not list")
     self.assertTrue(len(results) > 0, "Item not present when it should be")
     for item in results:
         self.assertTrue(item.findtext('itemId'), "item has no itemId node")
         self.assertTrue(item.findtext('itemRank'), "item has no itemRank node")
         self.assertTrue(item.findtext('primaryCategory/categoryId'), "item has no primaryCategory/categoryId node")
         self.assertTrue(item.findtext('bestMatchData/salesCount'), "item has no bestMatchData/salesCount node")
         self.assertTrue(item.findtext('quantityAvailable'), "item has no quantityAvailable node")
         self.assertTrue(item.findtext('quantitySold'), "item has no quantitySold node")
         self.assertTrue(item.findtext('bestMatchData/viewItemCount'), "item has no bestMatchData/viewItemCount node")
         self.assertTrue(item.findtext('bestMatchData/salesPerImpression'), "item has no bestMatchData/salesPerImpression node")
         self.assertTrue(item.findtext('sellingStatus/currentPrice'), "item has no sellingStatus/currentPrice node")
         self.assertTrue(item.findtext('bestMatchData/viewItemPerImpression'), "item has no bestMatchData/viewItemPerImpression node")
         self.assertTrue(item.findtext('bestMatchData/impressionCountRange/min'), "item has no bestMatchData/impressionCountRange/min node")
         self.assertTrue(item.findtext('bestMatchData/impressionCountRange/max'), "item has no bestMatchData/impressionCountRange/max node")
Ejemplo n.º 7
0
	def parse_results(self):
		"""Populate self.response_data from the XML held in self.utext.

		Header tags are hardcoded for *Stats functions; each matching
		subtree is walked via self.traverse and the resulting self.res_dat
		lists are accumulated under the header key.
		"""
		##lookup result fields in db
		## insert into dictionary with field names as lookup key
		## parse structure hardcoded for now, variables returned from db (above)
		#returns result_data

		##get response heading tags --> can make this db call in future
		if (re.search("Stats",self.func)):
			self.response_headers = ['trafficStats','listingStats']
			self.response_globals = {'trafficStats':[], 'listingStats':['weekEndingDate']}
			#print self.response_globals['listingStats']
		else:
			##update this!
			# NOTE(review): non-Stats functions are unhandled here, so
			# response_headers may be stale or undefined below.
			print 'np'

		root = fromstring(self.utext)
		for header in self.response_headers:
			trunk = root.findall('.//'+header)
			for treetop in trunk:
				for tree in treetop:
					#print tree
					self.res_dat = []; globe = {}
					self.traverse(tree, globe, header)
					#print self.res_dat
					if (header in self.response_data.keys()):
						self.response_data[header] = self.response_data[header] + self.res_dat
					else:
						self.response_data[header] = self.res_dat
Ejemplo n.º 8
0
    def parseErrorAndResponse(self, data):
        """Parse the returned XML payload and surface any <ERROR> element.

        Returns the parsed document when no error is present; otherwise
        raises CIMError carrying the error code and description.  A body
        that cannot be parsed at all raises CIMError(0, ...).
        """
        try:
            document = fromstring(data)
        except Exception:
            raise CIMError(
                0, 'Incorrect XML response for {0}'.format(self.classname))

        err = document.find('.//ERROR')
        if err is None:
            return document

        description = err.get('DESCRIPTION')
        if description and "context cannot be found" in description:
            # Replace the opaque server message with actionable guidance.
            err.set("DESCRIPTION",
                    "Response is not complete for {} classname. "
                    "Please check zWBEMOperationTimeout and "
                    "zWBEMMaxObjectCount properties".format(self.classname)
            )

        try:
            error_code = int(err.attrib['CODE'])
        except ValueError:
            # Non-numeric CODE attribute: fall back to the generic code.
            error_code = 0

        raise CIMError(error_code, err.attrib['DESCRIPTION'])
Ejemplo n.º 9
0
    def parseErrorAndResponse(self, data):
        """Parse returned XML for errors, then convert into
        appropriate Python objects.

        Deferred-based variant: instead of returning, fires
        self.deferred.callback(self.parseResponse(xml)) on success, or
        self.deferred.errback(CIMError) on a parse failure or when the
        response contains an <ERROR> element.
        """
        try:
            xml = fromstring(data)
        except Exception:
            self.deferred.errback(
                CIMError(
                    0,
                    'Incorrect XML response for {0}'.format(self.classname)))
            return

        error = xml.find('.//ERROR')

        if error is not None:
            msg = error.get('DESCRIPTION')
            if msg and "context cannot be found" in msg:
                # Replace the opaque server message with actionable guidance.
                error.set(
                    "DESCRIPTION",
                    "Response is not complete for {} classname. "
                    "Please check zWBEMOperationTimeout and "
                    "zWBEMMaxObjectCount properties".format(self.classname))
        else:
            self.deferred.callback(self.parseResponse(xml))
            return

        try:
            code = int(error.attrib['CODE'])
        except ValueError:
            # Non-numeric CODE attribute: fall back to the generic code 0.
            code = 0

        self.deferred.errback(CIMError(code, error.attrib['DESCRIPTION']))
Ejemplo n.º 10
0
 def test_connect(self, username='******', password='******'):
     """Smoke test: fetch Story assets from a VersionOne server and check
     that the response root element is <Assets>."""
     server = V1Server(username=username, password=password)
     code, body = server.get('/rest-1.v1/Data/Story')
     print "\n\nCode: ", code
     print "Body: ", body
     elem = fromstring(body)
     self.assertThat(elem.tag, Equals('Assets'))
Ejemplo n.º 11
0
def parse_item (data):
    """Extract (doc_id, title, text) from a single news-item XML blob.

    Both the title and the body are run through the HTML parser so any
    embedded markup is stripped down to plain text.
    """
    tree = fromstring(data)
    title_node = tree.find('title')
    body_node = tree.find('text')

    title = lxml.html.fromstring(title_node.text).text_content()
    text = lxml.html.fromstring(tostring(body_node)).text_content()

    return tree.attrib.get('itemid'), title, text
Ejemplo n.º 12
0
 def test_search(self):
     """Issue a canned search and verify the request/response round-trip."""
     request_data = getData('search_request.txt')
     response_data = getData('search_response.txt')
     conn = SolrConnection(host='localhost:8983', persistent=True)
     sent = fakehttp(conn, response_data)
     result = conn.search(q='+id:[* TO *]', wt='xml', rows='10', indent='on')
     parsed = fromstring(result.read())
     self.failUnlessEqual(str(sent), request_data)
     self.failUnless(parsed.find('.//doc'))
Ejemplo n.º 13
0
 def test_search(self):
     """Canned-search round-trip: the captured request must match the
     fixture and the parsed response must contain a <doc> element."""
     expected_request = getData('search_request.txt')
     canned_response = getData('search_response.txt')
     connection = SolrConnection(host='localhost:8983', persistent=True)
     captured = fakehttp(connection, canned_response)
     response = connection.search(q='+id:[* TO *]', wt='xml', rows='10', indent='on')
     tree = fromstring(response.read())
     self.failUnlessEqual(str(captured), expected_request)
     self.failUnless(tree.find('.//doc'))
 def test_get_talk_details_broken_date(self):
     """
     It just seems likely this will break sooner or later, check that we handle gracefully.
     """
     item = fromstring(minimal_item)
     # Same date, different formatting
     item.find('./pubDate').text = "Sat, 04 02 2012 08:14:00"
     details = self.talks.get_talk_details(item)
     today = datetime.strftime(datetime.now(), "%d.%m.%Y")
     self.assertEqual(today, details['date'])
Ejemplo n.º 15
0
 def test_token_user(self):
     """
         Test for valid response for officeshoes for get item detail data

         NOTE(review): the summary above looks copy-pasted from the
         item-detail test; this actually resolves a hardcoded auth-token
         fixture via token_user and checks Ack and the resolved UserID.
     """
     xml = token_user("AgAAAA**AQAAAA**aAAAAA**fAKrTA**nY+sHZ2PrBmdj6wVnY+sEZ2PrA2dj6AEkYWmDpGFowidj6x9nY+seQ**e8IAAA**AAMAAA**tffEcQxfrIgX54HR4wY2wmBbvpC+9DsqfFdrNzmpXAzTE+CXC+OOIFsWUYpzjoyT6ILhG9IlUYV74PdsJtOWVIRyBvvpaHdNb/4C+gPSCu9NTdk552eTI7ioqgVjvCLVr10Fw8StkIK+IS4WlL2LBINat1n2qZH+ySc8ktFCktp8rGHUMfn4m97Vl3hjxPJVKIdB5zdHwy0uLh/cKFURy6FeMeJGNZdBKnYpO1znBzZ9gILVngAPlJofggkPWYa4NFQ7dFY+ylY2VQpPBkEz/t4ZCMT+u9AqiRTrjIclDvmE9Z5TkELXJGVSIxPCQe1WqspLndwCPlm2Nd1h9GrXqFElIkFWFLmXPbOpTZ6ZT81rGusasxI3GwyytjnRKpR8WqUhc20aU0ZD1EnSfGMjHDyE5fPef1LXN0DKXCmz8QIsrZKkb9R1DJYGomK7toCfKGUry3VCmtuiNrH116OS+6fUmUyAC73a93Sb/4ZH3RloYCCJnZtOQF+/WqomCyOR5CAzXMeuXIRRKfKfuaek40H+g5h4CsGLuYooesEounOGQTkNk496fmSfZh+pK/ch8w1Mxu450j0HwD+olnVbNeKy0OIHt9HcHo7QjQwr25wE6G+TPpBK3yr9J0nORN5IQI8uDccFxABYeCFTLuAPMA2foH84gOoN0nfSSGt2U5Hl3LH8rwkU05o+1HCIGI14CpWhDYq5gTIcv2jHzIqnnjqDGZg4h0DcNAf6opoCkAEpAwKzWvHUciKTDaC77K3o")
     xml = fromstring(xml)
     namespace = 'urn:ebay:apis:eBLBaseComponents'
     remove_namespace(xml, namespace)
     self.assertTrue(xml.findtext('Ack') == "Success", "Ack node got failure")
     self.assertTrue(xml.findtext('User/UserID') == "company_test", "User/UserID node for company_test value")
Ejemplo n.º 16
0
 def test_connect(self, username='******', password='******'):
     """Smoke test against the hosted v1sdktesting instance: fetch Story
     names and check the response root element is <Assets>."""
     server = V1Server(address='www14.v1host.com',
                       username=username,
                       password=password,
                       instance='v1sdktesting')
     code, body = server.fetch('/rest-1.v1/Data/Story?sel=Name')
     print "\n\nCode: ", code
     print "Body: ", body
     elem = fromstring(body)
     self.assertThat(elem.tag, Equals('Assets'))
Ejemplo n.º 17
0
 def test_get_talk_details_broken_date(self):
     """
     It just seems likely this will break sooner or later, check that we handle gracefully.

     Variant that also asserts the exact parse-failure log message: on an
     unparseable pubDate the details fall back to today's date.
     """
     document = fromstring(minimal_item)
     document.find('./pubDate').text = "Sat, 04 02 2012 08:14:00"  # Same date, different formatting
     details = self.talks.get_talk_details(document)
     date_now = datetime.strftime(datetime.now(), '%d.%m.%Y')
     self.assertEqual(date_now, details['date'])
     self.logger.assert_called_with("Could not parse date 'Sat, 04 02 2012 08': time data 'Sat, 04 02 2012 08' does not match format '%a, %d %b %Y %H:%M:%S'")
Ejemplo n.º 18
0
 def test_search(self):
     """Search with a field list; compare request params order-insensitively."""
     request_data = getData('search_request.txt')
     response_data = getData('search_response.txt')
     conn = SolrConnection(host='localhost:8983', persistent=True)
     sent = fakehttp(conn, response_data)
     result = conn.search(q='+id:[* TO *]', fl='* score', wt='xml',
                          rows='10', indent='on')
     parsed = fromstring(result.read())

     def normalize(params):
         # Request parameter order is not significant; sort before comparing.
         return sorted(params.split('&'))

     self.assertEqual(normalize(sent.get()), normalize(request_data))
     self.failUnless(parsed.find('.//doc'))
Ejemplo n.º 19
0
def main():
    """Nagios-style monit check.

    Queries monit's /_status XML endpoint on the given host/port and exits
    0 when every service reports status 0, 2 when any service is down or
    the endpoint is unreachable/unparseable, 3 on a usage error.
    """
    host = None
    port = None

    try:
        opts, args = getopt.getopt(sys.argv[1:], "hH:P:", ["help", "host=", "port="])
    except getopt.GetoptError:
        usage()
        sys.exit(3)

    for key, val in opts:
        if key in ("-H", "--host"):
            host = val
        if key in ("-P", "--port"):
            port = int(val)
        if key in ("-h", "--help"):
            usage()
            sys.exit(3)

    # NOTE(review): `== None` should be `is None` (PEP 8); same result here.
    if host == None or port == None:
        usage()
        sys.exit(3)

    conn = httplib.HTTPConnection(host+":"+str(port))
    try:
        conn.request("GET", "/_status?format=xml")
    except:
        print "Unable to connect to %s port %s"%(host,port)
        sys.exit(2)

    try:
        doc = fromstring(conn.getresponse().read())
        # `iter` shadows the builtin; kept as-is in this doc-only pass.
        iter = doc.findall("service")
    except:
        print "Unable to parse XML"
        sys.exit(2)
    
    mylist = []
    for element in iter:
        try:
            name = element.find("name")
            status = element.find("status")
        except:
            print "Unable to parse XML"
            sys.exit(2)

        # A status text of "0" means the service is up; collect the rest.
        if "0" != status.text:
            mylist.append(name.text)

    if len(mylist) > 0:
        print "Monit services down:", str(mylist)
        sys.exit(2)

    print "Monit: All services OK"
    sys.exit(0)
Ejemplo n.º 20
0
    def get_new_talks(self):
        """
        Returns talks as dicts {title:, author:, thumb:, date:, duration:, link:}.

        Downloads the TED talks RSS feed and parses each channel/item via
        self.get_talk_details; when titles collide, the last item wins.
        NOTE(review): dict.itervalues() is Python 2 only.
        """
        talksByTitle = {}
        rss = get_document('http://feeds.feedburner.com/tedtalks_video')
        for item in fromstring(rss).findall('channel/item'):
            talk = self.get_talk_details(item)
            talksByTitle[talk['title']] = talk

        return talksByTitle.itervalues()
Ejemplo n.º 21
0
def get_interface_devices(dom):
    """Return the network interface target device names for *dom*.

    Parses the domain's libvirt XML description; on Python 2.6 the
    module-level ElementTree shim is used (``mypythonversion`` is defined
    elsewhere in this module).  First-seen order is kept, no duplicates.
    """
    if mypythonversion == '2.6':
        root = ElementTree.fromstring(dom.XMLDesc(0))
    else:
        root = fromstring(dom.XMLDesc(0))

    names = []
    for target in root.findall("devices/interface/target"):
        name = target.get("dev")
        if name not in names:
            names.append(name)
    return names
Ejemplo n.º 22
0
def parsekmlpoint(kmlstring):
    """Parse a KML string holding a Placemark/Point and return a Point
    built from the first lon/lat pair in its coordinates text."""
    root = fromstring(kmlstring)
    raw = root.find('{http://www.opengis.net/kml/2.2}Placemark/{http://www.opengis.net/kml/2.2}Point/{http://www.opengis.net/kml/2.2}coordinates').text
    # Strip surrounding spaces and embedded newlines/tabs from the text.
    raw = raw.lstrip(' ').rstrip(' ').replace('\n', '').replace('\t', '')
    pairs = []
    for triple in raw.split(' '):
        parts = triple.split(',')
        if len(parts) > 1:
            pairs.append((float(parts[0]), float(parts[1])))
    return Point(pairs[0])
Ejemplo n.º 23
0
def get_interface_devices(dom):
    """Return the network interface target device names for *dom*.

    Parses the domain's libvirt XML description; on Python 2.6 the
    module-level ElementTree shim is used (``mypythonversion`` is defined
    elsewhere in this module).  First-seen order is kept, no duplicates.

    Fix: the original body mixed tab and space indentation (a TabError
    under Python 3 and fragile under Python 2); normalized to 4-space
    indents.  Behavior is unchanged.
    """
    if mypythonversion == '2.6':
        etree = ElementTree.fromstring(dom.XMLDesc(0))
    else:
        etree = fromstring(dom.XMLDesc(0))
    devs = []
    for target in etree.findall("devices/interface/target"):
        dev = target.get("dev")
        if dev not in devs:  # preserve first-seen order, skip duplicates
            devs.append(dev)
    return devs
Ejemplo n.º 24
0
    def get_new_talks(self):
        """
        Returns talks as dicts {title:, author:, thumb:, date:, duration:, link:}.

        Fetches and parses the TED talks RSS feed; duplicate titles keep
        only the last occurrence.  NOTE(review): itervalues() is Python 2
        only.
        """
        talksByTitle = {}
        rss = get_document('http://feeds.feedburner.com/tedtalks_video')
        for item in fromstring(rss).findall('channel/item'):
            talk = self.get_talk_details(item)
            talksByTitle[talk['title']] = talk

        return talksByTitle.itervalues()
Ejemplo n.º 25
0
def GetShowByID(showID):
  """
  Gets a show by a TVRage ID.
  Returns the full information (Show object) or None if the ID is invalid.
  """
  # tvrage provides no/inconsistent error-reporting so we must try/except
  # NOTE(review): despite the comment above, there is no try/except here;
  # an invalid ID presumably raises from fromstring/Show rather than
  # returning None — confirm against the callers.
  url = URL_SHOWINFO % {'id':showID}
  uf = urllib.urlopen(url)
  xml = uf.read()
  xShow = fromstring(xml)
  return Show(xShow)
Ejemplo n.º 26
0
Archivo: llsd.py Proyecto: Boy/netbook
def parse(something):
    """Parse an LLSD payload in binary, XML or notation serialization.

    Dispatches on the leading bytes: '<?llsd/binary?>' selects the binary
    parser (payload follows the first newline), '<' selects XML (the first
    child of the root element is converted), anything else falls through
    to the notation parser.  A KeyError during parsing is re-raised as a
    generic Exception.
    """
    try:
        if something.startswith('<?llsd/binary?>'):
            just_binary = something.split('\n', 1)[1]
            return LLSDBinaryParser().parse(just_binary)
        # This should be better.
        elif something.startswith('<'):
            return to_python(fromstring(something)[0])
        else:
            return LLSDNotationParser().parse(something)
    except KeyError, e:
        raise Exception('LLSD could not be parsed: %s' % (e,))
Ejemplo n.º 27
0
def parse(something):
    """Parse an LLSD payload in binary, XML or notation serialization.

    Dispatches on the leading bytes: '<?llsd/binary?>' selects the binary
    parser (payload follows the first newline), '<' selects XML (the first
    child of the root element is converted), anything else falls through
    to the notation parser.  A KeyError during parsing is re-raised as a
    generic Exception.
    """
    try:
        if something.startswith('<?llsd/binary?>'):
            just_binary = something.split('\n', 1)[1]
            return LLSDBinaryParser().parse(just_binary)
        # This should be better.
        elif something.startswith('<'):
            return to_python(fromstring(something)[0])
        else:
            return LLSDNotationParser().parse(something)
    except KeyError, e:
        raise Exception('LLSD could not be parsed: %s' % (e, ))
 def test_get_talk_details_minimal(self):
     """Parsing the minimal RSS item yields exactly the expected details."""
     details = self.talks.get_talk_details(fromstring(minimal_item))
     self.assertEqual(
         {
             'author': 'Dovahkiin',
             'date': '04.02.2012',
             'link': 'invalid://nowhere/nothing.html',
             'thumb': 'invalid://nowhere/nothing.jpg',
             'title': 'fus ro dah',
             'plot': 'Unrelenting Force',
             'duration': 3723,
         },
         details)
Ejemplo n.º 29
0
def parsekmlpoly(kmlstring):
    """Parse a KML Placemark/Polygon outer boundary into a Polygon built
    from its LinearRing of lon/lat pairs."""
    root = fromstring(kmlstring)
    raw = root.find('{http://www.opengis.net/kml/2.2}Placemark/{http://www.opengis.net/kml/2.2}Polygon/{http://www.opengis.net/kml/2.2}outerBoundaryIs/{http://www.opengis.net/kml/2.2}LinearRing/{http://www.opengis.net/kml/2.2}coordinates').text
    # Strip surrounding spaces and embedded newlines/tabs from the text.
    raw = raw.lstrip(' ').rstrip(' ').replace('\n', '').replace('\t', '')
    pairs = []
    for triple in raw.split(' '):
        parts = triple.split(',')
        if len(parts) > 1:
            pairs.append((float(parts[0]), float(parts[1])))
    return Polygon(LinearRing(pairs))
Ejemplo n.º 30
0
def parsekmllinestring(kmlstring):
    """Parse a KML Placemark/LineString into a LineString of lon/lat pairs."""
    root = fromstring(kmlstring)
    raw = root.find(
        "{http://www.opengis.net/kml/2.2}Placemark/{http://www.opengis.net/kml/2.2}LineString/{http://www.opengis.net/kml/2.2}coordinates"
    ).text
    # Strip surrounding spaces and embedded newlines/tabs from the text.
    raw = raw.lstrip(" ").rstrip(" ").replace("\n", "").replace("\t", "")
    pairs = []
    for triple in raw.split(" "):
        parts = triple.split(",")
        if len(parts) > 1:
            pairs.append((float(parts[0]), float(parts[1])))
    return LineString(pairs)
Ejemplo n.º 31
0
 def test_search(self):
     """Field-list search: compare captured request params to the fixture
     order-insensitively and check a <doc> exists in the response."""
     expected_request = getData('search_request.txt')
     canned_response = getData('search_response.txt')
     connection = SolrConnection(host='localhost:8983', persistent=True)
     captured = fakehttp(connection, canned_response)
     response = connection.search(
         q='+id:[* TO *]', fl='* score', wt='xml', rows='10', indent='on')
     tree = fromstring(response.read())
     normalize = lambda x: sorted(x.split('&'))  # sort request params
     self.assertEqual(normalize(captured.get()), normalize(expected_request))
     self.failUnless(tree.find('.//doc'))
Ejemplo n.º 32
0
    def decode_XMLDict(self, xml):
        """Decode an XML-serialized dictionary back into Python objects.

        Each element's ``type`` attribute selects the decoding (none, bool,
        complex, float, int, long, str, cdata, dict, list, tuple); the
        container types recurse over child elements.  Returns a dict keyed
        by the top-level element tags.

        Fixes vs. original: the 'bool' branch referenced the undefined
        names ``true``/``false`` (a NameError at runtime); it now yields a
        real bool.  ``getchildren()`` (removed in Python 3.9, and never
        necessary) is replaced by direct element iteration, which behaves
        identically.

        Raises TypeError for an unsupported ``type`` attribute.
        """

        def decode(element):
            # Returns (tag, decoded_value) for a single element.
            value = None
            element_type = element.get('type', 'none')
            if element_type == 'none':
                value = None
            elif element_type == 'bool':
                # Fixed: was `... and true or false` (undefined names).
                value = element.get('value') == 'true'
            elif element_type == 'complex':
                value = complex(float(element.get('re')), float(element.get('im')))
            elif element_type == 'float':
                value = float(element.get('value'))
            elif element_type == 'int':
                value = int(element.get('value'))
            elif element_type == 'long':
                value = long(element.get('value'))
            elif element_type == 'str':
                value = unicode(element.text)
            elif element_type == 'cdata':
                # Strip the literal '<![CDATA[' prefix and ']]>' suffix.
                value = unicode(element.text)[9: -3]
            elif element_type == 'dict':
                value = {}
                for subelement in element:
                    subname, subvalue = decode(subelement)
                    value[subname] = subvalue
            elif element_type == 'list':
                value = []
                for subelement in element:
                    subname, subvalue = decode(subelement)
                    value.append(subvalue)
            elif element_type == 'tuple':
                value = []
                for subelement in element:
                    subname, subvalue = decode(subelement)
                    value.append(subvalue)
                value = tuple(value)
            else:
                raise TypeError('Decoding of type "%s" not supported (Source: %s)' % (element_type, element))
            return element.tag, value

        result = {}
        root = fromstring(xml)
        for element in root:
            key, value = decode(element)
            result[key] = value
        return result
Ejemplo n.º 33
0
 def doSendXML(self, request):
     """POST an XML update request to Solr and return the parsed response.

     Closes the connection afterwards unless self.persistent.  Raises
     SolrException when the response carries a non-zero status attribute.
     NOTE(review): `parsed` is an ElementTree element, which has no
     `documentElement` attribute — the error branch looks like leftover
     DOM-API code and would raise AttributeError; confirm before relying
     on it.
     """
     try:
         rsp = self.doPost(self.solrBase+'/update', request,
             self.xmlheaders)
         data = rsp.read()
     finally:
         if not self.persistent:
             self.conn.close()
     #detect old-style error response (HTTP response code of
     #200 with a non-zero status.
     parsed = fromstring(self.decoder(data)[0])
     status = parsed.attrib.get('status', 0)
     if status != 0:
         reason = parsed.documentElement.firstChild.nodeValue
         raise SolrException(rsp.status, reason)
     return parsed
Ejemplo n.º 34
0
    def parseErrorAndResponse(self, data):
        """Parse response XML; return the document, or raise CIMError when
        it contains an <ERROR> element (code 0 if CODE is non-numeric)."""

        document = fromstring(data)
        err = document.find('.//ERROR')

        if err is None:
            return document

        try:
            error_code = int(err.attrib['CODE'])
        except ValueError:
            error_code = 0

        raise CIMError(error_code, err.attrib['DESCRIPTION'])
Ejemplo n.º 35
0
 def doSendXML(self, request):
     """POST an XML update request to Solr and return the parsed response.

     Closes the connection afterwards unless self.persistent.  Raises
     SolrException when the response carries a non-zero status attribute.
     NOTE(review): `parsed` is an ElementTree element, which has no
     `documentElement` attribute — the error branch looks like leftover
     DOM-API code and would raise AttributeError; confirm before relying
     on it.
     """
     try:
         rsp = self.doPost(self.solrBase + '/update', request,
                           self.xmlheaders)
         data = rsp.read()
     finally:
         if not self.persistent:
             self.conn.close()
     #detect old-style error response (HTTP response code of
     #200 with a non-zero status.
     parsed = fromstring(self.decoder(data)[0])
     status = parsed.attrib.get('status', 0)
     if status != 0:
         reason = parsed.documentElement.firstChild.nodeValue
         raise SolrException(rsp.status, reason)
     return parsed
Ejemplo n.º 36
0
    def parseErrorAndResponse(self, data):
        """Parse returned XML for errors, then convert into
        appropriate Python objects.

        Deferred-based variant: fires
        self.deferred.callback(self.parseResponse(xml)) when no <ERROR>
        element is present, otherwise errbacks with a CIMError carrying
        the error code (0 when CODE is non-numeric) and description.
        """

        xml = fromstring(data)
        error = xml.find('.//ERROR')

        if error is None:
            self.deferred.callback(self.parseResponse(xml))
            return

        try:
            code = int(error.attrib['CODE'])
        except ValueError:
            # Non-numeric CODE attribute: fall back to the generic code 0.
            code = 0

        self.deferred.errback(CIMError(code, error.attrib['DESCRIPTION']))
Ejemplo n.º 37
0
 def test_get_category_aspects(self):
     """
     Fetch the aspect histogram for seller 'officeshoes' in category 11498
     (locale GB), strip the service namespace, and require a successful
     Ack plus at least one named aspect, each with value histograms that
     carry a valueName attribute and a count.
     """
     xml = get_categories(sellerid='officeshoes', categoryid=11498, locale="GB")
     xml = fromstring(xml)
     namespace = 'http://www.ebay.com/marketplace/search/v1/services'
     remove_namespace(xml, namespace)
     self.assertTrue(xml.findtext('ack') == "Success", "Ack node got failure")        
     aspectsxml = xml.findall('aspectHistogramContainer/aspect')
     self.assertTrue(len(aspectsxml) > 0, "Response aspect lenght is less then zero")
     for aspect in aspectsxml:
         self.assertTrue(aspect.attrib['name'], "aspect has no attrib name")        
         valueHistograms = aspect.findall('valueHistogram')
         self.assertTrue(len(valueHistograms) > 0, "Response aspect valuehistogram length is less then zero")            
         for valueHistogram in valueHistograms:
             self.assertTrue(valueHistogram.attrib['valueName'], "aspect valuehistogram valuename has no attrib valueName")
             self.assertTrue(valueHistogram.findtext('count'), "aspect valuehistogram  has no count")
    def process(self, xml):
        """Initiate an SSL connection and return the
        xmlresponse

        Serializes `xml` into the `xmldata` query parameter of a GET to
        self._site/self._path and parses the response body.
        NOTE(review): despite the Content-Type header, the payload travels
        in the URL, not the request body — confirm the endpoint expects
        this.
        """
        conn = ssl.HTTPSConnection(self._site)

        # setup the HEADERS
        log.info('Setting up request headers')
        conn.putrequest('GET', self._path+'?xmldata='+quote(tostring(xml)))
        conn.putheader('Content-Type', 'text/xml')
        conn.endheaders()

        log.info('Getting the response from the Virtual Merchant site')
        result = conn.getresponse().read()
        xmlresponse = fromstring(result)
        
        return xmlresponse        
Ejemplo n.º 39
0
    def __init__(self,tree,parent=None):
        """Recursively wrap an XML tree, attaching child values to parents
        as Python attributes.

        `tree` may be an XML string or an already-parsed element.  Each
        child is wrapped via pythonic_objectify; for non-root nodes the tag
        (with '-' replaced by '_') becomes an attribute on the parent
        wrapper, converted according to the element's `type` attribute
        (integer, float, boolean, date, datetime).
        """
        self._parent = parent
        
        if isinstance(tree,str):
            self._tree = fromstring(tree)
        else:
            self._tree = tree

        #this is required to call on all the children
        self._children = [pythonic_objectify(child,self) for child in self._tree]
        
        #assigning attributes to the parent
        if parent is not None:
            
            #making the tags more pythonic - don't hate me!
            tag = self._tree.tag
            tag = tag.replace('-','_')

            #getting the tags value
            value = self._tree.text
            #known type conversion
            if 'type' in self._tree.attrib and value is not None:
                kind = self._tree.attrib['type']
                if kind == 'integer':
                    value = int(value)
                elif kind == 'float':
                    value = float(value)
                elif kind == 'boolean':
                    if value == 'false':
                        value = False
                    elif value == 'true':
                        value = True
                    else:
                        raise ValueError("I don't know how to handle this!")
                elif kind == 'date':
                    year, month, day = value.split('-')
                    value = datetime.datetime(int(year),int(month),int(day))
                elif kind == 'datetime':
                    # e.g. '2011-05-04T08:14:00Z' — the trailing 'Z' is
                    # stripped; no real timezone handling happens here.
                    year, month, day = value.split('-')
                    day, time = day.replace('Z', '').split('T')
                    hours, minutes, seconds = time.split(':')
                    value = datetime.datetime(int(year), int(month), int(day), int(hours), int(minutes), int(seconds))
                
            #apply it to it's parent
            setattr(self._parent,tag,value)
Ejemplo n.º 40
0
    def parseErrorAndResponse(self, data):
        """Parse response XML and dispatch via the instance deferred:
        callback with the parsed response on success, errback with a
        CIMError when an <ERROR> element is present."""

        document = fromstring(data)
        err = document.find(".//ERROR")

        if err is None:
            self.deferred.callback(self.parseResponse(document))
            return

        try:
            error_code = int(err.attrib["CODE"])
        except ValueError:
            # Non-numeric CODE attribute: fall back to the generic code.
            error_code = 0

        self.deferred.errback(CIMError(error_code, err.attrib["DESCRIPTION"]))
Ejemplo n.º 41
0
def deducting_item(date="", **args):
    """Bucket eBay item ids from one day's notification dump.

    Reads every notification XML file under <dump>/<date>/notifications/,
    drills through the SOAP envelope to the <Item> element and appends the
    item id to temporary_dict under either "ItemClosed" or "ItemOther".
    """
    p_rel = get_dump_location()
    date_str = date
    path = p_rel + "/%s/notifications/" % (str(date_str))
    listing = os.listdir(path)
    # Only these notification types carry item payloads we process.
    files_allowed = ["ItemRevised", "ItemSold", "ItemListed", "ItemClosed"]
    # NOTE(review): i, excp, summary, temp_list and item_count are never
    # used below — presumably leftovers from an earlier revision.
    i = 1
    excp = 0
    summary = {}
    temp_list = []
    item_count = 1
    temporary_dict = {"ItemOther":[], "ItemClosed":[]}
    no_of_files = len(listing)
    # Bookkeeping files sitting next to the notifications are not counted.
    if "summary.json" in listing:
        no_of_files = no_of_files - 1
    if "inbox_progress.txt" in listing:
        no_of_files = no_of_files - 1
    if "summary_progress.txt" in listing:
        no_of_files = no_of_files - 1
    for infile in listing:
        # File names look like "<NotificationType>_<rest>".
        filename = infile.split("_", 1)[0]
        if filename in files_allowed:
            with codecs.open(path + "%s" % (infile), "r") as f:
                temp = f.read()
                # NOTE(review): str.replace("", "=") inserts '=' between every
                # character, which cannot produce parseable XML.  This looks
                # like a garbled replacement of some non-ASCII character that
                # was lost in a paste — confirm against the original source.
                temp = temp.replace("", "=")
                try:
                    rootElement = fromstring(temp)
                except Exception, e:
                    print " ia m here"
                    print e
                
                # Drill through the SOAP Body to the GetItemResponse payload.
                itemElement_root = rootElement.find("{http://schemas.xmlsoap.org/soap/envelope/}Body")
                itemElement = itemElement_root.find("{urn:ebay:apis:eBLBaseComponents}GetItemResponse")
                itemElement_it = itemElement.find("{urn:ebay:apis:eBLBaseComponents}Item")
                itemElement = itemElement_it.find("{urn:ebay:apis:eBLBaseComponents}Seller")
                seller_id = itemElement.find("{urn:ebay:apis:eBLBaseComponents}UserID").text
                cat_id = itemElement_it.find("{urn:ebay:apis:eBLBaseComponents}PrimaryCategory/{urn:ebay:apis:eBLBaseComponents}CategoryID").text
                item_id = itemElement_it.find("{urn:ebay:apis:eBLBaseComponents}ItemID").text
                locale = itemElement_it.find("{urn:ebay:apis:eBLBaseComponents}Country").text
                c_seller_id = seller_id + "@" + locale
                # Closed items are tracked separately from all other events.
                if filename == "ItemClosed":
                    temporary_dict[filename].append(item_id)
                else:
                    temporary_dict["ItemOther"].append(item_id)
Ejemplo n.º 42
0
	def getParser(self, descParseInstruction):
		"""Build a description parser from an XML parser-config file.

		Reads *descParseInstruction*, locates its <GameGrammar> node and
		returns a DescriptionParserFlatFile or DescriptionParserXml
		depending on the grammar's 'type' attribute, or None when the
		config is missing/unknown.
		"""
		# BUG FIX: the original left the file handle open if fromstring
		# raised; 'with' guarantees it is closed on every path.
		with open(descParseInstruction, 'r') as fp:
			tree = fromstring(fp.read())

		grammarNode = tree.find('GameGrammar')
		if grammarNode is None:
			print("no valid parserConfig")
			return None

		attributes = grammarNode.attrib

		parserType = attributes.get('type')
		if parserType == 'multiline':
			return DescriptionParserFlatFile(grammarNode)
		elif parserType == 'xml':
			return DescriptionParserXml(grammarNode)
		else:
			print("Unknown parser: " + parserType)
			return None
Ejemplo n.º 43
0
def recentReviews():
    """Return flattened XHTML for a table of the 10 newest revyu.com reviews.

    Runs a SPARQL SELECT via fetch(), parses the SPARQL XML result format
    and renders the rows with Nevow stan tags.
    """
    result = fetch("""
PREFIX rev: <http://purl.org/stuff/rev#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT ?thing ?name ?review ?createdOn ?rating
WHERE
{
  ?thing rdfs:label ?name ;
         rev:hasReview ?review .
  ?review rev:reviewer <people/drewp> ;
          rev:createdOn ?createdOn ;
          rev:rating ?rating .
} 
ORDER BY DESC(?createdOn)
LIMIT 10
""")

    et = fromstring(result)
    # Column names come from the <head> section of the SPARQL result.
    # NOTE(review): 'headers' is never used afterwards.
    headers = [e.get('name') for e in et.find(SPARQL_RESULTS + 'head')]
    rows = []
    for result in et.find(SPARQL_RESULTS + 'results').getchildren():
        # Map variable name (with the leading '?' stripped) to its
        # converted node value.
        bindings = dict([(b.get('name').replace('?', ''),
                          nodeElement(b.getchildren()))
                         for b in result.findall(SPARQL_RESULTS + 'binding')])
        rows.append(bindings)

    # Re-sort client-side even though the query already orders by date.
    rows.sort(key=lambda row: row['createdOn'], reverse=True)
    return flat.ten.flatten(
        T.table(class_="recentReviews")
        [T.tr[T.th(class_="recentReviews title", colspan=3)[
            "Recent reviews on ",
            T.a(class_="recentReviews", href="http://revyu.com")["revyu.com"],
            #" (-tmp)"
        ]], T.tr[T.th["Date"], T.th["Review"], T.th["Rating"]], [
            T.tr[T.td(class_="date")[row['createdOn'].split('T')[0]],
                 T.td(class_="subj")[T.a(href=row['review'])[row['name']]],
                 T.td(class_="rate")[row['rating']]] for row in rows
        ]])
Ejemplo n.º 44
0
    def get_new_talks(self):
        """
        Returns talks as dicts {title:, author:, thumb:, date:, duration:, link:}.

        Fetches the SD and then the HD RSS feed; because the HD feed is
        processed last, its entries overwrite SD entries with the same title.
        """
        
        sd_rss_url = "http://feeds.feedburner.com/tedtalks_video"
        hd_rss_url = "http://feeds.feedburner.com/TedtalksHD"
        rss_urls = [sd_rss_url, hd_rss_url] # Prefer HD, but get SD if that's all there is (my friends)
        
        document_fetchers = []
        if do_multi_threading:
            pool = Pool(processes=1) # Maybe it would be better to fetch them simultaneously?
            for url in rss_urls:
                result = pool.apply_async(get_document, [url])
                # BUG FIX: bind 'result' as a default argument.  The original
                # lambda closed over the loop variable, so every fetcher
                # returned the result of the *last* async request.
                document_fetchers.append(lambda x, result=result: result.get(30))
        else:
            for url in rss_urls:
                # BUG FIX: likewise bind 'url' at definition time; otherwise
                # both fetchers would download only the final (HD) feed.
                document_fetchers.append(lambda x, url=url: get_document(url))

        talksByTitle = {}
        for documentFetcher in document_fetchers:
            rss = documentFetcher(None) # Fetchers take one ignored argument.
            for item in fromstring(rss).findall('channel/item'):
                talk = self.get_talk_details(item)
                talksByTitle[talk['title']] = talk
        
        if do_multi_threading:
            # pool.close()
            # pool.join()
            # If I close Pool using close/join, then it logs 
            # ERROR: Error Type: <type 'exceptions.OSError'>
            # ERROR: Error Contents: [Errno 3] No such process
            # when the app exits (i.e. finalization occurs).
            # Whereas this seems to be OK.
            pool._terminate()
        
        return talksByTitle.itervalues()
Ejemplo n.º 45
0
import unittest
from chaski_plugin import ChaskiPlugin
from elementtree.ElementTree import fromstring

# The example XML is borrowed from http://www.w3schools.com/xml/xml_tree.asp
xml_test_text = """<?xml version="1.0" encoding="ISO-8859-1"?>
<note>
<to>Tove</to>
<from>Jani</from>
<heading>Reminder</heading>
<body>Don't forget me this weekend!</body>
</note>
"""
# Shared, parsed fixture used by every test case below.
xml_test_etree = fromstring(xml_test_text)


class ChaskiPluginTest(unittest.TestCase):
    def do_test(self, method, plugin_params):
        plugin = ChaskiPlugin(plugin_params)
        self.assertTrue(method(plugin, xml_test_etree))

    def test_xpath_exists_true(self):
        params = {'child_xpath': u'to'}
        self.do_test(ChaskiPlugin.xpath_exists, params)

    def test_xpath_exists_false(self):
        params = {'child_xpath': u'no/such/path'}
        self.do_test(ChaskiPlugin.xpath_dont_exist, params)

    def test_any_child_text_matches_re_true(self):
Ejemplo n.º 46
0
 def response_to_et(self, response):
     """Parse an HTTP response body into an ElementTree element."""
     raw_xml = response.content
     return fromstring(raw_xml)
Ejemplo n.º 47
0
def process(directory, option, file_out, use_file_out, xml_file, group, verbose, recurse, progress):
    """Add or erase the RPMs found in *directory* in a comps XML file.

    ERASE removes every matching <packagereq> from every group; ADD
    appends a new <group> listing each RPM as mandatory.  Output goes to
    *file_out* when *use_file_out* is set, otherwise to stdout via dump().
    """
    if verbose: print("Inside process...")

    # Terminal width for the progress bar; fall back to 60 columns when
    # $COLUMNS is unset or non-numeric.
    col = commands.getoutput("echo \"$COLUMNS\"")
    try:
        columns = int(col)
    except ValueError:
        columns = 60
    pb = progress_bar.pb("Progress: ", "-", columns, sys.stderr)

    tree = ElementTree(file=xml_file)
    elem = tree.getroot()

    if verbose: print("Getting rpm_names")

    rpm_names = get_names_from_dir(directory, recurse, pb, progress)

    if verbose: print("Processing names")

    if option == comps_opt.ERASE:
        """ Handle the ERASE operations """
        for subelem in elem:
            for subsub in subelem:
                p = 0.0
                # BUG FIX: iterate over a snapshot.  The original removed
                # children from 'subsub' while iterating it directly, which
                # silently skips the element after each removal (and made
                # the percentage denominator shrink mid-loop).
                children = list(subsub)
                for subsubsub in children:
                    p = p + 1.0
                    if progress:
                        percentage = p / len(children)
                        pb.progress(percentage)

                    if subsubsub.tag == 'packagereq' and subsubsub.text in rpm_names:
                        subsub.remove(subsubsub)
                        if verbose: print("Found %s, removing" % subsubsub.text)
    elif option == comps_opt.ADD:
        """ Handle the ADD operations """
        text = "<group>\n"
        text += "<id>%s</id>\n" % group
        text += "<name>%s</name>\n" % group
        text += "<packagelist>\n"

        p = 0.0
        for name in rpm_names:
            p = p + 1.0
            if progress:
                percentage = p / len(rpm_names)
                pb.progress(percentage)

            text += "<packagereq type=\"mandatory\">%s</packagereq>\n" % name

        text += "</packagelist>\n"
        text += "</group>\n"
        node = fromstring(text)
        elem.append(node)
    else:
        die("Some unknown error has occured. Neither 'ADD' nor 'ERASE' was specified, somehow")

    if progress: pb.clear()

    if verbose: print("Ending, outputing XML")

    if use_file_out:
        # BUG FIX: 'tree' is already an ElementTree; wrapping it again in
        # ElementTree(tree) treated the tree object as a root *element*
        # and wrote a bogus document.  Write the tree directly.
        tree.write(file_out)
    else:
        dump(tree)
Ejemplo n.º 48
0
def generate_docs():
    """The main function.

    Reads the XCCDF XML, extracts each STIG rule, merges in deployer
    notes, then writes sphinx rst TOC files grouped by severity, status
    and tag.
    """
    tree = read_xml()

    # Create a simple list to capture all of the STIGs
    stig_ids = []

    # Create defaultdicts to hold information to build our table of
    # contents files for sphinx.
    all_deployer_notes = defaultdict(list)
    severity = defaultdict(list)
    tag = defaultdict(list)
    status = defaultdict(list)

    # Loop through the groups and extract rules
    group_elements = tree.findall(".//{}Group".format(XCCDF_NAMESPACE))
    for group_element in group_elements:
        rule_element = group_element.find("{}Rule".format(XCCDF_NAMESPACE))

        # Build a dictionary with all of our rule data.
        rule = {
            'id':
            group_element.attrib['id'],
            'title':
            rule_element.find("{}title".format(XCCDF_NAMESPACE)).text,
            'severity':
            rule_element.attrib['severity'],
            'fix':
            rule_element.find("{}fixtext".format(XCCDF_NAMESPACE)).text,
            'check':
            rule_element.find(
                "{0}check/{0}check-content".format(XCCDF_NAMESPACE)).text,
            'ident': [
                x.text for x in rule_element.findall("{}ident".format(
                    XCCDF_NAMESPACE))
            ],
        }

        # The description has badly formed XML in it, so we need to hack it up
        # and turn those tags into a dictionary.
        description = rule_element.find(
            "{}description".format(XCCDF_NAMESPACE)).text
        parser = XMLParser()
        temp = fromstring("<root>{0}</root>".format(description), parser)
        rule['description'] = {x.tag: x.text for x in temp.iter()}

        # Get the deployer notes from YAML.  Rules without notes are
        # skipped entirely (not indexed anywhere).
        deployer_notes = get_deployer_notes(rule['id'])
        if deployer_notes:
            rule['deployer_notes'] = deployer_notes

            all_deployer_notes[rule['id']] = rule
            stig_ids.append(rule['id'])
            severity[rule['severity']].append(rule['id'])
            status[deployer_notes['status']].append(rule['id'])
            tag[deployer_notes['tag']].append(rule['id'])

    # Order severities high->low; statuses and tags alphabetically.
    # NOTE(review): a severity outside keyorder would raise ValueError here.
    keyorder = ['high', 'medium', 'low']
    severity = OrderedDict(
        sorted(severity.items(), key=lambda x: keyorder.index(x[0])))
    status = OrderedDict(sorted(status.items(), key=lambda x: x[0]))
    tag = OrderedDict(sorted(tag.items(), key=lambda x: x[0]))

    all_toc = render_all(stig_ids, all_deployer_notes)
    severity_toc = render_toc('severity', severity, all_deployer_notes)
    status_toc = render_toc('implementation status', status,
                            all_deployer_notes)

    # Make sure auto_ files exist for all domains to avoid sphinx include errors
    domains = glob.glob("{}/rhel7/domains/*.rst".format(DOC_SOURCE_DIR))
    for domain in domains:
        domain = os.path.splitext(os.path.basename(domain))[0]
        if not domain.startswith("auto_"):
            fname = "{0}/rhel7/domains/auto_{1}.rst".format(
                DOC_SOURCE_DIR, domain)
            # 'a' mode creates the file if missing without truncating it.
            open(fname, 'a').close()

    # Write the docs for each tag to individual files so we can include them
    # from doc files in the domains folder.
    unique_tags = [x for x, y in tag.items()]
    for unique_tag in unique_tags:
        tag_toc = render_toc_partial(None, {unique_tag: tag[unique_tag]},
                                     all_deployer_notes)
        write_file("rhel7/domains/auto_{}.rst".format(unique_tag), tag_toc)

    write_file("rhel7/auto_controls-all.rst", all_toc)
    write_file("rhel7/auto_controls-by-severity.rst", severity_toc)
    write_file("rhel7/auto_controls-by-status.rst", status_toc)
Ejemplo n.º 49
0
 def __init__(self, rules):
     """Parse the XML rule string and size the worker-thread budget."""
     self._tree = fromstring(rules)
     self._maxDepth = int(self._tree.get("max_depth"))
     # One thread is reserved for the caller itself.
     cpu_total = multiprocessing.cpu_count()
     self._maxThreads = cpu_total
     self._availableThreads = cpu_total - 1
     self._progressCount = 0
Ejemplo n.º 50
0
def Evaluate(rules, seed=0):
    """
    Takes an XML string (see the Library) and return a list of shapes.
    Each shape is a 2-tuple: (shape name, transform matrix).

    Exceptions: "curve" instances append (point, normal) 2-tuples and
    "box" instances append (point, U, V, W) 4-tuples; None entries mark
    branches pruned by the depth limits.  Evaluation is iterative, driven
    by an explicit (rule, depth, matrix) stack seeded from the "entry" rule.
    """
    def radians(d):
        # NOTE(review): uses 3.141 rather than math.pi — slightly lossy.
        return float(d * 3.141 / 180.0)

    def pick_rule(tree, name):
        # Collect every <rule> element whose 'name' attribute matches...
        rules = tree.findall("rule")
        elements = []
        for r in rules:
            if r.get("name") == name:
                elements.append(r)

        if len(elements) == 0:
            print "Error, no rules found with name '%s'" % name
            quit()

        # ...then choose one at random, weighted by its 'weight'
        # attribute (default weight 1).
        sum, tuples = 0, []
        for e in elements:
            weight = int(e.get("weight", 1))
            sum = sum + weight
            tuples.append((e, weight))
        n = random.randint(0, sum - 1)
        for (item, weight) in tuples:
            if n < weight:
                break
            n = n - weight
        return item

    def parse_xform(xform_string):
        # Parse a whitespace-separated command stream such as
        # "tx 1 rz 45 s 2 2 2" into one composed 4x4 matrix.
        matrix = Matrix4.new_identity()
        tokens = xform_string.split(' ')
        t = 0
        while t < len(tokens) - 1:
            command, t = tokens[t], t + 1

            # Translation
            if command == 'tx':
                x, t = float(tokens[t]), t + 1
                matrix *= Matrix4.new_translate(x, 0, 0)
            elif command == 'ty':
                y, t = float(tokens[t]), t + 1
                matrix *= Matrix4.new_translate(0, y, 0)
            elif command == 'tz':
                z, t = float(tokens[t]), t + 1
                matrix *= Matrix4.new_translate(0, 0, z)
            elif command == 't':
                x, t = float(tokens[t]), t + 1
                y, t = float(tokens[t]), t + 1
                z, t = float(tokens[t]), t + 1
                matrix *= Matrix4.new_translate(x, y, z)

            # Rotation (arguments are given in degrees)
            elif command == 'rx':
                theta, t = radians(float(tokens[t])), t + 1
                matrix *= Matrix4.new_rotatex(theta)
            elif command == 'ry':
                theta, t = radians(float(tokens[t])), t + 1
                matrix *= Matrix4.new_rotatey(theta)
            elif command == 'rz':
                theta, t = radians(float(tokens[t])), t + 1
                matrix *= Matrix4.new_rotatez(theta)

            # Scale ('sa' is uniform scale)
            elif command == 'sx':
                x, t = float(tokens[t]), t + 1
                matrix *= Matrix4.new_scale(x, 1, 1)
            elif command == 'sy':
                y, t = float(tokens[t]), t + 1
                matrix *= Matrix4.new_scale(1, y, 1)
            elif command == 'sz':
                z, t = float(tokens[t]), t + 1
                matrix *= Matrix4.new_scale(1, 1, z)
            elif command == 'sa':
                v, t = float(tokens[t]), t + 1
                x, y, z = v, v, v
                matrix *= Matrix4.new_scale(x, y, z)
            elif command == 's':
                x, t = float(tokens[t]), t + 1
                y, t = float(tokens[t]), t + 1
                z, t = float(tokens[t]), t + 1
                matrix *= Matrix4.new_scale(x, y, z)

            else:
                print "unrecognized transformation: '%s' at position %d in '%s'" % (
                    command, t, xform_string)
                quit()

        return matrix

    # Deterministic for a given (rules, seed) pair.
    random.seed(seed)
    tree = fromstring(rules)
    entry = pick_rule(tree, "entry")
    shapes = []
    stack = []
    stack.append((entry, 0, Matrix4.new_identity()))
    max_depth = int(tree.get("max_depth"))

    progressCount = 0
    print "Evaluating Lindenmayer system",
    while len(stack) > 0:

        # Print a dot of progress every ~1000 generated shapes.
        if len(shapes) > progressCount + 1000:
            print ".",
            progressCount = len(shapes)

        rule, depth, matrix = stack.pop()

        # A rule may override the global max_depth with its own attribute.
        local_max_depth = max_depth
        if "max_depth" in rule.attrib:
            local_max_depth = int(rule.get("max_depth"))

        # Prune when the pending-work stack itself gets too deep.
        if len(stack) >= max_depth:
            shapes.append(None)
            continue

        # Depth limit reached: optionally hand off to a successor rule.
        if depth >= local_max_depth:
            if "successor" in rule.attrib:
                successor = rule.get("successor")
                rule = pick_rule(tree, successor)
                stack.append((rule, 0, matrix))
            shapes.append(None)
            continue

        for statement in rule:
            xform = parse_xform(statement.get("transforms", ""))
            count = int(statement.get("count", 1))
            for n in xrange(count):
                # The transform accumulates across repetitions, so each
                # repeated statement builds on the previous placement.
                matrix *= xform
                if statement.tag == "call":
                    rule = pick_rule(tree, statement.get("rule"))
                    cloned_matrix = matrix.copy()
                    stack.append((rule, depth + 1, cloned_matrix))
                elif statement.tag == "instance":
                    name = statement.get("shape")
                    if name == "curve":
                        # Curves are emitted as (point, normal) pairs.
                        P = Point3(0, 0, 0)
                        N = Vector3(0, 0, 1)
                        P = matrix * P
                        N = matrix.upperLeft() * N
                        shapes.append((P, N))
                    elif name == "box":
                        # Boxes are emitted as (origin, U, V, W) frames.
                        P = matrix * Point3(0, 0, 0)
                        X = 0.2
                        U = matrix.upperLeft() * Vector3(X, 0, 0)
                        V = matrix.upperLeft() * Vector3(0, X, 0)
                        W = matrix.upperLeft() * Vector3(0, 0, X)
                        shapes.append((P, U, V, W))
                    else:
                        shape = (name, matrix)
                        shapes.append(shape)
                else:
                    print "malformed xml"
                    quit()

    print "\nGenerated %d shapes." % len(shapes)
    return shapes
Ejemplo n.º 51
0
def generate_docs(app, config):
    """The main function.

    Sphinx extension entry point: reads the XCCDF XML and the Ansible
    task files named in *config*, derives each control's implementation
    status/vars/tags, and writes rst TOC files grouped by severity and
    status.
    """
    metadata_dir = "{0}/{1}".format(DOC_SOURCE_DIR, config.stig_metadata_dir)
    ansible_dir = "{0}/{1}".format(DOC_SOURCE_DIR, config.stig_ansible_dir)
    ansible_task_filenames = config.stig_ansible_task_filenames
    xccdf_file = config.stig_xccdf_file
    xccdf_namespace = config.stig_xccdf_namespace
    control_statuses = config.stig_control_statuses
    control_statuses_order = config.stig_control_statuses_order
    control_severities = config.stig_control_severities

    jinja_env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(metadata_dir),
        trim_blocks=True,
        keep_trailing_newline=False,
    )
    jinja_env.filters['addmonospace'] = add_monospace

    tree = read_xml(metadata_dir, xccdf_file)

    # Read in control implementations from Ansible task files
    tasks = parse_ansible_tasks(ansible_dir, ansible_task_filenames)

    # Create a simple list to capture all of the STIGs
    stig_ids = []

    # Create defaultdicts to hold information to build our table of
    # contents files for sphinx.
    all_rules = defaultdict(list)

    # Prepopulate with control severities so empty buckets still render.
    severity = defaultdict(list)
    [severity[s] for s in control_severities]

    # Prepopulate possible control statuses (except the 'missing' marker).
    status = defaultdict(list)
    [status[v] for k, v in control_statuses.items() if k != 'missing']

    # Loop through the groups and extract rules
    group_elements = tree.findall(".//{}Group".format(xccdf_namespace))
    for group_element in group_elements:
        rule_element = group_element.find("{}Rule".format(xccdf_namespace))

        # Build a dictionary with all of our rule data.
        rule = {
            'id':
            rule_element.find("{}version".format(xccdf_namespace)).text,
            'vuln_id':
            group_element.attrib['id'],
            'title':
            rule_element.find("{}title".format(xccdf_namespace)).text,
            'severity':
            rule_element.attrib['severity'],
            'fix':
            rule_element.find("{}fixtext".format(xccdf_namespace)).text,
            'check':
            rule_element.find(
                "{0}check/{0}check-content".format(xccdf_namespace)).text,
            'ident': [
                x.text for x in rule_element.findall("{}ident".format(
                    xccdf_namespace))
            ],
        }

        rule_tasks = tasks[rule['id']]
        rule['status'] = control_statuses['default']
        rule['vars'] = []
        rule['tags'] = []

        # No Ansible task implements this rule at all.
        if not rule_tasks:
            rule['status'] = control_statuses['missing']

        for item in rule_tasks:
            tags = item.get('tags')
            conditionals = item.get('when')

            # All controls have an on/off var named after the STIG ID in form
            # rhel_07_###### so we add that here without relying on parser.
            # rule['vars'].append({'key': rule['id'].lower().replace('-','_'), 'value': 'true'})
            if conditionals is None:
                rule['vars'].append(rule['id'].lower().replace('-', '_'))
            else:
                # 'when' may be a single string or a list of strings.
                if type(conditionals) is str:
                    conditionals = [conditionals]

                for c in conditionals:
                    rule['vars'].append(c)

            # Implementation status parsing: a status keyword appearing in
            # the task's 'when' clause overrides the default status.
            for key, value in control_statuses.items():
                if key in str(item.get('when')):
                    rule['status'] = value

            # Grab the tags
            if tags:
                rule['tags'] = tags
                # Check if notimplemented is in tags and update status
                if 'notimplemented' in tags:
                    rule['status'] = control_statuses['missing']

        # The description has badly formed XML in it, so we need to hack it up
        # and turn those tags into a dictionary.
        description = rule_element.find(
            "{}description".format(xccdf_namespace)).text
        parser = XMLParser()
        temp = fromstring("<root>{0}</root>".format(description), parser)
        rule['description'] = {x.tag: x.text for x in temp.iter()}

        # Get the deployer notes
        deployer_notes = get_deployer_notes(metadata_dir + '/notes',
                                            rule['id'])
        rule['deployer_notes'] = deployer_notes

        all_rules[rule['id']] = rule
        stig_ids.append(rule['id'])
        severity[rule['severity']].append(rule['id'])
        status[rule['status']].append(rule['id'])

    # Sort orders follow the sequences given in the sphinx config.
    sev_sort_order = {s: i for i, s in enumerate(control_severities)}
    status_sort_order = {s: i for i, s in enumerate(control_statuses_order)}

    all_toc = render_all(jinja_env, stig_ids, all_rules)
    severity_toc = render_toc(jinja_env, 'severity', severity, all_rules,
                              sev_sort_order)
    status_toc = render_toc(jinja_env, 'status', status, all_rules,
                            status_sort_order)

    write_file("auto_controls-all.rst", all_toc)
    write_file("auto_controls-by-severity.rst", severity_toc)
    write_file("auto_controls-by-status.rst", status_toc)