def find_group(request, grouptitle, pgSz):
    """Find a CSpace group record by title and list its member objects.

    Looks up the group whose ``groups_common:title`` equals *grouptitle*,
    then queries collectionobjects related to that group (page size *pgSz*).

    Visible error paths return a 5-tuple of
    (None, None, 0, [], error-message-or-None).
    NOTE(review): the success-path return statement is not visible in this
    chunk -- confirm against the full file.  Python 2 syntax (``except X, e``).
    """

    # TIMESTAMP = time.strftime("%b %d %Y %H:%M:%S", time.localtime())

    # Advanced-search query: groups_common:title == grouptitle, excluding
    # soft-deleted records (the %%27 sequences are URL-encoded quotes).
    asquery = '%s?as=%s_common%%3Atitle%%3D%%27%s%%27&wf_deleted=false&pgSz=%s' % ('groups', 'groups', grouptitle, pgSz)

    # Make authenticated connection to cspace server...
    (groupurl, grouprecord, dummy, elapsedtime) = getfromCSpace(asquery, request)
    if grouprecord is None:
        return(None, None, 0, [], 'Error: the search for group \'%s.\' failed.' % grouptitle)
    grouprecordtree = fromstring(grouprecord)
    groupcsid = grouprecordtree.find('.//csid')
    if groupcsid is None:
        # No group with that title: not treated as an error, nothing to report.
        return(None, None, 0, [], None)
    groupcsid = groupcsid.text

    # Fetch collection objects related (rtObj) to the group's csid.
    uri = 'collectionobjects?rtObj=%s&pgSz=%s' % (groupcsid, pgSz)
    try:
        (groupurl, groupmembers, dummy, elapsedtime) = getfromCSpace(uri, request)
        groupmembers = fromstring(groupmembers)
        totalItems = groupmembers.find('.//totalItems')
        totalItems = int(totalItems.text)
        objectcsids = [e.text for e in groupmembers.findall('.//csid')]
    except urllib2.HTTPError, e:
        return (None, None, 0, [], 'Error: we could not make list of group members')
Example #2
0
 def resolve(self, item, captcha_cb=None, select_cb=None):
     """Resolve a joj.sk page into playable stream items.

     Live pages yield one HLS item per quality level; archive pages are
     resolved through the site's Video.php playlist into RTMP items.  The
     collected items are handed to ``select_cb``.
     """
     result = []
     item = item.copy()
     url = item['url']
     if url.endswith('live.html'):
         # Map the site subdomain onto the stream channel name.
         channel = re.search(r'http://(\w+)\.joj\.sk', url).group(1)
         for original, replacement in {'www': 'joj', 'plus': 'jojplus'}.items():
             if channel == original:
                 channel = replacement
                 break
         for quality, resolution in {'lq': '180p', 'mq': '360p', 'hq': '540p'}.items():
             item = self.video_item()
             item['quality'] = resolution
             item['url'] = 'http://http-stream.joj.sk/joj/' + channel + '/index-' + quality + '.m3u8'
             result.append(item)
     else:
         data = util.request(url)
         playerdata = re.search(r'<div\ class=\"jn-player\"(.+?)>', data).group(1)
         pageid = re.search(r'data-pageid=[\'\"]([^\'\"]+)', playerdata).group(1)
         basepath = re.search(r'data-basepath=[\'\"]([^\'\"]+)', playerdata).group(1)
         videoid = re.search(r'data-id=[\'\"]([^\'\"]+)', playerdata).group(1)
         # BUG FIX: the 'clip' and 'pageId' query parameters were concatenated
         # without a separator ('...clip=<id>pageId=<page>'); join with '&'.
         playlisturl = basepath + 'services/Video.php?clip=' + videoid + '&pageId=' + pageid
         playlist = fromstring(util.request(playlisturl))
         balanceurl = basepath + 'balance.xml?nc=%d' % random.randint(1000, 9999)
         balance = fromstring(util.request(balanceurl))
         for video in playlist.find('files').findall('file'):
             item = self.video_item()
             item['img'] = playlist.attrib.get('large_image')
             item['length'] = playlist.attrib.get('duration')
             item['quality'] = video.attrib.get('quality')
             item['url'] = self.rtmp_url(video.attrib.get('path'), playlist.attrib.get('url'),
                                         video.attrib.get('type'), balance)
             result.append(item)
         result.reverse()
     return select_cb(result)
Example #3
0
def main(argv):
    """Fetch the complete O-DF hierarchy from an O-MI node and dump the
    newest stored values for every item into an XML file.

    Usage: getAllData.py -o <outputfile> host
    """
    node_url = 'http://localhost:8080'  # default: localhost at default port
    out_path = 'odfdump.xml'            # default output file
    try:
        opts, args = getopt.getopt(argv, "ho:")
    except getopt.GetoptError:
        print('getAllData.py -o <outputfile> host')
        sys.exit(2)
    for flag, value in opts:
        if flag == '-h':
            print('getAllData.py -o <outputfile> host')
            sys.exit()
        elif flag == '-o':
            out_path = value
    if len(args) >= 1:
        node_url = args[0]

    # O-MI read request returning the O-DF hierarchy (structure only).
    hierarchyRequest = """<omiEnvelope xmlns="http://www.opengroup.org/xsd/omi/1.0/"  version="1.0" ttl="0">
  <read msgformat="odf">
    <msg>
      <Objects xmlns="http://www.opengroup.org/xsd/odf/1.0/"/>
    </msg>
  </read>
</omiEnvelope>"""
    # O-MI read skeleton for the 9000 newest values (should be enough for now).
    fullRequest = """<omiEnvelope xmlns="http://www.opengroup.org/xsd/omi/1.0/" version="1.0" ttl="0">
  <read msgformat="odf" newest="9000">
    <msg>
    </msg>
  </read>
</omiEnvelope>"""

    # Register namespaces so serialization doesn't emit wrong prefixes.
    register_namespace("omi", "omi.xsd")
    register_namespace("odf", "odf.xsd")
    register_namespace("", "http://www.w3.org/2001/XMLSchema-instance")
    headers = {'Content-Type': 'application/xml'}

    # Ask the node for its current hierarchy and pull out the Objects subtree.
    hierarchy_xml = requests.post(node_url, data = hierarchyRequest, headers = headers).text
    hierarchy_root = fromstring(hierarchy_xml)
    objects = hierarchy_root.find(".//{http://www.opengroup.org/xsd/odf/1.0/}Objects")
    # Remove values and add metadata/description tags (helper defined elsewhere).
    update_odf(objects)

    # Graft the trimmed hierarchy into the "newest values" request skeleton.
    full_root = fromstring(fullRequest)
    full_root.find(".//{http://www.opengroup.org/xsd/omi/1.0/}msg").append(objects)

    # The response may be large, so stream it to disk in blocks.
    with open(out_path, 'wb') as out_file:
        response = requests.post(node_url, data = tostring(full_root, encoding="utf-8"),
                                 headers = headers, stream = True)
        if not response.ok:
            print("INVALID RESPONSE")
        for block in response.iter_content(1024):
            out_file.write(block)
Example #4
0
 def test_compareElement2 (self) :
     """compareElement reports no differences for this source/query pair."""
     clearGlobals()
     source_tree = fromstring("<red><blue></blue></red>")
     iterElement(source_tree)
     query_tree = fromstring("<red><green><blue></blue><yellow></yellow></green></red>")
     differences = compareElement(query_tree, source_tree)
     self.assertTrue(differences == [])
Example #5
0
 def test_searchFunction (self) :
     """searchFunction matches two identical two-level trees."""
     tag_levels = [("mez", 1),("mer", 2)]
     clearGlobals()
     source_tree = fromstring("<mez><mer></mer></mez>")
     query_tree = fromstring("<mez><mer></mer></mez>")
     found = searchFunction(query_tree, source_tree, tag_levels)
     self.assertTrue(found == True)
Example #6
0
    def test_proxy_validate_view_pt_success(self):
        """
        When called with a valid ``ProxyTicket``, a ``GET`` request to
        the view should return a validation success and the
        ``ProxyTicket`` should be consumed and invalid for future
        validation attempts.
        """
        query_str = "?service=%s&ticket=%s" % (self.service_url, self.pt.ticket)
        response = self.client.get(reverse("cas_proxy_validate") + query_str)
        tree = ElementTree(fromstring(response.content))
        elem = tree.find(XMLNS + "authenticationSuccess/" + XMLNS + "user")
        self.assertIsNotNone(elem)
        self.assertEqual(elem.text, "ellen")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.get("Content-Type"), "text/xml")
        elem = tree.find(XMLNS + "authenticationSuccess/" + XMLNS + "proxies")
        # BUG FIX: Element.getiterator() was deprecated and removed in
        # Python 3.9; Element.iter() is the supported equivalent.
        proxy = list(elem.iter(XMLNS + "proxy"))
        self.assertEqual(len(proxy), 1)
        self.assertEqual(proxy[0].text, "http://www.example.com")

        # This request should fail as the ticket was consumed in the preceding test
        response = self.client.get(reverse("cas_proxy_validate") + query_str)
        tree = ElementTree(fromstring(response.content))
        elem = tree.find(XMLNS + "authenticationFailure")
        self.assertIsNotNone(elem)
        self.assertEqual(elem.get("code"), "INVALID_TICKET")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.get("Content-Type"), "text/xml")
Example #7
0
 def test_xml_check (self) :
     """xml_check accepts a pattern that is a prefix of the source tree."""
     source = fromstring("<A><B><C></C><D></D></B></A>")
     pattern = fromstring("<A><B></B></A>")
     matched = xml_check(source, pattern)
     self.assertTrue(matched == True)
Example #8
0
    def resolve(self, item, captcha_cb=None, select_cb=None):
        """Resolve a joj.sk page into playable stream items.

        Live pages yield one RTMP item per fixed quality; archive pages are
        resolved through the site's Video.php playlist.  Items are handed
        to ``select_cb``.
        """
        result = []
        item = item.copy()
        url = item['url']
        if url.endswith('live.html'):
            for quality in ['360', '540', '720']:
                item = self.video_item()
                item['quality'] = quality + 'p'
                item['url'] = self.rtmp_url(fix_path(re.search(r'http://(\w+).joj.sk', url).group(1)) + '-' + quality, url)
                result.append(item)
        else:
            data = util.request(url)
            playerdata = re.search(r'<div\ class=\"jn-player\"(.+?)>', data).group(1)
            pageid = re.search(r'data-pageid=[\'\"]([^\'\"]+)', playerdata).group(1)
            basepath = re.search(r'data-basepath=[\'\"]([^\'\"]+)', playerdata).group(1)
            # BUG FIX: this line was indented with a tab (a TabError under
            # Python 3 next to the surrounding space indentation).
            videoid = re.search(r'data-id=[\'\"]([^\'\"]+)', playerdata).group(1)
            # BUG FIX: 'clip' and 'pageId' query parameters were concatenated
            # without a '&' separator.
            playlisturl = basepath + 'services/Video.php?clip=' + videoid + '&pageId=' + pageid
            playlist = fromstring(util.request(playlisturl))
            balanceurl = basepath + 'balance.xml?nc=%d' % random.randint(1000, 9999)
            balance = fromstring(util.request(balanceurl))
            for video in playlist.find('files').findall('file'):
                item = self.video_item()
                item['img'] = playlist.attrib.get('large_image')
                item['length'] = playlist.attrib.get('duration')
                item['quality'] = video.attrib.get('quality')
                item['url'] = self.rtmp_url(video.attrib.get('path'), playlist.attrib.get('url'), video.attrib.get('type'), balance)
                result.append(item)
        result.reverse()
        return select_cb(result)
Example #9
0
def process(fc):
    """Restyle a yEd graphml document: convert every ShapeNode into a BPMN
    activity GenericNode and attach a BPMN StyleProperties block.

    :param fc: graphml document as a string
    :returns: transformed document as UTF-8 bytes (with XML declaration)
    """
    import copy  # local import: only needed to duplicate the style template

    root = fromstring(fc)
    ns = "{http://www.yworks.com/xml/graphml}"

    # Template document carrying the BPMN style properties to graft in.
    new_node_code = \
    """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
	<graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:y="http://www.yworks.com/xml/graphml" xmlns:yed="http://www.yworks.com/xml/yed/3" xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://www.yworks.com/xml/schema/graphml/1.1/ygraphml.xsd">
		<y:StyleProperties>
		    <y:Property class="com.yworks.yfiles.bpmn.view.TaskTypeEnum" name="com.yworks.bpmn.taskType" value="TASK_TYPE_ABSTRACT"/>
		    <y:Property class="com.yworks.yfiles.bpmn.view.ActivityTypeEnum" name="com.yworks.bpmn.activityType" value="ACTIVITY_TYPE_TASK"/>
		    <y:Property class="java.awt.Color" name="com.yworks.bpmn.icon.fill2" value="#d4d4d4"/>
		    <y:Property class="java.awt.Color" name="com.yworks.bpmn.icon.fill" value="#ffffff"/>
		    <y:Property class="com.yworks.yfiles.bpmn.view.BPMNTypeEnum" name="com.yworks.bpmn.type" value="ACTIVITY_TYPE"/>
		    <y:Property class="java.awt.Color" name="com.yworks.bpmn.icon.line.color" value="#000000"/>
		</y:StyleProperties>
	</graphml>
	"""
    style_template = fromstring(new_node_code).find("{}StyleProperties".format(ns))

    for node in root.findall('.//{}ShapeNode'.format(ns)):
        node.tag = "{}GenericNode".format(ns)
        node.attrib['configuration'] = "com.yworks.bpmn.Activity.withShadow"

        # Replace the single fill color with a two-color BPMN gradient.
        fill = node.find("{}Fill".format(ns))
        del fill.attrib['color']
        fill.attrib['color1'] = "#FFFFFFE6"
        fill.attrib['color2'] = "#D4D4D4CC"

        node.find("{}BorderStyle".format(ns)).attrib['color'] = '#123EA2'
        node.remove(node.find("{}Shape".format(ns)))

        # BUG FIX: the original appended the same Element instance to every
        # node, aliasing one subtree across parents (a later mutation would
        # affect all of them); give each node its own deep copy.
        node.append(copy.deepcopy(style_template))

    return tostring(root, "UTF-8")
	def __init__(self, firstArgument, robotAddr='', responseValue='', comment=''):
		"""Build a robot message.

		Single-argument form: *firstArgument* is an XML string to parse.
		Multi-argument form: build from component fields (with optional
		comment); *firstArgument* is then the driver address.
		A timestamp element 't' (via microtime()) is added in both cases.
		"""
		if (robotAddr == '' and responseValue == '' and comment == ''):
			# Single-argument form: parse the XML and stamp the receipt time.
			tree = fromstring(firstArgument)
			ts = SubElement(tree, 't')
			ts.text = microtime()
			self.XML = tree

			self.driverAddr = self.XML.findtext('d')
			self.robotAddr = self.XML.findtext('r')
			self.responseValue = self.XML.findtext('re')
			# BUG FIX: this line (and its twin below) began with a stray
			# space before the tabs -- a TabError under Python 3.
			self.comment = self.XML.findtext('co')
		else:
			self.driverAddr = firstArgument
			self.robotAddr = robotAddr
			self.responseValue = responseValue
			self.comment = comment

			# Serialize the component fields into the canonical <m> document.
			tree = fromstring('<m></m>')
			ts = SubElement(tree, 't')
			ts.text = microtime()
			ds = SubElement(tree, 'd')
			ds.text = self.driverAddr
			rs = SubElement(tree, 'r')
			rs.text = self.robotAddr
			res = SubElement(tree, 're')
			res.text = self.responseValue
			cos = SubElement(tree, 'co')
			cos.text = self.comment
			self.XML = tree
Example #11
0
def getOrganizations(root):
    """
    root is an ElementTree element; for each Organization in the database
    module an 'Organization' subelement is appended carrying its crises,
    people and descriptive fields.
    """
    assert root is not None
    for o in Organization.objects.all():
        node = ET.SubElement(root, 'Organization')
        # BUG FIX: ElementTree attribute values must be strings; database
        # ids are typically ints, which would fail at serialization time.
        node.set('ID', str(o.id))
        node.set('Name', o.name)

        attr = ET.SubElement(node, 'Crises')
        for c in o.crises.all():
            ET.SubElement(attr, 'Crisis').set('ID', str(c.id))

        attr = ET.SubElement(node, 'People')
        for p in o.people.all():
            ET.SubElement(attr, 'Person').set('ID', str(p.id))

        if o.kind is not None:
            ET.SubElement(node, 'Kind').text = o.kind
        if len(o.location) > 0:
            ET.SubElement(node, 'Location').text = o.location
        # History and contact fields already contain XML markup, so they are
        # parsed and grafted rather than stored as text.
        if len(o.history) > 0:
            node.append(fromstring('<History>' + o.history + '</History>'))
        if len(o.contact) > 0:
            node.append(fromstring('<ContactInfo>' + o.contact + '</ContactInfo>'))
        getCommon(node, o)
 def test_eval_3 (self) :
     """Query tree absent from the source tree -> xmleval yields [0]."""
     source = fromstring("<a><b><c></c><d></d></b></a>")
     query = fromstring("<b><e></e></b>")
     result = xmleval(source, query)
     print (str(result))
     self.assertTrue(result == [0])
 def test_eval_2 (self) :
     """Query tree deeper than two layers -> xmleval yields [1,2]."""
     source = fromstring("<a><b><c></c><d><f></f></d></b></a>")
     query = fromstring("<b><d><f></f></d></b>")
     result = xmleval(source, query)
     print (str(result))
     self.assertTrue(result == [1,2])
Example #14
0
    def test_proxy_validate_view_pt_success(self):
        """
        When called with a valid ``ProxyTicket``, a ``GET`` request to
        the view should return a validation success and the
        ``ProxyTicket`` should be consumed and invalid for future
        validation attempts.
        """
        query_str = "?service=%s&ticket=%s" % (self.service_url,
                                               self.pt.ticket)
        response = self.client.get(reverse('cas_proxy_validate') + query_str)
        tree = ElementTree(fromstring(response.content))
        elem = tree.find(XMLNS + 'authenticationSuccess/' + XMLNS + 'user')
        self.assertIsNotNone(elem)
        self.assertEqual(elem.text, 'ellen')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.get('Content-Type'), 'text/xml')
        elem = tree.find(XMLNS + 'authenticationSuccess/' + XMLNS + 'proxies')
        # BUG FIX: Element.getiterator() was deprecated and removed in
        # Python 3.9; Element.iter() is the supported equivalent.
        proxy = list(elem.iter(XMLNS + 'proxy'))
        self.assertEqual(len(proxy), 1)
        self.assertEqual(proxy[0].text, 'http://www.example.com')

        # This second validation request attempt should fail as the
        # ticket was consumed in the preceding test
        response = self.client.get(reverse('cas_proxy_validate') + query_str)
        tree = ElementTree(fromstring(response.content))
        elem = tree.find(XMLNS + 'authenticationFailure')
        self.assertIsNotNone(elem)
        self.assertEqual(elem.get('code'), 'INVALID_TICKET')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.get('Content-Type'), 'text/xml')
Example #15
0
    def test_service_validate_view_success(self):
        """
        A ``GET`` request with correct parameters returns a validation
        success, and the ``ServiceTicket`` is consumed so that a second
        validation attempt fails.
        """
        params = "?service=%s&ticket=%s" % (self.service_url,
                                            self.st.ticket)
        validate_url = reverse('cas_service_validate') + params

        response = self.client.get(validate_url)
        doc = ElementTree(fromstring(response.content))
        user_elem = doc.find(XMLNS + 'authenticationSuccess/' + XMLNS + 'user')
        self.assertIsNotNone(user_elem)
        self.assertEqual(user_elem.text, 'ellen')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.get('Content-Type'), 'text/xml')

        # The ticket was consumed above, so revalidation must fail.
        response = self.client.get(validate_url)
        doc = ElementTree(fromstring(response.content))
        failure_elem = doc.find(XMLNS + 'authenticationFailure')
        self.assertIsNotNone(failure_elem)
        self.assertEqual(failure_elem.get('code'), 'INVALID_TICKET')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.get('Content-Type'), 'text/xml')
 def test3_findPattern(self): # Verify that 2nd element is correct
     """After scanning the tree, curPattern[2] should carry the <middle> tag."""
     doc = "<xml><outer><middle><inner></inner></middle></outer></xml>"
     state = StoreGlob1(fromstring(doc))
     xml_findPattern(state, fromstring(doc))
     expected = fromstring("<middle></middle>")
     self.assertTrue(state.curPattern[2].tag == expected.tag)
def index(request):
    """List all Jasper reports known to CSpace, annotated with whether each
    is configured for display in this webapp, and render listReports.html."""
    connection = cspace.connection.create_connection(mainConfig, request.user)
    (url, data, statusCode) = connection.make_get_request('cspace-services/reports')
    reportXML = fromstring(data)
    reportCsids = [csidElement.text for csidElement in reportXML.findall('.//csid')]
    reportNames = [csidElement.text for csidElement in reportXML.findall('.//name')]
    fileNames = []
    #print reportCsids
    for csid in reportCsids:
        try:
            (url, data, statusCode) = connection.make_get_request('cspace-services/reports/%s' % csid)
            reportXML = fromstring(data)
            fileName = reportXML.find('.//filename')
            fileName = fileName.text
            fileName = fileName.replace('.jasper','.jrxml')
            parms,displayReport,fileFound = getReportparameters(fileName)
            fileName = fileName if displayReport else 'CSpace only'
            fileName = fileName if fileFound else 'Found in CSpace, not configured for this webapp'
            fileNames.append(fileName)
            #print fileName
        except Exception:
            # BUG FIX: was a bare 'except:' -- keep the best-effort behavior
            # but stop swallowing SystemExit/KeyboardInterrupt.
            fileNames.append('Error getting report payload from CSpace: %s' % csid)
    # Sort the (csid, name, file) rows by report name for display.
    reportData = zip(reportCsids, reportNames, fileNames)
    reportData = sorted(reportData, key=itemgetter(1))
    context = {'reportData': reportData, 'labels': 'name file'.split(' '), 'apptitle': TITLE}
    context['additionalInfo'] = AdditionalInfo.objects.filter(live=True)
    return render(request, 'listReports.html', context)
Example #18
0
 def test_get_child_title(self):
     """get_child_title extracts plain, html and xhtml Atom titles."""
     plain = fromstring("<a><title xmlns='http://www.w3.org/2005/Atom'>fred</title></a>")
     self.assertEqual("fred", model.get_child_title(plain))
     html_title = fromstring("<a><title type='html' xmlns='http://www.w3.org/2005/Atom'>fred</title></a>")
     self.assertEqual("fred", model.get_child_title(html_title))
     xhtml_title = fromstring("<a><title type='xhtml' xmlns='http://www.w3.org/2005/Atom'><div xmlns='http://www.w3.org/1999/xhtml'>fred <b>and</b> barney.</div></title></a>")
     self.assertEqual("fred and barney.", model.get_child_title(xhtml_title))
Example #19
0
def mergeLogs(fromFilePath, toFilePath, outputFilePath):
    """Merge two chat-log HTML files into one document, messages ordered by
    their 'timestamp' attribute, and write the result to *outputFilePath*."""
    def utf8open(path):
        # BUG FIX: open(path, 'r', 'utf8') passed 'utf8' as the *buffering*
        # positional argument (a TypeError with the builtin open); pass the
        # encoding by keyword, which also works if the original file aliased
        # codecs.open.
        return open(path, 'r', encoding='utf8')

    outputDoc = E('html')

    with utf8open(fromFilePath) as fromFile, utf8open(toFilePath) as toFile:

        # the body and HTML tags are left open so the app can just append
        # when a new message comes in. we have to close them.
        # note: this could also be taken care of by BeautifulSoup or
        # perhaps lxml.html
        fromDoc = fromstring(fromFile.read() + '</BODY></HTML>')
        toDoc = fromstring(toFile.read() + '</BODY></HTML>')

        # copy the head tag so formatting and stuff is preserved in our new doc
        outputDoc.append(fromDoc.find('HEAD').copy())

        fromMessages = fromDoc.findall('./BODY/div')
        toMessages = toDoc.findall('./BODY/div')

        # Interleave both logs chronologically by each div's timestamp attr.
        allMessages = list(fromMessages) + list(toMessages)
        allMessages.sort(key = lambda e: time.strptime(e.attrib['timestamp'], '%Y-%m-%d %H:%M:%S'))

        body = SE(outputDoc, 'BODY', attrib = fromDoc.find('BODY').attrib)
        body.extend(x.copy() for x in allMessages)

    ElementTree(outputDoc).write(outputFilePath, 'utf8')
Example #20
0
 def test_findMatches3(self):
     """FindMatchesInElement should find matches at positions 2 and 7."""
     sourceElement = fromstring("<THU><Team><ACRush></ACRush><Jelly></Jelly><Cooly></Cooly></Team><JiaJia><Team><Ahyangyi></Ahyangyi><Dragon></Dragon><Cooly><Amber></Amber></Cooly></Team></JiaJia></THU>")
     searchPatternElement = fromstring("<Team><Cooly></Cooly></Team>")
     matches = []
     FindMatchesInElement(sourceElement, searchPatternElement, ReferenceType(1), matches)
     answer = [2, 7]
     # BUG FIX: the original iterated range(len(answer)-1), which never
     # checked the final expected match; compare the lists directly.
     self.assertEqual(matches, answer)
Example #21
0
 def test_findMatches2(self):
     """FindMatchesInElement should find a single match at position 2."""
     sourceElement = fromstring("<red><green><blue></blue><yellow></yellow></green></red>")
     searchPatternElement = fromstring("<green><blue></blue></green>")
     matches = []
     FindMatchesInElement(sourceElement, searchPatternElement, ReferenceType(1), matches)
     answer = [2]
     # BUG FIX: the original iterated range(len(answer)-1) == range(0), so
     # the element values were never compared at all; compare directly.
     self.assertEqual(matches, answer)
Example #22
0
	def test_merge_history3(self) :
		"""Merging the same organization twice keeps history at historytag2."""
		parsed = fromstring(orgtag)
		org_model = createOrganization(parsed)
		parsed = fromstring(orgtag2)
		mergeOrganization(parsed, org_model)
		self.assert_(org_model.orginfo.history == historytag2)
		mergeOrganization(parsed, org_model)
		self.assert_(org_model.orginfo.history == historytag2)
Example #23
0
    def test_searchFunction2 (self) :
        """searchFunction matches two identical three-level trees."""
        tag_levels = [("a", 1),("b", 2),("c",2),("d",3),("e",3)]
        clearGlobals()
        source_tree = fromstring("<a><b></b><c><d></d><e></e></c></a>")
        query_tree = fromstring("<a><b></b><c><d></d><e></e></c></a>")
        found = searchFunction(query_tree, source_tree, tag_levels)
        self.assertTrue(found == True)
Example #24
0
    def test_searchFunction1 (self) :
        """searchFunction rejects a query whose nesting is inverted."""
        tag_levels = [("a", 1),("b", 2)]
        clearGlobals()
        source_tree = fromstring("<a><b></b></a>")
        query_tree = fromstring("<b><a></a></b>")
        found = searchFunction(query_tree, source_tree, tag_levels)
        self.assertTrue(found == False)
Example #25
0
 def test_xml_matches (self) :
     """Only the root candidate matches the <A><B/></A> pattern."""
     source = fromstring("<A><B><C></C><A></A><D></D></B></A>")
     pattern = fromstring("<A><B></B></A>")
     candidates = [source, source.find("B").find("A")]
     matched = xml_matches(candidates, pattern)
     self.assertTrue(matched == [source])
 def test_eval_5 (self) :
     """Children in the query tree have siblings -> xmleval yields [3,2,5,12]."""
     source = fromstring("<a><c><f></f><h></h></c><c><d><e></e></d><f></f><h></h></c><g><h><c><f></f><h><i></i></h></c></h></g></a>")
     query = fromstring("<c><f></f><h></h></c>")
     result = xmleval(source, query)
     print (str(result))
     self.assertTrue(result == [3,2,5,12])
Example #27
0
 def test_scan3 (self) :
     """xml_scan_recur should accept a pattern present in the source tree."""
     a = [fromstring("<Team><cooly><rek></rek></cooly></Team>"), fromstring("<cooly><rek></rek></cooly>")]
     try:
         xml_scan_recur(a[0],a[1])
         # Designed to pass this test
         self.assertTrue(True)
     except Exception:
         # BUG FIX: was a bare 'except:', which also swallowed
         # SystemExit/KeyboardInterrupt; narrowed to Exception.
         self.assertTrue(False)
Example #28
0
 def test_xml_check_5 (self) :
     """xml_check is False when anchored at the source's first child."""
     source = fromstring("<A><B><C></C><A></A></B><A><B><C></C></B></A></A>")
     pattern = fromstring("<A><B><C></C></B></A>")
     first_candidate = list(source)[0]
     verdict = xml_check(first_candidate, pattern)
     self.assertTrue(verdict == False)
 def test_eval_6 (self) :
     """Query tree anchored at the root -> xmleval yields [1,1]."""
     source = fromstring("<a><c><f></f><h></h></c><c><d><e></e></d><f></f><h></h></c><g><h><c><f></f><h><i></i></h></c></h></g></a>")
     query = fromstring("<a><c><f></f></c></a>")
     result = xmleval(source, query)
     print (str(result))
     self.assertTrue(result == [1,1])
Example #30
0
 def test_xml_matches_2 (self) :
     """Two of the three candidate elements match the pattern."""
     source = fromstring("<Z><A><B><C></C><A></A><D></D></B></A><D><A><B><C></C></B></A></D></Z>")
     pattern = fromstring("<A><B><C></C></B></A>")
     candidates = [source.find("A"), source.find("A").find("B").find("A"), source.find("D").find("A")]
     matched = xml_matches(candidates, pattern)
     self.assertTrue(matched == [ candidates[0], candidates[1] ])
Example #31
0
    def test(self, args, targets):
        """
        Run shellcheck over the target shell scripts and convert its
        checkstyle XML report into sanity results.

        :type args: SanityConfig
        :type targets: SanityTargets
        :rtype: TestResult
        """
        # Paths listed in skip.txt are exempted from the check entirely.
        skip_file = 'test/sanity/shellcheck/skip.txt'
        skip_paths = set(
            read_lines_without_comments(skip_file,
                                        remove_blank_lines=True,
                                        optional=True))

        # exclude.txt holds shellcheck rule codes to suppress via '-e'.
        exclude_file = 'test/sanity/shellcheck/exclude.txt'
        exclude = set(
            read_lines_without_comments(exclude_file,
                                        remove_blank_lines=True,
                                        optional=True))

        # Only .sh files that are not explicitly skipped are checked.
        paths = sorted(i.path for i in targets.include
                       if os.path.splitext(i.path)[1] == '.sh'
                       and i.path not in skip_paths)

        if not paths:
            return SanitySkipped(self.name)

        # Request checkstyle XML output so the report can be parsed below.
        cmd = [
            'shellcheck',
            '-e',
            ','.join(sorted(exclude)),
            '--format',
            'checkstyle',
        ] + paths

        try:
            stdout, stderr = run_command(args, cmd, capture=True)
            status = 0
        except SubprocessError as ex:
            # A non-zero exit is expected when shellcheck finds issues;
            # capture the output and decide below whether the tool failed.
            stdout = ex.stdout
            stderr = ex.stderr
            status = ex.status

        # Anything on stderr, or an exit status above 1 (per shellcheck
        # convention, 1 means "issues found"), indicates a tool failure.
        if stderr or status > 1:
            raise SubprocessError(cmd=cmd,
                                  status=status,
                                  stderr=stderr,
                                  stdout=stdout)

        if args.explain:
            return SanitySuccess(self.name)

        # json output is missing file paths in older versions of shellcheck, so we'll use xml instead
        root = fromstring(stdout)  # type: Element

        results = []

        # checkstyle layout as consumed here: file elements under the root,
        # each carrying error entries with message/line/column/severity attrs.
        for item in root:  # type: Element
            for entry in item:  # type: Element
                results.append(
                    SanityMessage(
                        message=entry.attrib['message'],
                        path=item.attrib['name'],
                        line=int(entry.attrib['line']),
                        column=int(entry.attrib['column']),
                        level=entry.attrib['severity'],
                        code=entry.attrib['source'].replace('ShellCheck.', ''),
                    ))

        if results:
            return SanityFailure(self.name, messages=results)

        return SanitySuccess(self.name)
Example #32
0
    def test(self, args, targets):
        """
        Run shellcheck over the configured targets and convert its
        checkstyle XML report into sanity results.

        :type args: SanityConfig
        :type targets: SanityTargets
        :rtype: TestResult
        """
        # exclude.txt holds shellcheck rule codes to suppress via '-e'.
        exclude_file = os.path.join(SANITY_ROOT, 'shellcheck', 'exclude.txt')
        exclude = set(
            read_lines_without_comments(exclude_file,
                                        remove_blank_lines=True,
                                        optional=True))

        # Per-test settings (ignores etc.) applied to raw results at the end.
        settings = self.load_processor(args)

        paths = [target.path for target in targets.include]

        # Missing shellcheck is a warning-level skip, not a failure.
        if not find_executable('shellcheck', required='warning'):
            return SanitySkipped(self.name)

        # Request checkstyle XML output so the report can be parsed below.
        cmd = [
            'shellcheck',
            '-e',
            ','.join(sorted(exclude)),
            '--format',
            'checkstyle',
        ] + paths

        try:
            stdout, stderr = run_command(args, cmd, capture=True)
            status = 0
        except SubprocessError as ex:
            # A non-zero exit is expected when shellcheck finds issues;
            # capture the output and decide below whether the tool failed.
            stdout = ex.stdout
            stderr = ex.stderr
            status = ex.status

        # Anything on stderr, or an exit status above 1 (per shellcheck
        # convention, 1 means "issues found"), indicates a tool failure.
        if stderr or status > 1:
            raise SubprocessError(cmd=cmd,
                                  status=status,
                                  stderr=stderr,
                                  stdout=stdout)

        if args.explain:
            return SanitySuccess(self.name)

        # json output is missing file paths in older versions of shellcheck, so we'll use xml instead
        root = fromstring(stdout)  # type: Element

        results = []

        # checkstyle layout as consumed here: file elements under the root,
        # each carrying error entries with message/line/column/severity attrs.
        for item in root:  # type: Element
            for entry in item:  # type: Element
                results.append(
                    SanityMessage(
                        message=entry.attrib['message'],
                        path=item.attrib['name'],
                        line=int(entry.attrib['line']),
                        column=int(entry.attrib['column']),
                        level=entry.attrib['severity'],
                        code=entry.attrib['source'].replace('ShellCheck.', ''),
                    ))

        results = settings.process_errors(results, paths)

        if results:
            return SanityFailure(self.name, messages=results)

        return SanitySuccess(self.name)
Example #33
0
def html2plaintext(html, body_id=None, encoding='utf-8'):
    """ From an HTML text, convert the HTML to plain text.
    If @param body_id is provided then this is the tag where the
    body (not necessarily <body>) starts.
    """
    # (c) Fry-IT, www.fry-it.com, 2007
    # <*****@*****.**>
    # download here: http://www.peterbe.com/plog/html2plaintext
    assert isinstance(html, str)
    url_index = []
    try:
        tree = fromstring(html)

        # Narrow to the requested body element when one is identified.
        if body_id is not None:
            source = tree.xpath('//*[@id=%s]' % (body_id,))
        else:
            source = tree.xpath('//body')
        if len(source):
            tree = source[0]

        # Turn <a href> links into "text [n]" markers and collect the urls
        # for the footnote list appended at the end.
        i = 0
        for link in tree.findall('.//a'):
            url = link.get('href')
            if url:
                i += 1
                link.tag = 'span'
                link.text = '%s [%s]' % (link.text, i)
                url_index.append(url)

        html = tostring(tree, encoding=encoding)
        # BUG FIX: with a named encoding, lxml's tostring returns bytes, but
        # everything below operates on str; decode before continuing.
        if isinstance(html, bytes):
            html = html.decode(encoding)
    except Exception:
        # Don't fail if the html is invalid (was a bare 'except:'; narrowed
        # so SystemExit/KeyboardInterrupt are no longer swallowed).
        pass
    # \r char is converted into &#13;, must remove it
    html = html.replace('&#13;', '')

    html = html.replace('<strong>', '*').replace('</strong>', '*')
    html = html.replace('<b>', '*').replace('</b>', '*')
    html = html.replace('<h3>', '*').replace('</h3>', '*')
    html = html.replace('<h2>', '**').replace('</h2>', '**')
    html = html.replace('<h1>', '**').replace('</h1>', '**')
    html = html.replace('<em>', '/').replace('</em>', '/')
    html = html.replace('<tr>', '\n')
    html = html.replace('</p>', '\n')
    html = re.sub(r'<br\s*/?>', '\n', html)
    html = re.sub(r'<.*?>', ' ', html)
    html = html.replace(' ' * 2, ' ')
    html = html.replace('&gt;', '>')
    html = html.replace('&lt;', '<')
    html = html.replace('&amp;', '&')

    # strip all lines
    html = '\n'.join([x.strip() for x in html.splitlines()])
    html = html.replace('\n' * 2, '\n')

    # Append the collected link urls as numbered footnotes.
    for i, url in enumerate(url_index):
        if i == 0:
            html += '\n\n'
        html += '[%s] %s\n' % (i + 1, url)

    return html.strip('\n')
Example #34
0
import time

# NOTE(review): flat script chunk -- ET, tostring, fromstring and bf are
# imported earlier in the original file; bf is presumably a badgerfish-style
# XML->JSON converter (confirm against the full script).

# open xml file
tree = ET.parse('90029-50.xml')

# get root and convert it to a string
root = tree.getroot()
data = tostring(root)

# start a file to write to
f = open('90029-50.json', 'w')

# this is where all the beautiful data will go
clinics = []

# Atom-style feed: each clinic is one entry.
locations = bf.data(fromstring(data))['feed']['entry']

# start your parsing
# NOTE(review): the loop body appears to continue past this chunk --
# presumably the extracted fields are appended to `clinics` and written to f.
for clinic in locations:
    # name
    name = clinic['title']['$']

    # street
    street = clinic['summary']['div']['div']['div'][1]['div']['$']

    # city
    city = clinic['summary']['div']['div']['div'][1]['span'][0]['$']

    # state
    state = clinic['summary']['div']['div']['div'][1]['span'][1]['$']
def xmlToJson(xmlStr):
    """Convert an XML document string to a pretty-printed JSON string.

    The XML is parsed into an element tree, mapped to a dict by the
    BadgerFish-style converter ``bf``, then serialised with sorted keys
    and 4-space indentation.
    """
    element = fromstring(xmlStr)
    mapping = bf.data(element)
    return dumps(mapping, sort_keys=True, indent=4)
Example #36
0
            print
            print

        else:
            first_run = False

        task_xml_string = None
        with open(_task_file) as infile:
            task_xml_string = unicode(infile.read(), "utf-8")

        # Validate XML before trying to import anything from the given file.
        # validate_task_xml_file(task_xml_string) # working?

        _errors = 0
        _total = 0
        _tree = fromstring(task_xml_string.encode("utf-8"))

        task_id = _tree.attrib["id"]
        # needed? Not passed on yet ****TODO*****
        language_pair = '{0}2{1}'.format(_tree.attrib["source-language"],
                                         _tree.attrib["target-language"])
        if args.dry_run_enabled:
            _ = EvaluationTask(task_xml=_task_file,
                               task_name=task_id,
                               task_type="7")
        else:
            # making the task_name equal to the task_id
            t = EvaluationTask(task_xml=_task_file,
                               task_name=task_id,
                               task_type="7")
            try:  # trying to catch the exception
Example #37
0

class UserGetter(GetterDict):
    """Adapter that lets pydantic read model fields off an XML element.

    ``Id`` and ``Status`` live as attributes on the element itself; every
    other field is looked up as a child element's ``Value`` attribute.
    """

    # Fields stored as attributes of the root element rather than children.
    _ATTRIBUTE_KEYS = frozenset({'Id', 'Status'})

    def get(self, key: str, default: Any) -> Any:
        if key in self._ATTRIBUTE_KEYS:
            # Plain attribute lookup on the element itself.
            return self._obj.attrib.get(key, default)
        # Otherwise the value hangs off a child element's 'Value' attribute.
        # find() returns None for a missing child (-> AttributeError) and a
        # child may lack 'Value' (-> KeyError); both fall back to default.
        try:
            return self._obj.find(key).attrib['Value']
        except (AttributeError, KeyError):
            return default


class User(BaseModel):
    """Pydantic model hydrated from an XML ``<User>`` element.

    Field access is routed through :class:`UserGetter`: ``Id`` and
    ``Status`` come from the element's own attributes, the remaining
    fields from child elements' ``Value`` attributes.
    """

    Id: int  # required; read from the element's own attributes
    Status: Optional[str]  # element attribute; may be absent
    FirstName: Optional[str]  # child element's 'Value' attribute
    LastName: Optional[str]  # child element's 'Value' attribute
    LoggedIn: bool  # child element's 'Value' attribute

    class Config:
        # Enable from_orm() and route attribute reads through UserGetter.
        orm_mode = True
        getter_dict = UserGetter


# Parse the XML document and hydrate a User model from the root element.
user = User.from_orm(fromstring(xmlstring))
Example #38
0
File: workflow.py Project: gem/sidd
 def from_xml_string(self, xmlstr):
     """Build the workflow by parsing *xmlstr* into an element tree and
     delegating construction to ``self._build``.

     :param xmlstr: XML document describing the workflow, as a string.
     """
     self._build(fromstring(xmlstr))
Example #39
0
def load_alignments_from_db(session: Session, phase: Phase,
                            logger: Logger) -> List[Alignment]:
    """Build headline/price-sequence alignments for one dataset phase.

    For every used headline in *phase* (ordered by publication time), fetch
    the latest price values per (RIC, sequence type) available before the
    headline's timestamp, rewrite any ``<yen val="..."/>`` tokens relative
    to those prices, and return the alignments as plain dicts.

    :param session: open SQLAlchemy session used for all queries.
    :param phase: dataset phase whose headlines are selected.
    :param logger: logger for start/end progress messages.
    :return: list of ``Alignment.to_dict()`` results.
    """
    headlines = session \
        .query(Headline.article_id,
               Headline.tag_tokens,
               Headline.t,
               cast(extract('epoch', Headline.t), Integer).label('unixtime'),
               cast(extract('hour', in_jst(Headline.t)), Integer).label('jst_hour')) \
        .filter(Headline.is_used.is_(True), Headline.phase == phase.value) \
        .order_by(Headline.t) \
        .all()
    headlines = list(headlines)

    rics = fetch_rics(session)

    alignments = []
    seqtypes = [
        SeqType.RawShort, SeqType.RawLong, SeqType.MovRefShort,
        SeqType.MovRefLong, SeqType.NormMovRefShort, SeqType.NormMovRefLong,
        SeqType.StdShort, SeqType.StdLong
    ]
    logger.info(
        'start creating alignments between headlines and price sequences.')

    for h in tqdm(headlines):

        # Find the latest prices before the article is published
        # (one entry per (ric, seqtype) pair).
        chart = dict([
            fetch_latest_vals(session, h.t, ric, seqtype)
            for (ric, seqtype) in itertools.product(rics, seqtypes)
        ])

        # Replace tags with price tags
        tag_tokens = h.tag_tokens

        # Nikkei 225 reference sequences used to interpret yen values.
        short_term_vals = chart[stringify_ric_seqtype(Code.N225.value,
                                                      SeqType.RawShort)]
        long_term_vals = chart[stringify_ric_seqtype(Code.N225.value,
                                                     SeqType.RawLong)]

        processed_tokens = []
        for i in range(len(tag_tokens)):
            t = tag_tokens[i]
            if t.startswith('<yen val="') and t.endswith('"/>'):
                # Pull the raw yen value out of the tag's 'val' attribute.
                ref = fromstring(t).attrib['val']

                if len(short_term_vals) > 0 and len(long_term_vals) > 0:

                    prev_trading_day_close = Decimal(long_term_vals[0])
                    latest = Decimal(short_term_vals[0])
                    p = find_operation(ref, prev_trading_day_close, latest)
                    processed_tokens.append(p)
                else:
                    # No price data available — presumably 'z' marks an
                    # unknown value; confirm against find_operation's codes.
                    processed_tokens.append('<yen val="z"/>')
            else:
                processed_tokens.append(tag_tokens[i])

        alignment = Alignment(h.article_id, str(h.t), h.jst_hour,
                              processed_tokens, chart)
        alignments.append(alignment.to_dict())
    logger.info(
        'end creating alignments between headlines and price sequences.')
    return alignments
Example #40
0
 def _response_to_json(self, api, response):
     """Convert an XML HTTP *response* body into plain JSON-native data.

     The body is decoded as UTF-8, parsed into an element tree, mapped to
     a dict by the ``abdera`` convention converter, then round-tripped
     through json to normalise everything into plain lists/dicts/strings.
     ``api`` is accepted for interface compatibility but unused here.
     """
     body = response.content.decode('UTF-8')
     converted = abdera.data(fromstring(body))
     # dumps + loads coerces the converter's output into JSON-native types.
     return json.loads(json.dumps(converted))
# Build the newline-delimited string that the BSD HMAC scheme signs:
# api_id, timestamp, base call path, and query parameters.
signing_string = api_id + '\n' + str(
    api_ts) + '\n' + api_baseCall + '\n' + api_param
# HMAC-SHA1 of the signing string, keyed with the API secret; sent as api_mac.
api_mac = hmac.new(api_secret.encode(), signing_string.encode(),
                   hashlib.sha1).hexdigest()
# Full API URL: inserts API id, timestamp, HMAC digest, and signup form id.
api_url = 'http://battletx.bsd.net/page/api/signup/get_signups_by_form_id?api_ver=2&api_id=' + api_id + '&api_ts=' + \
          str(api_ts) + '&api_mac=' + api_mac + '&signup_form_id=' + str(signup_form_id)

# Reformatting BSD XML into JSON:
api_xml_data = urllib.request.urlopen(
    api_url).read()  # Uses urllib library to read XML data from BSD API URL
doc = dumps(
    yh.data(fromstring(api_xml_data))
)  # Parses XML data using xmljson library, parses using yahoo standard
loaded_doc = json.loads(doc)  # Deserializes data
name_of_list_in_use = 'cmi_list'  # will be used in title of CSV file


# Function iterates over dictionary and checks keys, if keys match strings, count is altered
def indiv_dict_length(tuple):
    count = 0  # declares temporary count variable, returns it at end of function
    for k, v in tuple:
        if v != {}:
            if k == 'firstname':
                count += 1
            if k == 'lastname':
                count += 1
            if k == 'email':
Example #42
0
 def __init__(self, grid2=False):
     """Load transmission-line data from a KML or KMZ file into ``self.lines``.

     :param grid2: when True, read the secondary grid file
         (``self.kml_file2``) and take line colours from capacity data
         (``self.grid2_colors``) instead of KML styles.
     """
     self.get_config()
     self.lines = []
     if grid2:
         kml_file = self.kml_file2
     else:
         kml_file = self.kml_file
     if not os.path.exists(kml_file):
         # NOTE(review): this looks swapped — when grid2 is True the missing
         # file is kml_file2, yet kml_file is the attribute cleared (and
         # vice versa). Confirm intent before relying on these attributes.
         if grid2:
             self.kml_file = ''
         else:
             self.kml_file2 = ''
         return
     style = {}
     styl = ''
     zipped = False
     if kml_file[-4:] == '.kmz':  # zipped file?
         # A .kmz is a zip archive; extract the first inner .kml into memory.
         zipped = True
         zf = zipfile.ZipFile(kml_file, 'r')
         inner_file = ''
         for name in zf.namelist():
             if name[-4:] == '.kml':
                 inner_file = name
                 break
         if inner_file == '':
             return
         memory_file = io.BytesIO()
         memory_file.write(zf.open(inner_file).read())
         root = ElementTree(fromstring(memory_file.getvalue()))
     else:
         kml_data = open(kml_file, 'rb')
         root = ElementTree(fromstring(kml_data.read()))
     # Create an iterator (getiterator() was removed in Python 3.9)
     if sys.version_info[1] < 9:  # before python 3.9
         iterat = root.getiterator()
     else:
         iterat = root.iter()
     placemark_id = ''
     line_names = []
     stylm = ''
     # Stream through every element, tracking the most recent style id,
     # style-map id, placemark id, and line name as state.
     for element in iterat:
         # Tag names carry an '{namespace}' prefix; strip it.
         elem = element.tag[element.tag.find('}') + 1:]
         if elem == 'Style':
             for name, value in list(element.items()):
                 if name == 'id':
                     styl = value
         elif elem == 'StyleMap':
             for name, value in list(element.items()):
                 if name == 'id':
                     stylm = value
         elif elem == 'color':
             if styl in self.colors:
                 style[styl] = self.colors[styl]
             else:
                 # KML colour text is aabbggrr; reorder last 6 hex digits
                 # into an HTML #rrggbb value.
                 style[styl] = '#' + element.text[-2:] + element.text[
                     -4:-2] + element.text[-6:-4]
             if stylm != '':
                 if stylm in self.colors:
                     style[stylm] = self.colors[stylm]
                 else:
                     style[stylm] = '#' + element.text[-2:] + element.text[
                         -4:-2] + element.text[-6:-4]
         elif elem == 'name':
             line_name = element.text
             if placemark_id != '':
                 line_name += placemark_id
                 placemark_id = ''
         elif elem == 'Placemark' and grid2:
             for key, value in list(element.items()):
                 if key == 'id':
                     if value[:4] == 'kml_':
                         placemark_id = value[3:]
                     else:
                         placemark_id = value
         elif elem == 'SimpleData' and grid2:
             for key, value in list(element.items()):
                 if key == 'name' and (value == 'CAPACITY_kV'
                                       or value == 'CAPACITYKV'):
                     try:
                         styl = self.grid2_colors[element.text]
                     except:
                         # Unknown capacity value — fall back to the 66 kV colour.
                         styl = self.grid2_colors['66']
         elif elem == 'styleUrl':
             # styleUrl text is '#<id>'; drop the leading '#'.
             styl = element.text[1:]
         elif elem == 'coordinates':
             # Coordinates come as whitespace-separated 'lon,lat[,alt]'
             # tuples; store them as [lat, lon] pairs.
             coords = []
             coordinates = ' '.join(element.text.split()).split()
             for i in range(len(coordinates)):
                 coords.append([
                     float(coordinates[i].split(',')[1]),
                     float(coordinates[i].split(',')[0])
                 ])
             inmap = False
             for coord in coords:
                 if within_map(coord[0], coord[1], self.map_polygon):
                     inmap = True
                     break
             if inmap:
                 if self.default_length >= 0:
                     # Sum segment distances to get the line's length.
                     grid_len = 0.
                     for j in range(1, len(coords)):
                         grid_len += self.actualDistance(
                             coords[j - 1][0], coords[j - 1][1],
                             coords[j][0], coords[j][1])
                 else:
                     grid_len = self.default_length
                 if line_name in line_names:
                     # Disambiguate duplicate names with a '#2', '#3', ... suffix.
                     i = 2
                     while line_name + '#' + str(i) in line_names:
                         i += 1
                     line_name += '#' + str(i)
                 line_names.append(line_name)
                 if grid2:
                     self.lines.append(
                         Line(line_name, styl, coords, length=grid_len))
                 else:
                     try:
                         self.lines.append(
                             Line(line_name,
                                  style[styl],
                                  coords,
                                  length=grid_len))
                         try:
                             self.lines[-1].initial = styl[2:].replace(
                                 'kv', 'kV')
                         except:
                             pass
                     except:
                         # No colour recorded for this style id — default to white.
                         style[styl] = '#FFFFFF'
                         self.lines.append(
                             Line(line_name,
                                  style[styl],
                                  coords,
                                  length=grid_len))
     if zipped:
         memory_file.close()
         zf.close()
     else:
         kml_data.close()
    def test_export_with_multiple_sessions(self):
        """Test to check event with sessions in pentabarfxml format"""
        with self.app.test_request_context():
            # Day-1 keynote in 'Great Hall'; the factory sub-attributes also
            # create the two-day event (Oct 15-16) and its track.
            keynote = SessionFactory(
                title='Keynote',
                starts_at=datetime(2019, 10, 15, 10, 25, 46),
                ends_at=datetime(2019, 10, 15, 11, 10, 46),
                track__name='Amazing Track',
                microlocation__name='Great Hall',
                event__name='Awesome Conference',
                event__starts_at=datetime(2019, 10, 15),
                event__ends_at=datetime(2019, 10, 16, 13, 30, 00),
            )

            UserFactory()
            # Two speakers on the keynote; 'mario' is reused later.
            mario = SpeakerFactoryBase.build(name='Mario Behling', user_id=1)
            keynote.speakers = [
                mario,
                SpeakerFactoryBase.build(name='Hong Phuc Dang', user_id=1),
            ]

            # Second day-1 session (no speakers).
            SessionFactoryBasic(
                title='Hot Session',
                starts_at=datetime(2019, 10, 15, 11, 30, 00),
                ends_at=datetime(2019, 10, 15, 12, 00, 54),
            )

            # Day-2 session with a single speaker.
            future_session = SessionFactoryBasic(
                title='Future Session',
                starts_at=datetime(2019, 10, 16, 9, 15, 30),
                ends_at=datetime(2019, 10, 16, 10, 30, 45),
            )

            future_session.speakers = [
                SpeakerFactoryBase.build(name='Pranav Mistry', user_id=1)
            ]

            # Day-2 closing session in a second room.
            MicrolocationFactoryBase(name='Assembly Hall')
            end_session = SessionFactoryBasic(
                title='Bye Bye Session',
                starts_at=datetime(2019, 10, 16, 11, 30, 20),
                ends_at=datetime(2019, 10, 16, 13, 00, 30),
                microlocation_id=2,
            )

            end_session.speakers = [mario]

            db.session.commit()
            # Export the event and parse the Pentabarf XML for assertions.
            pentabarf_export = PentabarfExporter()
            pentabarf_string = pentabarf_export.export(keynote.event.id)
            pentabarf_original = fromstring(pentabarf_string)

            # Conference-level metadata.
            self.assertEqual(
                pentabarf_original.find('conference/title').text,
                "Awesome Conference")
            self.assertEqual(
                pentabarf_original.find('conference/start').text, '2019-10-15')
            self.assertEqual(
                pentabarf_original.find('conference/end').text, '2019-10-16')
            self.assertEqual(
                pentabarf_original.find('conference/days').text, '1')

            # Day 1 / room 1: keynote details and both speakers.
            self.assertEqual(
                pentabarf_original.find('day/room').attrib['name'],
                'Great Hall')
            self.assertEqual(
                pentabarf_original.find('day/room/event/title').text,
                'Keynote')
            self.assertEqual(
                pentabarf_original.find('day/room/event/track').text,
                'Amazing Track')
            self.assertEqual(
                pentabarf_original.find('day/room/event/start').text, '10:25')
            self.assertEqual(
                pentabarf_original.find('day/room/event/duration').text,
                '00:45')
            self.assertEqual(
                pentabarf_original.find(
                    'day/room/event/persons/person[@id="2"]').text,
                'Hong Phuc Dang',
            )
            self.assertEqual(
                len(
                    pentabarf_original.find(
                        'day/room/event/persons').getchildren()), 2)

            self.assertEqual(
                pentabarf_original.find('day/room/event[2]/title').text,
                'Hot Session')

            # Day 2: both rooms and their sessions/speakers.
            self.assertEqual(
                pentabarf_original.find('day[2]/room/event/title').text,
                'Future Session')
            self.assertEqual(
                pentabarf_original.find(
                    'day[2]/room/event/persons/person').text,
                'Pranav Mistry',
            )

            self.assertEqual(
                pentabarf_original.find('day[2]/room[2]').attrib['name'],
                'Assembly Hall')
            self.assertEqual(
                pentabarf_original.find('day[2]/room[2]/event/title').text,
                'Bye Bye Session',
            )
            self.assertEqual(
                pentabarf_original.find('day[2]/room[2]/event/duration').text,
                '01:30')
            self.assertEqual(
                pentabarf_original.find(
                    'day[2]/room[2]/event/persons/person').text,
                'Mario Behling',
            )
Example #44
0
 def __init__(self):
     """Load line data from the configured KML/KMZ file into ``self.lines``.

     Reads ``self.kml_file`` (set by ``get_config``), walks the element
     tree collecting style colours, line names and coordinates, and keeps
     each line whose first and middle points fall inside the map polygon.
     Returns early (leaving ``self.lines`` empty) if the file is missing
     or a .kmz archive contains no inner .kml member.
     """
     self.get_config()
     self.lines = []
     if not os.path.exists(self.kml_file):
         return
     style = {}
     styl = ''
     zipped = False
     if self.kml_file[-4:] == '.kmz':  # zipped file?
         # A .kmz is a zip archive; extract the first inner .kml into memory.
         zipped = True
         # BUG FIX: was ``zipfile.ZipFile(kml_file, 'r')`` — ``kml_file`` is
         # undefined in this scope, so any .kmz input raised NameError.
         zf = zipfile.ZipFile(self.kml_file, 'r')
         inner_file = ''
         for name in zf.namelist():
             if name[-4:] == '.kml':
                 inner_file = name
                 break
         if inner_file == '':
             return
         memory_file = io.BytesIO()
         memory_file.write(zf.open(inner_file).read())
         root = ElementTree(fromstring(memory_file.getvalue()))
     else:
         kml_data = open(self.kml_file, 'rb')
         root = ElementTree(fromstring(kml_data.read()))
     # Create an iterator (getiterator() was removed in Python 3.9)
     if sys.version_info[1] < 9:  # before python 3.9
         iterat = root.getiterator()
     else:
         iterat = root.iter()
     for element in iterat:
         # Tag names carry an '{namespace}' prefix; strip it.
         elem = element.tag[element.tag.find('}') + 1:]
         if elem == 'Style':
             for name, value in list(element.items()):
                 if name == 'id':
                     styl = value
         elif elem == 'color':
             style[styl] = self.colour
         elif elem == 'name':
             line_name = element.text
         elif elem == 'styleUrl':
             # styleUrl text is '#<id>'; drop the leading '#'.
             styl = element.text[1:]
         elif elem == 'coordinates':
             # Whitespace-separated 'lon,lat[,alt]' tuples, stored as
             # [lat, lon] pairs rounded to 6 decimal places.
             coords = []
             coordinates = ' '.join(element.text.split()).split()
             for i in range(len(coordinates)):
                 coords.append([
                     round(float(coordinates[i].split(',')[1]), 6),
                     round(float(coordinates[i].split(',')[0]), 6)
                 ])
             i = int(len(coords) / 2)
             # Keep the line only if both its first and middle points lie
             # inside the map polygon.
             if within_map(coords[0][0], coords[0][1], self.map_polygon) and \
                within_map(coords[i][0], coords[i][1], self.map_polygon):
                 try:
                     self.lines.append(Line(line_name, style[styl], coords))
                 except KeyError:
                     # No colour recorded for this style id; use the default
                     # colour. (Was a bare ``except:`` — narrowed to the
                     # KeyError raised by the style lookup.)
                     self.lines.append(Line(line_name, self.colour, coords))
     if zipped:
         memory_file.close()
         zf.close()
     else:
         kml_data.close()
def main():
    """Download every policy from the appliance as a prettified XML file.

    Reads client credentials and the API host from an INI config file,
    enumerates all policies via the REST API, then fetches each policy's
    XML representation and writes it to the output folder as
    ``<guid>_<product>.xml``.
    """
    # Parse arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-c",
                    "--config",
                    dest="config_path",
                    required=True,
                    help="path to config file",
                    type=validate_file,
                    metavar="FILE")
    ap.add_argument("-o",
                    "--output",
                    dest="output_folder",  # fixed typo: was "outout_folder"
                    required=True,
                    help="path to output folder",
                    type=validate_file)
    args = ap.parse_args()

    # Parse config to extract API keys
    config = configparser.ConfigParser()
    config.read(args.config_path)
    client_id = config['settings']['client_id']
    api_key = config['settings']['api_key']
    domainIP = config['settings']['domainIP']

    try:
        # http://docs.python-requests.org/en/master/user/advanced/
        # Using a session object gains efficiency when making multiple requests
        session = requests.Session()
        session.auth = (client_id, api_key)

        # Define URL for extraction of all policies.
        # NOTE(review): verify=False disables TLS certificate validation;
        # acceptable only for appliances with self-signed certificates.
        policy_url = 'https://{}/v1/policies'.format(domainIP)
        response = session.get(policy_url, verify=False)
        # Decode JSON response (unused ``headers`` locals removed)
        response_json = response.json()
        print("[+] Total number of policies: {}".format(
            response_json['metadata']['results']['total']))
        policies = response_json['data']
        # Enumerate all policies and download them to specified folder
        for policy_detail in policies:
            policy_url = 'https://{}/v1/policies/{}'.format(
                domainIP, policy_detail['guid'])
            # Reuse session, get XML out
            response = session.get(policy_url, verify=False)
            # Parse out response as JSON so we can extract at least basic metadata which can be used for further processing
            response_json = response.json()
            # Print out basic details about policy
            print(
                "[+] Downloading Policy. NAME: {} GUID: {} PRODUCT: {}  DEFAULT: {} SERIAL NUMBER: {} URL: {}"
                .format(policy_detail['name'], policy_detail['guid'],
                        policy_detail['product'], policy_detail['default'],
                        policy_detail['serial_number'],
                        policy_detail['links']['policy']))
            policy_url = 'https://{}/v1/policies/{}.xml'.format(
                domainIP, policy_detail['guid'])
            # Final request - get policy XML file, parse and write to file
            response = session.get(policy_url, verify=False)
            # Parse and prettify XML file before writing
            tree = ElementTree(fromstring(response.content))
            # Get Root of XML policy file
            root = tree.getroot()
            b4_xml_obj = BeautifulSoup(tostring(root), "xml")
            # Construct path for writing out policy as XML file
            filename = os.path.join(
                args.output_folder,
                "{}_{}.xml".format(policy_detail['guid'],
                                   policy_detail['product']))
            # Write prettified XML to disk; the context manager guarantees
            # the handle is closed even if the write raises.
            with open(filename, "w") as f:
                f.write(b4_xml_obj.prettify())
    finally:
        print("[+] Done")
        gc.collect()
Example #46
0
    def getInventory(self,
                     network,
                     station='*',
                     location='*',
                     channel='*',
                     starttime=UTCDateTime(),
                     endtime=UTCDateTime(),
                     instruments=True,
                     min_latitude=-90,
                     max_latitude=90,
                     min_longitude=-180,
                     max_longitude=180,
                     modified_after=None,
                     format='SUDS'):
        """
        Returns information about the available networks and stations in that
        particular space/time region.

        :type network: str
        :param network: Network code, e.g. ``'BW'``.
        :type station: str
        :param station: Station code, e.g. ``'MANZ'``. Station code may contain
            wild cards.
        :type location: str
        :param location: Location code, e.g. ``'01'``. Location code may
            contain wild cards.
        :type channel: str
        :param channel: Channel code, e.g. ``'EHE'``. Channel code may contain
            wild cards.
        :type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param starttime: Start date and time.
        :type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param endtime: End date and time.
        :type instruments: boolean, optional
        :param instruments: Include instrument data. Default is ``True``.
        :type min_latitude: float, optional
        :param min_latitude: Minimum latitude, defaults to ``-90.0``
        :type max_latitude: float, optional
        :param max_latitude: Maximum latitude, defaults to ``90.0``
        :type min_longitude: float, optional
        :param min_longitude: Minimum longitude, defaults to ``-180.0``
        :type max_longitude: float, optional
        :param max_longitude: Maximum longitude, defaults to ``180.0``.
        :type modified_after: :class:`~obspy.core.utcdatetime.UTCDateTime`,
            optional
        :param modified_after: Returns only data modified after given date.
            Default is ``None``, returning all available data.
        :type format: ``'XML'`` or ``'SUDS'``, optional
        :param format: Output format. Either returns a XML document or a
            parsed SUDS object. Defaults to ``SUDS``.
        :return: XML document or a parsed SUDS object containing inventory
            information.

        .. rubric:: Example

        >>> from obspy.neries import Client
        >>> from obspy import UTCDateTime
        >>> client = Client(user='******')
        >>> dt = UTCDateTime("2011-01-01T00:00:00")
        >>> result = client.getInventory('GE', 'SNAA', '', 'BHZ', dt, dt+10,
        ...                              instruments=True)
        >>> paz = result.ArclinkInventory.inventory.responsePAZ
        >>> print(paz.poles)  # doctest: +ELLIPSIS
        (-0.037004,0.037016) (-0.037004,-0.037016) (-251.33,0.0) ...
        """
        # enable logging if debug option is set
        if self.debug:
            import logging
            logging.basicConfig(level=logging.INFO)
            logging.getLogger('suds.client').setLevel(logging.DEBUG)
        # initialize client
        # (retxml=True makes suds return the raw SOAP envelope so it can be
        # post-processed as XML below)
        client = SudsClient(SEISMOLINK_WSDL, retxml=(format == 'XML'))
        # set prefixes for easier debugging
        client.add_prefix('gml', 'http://www.opengis.net/gml')
        client.add_prefix('ogc', 'http://www.opengis.net/ogc')
        client.add_prefix('xlin', 'http://www.w3.org/1999/xlink')
        client.add_prefix('urn', 'urn:xml:seisml:orfeus:neries:org')
        # set cache of 5 days
        cache = client.options.cache
        cache.setduration(days=5)
        # create user token
        usertoken = client.factory.create('UserTokenType')
        usertoken.email = self.user
        usertoken.password = self.password
        usertoken.label = self.user_agent.replace(' ', '_')
        usertoken.locale = ""
        # create station filter
        stationid = client.factory.create('StationIdentifierType')
        stationid.NetworkCode = network
        stationid.StationCode = station
        stationid.ChannelCode = channel
        stationid.LocId = location
        stationid.TimeSpan.TimePeriod.beginPosition = \
            UTCDateTime(starttime).strftime("%Y-%m-%dT%H:%M:%S")
        stationid.TimeSpan.TimePeriod.endPosition = \
            UTCDateTime(endtime).strftime("%Y-%m-%dT%H:%M:%S")
        # create spatial filters
        # (min/max are applied so swapped bounds still form a valid envelope)
        spatialbounds = client.factory.create('SpatialBoundsType')
        spatialbounds.BoundingBox.PropertyName = "e gero"
        spatialbounds.BoundingBox.Envelope.lowerCorner = "%f %f" %\
            (min(min_latitude, max_latitude),
             min(min_longitude, max_longitude))
        spatialbounds.BoundingBox.Envelope.upperCorner = "%f %f" %\
            (max(min_latitude, max_latitude),
             max(min_longitude, max_longitude))
        # instruments attribute
        if instruments:
            client.options.plugins.append(
                _AttributePlugin({'Instruments': 'true'}))
        else:
            client.options.plugins.append(
                _AttributePlugin({'Instruments': 'false'}))
        # modified_after attribute
        if modified_after:
            dt = UTCDateTime(modified_after).strftime("%Y-%m-%dT%H:%M:%S")
            client.options.plugins.append(
                _AttributePlugin({'ModifiedAfter': dt}))
        # add version attribute needed for instruments
        client.options.plugins.append(_AttributePlugin({'Version': '1.0'}))
        # request data
        response = client.service.getInventory(usertoken, stationid,
                                               spatialbounds)
        if format == 'XML':
            # response is a full SOAP response
            # extract the ArclinkInventory payload from the SOAP envelope
            from xml.etree.ElementTree import fromstring, tostring
            temp = fromstring(response)
            xpath = '*/*/{urn:xml:seisml:orfeus:neries:org}ArclinkInventory'
            inventory = temp.find(xpath)
            # export XML prepending a XML declaration
            XML_DECLARATION = "<?xml version='1.0' encoding='UTF-8'?>\n\n"
            return XML_DECLARATION + tostring(inventory, encoding='utf-8')
        else:
            # response is a SUDS object
            return response
Example #47
0
            print
            print

        else:
            first_run = False

        hits_xml_string = None
        with open(_hits_file) as infile:
            hits_xml_string = unicode(infile.read(), "utf-8")

        # Validate XML before trying to import anything from the given file.
        validate_hits_xml_file(hits_xml_string)

        _errors = 0
        _total = 0
        _tree = fromstring(hits_xml_string.encode("utf-8"))

        for _child in _tree:
            block_id = _child.attrib["block-id"]
            language_pair = '{0}2{1}'.format(_child.attrib["source-language"],
                                             _child.attrib["target-language"])

            # Hotfix potentially wrong ISO codes;  we are using ISO-639-3.
            iso_639_2_to_3_mapping = {
                'cze': 'ces',
                'fre': 'fra',
                'ger': 'deu',
                'ron': 'rom',
                'tur': 'trk',
                'eus': 'baq'
            }
Example #48
0
		<agent-identifier>
			<name id="[email protected]"/>
			<addresses>
				<url href="xmpp://[email protected]"/>
			</addresses>
		</agent-identifier>
	</receiver>
	<content>[email protected]|45|NS</content>
	<language>OWL-S</language>
	<ontology>init</ontology>
	<conversation-id>677446f6a372433aa7e281becde007eb</conversation-id>
</fipa-message>"""
# xmldoc = minidom.parse(s)
# itemlist = xmldoc.getElementsByTagName('sender')
# print itemlist
# print itemlist[0].attributes['name'].value
# for s in itemlist:
#     print(s.attributes['name'].value)
from xml.etree.ElementTree import XML, fromstring, tostring

# Parse the FIPA-ACL message XML (the triple-quoted string `s` defined above).
# NOTE: Python 2 script -- uses `print` statements.
root = fromstring(s)
print root

# First <receiver> element and its nested <agent-identifier>/<name>.
receiver = root.findall('receiver')[0]
agent_identifier = receiver.find('agent-identifier')
print agent_identifier
name_id = agent_identifier.find("name")
print name_id.attrib

# <content> carries a '|'-separated payload (address|number|code).
content = root.find('content')
print content.text.split("|")
Example #49
0
    def iter_upload(
        self,
        dandiset: RemoteDandiset,
        metadata: dict[str, Any],
        jobs: Optional[int] = None,
        replacing: Optional[RemoteAsset] = None,
    ) -> Iterator[dict]:
        """
        Upload the file as an asset with the given metadata to the given
        Dandiset, returning a generator of status `dict`\\s.

        :param RemoteDandiset dandiset:
            the Dandiset to which the file will be uploaded
        :param dict metadata:
            Metadata for the uploaded asset.  The "path" field will be set to
            the value of the instance's ``path`` attribute if no such field is
            already present.
        :param int jobs: Number of threads to use for uploading; defaults to 5
        :param RemoteAsset replacing:
            If set, replace the given asset, which must have the same path as
            the new asset
        :returns:
            A generator of `dict`\\s containing at least a ``"status"`` key.
            Upon successful upload, the last `dict` will have a status of
            ``"done"`` and an ``"asset"`` key containing the resulting
            `RemoteAsset`.
        :raises RuntimeError:
            if the locally computed dandi-etag disagrees with the one already
            recorded in ``metadata``, with the server's part count, or with
            the server's final ETag
        """
        # "path" defaults to this file's own path when the caller didn't set it.
        asset_path = metadata.setdefault("path", self.path)
        client = dandiset.client
        yield {"status": "calculating etag"}
        # Compute the dandi-etag locally; it is checked against any etag
        # already present in the metadata and, later, the server's result.
        etagger = get_dandietag(self.filepath)
        filetag = etagger.as_str()
        lgr.debug("Calculated dandi-etag of %s for %s", filetag, self.filepath)
        digest = metadata.get("digest", {})
        if "dandi:dandi-etag" in digest:
            if digest["dandi:dandi-etag"] != filetag:
                raise RuntimeError(
                    f"{self.filepath}: File etag changed; was originally"
                    f" {digest['dandi:dandi-etag']} but is now {filetag}")
        yield {"status": "initiating upload"}
        lgr.debug("%s: Beginning upload", asset_path)
        total_size = self.size
        try:
            resp = client.post(
                "/uploads/initialize/",
                json={
                    "contentSize": total_size,
                    "digest": {
                        "algorithm": "dandi:dandi-etag",
                        "value": filetag,
                    },
                    "dandiset": dandiset.identifier,
                },
            )
        except requests.HTTPError as e:
            # 409 Conflict: an identical blob already exists on the server;
            # reuse it (its id is carried in the Location header).
            if e.response.status_code == 409:
                lgr.debug("%s: Blob already exists on server", asset_path)
                blob_id = e.response.headers["Location"]
            else:
                raise
        else:
            upload_id = resp["upload_id"]
            parts = resp["parts"]
            # Sanity check: server part layout must match the local etagger's.
            if len(parts) != etagger.part_qty:
                raise RuntimeError(
                    f"Server and client disagree on number of parts for upload;"
                    f" server says {len(parts)}, client says {etagger.part_qty}"
                )
            parts_out = []
            bytes_uploaded = 0
            lgr.debug("Uploading %s in %d parts", self.filepath, len(parts))
            with RESTFullAPIClient("http://nil.nil") as storage:
                with self.filepath.open("rb") as fp:
                    with ThreadPoolExecutor(max_workers=jobs or 5) as executor:
                        # Workers share one open file handle; the lock is
                        # passed to _upload_blob_part -- presumably it
                        # serializes seek+read on `fp` (confirm there).
                        lock = Lock()
                        futures = [
                            executor.submit(
                                _upload_blob_part,
                                storage_session=storage,
                                fp=fp,
                                lock=lock,
                                etagger=etagger,
                                asset_path=asset_path,
                                part=part,
                            ) for part in parts
                        ]
                        # Yield a progress dict as each part completes
                        # (completion order, not part order).
                        for fut in as_completed(futures):
                            out_part = fut.result()
                            bytes_uploaded += out_part["size"]
                            yield {
                                "status": "uploading",
                                "upload": 100 * bytes_uploaded / total_size,
                                "current": bytes_uploaded,
                            }
                            parts_out.append(out_part)
                lgr.debug("%s: Completing upload", asset_path)
                resp = client.post(
                    f"/uploads/{upload_id}/complete/",
                    json={"parts": parts_out},
                )
                lgr.debug(
                    "%s: Announcing completion to %s",
                    asset_path,
                    resp["complete_url"],
                )
                r = storage.post(resp["complete_url"],
                                 data=resp["body"],
                                 json_resp=False)
                lgr.debug(
                    "%s: Upload completed. Response content: %s",
                    asset_path,
                    r.content,
                )
                # The completion endpoint answers with XML; pull out the
                # (possibly namespaced) ETag and verify it matches the
                # locally computed dandi-etag.
                rxml = fromstring(r.text)
                m = re.match(r"\{.+?\}", rxml.tag)
                ns = m.group(0) if m else ""
                final_etag = rxml.findtext(f"{ns}ETag")
                if final_etag is not None:
                    final_etag = final_etag.strip('"')
                    if final_etag != filetag:
                        raise RuntimeError(
                            "Server and client disagree on final ETag of uploaded file;"
                            f" server says {final_etag}, client says {filetag}"
                        )
                # else: Error? Warning?
                resp = client.post(f"/uploads/{upload_id}/validate/")
                blob_id = resp["blob_id"]
        lgr.debug("%s: Assigning asset blob to dandiset & version", asset_path)
        yield {"status": "producing asset"}
        if replacing is not None:
            # Overwrite the given asset's blob/metadata in place.
            lgr.debug("%s: Replacing pre-existing asset", asset_path)
            r = client.put(
                replacing.api_path,
                json={
                    "metadata": metadata,
                    "blob_id": blob_id
                },
            )
        else:
            # Create a brand-new asset in this version of the Dandiset.
            r = client.post(
                f"{dandiset.version_api_path}assets/",
                json={
                    "metadata": metadata,
                    "blob_id": blob_id
                },
            )
        a = RemoteAsset.from_data(dandiset, r)
        lgr.info("%s: Asset successfully uploaded", asset_path)
        yield {"status": "done", "asset": a}
import math
import os
import json

# Fix: the script calls requests.get() below but never imported requests,
# which would raise NameError at runtime.
import requests

from constants import *
from utils import create_data_folder
from xmljson import badgerfish as bf
from xml.etree.ElementTree import fromstring

# Fetch NEA weather feeds (2-hour forecast, heavy-rain warning, PSI) and
# persist each one locally as badgerfish-style JSON.

# API key lives in the user's home directory; close the file after reading
# (the original leaked the handle).
home = os.path.expanduser("~")
with open(home + "/keys/neaweather.key") as key_file:
    key = key_file.read().strip()

path = create_data_folder()

# Target files for each feed (names come from constants).
two_hour_file = path + "/" + TWO_HOUR_FILENAME
heavy_rain_file = path + "/" + HEAVY_RAIN_FILENAME
psi_file = path + "/" + PSI_FILENAME

# Download the three XML payloads.
two_hour = requests.get(TWO_HOUR + key).text
heavy_rain = requests.get(HEAVY_RAIN + key).text
psi_update = requests.get(PSI + key).text

# Convert XML -> dict (badgerfish convention) and dump each feed to disk.
with open(two_hour_file, "w") as f:
    json.dump(bf.data(fromstring(two_hour)), f)

with open(heavy_rain_file, "w") as f:
    json.dump(bf.data(fromstring(heavy_rain)), f)

with open(psi_file, "w") as f:
    json.dump(bf.data(fromstring(psi_update)), f)
Example #51
0
def test_get_pom_info_no_parent(pom, expected):
    """get_pom_info should extract the expected data from a parent-less POM."""
    parsed_pom = fromstring(pom)
    with patch('vang.maven.pom.parse',
               autospec=True,
               return_value=parsed_pom):
        assert get_pom_info('pom_path') == expected
Example #52
0
def get_all_courses(department_url):
    """Return the list of course entries for the given department endpoint."""
    response_text = requests.get(department_url).text
    parsed = bf.data(fromstring(response_text))
    subject = parsed['{http://rest.cis.illinois.edu}subject']
    return subject['courses']['course']
Example #53
0
def xml_to_dict(path_xml):
    """Read an XML file and convert it to a plain dict (BadgerFish convention).

    :param path_xml: path of the UTF-8 XML file to read.
    :return: dict produced by xmljson's BadgerFish converter.
    """
    with open(path_xml, 'r', encoding='utf-8') as xml_file:
        content = xml_file.read()
    # Fix: dropped the stray `print(ss)` debug statement that dumped the
    # entire file to stdout on every call.
    return BadgerFish(dict_type=dict).data(fromstring(content))
Example #54
0
def get_a_course(course_url):
    """Fetch a single course record from the given course endpoint."""
    payload = bf.data(fromstring(requests.get(course_url).text))
    return payload['{http://rest.cis.illinois.edu}course']
Example #55
0
def convert_XML2Json(f_path):
    """Parse the XML file at *f_path* and return its abdera JSON representation."""
    with open(f_path) as source:
        raw_xml = source.read()
    return abdera.data(fromstring(raw_xml))
Example #56
0
    <color name="WindowTitleBackground" color="black" />
  </windowstyle>
  <fonts>
    <font filename="nmsbd.ttf" name="Regular" scale="95" />
    <font filename="lcd.ttf" name="LCD" scale="100" />
    <font filename="ae_AlMateen.ttf" name="Replacement" scale="90" replacement="1" />
    <font filename="tuxtxt.ttf" name="Console" scale="100" />
    <font filename="/usr/share/enigma2/Elgato-HD/num.ttf" name="Num" scale="100" />
  </fonts>
  </foo>
"""

# Merge the header XML (`elegantohd`, defined above) with the skin file
# given as the first CLI argument into a single <skin> tree.
infile = open(sys.argv[1])
main = parse(infile).getroot()

header = fromstring(elegantohd)

root = Element("skin")
# NOTE(review): relies on Python 2's eager map(); under Python 3 this map is
# lazy and would append nothing -- confirm the target interpreter.
map(root.append, header)
root.append(main)

# Output path: second CLI argument, or skin.xml (backing up an existing one).
if len(sys.argv) > 2:
    outfile = sys.argv[2]
else:
    outfile = 'skin.xml'
    from os.path import exists
    if exists(outfile):
        from shutil import copy
        copy(outfile, outfile+'.backup')

tree = ElementTree(root)
 def load_fixtures(self, commands=None):
     """Point the mocked parser at the pfSense alias config fixture."""
     fixture_xml = load_fixture('pfsense_alias_config.xml')
     self.parse.return_value = ElementTree(fromstring(fixture_xml))
Example #58
0
 def __init__(self, xml_el):
     """Wrap an XML element; accepts either an Element or an XML string.

     :param xml_el: an ElementTree Element, or a string of XML to parse.
     """
     # Fix: use isinstance() instead of an exact type() comparison so that
     # str subclasses are also parsed.
     if isinstance(xml_el, str):
         xml_el = fromstring(xml_el)
     self.xml_el = xml_el
Example #59
0
 def test_parse_post_datetime_converstion(self):
     """parse_post should convert the exported pubDate into a datetime."""
     post = fromstring(wp_export_fragment(self.post_text))
     parsed_post = parse_post(post)
     # Fix: assertEquals is a deprecated alias, removed in Python 3.12;
     # use the canonical assertEqual.
     self.assertEqual(parsed_post['pubDate'],
                      datetime.datetime(2012, 5, 27, 17, 15, 14))
Example #60
0
    if not stderr:
        for line in stdout.split("\n"):
            if "first job" in line:
                jobid = (line.split(":")[1]).strip(" ")
            if "job(s)" in line:
                nJobs = (line.split(" ")[0]).strip(" ")
        theSub = args.name
        print theSub
        print "Found jobid:", jobid
        print "nJobs: ", nJobs
        nomvar = "nom" if ("nom" in theSub) else "var"
        print nomvar
        jobid_pre = jobid.split("@")[0]
        print jobid.split("@")
        builder.start("Sub", {"Name": theSub, "Sample": sample, "Type": "nom"})
        for i in range(0, int(nJobs)):
            jobid_end_num = int(jobid_pre[-1]) + i
            new_jobid = jobid_pre[0:-1] + str(jobid_end_num)
            builder.start("Job", {"ID": new_jobid, "N": str(i)})
            builder.end("Job")
        builder.end("Sub")
    else:
        print "Error\n", stderr
# Close the top-level <Jobs> element opened earlier in the script.
builder.end("Jobs")

root = builder.close()
# Pretty-print via minidom: serialize, reparse, then rebuild an ElementTree
# from the indented text before writing to the output path.
rough = tostring(root, 'utf-8')
reparsed = xdm.parseString(rough)
tree = ElementTree(fromstring(reparsed.toprettyxml(indent=" ")))
tree.write(args.o)