def _parse(self):
        out = io.BytesIO()

        with io.open(self.resource_file, mode='rb') as f:
            out.write(xor(bytearray(f.read())))

        try:
            temp = zipfile.ZipFile(out)
        except zipfile.BadZipfile:
            # re-raise the original error with its traceback intact
            raise

        if self.alias in temp.namelist():
            gw_info = xor(bytearray(temp.read(self.alias)))
        else:
            raise ValueError('Invalid alias {0} for resource file {1}, {2}'.format(
                self.alias, os.path.basename(self.resource_file), temp.namelist()))
        # Python 3: fromstring() may reject a bytearray (TypeError); fall back to str
        try:
            xml = et.fromstring(gw_info)
        except TypeError:
            xml = et.fromstring(str(gw_info))

        for node in xml:
            if node.tag in self.gw:
                self.gw[node.tag] = node.text or ''
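A minimal round-trip sketch of the XOR-obfuscated zip scheme that _parse reads, assuming a single-byte-key xor helper (the real key and helper live elsewhere in the module):

import io
import zipfile

def xor(data, key=0x5A):
    # hypothetical helper: XOR every byte with a fixed key; applying it twice
    # restores the original bytes, which is what lets _parse decode in place
    return bytearray(b ^ key for b in data)

# build an obfuscated archive the way a resource file would be produced ...
buf = io.BytesIO()
with zipfile.ZipFile(buf, 'w') as zf:
    zf.writestr('alias1', bytes(xor(bytearray(b'<gw><host>10.0.0.1</host></gw>'))))
obfuscated = bytes(xor(bytearray(buf.getvalue())))

# ... and decode it the way _parse does
plain = io.BytesIO(bytes(xor(bytearray(obfuscated))))
with zipfile.ZipFile(plain) as zf:
    print(bytes(xor(bytearray(zf.read('alias1')))))  # b'<gw><host>10.0.0.1</host></gw>'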
Example #2
    def test_rpm(self):
        namespaces = [
            utils.Namespace('rpm', primary.RPM_SPEC_URL),
        ]
        raw_xml = utils.element_to_raw_xml(self.rpm_element, namespaces, primary.COMMON_SPEC_URL)

        # make sure it stripped out any namespace declarations and root elements
        self.assertTrue(re.match(r'^<package +type="rpm">', raw_xml))
        # make sure there are no stray closing elements, like </metadata>
        self.assertTrue(raw_xml.rstrip().endswith('</package>'))
        # make sure it preserved the "rpm" prefix
        self.assertTrue(re.search(r'<rpm:license *>GPLv2</rpm:license>', raw_xml))
        # make sure it got the requires and provides entries
        self.assertIn('dolphin', raw_xml)
        self.assertIn('penguin', raw_xml)
        # these should all be stripped out
        self.assertNotIn('xmlns', raw_xml)
        # had this problem on python 2.6 where it treated the default namespace
        # as a namespace with prefix ''
        self.assertNotIn('<:', raw_xml)

        # try to re-parse the XML to make sure it's valid. fake tag is necessary
        # to declare the prefix "rpm"
        fake_xml = '<fake xmlns:rpm="http://pulpproject.org">%s</fake>' % raw_xml
        # fromstring just to make sure this is valid
        ET.fromstring(fake_xml)
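A small sketch of why the test's "rpm" prefix survives serialization: ElementTree invents ns0-style prefixes on output unless the prefix is registered first (using the same placeholder URI as the fake tag above):

import xml.etree.ElementTree as ET

ET.register_namespace('rpm', 'http://pulpproject.org')
el = ET.fromstring('<package xmlns:rpm="http://pulpproject.org">'
                   '<rpm:license>GPLv2</rpm:license></package>')
print(ET.tostring(el))  # keeps <rpm:license> instead of <ns0:license>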
Example #3
File: tr.py Project: mpetyx/pyrif
def main():
    if len(sys.argv) == 1:
        stream = sys.stdin
        doc = etree.fromstring(stream.read())
        stream.close()
        base = None
    else:
        src = sys.argv[1]
        if ":" in src:
            stream = urllib2.urlopen(src) 
            base = src
            doc = etree.fromstring(stream.read())
            stream.close()
        else:
            with open(src) as stream:
                doc = etree.fromstring(stream.read())
                base = "file:/"+src
    
    if doc.tag != rif.Document:
        error(doc, "Root element is not rif:Document.")
    if base:
        focus = LabeledNode(base)   # was LabeledNode(focus): `focus` used before assignment
    else:
        focus = BlankNode()
    (focus, triples) = describe(doc, focus, base)

    print "# RIF focus is", focus.as_turtle()
    print "# %d triples" % len(triples)
    for (s,p,o) in triples:
        print s.as_turtle(), "<"+p+">", o.as_turtle(),"."
Example #4
    def _sendNMJ(self, host):
        """
        Send a NMJ update command to the specified machine

        host: The hostname/IP to send the request to (no port)
        database: The database to send the request to
        mount: The mount URL to use (optional)

        return: True if the request succeeded, False otherwise
        """
        # if a host is provided then attempt to open a handle to that URL
        try:
            url_scandir = 'http://' + host + ':8008/metadata_database?arg0=update_scandir&arg1=' + app.NMJv2_DATABASE + '&arg2=&arg3=update_all'
            log.debug(u'NMJ scan update command sent to host: {0}', host)
            url_updatedb = 'http://' + host + ':8008/metadata_database?arg0=scanner_start&arg1=' + app.NMJv2_DATABASE + '&arg2=background&arg3='
            log.debug(u'NMJ database update command sent to host: {0}', host)
            prereq = Request(url_scandir)
            req = Request(url_updatedb)
            handle1 = urlopen(prereq)
            response1 = handle1.read()
            time.sleep(0.3)
            handle2 = urlopen(req)
            response2 = handle2.read()
        except IOError as error:
            log.warning(u'Warning: Unable to contact popcorn hour on host {0}: {1}', host, error)
            return False
        try:
            et = etree.fromstring(response1)
            result1 = et.findtext('returnValue')
        except SyntaxError as error:
            log.error(u'Unable to parse XML returned from the Popcorn Hour: update_scandir, {0}', error)
            return False
        try:
            et = etree.fromstring(response2)
            result2 = et.findtext('returnValue')
        except SyntaxError as error:
            log.error(u'Unable to parse XML returned from the Popcorn Hour: scanner_start, {0}', error)
            return False

        # if the result was a number then consider that an error
        error_codes = ['8', '11', '22', '49', '50', '51', '60']
        error_messages = ['Invalid parameter(s)/argument(s)',
                          'Invalid database path',
                          'Insufficient size',
                          'Database write error',
                          'Database read error',
                          'Open fifo pipe failed',
                          'Read only file system']
        if int(result1) > 0:
            index = error_codes.index(result1)
            log.error(u'Popcorn Hour returned an error: {0}', error_messages[index])
            return False
        if int(result2) > 0:
            index = error_codes.index(result2)
            log.error(u'Popcorn Hour returned an error: {0}', error_messages[index])
            return False
        log.info(u'NMJv2 started background scan')
        return True
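The parallel error_codes/error_messages lists above raise a ValueError if the firmware ever returns a code that is not in the list; a dict-based sketch of the same lookup degrades more gracefully:

NMJ_ERRORS = {
    '8': 'Invalid parameter(s)/argument(s)',
    '11': 'Invalid database path',
    '22': 'Insufficient size',
    '49': 'Database write error',
    '50': 'Database read error',
    '51': 'Open fifo pipe failed',
    '60': 'Read only file system',
}

def nmj_error_message(code):
    # unknown codes fall back to a generic message instead of raising
    return NMJ_ERRORS.get(code, 'Unknown error code {0}'.format(code))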
Example #5
 def create_etree(self, url):
     content = self.get_content(url)
     try:
         etree = ET.fromstring(content)
     except SyntaxError:
         etree = ET.fromstring('<null></null>')
     return etree
Example #6
 def __new__(cls,tag,thing = None,*args,**kwargs):
   if hasattr(tag,'__xml__'):
     return tag.__xml__()
    self = object.__new__(cls)  # respect subclasses instead of hard-coding xml
   if cElementTree.iselement(tag):
     self.__content = tag
   elif isinstance(tag,cElementTree.ElementTree):
     self.__content = tag.getroot()
   elif is_file(tag):
     self.__content = cElementTree.parse(tag).getroot()
   elif isinstance(tag,str) and len(tag) > 0 and tag[0] == '<':
     self.__content = cElementTree.fromstring(tag)
   else:
     if type(tag) != str:
       raise TypeError("Cannot convert %s object to xml" % str(type(tag)))
     self.__content = cElementTree.fromstring('<%s/>' % tag)
     if is_text(thing) or type(thing) == int:
       self.__content.text = text(thing)
      elif thing is not None:
       self.append(xml(thing))
     for subthing in args:
       self.append(xml(subthing))
     for key,value in kwargs.items():
       if key == '__class' or key == 'klass':
         self['class'] = value
       else:
         self[key] = value
   if '{' in self.__content.tag:
     self.__prefix = PREFIX_PAT.search(self.__content.tag).groups()[0]
   else:
     self.__prefix = ''
   return self
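PREFIX_PAT is defined elsewhere in that module; a plausible reconstruction (an assumption, not the original) that matches how it is used on Clark-notation tags:

import re

# hypothetical reconstruction: capture the namespace URI inside {...}
PREFIX_PAT = re.compile(r'^\{(.*)\}')

print(PREFIX_PAT.search('{http://www.w3.org/1999/xhtml}div').groups()[0])
# http://www.w3.org/1999/xhtml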
Example #7
    def _getetsrc(self, url, language=None):
        """Loads a URL using caching, returns an ElementTree of the source
        """
        src = self._loadUrl(url, language=language)

        # TVDB doesn't sanitize \r (CR) from user input in some fields,
        # remove it to avoid errors. Change from SickBeard, from will14m
        if not IS_PY2:
            # Remove all \r bytes from the response
            src = src.replace(b"\r", b"")
        else:
            src = src.rstrip("\r") # FIXME: this seems wrong

        try:
            return ElementTree.fromstring(src)
        except SyntaxError:
            src = self._loadUrl(url, recache=True, language=language)
            try:
                return ElementTree.fromstring(src)
            except SyntaxError as exceptionmsg:
                errormsg = "There was an error with the XML retrieved from thetvdb.com:\n%s" % (
                    exceptionmsg
                )

                if self.config['cache_enabled']:
                    errormsg += "\nFirst try emptying the cache folder at..\n%s" % (
                        self.config['cache_location']
                    )

                errormsg += "\nIf this does not resolve the issue, please try again later. If the error persists, report a bug on"
                errormsg += "\nhttp://dbr.lighthouseapp.com/projects/13342-tvdb_api/overview\n"
                raise tvdb_error(errormsg)
Example #8
    def test_integration(self):

        print("Start Integration-Tests")

        test_context = self.get_testcontext(self.env)

        command_package = ["swid_generator", "swid", "--full", "--pretty", "--package", test_context['package_name']]
        output_swid_tag = self.get_tree_output_from_cmd(command_package)
        expected_swid_tag = test_context['template_full_pretty_cmd_package']
        self.check_equality(expected_swid_tag, output_swid_tag)

        command_swid = ["swid_generator", "swid", "--pretty", "--package", test_context['package_name']]
        output_swid_tag = self.get_tree_output_from_cmd(command_swid)
        expected_swid_tag = test_context['template_no_payload_cmd_package']
        self.check_equality(expected_swid_tag, output_swid_tag)

        command_package = "swid_generator swid --pretty --full --package {PACKAGE} --pkcs12 {CERTIFICATE} --pkcs12-pwd R4onQ7UdCbDoFPeH"
        command_package = command_package.format(CERTIFICATE=test_context['certificate'], PACKAGE=test_context['package_name'])
        output_swid_tag = self.get_string_output_from_cmd(command_package.split(' '))
        expected_swid_tag = test_context['template_full_pretty_signed_cmd_package']

        self.validate_signature(output_swid_tag)
        self.check_equality(expected_swid_tag, ET.fromstring(output_swid_tag))

        command_package_file = "swid_generator swid --full --pretty --package-file {PACKAGE_FILE}"
        command_package_file = command_package_file.format(PACKAGE_FILE=test_context['package_path'])
        output_swid_tag = self.get_tree_output_from_cmd(command_package_file.split(' '))
        expected_swid_tag = test_context['template_full_pretty_cmd_package_file']
        self.check_equality(expected_swid_tag, output_swid_tag)

        command_package_file = "swid_generator swid --pretty --package-file {PACKAGE_FILE}"
        command_package_file = command_package_file.format(PACKAGE_FILE=test_context['package_path'])
        output_swid_tag = self.get_tree_output_from_cmd(command_package_file.split(' '))
        expected_swid_tag = test_context['template_no_payload_cmd_package_file']
        self.check_equality(expected_swid_tag, output_swid_tag)

        command_package_file = "swid_generator swid --pretty --full --package-file {PACKAGE_FILE} --pkcs12 {CERTIFICATE} --pkcs12-pwd R4onQ7UdCbDoFPeH"
        command_package_file = command_package_file.format(CERTIFICATE=test_context['certificate'], PACKAGE_FILE=test_context['package_path'])
        output_swid_tag = self.get_string_output_from_cmd(command_package_file.split(' '))
        expected_swid_tag = test_context['template_full_pretty_signed_cmd_package_file']

        self.validate_signature(output_swid_tag)
        self.check_equality(expected_swid_tag, ET.fromstring(output_swid_tag))

        # Prepare Folders and Files for evidence
        self.create_folder("/tmp/evidence-test")
        self.create_folder("/tmp/evidence-test/sub1")
        self.create_folder("/tmp/evidence-test/sub2")
        self.create_folder("/tmp/evidence-test/sub3")

        self.touch("/tmp/evidence-test/sub1/testfile1")
        self.touch("/tmp/evidence-test/sub1/testfile2")
        self.touch("/tmp/evidence-test/sub2/testfile2")
        self.touch("/tmp/evidence-test/sub3/testfile3")

        command_evidence = "swid_generator swid --full --pretty --evidence {EVIDENCE_PATH} --name evidence --version-string 1.0"
        command_evidence = command_evidence.format(EVIDENCE_PATH=test_context['evidence_test_folder'])
        output_swid_tag = self.get_tree_output_from_cmd(command_evidence.split(' '))
        expected_swid_tag = test_context['template_evidence']
        self.check_equality(expected_swid_tag, output_swid_tag)
Example #9
def to_xml(obj, root="object", pretty=False, header=True, dasherize=True):
    """Convert a dictionary or list to an XML string.

    Args:
        obj: The dictionary/list object to convert.
        root: The name of the root xml element.
        pretty: Whether to pretty-format the xml (default False).
        header: Whether to include an xml header (default True).
        dasherize: Whether to convert underscores to dashes in
                   attribute names (default True).
    Returns:
        An xml string.
    """
    root = dasherize and root.replace("_", "-") or root
    root_element = ET.Element(root)
    if isinstance(obj, list):
        root_element.set("type", "array")
        for i in obj:
            element = ET.fromstring(to_xml(i, root=singularize(root), header=False))
            root_element.append(element)
    else:
        for key, value in obj.iteritems():
            key = dasherize and key.replace("_", "-") or key
            if isinstance(value, dict) or isinstance(value, list):
                element = ET.fromstring(to_xml(value, root=key, header=False))
                root_element.append(element)
            else:
                element = ET.SubElement(root_element, key)
                serialize(value, element)
    if pretty:
        xml_pretty_format(root_element)
    xml_data = ET.tostring(root_element)
    if header:
        return XML_HEADER + "\n" + xml_data
    return xml_data
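A hypothetical call, assuming the module's XML_HEADER, singularize, serialize and xml_pretty_format helpers are in scope, to show the shape of the output:

# child keys are dasherized, so first_name becomes <first-name>
print(to_xml({'first_name': 'Ada', 'year': 1815}, root='person'))
# roughly: <?xml ...?><person><first-name>Ada</first-name><year>1815</year></person>
# (exact text depends on serialize() and the XML_HEADER constant)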
Example #10
 def parseVideo(self):
     self.url = self.url.replace('playHothtml5', 'playHot')
     logging.info("parseVideo: %s", self.url)
     responseString = self.fetchWeb(self.url, ua=self.ua)
     root = ET.fromstring(responseString)
     title = self.getElementText(root, 'name')
     total = int(self.getElementText(root, 'total'))
     series = int(self.getElementText(root, 'series'))
     previousVideo, nextVideo, allRelatedVideo = self.getRelatedVideos(total, series, title)
     if total > 1:
         title += '-%s' % series
     videoUrl = self.getElementText(root, 'medias/media/seg/newurl').replace('&amp;', '&')
     step2url = None
     for e in root.findall('medias/media/seg/gslblist/gslb/subgslb'):
         step2url = e.text
         if step2url.startswith('http://g3.letv.com'):
             break
     responseString = self.fetchWeb(step2url, ua=self.ua)
     urls = [u.replace('&amp;', '&') for u in (self.getAllElementText(ET.fromstring(responseString), 'nodelist/node'))]
     ranked = BatchRequests(urls, header_only=False, headers={"User-Agent": "Mozilla/5.0", "Range": "bytes=0-1000"}).rank()
     videoUrl = ranked[0]
     alternativeUrls = ranked
     logging.debug("alternativeUrls = %s", alternativeUrls)
     logging.info('videoUrl = %s', videoUrl)
     duration = self.getElementText(root, 'medias/media/seg/duration')
     with open(playlistStorage, 'w') as f:
         f.write(videoUrl)
     return Video(title.encode('utf8'), urllib2.quote(self.url), playlistStorage, duration, self.site,
                  previousVideo=previousVideo, nextVideo=nextVideo, allRelatedVideo=allRelatedVideo,
                  alternativeUrls=alternativeUrls)
Example #11
    def getIDs(self, queryURL, maxHits=0):
        ids = []
        cnt = 1

        # first batch of results
        result = self.dispatchRequest(queryURL)
        t = tree.fromstring(result)
        ids.extend([x.text for x in t.find('IdList').findall('Id')])
        hits = int(t.find('Count').text)
        print 'Total hits: ', hits

        print 'batch: %d, got: %d' % (cnt, len(ids))

        # if we have enough already
        if maxHits > 0 and (len(ids) > maxHits or maxHits > hits):
            return ids[:maxHits]

        # if there are more, get them also with retstart option
        while len(ids) < hits:
            nq = queryURL + '&retstart=%d&retmax=%d' % (len(ids), self.maxdoc)
            result = self.dispatchRequest(nq)
            t = tree.fromstring(result)
            ids.extend([x.text for x in t.find('IdList').findall('Id')])
            cnt += 1
            print 'batch: %d, total: %d' % (cnt, len(ids))
            if maxHits and len(ids) >= maxHits:
                break
        #end
        if maxHits:
            return ids[:maxHits]
        else:
            return ids
Example #12
def epubparser(file):
    tree = None

    dc_namespace = "{http://purl.org/dc/elements/1.1/}"

    zf = zipfile.ZipFile(file)

    container = ElementTree.fromstring(zf.read("META-INF/container.xml"))
    rootfile = container.find("{urn:oasis:names:tc:opendocument:xmlns:container}rootfiles/{urn:oasis:names:tc:opendocument:xmlns:container}rootfile").get("full-path")

    data = zf.read(rootfile)
    tree = ElementTree.fromstring(data)

    if tree is not None:
        e = tree.find('{http://www.idpf.org/2007/opf}metadata')
        # a childless Element is falsy, so test "found" with `is not None`
        if e is not None:
            di = {}
            for child in e:
                di[child.tag.replace(dc_namespace, "")] = child.text

            bauthor = di.get("creator", "unknown")
            btitle = di.get("title", "unknown")
            bgenre = di.get("subject", "unknown")

            return (bauthor, btitle, bgenre)
        else:
            return defaultparser(file)
    else:
        return defaultparser(file)
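The `is not None` checks matter here; a minimal demonstration of the ElementTree truthiness pitfall they avoid:

import xml.etree.ElementTree as ElementTree

meta = ElementTree.fromstring('<metadata/>')  # element exists but has no children
print(meta is not None)  # True  -- find() succeeded
print(len(meta))         # 0     -- so a plain `if meta:` would wrongly treat it as missing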
Example #13
  def xml(self):
    b = etree.TreeBuilder()
    b.start('post', {'id':self.id})

    b.start('title',{})
    b.data(escape(self.title))
    b.end('title')

    b.start('author',{})
    b.data(escape(self.author))
    b.end('author')

    b.start('date',{})
    b.data(str(time.mktime(self.date.timetuple())))
    b.end('date')

    b.start('text',{})
    b.data(escape(self.text))
    b.end('text')

    b.end('post')
    tag = b.close()
    # Sanity check to make sure that etree will allow us 
    # to parse this again later.
    f = StringIO()
    etree.ElementTree(tag).write(f)
    etree.fromstring(f.getvalue())
    return tag
Example #14
def to_xml(obj, root='object', pretty=False, header=True):
    """Convert a dictionary or list to an XML string.

    Args:
        obj: The dictionary/list object to convert.
        root: The name of the root xml element.
    Returns:
        An xml string.
    """
    root_element = ET.Element(root.replace('_', '-'))
    if isinstance(obj, list):
        root_element.set('type', 'array')
        for i in obj:
            element = ET.fromstring(
                to_xml(i, root=singularize(root), header=False))
            root_element.append(element)
    else:
        for key, value in obj.iteritems():
            key = key.replace('_', '-')
            if isinstance(value, dict) or isinstance(value, list):
                element = ET.fromstring(
                    to_xml(value, root=key, header=False))
                root_element.append(element)
            else:
                element = ET.SubElement(root_element, key)
                serialize(value, element)
    if pretty:
        xml_pretty_format(root_element)
    xml_data = ET.tostring(root_element)
    if header:
        return XML_HEADER + '\n' + xml_data
    return xml_data
Example #15
def validate_xml(value):
    value = "<div>" + value + "</div>"
    try:
        etree.fromstring(value)
    except Exception as e:
        print value
        raise ValidationError("Error parsing the xml: %s" % (str(e),))
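The <div> wrapper is what makes fragments with several top-level elements parseable; a quick sketch of the failure it papers over:

import xml.etree.ElementTree as etree

fragment = '<b>one</b><b>two</b>'
try:
    etree.fromstring(fragment)  # two root elements: not a well-formed document
except etree.ParseError as e:
    print('bare fragment fails: %s' % e)
etree.fromstring('<div>' + fragment + '</div>')  # wrapped: parses fine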
Example #16
    def searchAllServers(self, query):
        xml_combined = None
        insertion_point = None
        for server in self.servers:
            root = ET.fromstring(server.getSearchXML(query))

            # iterate the parsed Element directly: ElementTree objects have no
            # getchildren(), and a plain child list has no iter()
            for cont in root.iter('MediaContainer'):
                if xml_combined is None:
                    xml_combined = root
                    insertion_point = cont
                else:
                    insertion_point.append(cont)

        for server in self.sharedServers:
            root = ET.fromstring(server.getSearchXML(query))

            for cont in root.iter('MediaContainer'):
                if xml_combined is None:
                    xml_combined = root
                    insertion_point = cont
                else:
                    insertion_point.append(cont)

        dprint(__name__, 0, "test: {0}", ET.tostring(xml_combined))
        return ET.tostring(xml_combined)
Example #17
 def test_merge_1(self):
     root = et.fromstring(xml_file5)
     x = merge(root)
     y = et.fromstring(xml_file6)
     z = et.tostring(y)
     string = et.tostring(x)
     self.assertEqual(string, z)
Example #18
def get_volumes():
    cmd = globalvars.cmd
    executor = globalvars.executor
    volume_cmd = cmd.get_volume_command()
    res = executor.execute(volume_cmd.get_info())
    root = ElementTree.fromstring(res)
    volumes_xml = root.find("volInfo").find("volumes").findall("volume")

    volumes = list()
    for volume_xml in volumes_xml:
        volume = None
        volume_info = Volume.with_volume_info(volume_xml)
        # Check whether the volume is up or not
        if volume_info.status:
            volume_cmd = cmd.get_volume_command().get_volume(volume_info.name).get_status(VolumeStatusOption.DETAIL)
            res = executor.execute(volume_cmd)
            root = ElementTree.fromstring(res)
            volume_xml = root.find("volStatus").find("volumes").find("volume")
            volume_status = Volume.with_volume_status(volume_xml)
            volume = Volume.merge(volume_status, volume_info)
        else:
            volume = volume_info
        volumes.append(volume)

    return Response(
        response=json.dumps(volumes, default=Volume.to_json),
        mimetype="application/json"
    )
Example #19
def doit(args) :
    ofile1 = args.outfile1
    ofile2 = args.outfile2
    ofile3 = args.outfile3

    xmlstring = "<item>\n<subitem hello='world'>\n<subsub name='moon'>\n<value>lunar</value>\n</subsub>\n</subitem>"
    xmlstring += "<subitem hello='jupiter'>\n<subsub name='moon'>\n<value>IO</value>\n</subsub>\n</subitem>\n</item>"

    # Using etutil's xmlitem class
    
    xmlobj = etutil.xmlitem()
    xmlobj.etree = ET.fromstring(xmlstring)
    
    etwobj = etutil.ETWriter(xmlobj.etree)
    etwobj.serialize_xml(xmlobj.write_to_xml)
    
    ofile1.write(xmlobj.outxmlstr)
    
    # Just using ETWriter
    
    etwobj = etutil.ETWriter( ET.fromstring(xmlstring) )
    etwobj.serialize_xml(ofile2.write)
    
    # Changing parameters
    
    etwobj = etutil.ETWriter( ET.fromstring(xmlstring) )
    etwobj.indentIncr = "    "
    etwobj.indentFirst = ""
    etwobj.serialize_xml(ofile3.write)
    
    # Close files and exit
    ofile1.close()
    ofile2.close()
    ofile3.close()
    return
Example #20
def _process_case_block(domain, case_block, attachments, old_case_id):
    def get_namespace(element):
        m = re.match(r'\{.*\}', element.tag)
        return m.group(0)[1:-1] if m else ''

    def local_attachment(attachment, old_case_id, tag):
        mime = attachment['server_mime']
        size = attachment['attachment_size']
        src = attachment['attachment_src']
        cached_attachment = get_cached_case_attachment(domain, old_case_id, tag)
        attachment_meta, attachment_stream = cached_attachment.get()
        return UploadedFile(attachment_stream, src, size=size, content_type=mime)

    # Remove namespace because it makes looking up tags a pain
    root = ET.fromstring(case_block)
    xmlns = get_namespace(root)
    case_block = re.sub(' xmlns="[^"]+"', '', case_block, count=1)

    root = ET.fromstring(case_block)
    tag = "attachment"
    xml_attachments = root.find(tag)
    ret_attachments = {}

    # find() returns an Element; a childless one is falsy, so test against None
    if xml_attachments is not None:
        for attach in xml_attachments:
            attach.attrib['from'] = 'local'
            attach.attrib['src'] = attachments[attach.tag]['attachment_src']
            ret_attachments[attach.attrib['src']] = local_attachment(attachments[attach.tag], old_case_id, attach.tag)

    # Add namespace back in without { } added by ET
    root.attrib['xmlns'] = xmlns
    return ET.tostring(root), ret_attachments
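The regex dance above re-parses the raw string; a sketch of an alternative that strips namespaces from an already-parsed tree in place (a generic variant, not the project's code):

import xml.etree.ElementTree as ET

def strip_namespaces(root):
    # rewrite {uri}tag Clark notation to a bare tag on every element
    for el in root.iter():
        if '}' in el.tag:
            el.tag = el.tag.split('}', 1)[1]
    return root

root = strip_namespaces(ET.fromstring(
    '<case xmlns="http://example.com/ns"><attachment/></case>'))
print(root.find('attachment') is not None)  # True: lookups no longer need {uri} prefixes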
Example #21
def syncAllTask(request):
	# record the ccd tasks
	an = AssignNotify()
	an.watch('cdchu')
	
	usrlist = UserList().getLogUsersNameList()
	
	host = "172.16.144.11"
	url = "/itsm/Service1.asmx/GetTaskList?users=" + "|".join(usrlist)
	
	conn = httplib.HTTPConnection(host)
	conn.request("GET", url)
	res = conn.getresponse()
	
	xl = ET.fromstring(res.read())
	con = ET.fromstring(xl.text.encode("utf-8"))
	
	contentList = xml2Python(con)
	
	cursor = connection.cursor()
	transaction.commit_unless_managed()
	
	cursor.execute("truncate table dfat_smtasklist")
	for key in contentList:
		addNew(key,smtasklist)
	conn.close()
	
	# check and notify
	an.notify('[email protected];[email protected]', usrlist)
	
	return HttpResponse("ok")
Example #22
    def run(self):
        while True:
            drives = {}
            p = subprocess.Popen(["system_profiler", "SPUSBDataType", "-xml"], stdout=subprocess.PIPE)
            xml = ElementTree.fromstring(p.communicate()[0])
            p.wait()

            xml = self._parseStupidPListXML(xml)
            for dev in self._findInTree(xml, "Mass Storage Device"):
                if "removable_media" in dev and dev["removable_media"] == "yes" and "volumes" in dev and len(dev["volumes"]) > 0:
                    for vol in dev["volumes"]:
                        if "mount_point" in vol:
                            volume = vol["mount_point"]
                            drives[os.path.basename(volume)] = volume

            p = subprocess.Popen(["system_profiler", "SPCardReaderDataType", "-xml"], stdout=subprocess.PIPE)
            xml = ElementTree.fromstring(p.communicate()[0])
            p.wait()

            xml = self._parseStupidPListXML(xml)
            for entry in xml:
                if "_items" in entry:
                    for item in entry["_items"]:
                        for dev in item["_items"]:
                            if "removable_media" in dev and dev["removable_media"] == "yes" and "volumes" in dev and len(dev["volumes"]) > 0:
                                for vol in dev["volumes"]:
                                    if "mount_point" in vol:
                                        volume = vol["mount_point"]
                                        drives[os.path.basename(volume)] = volume

            self.drivesChanged.emit(drives)
            time.sleep(5)
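The -xml output of system_profiler is a property list, so the stdlib can parse it directly instead of hand-walking the DOM; a plistlib-based sketch (plistlib.loads is Python 3.4+):

import plistlib
import subprocess

out = subprocess.check_output(["system_profiler", "SPUSBDataType", "-xml"])
data = plistlib.loads(out)  # a list of dicts mirroring the _items structure above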
Example #23
def from_xml_str(xml_str, handle=None):
    """
    Generates response object from the given xml string.

    Args:
        xml_str (str): xml string

    Returns:
        object (external method or managed object or generic managed object)

    Example:
        xml_str = '''
        <lsServer dn="org-root/ls-testsp" dynamicConPolicyName="test"
        extIPPoolName="ext-mgmt" name="testsp" />
        '''
        root_element = extract_root_elem(xml_str)
    """

    try:
        xml_raw_str = ucsgenutils.add_escape_chars(xml_str)
        root_elem = ET.fromstring(xml_raw_str)
    except ET.ParseError:
        # fall back: scrub invalid characters and re-parse
        recovered_xml = ucsgenutils.remove_invalid_chars(xml_str)
        root_elem = ET.fromstring(recovered_xml)

    if root_elem.tag == "error":
        error_code = root_elem.attrib['errorCode']
        error_descr = root_elem.attrib['errorDescr']
        raise ex.UcsException(error_code, error_descr)

    class_id = ucsgenutils.word_u(root_elem.tag)
    response = ucscoreutils.get_ucs_obj(class_id, root_elem)
    response.from_xml(root_elem, handle)
    return response
Example #24
    def checkCredentials(self):

        if self.useCachedResponse('checkCredentialsCache'):
            xmldata = self.getCachedResponse('checkCredentialsCache')
            BongEnvironment.logger.info(u"using cached response for checkCredentials")
            #BongEnvironment.logger.info(unicode_string(xmldata))
            tree = ET.fromstring(xmldata)
        else:
            url = "http://{server}/api/users.xml?{credentials}".format( server = self.server
                                                                      , credentials = urllib.urlencode({ 'username' : self.username
                                                                                                       , 'password' : self.password }))
            request = urllib2.Request(url)
            response = urllib2.urlopen(request)
            if response.code != 200:
                BongEnvironment.logger.warning(url)
                BongEnvironment.logger.warning("request failed ({code!s} {message!s})".format( code = response.code
                                                                                  , message = response.msg ))
                return False

            BongEnvironment.logger.info("{req} -> ({code!s} {message!s})".format( req = url
                                                                     , code = response.code
                                                                     , message = response.msg ))
            xmldata = response.read()
            BongEnvironment.logger.info(unicode_string(xmldata))
            tree = ET.fromstring(xmldata)

            if self._et_node_text(tree, "status", "false") != 'true':
                BongEnvironment.logger.warning("response contains errors.")
                return False
            else:
                self.cacheResponse('checkCredentialsCache', xmldata)

        return True
Example #25
    def check_source_in_project(self, project, package, verifymd5, deleted=False):

        self._fill_package_list(project)

        if not deleted and package not in self.packages[project]:
            return None, None

        his = self.get_package_history(project, package, deleted)
        if his is None:
            return None, None

        his = ET.fromstring(his)
        historyrevs = dict()
        revs = list()
        for rev in his.findall('revision'):
            historyrevs[rev.find('srcmd5').text] = rev.get('rev')
            revs.append(rev.find('srcmd5').text)
        revs.reverse()
        for _ in range(min(len(revs), 5)):  # check the last few commits
            srcmd5 = revs.pop(0)
            root = self.cached_GET(makeurl(self.apiurl,
                                    ['source', project, package], { 'rev': srcmd5, 'view': 'info'}))
            root = ET.fromstring(root)
            if root.get('verifymd5') == verifymd5:
                return srcmd5, historyrevs[srcmd5]
        return None, None
Example #26
File: soco.py Project: labero/SoCo
    def get_current_track_info(self):
        """ Get information about the currently playing track.

        Returns:
        A dictionary containing the following information about the currently
        playing track: playlist_position, duration, title, artist, album, and
        a link to the album art.

        If we're unable to return data for a field, we'll return an empty
        string. This can happen for all kinds of reasons so be sure to check
        values. For example, a track may not have complete metadata and be
        missing an album name. In this case track['album'] will be an empty string.

        """
        response = self.__send_command(TRANSPORT_ENDPOINT, GET_CUR_TRACK_ACTION, GET_CUR_TRACK_BODY)

        dom = XML.fromstring(response.encode('utf-8'))

        track = {'title': '', 'artist': '', 'album': '', 'album_art': ''}

        track['playlist_position'] = dom.findtext('.//Track')
        track['duration'] = dom.findtext('.//TrackDuration')
        track['uri'] = dom.findtext('.//TrackURI')

        d = dom.findtext('.//TrackMetaData')

        # Duration seems to be '0:00:00' when listening to radio
        if d != '' and track['duration'] == '0:00:00':
            metadata = XML.fromstring(d.encode('utf-8'))

            #Try parse trackinfo
            trackinfo = metadata.findtext('.//{urn:schemas-rinconnetworks-com:metadata-1-0/}streamContent')

            index = trackinfo.find(' - ')

            if index > -1:
                track['artist'] = trackinfo[:index]
                track['title'] = trackinfo[index+3:]
            else:
                logger.warning('Could not handle track info: "%s"', trackinfo)
                logger.warning(traceback.format_exc())
                track['title'] = trackinfo.encode('utf-8')

        # If the speaker is playing from the line-in source, querying for track
        # metadata will return "NOT_IMPLEMENTED".
        elif d != '' and d != 'NOT_IMPLEMENTED':
            # Track metadata is returned in DIDL-Lite format
            metadata = XML.fromstring(d.encode('utf-8'))

            track['title'] = metadata.findtext('.//{http://purl.org/dc/elements/1.1/}title').encode('utf-8')
            track['artist'] = metadata.findtext('.//{http://purl.org/dc/elements/1.1/}creator').encode('utf-8')
            track['album'] = metadata.findtext('.//{urn:schemas-upnp-org:metadata-1-0/upnp/}album').encode('utf-8')

            album_art = metadata.findtext('.//{urn:schemas-upnp-org:metadata-1-0/upnp/}albumArtURI')

            if album_art is not None:
                track['album_art'] = 'http://' + self.speaker_ip + ':1400' + metadata.findtext('.//{urn:schemas-upnp-org:metadata-1-0/upnp/}albumArtURI')

        return track
Example #27
 def _parseXML(self, content):
     global ElementTree
     content = content.rstrip("\r") # FIXME: this seems wrong
     try:
         return ElementTree.fromstring(content)
     except TypeError:
         import xml.etree.ElementTree as ElementTree
         return ElementTree.fromstring(content)
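This lazy re-import approximates the classic fallback idiom; on Python 3.3+ cElementTree was merged into the plain module, so the try/except is only needed on old interpreters:

try:
    import xml.etree.cElementTree as ElementTree  # C accelerator on old Pythons
except ImportError:
    import xml.etree.ElementTree as ElementTree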
Example #28
    def __getitem__(self, value):
        '''
        Random access to spectra if the mzML file is indexed,
        not compressed and not truncated.

        Example:

        >>> spectrum_with_nativeID_100 = msrun[100]

        '''
        answer = None
        if self.info['seekable'] is True:
            if len(self.info['offsets']) == 0:
                print("File does support random access, unfortunately indexlist missing, i.e. type not implemented yet ...", file=sys.stderr)

            if value in self.info['offsets']:
                startPos = self.info['offsets'][value]
                endPos_index = bisect.bisect_right(
                    self.info['offsetList'],
                    self.info['offsets'][value]
                )
                if endPos_index == len(self.info['offsetList']):
                    endPos = os.path.getsize(self.info['filename'])
                else:
                    endPos = self.info['offsetList'][endPos_index]


                self.seeker.seek(startPos, 0)
                data = self.seeker.read(endPos - self.info['offsets'][value])
                try:
                    self.spectrum.initFromTreeObject(
                        cElementTree.fromstring( data )
                    )
                except SyntaxError:
                    # the chunk may carry trailing closing tags
                    # (</spectrumList>, </run> and/or </mzML>); cut them off
                    startingTag = data.split()[0]
                    stopIndex = data.index( '</' + startingTag[1:] + '>')
                    self.spectrum.initFromTreeObject(
                        cElementTree.fromstring(
                            data[:stopIndex + len(startingTag) + 2]
                        )
                    )
                answer = self.spectrum
            else:
                print("Run does not contain spec with native ID {0}".format(value), file=sys.stderr)
                #print(self.info['offsets'].keys())

        else:
            self.iter = iter(cElementTree.iterparse(
                self.info['filename'],
                events=('start', 'end')  # iterparse expects str event names
            ))  # NOTE: 'end' alone might be sufficient

            for _ in self:
                if _['id'] == value:
                    answer = _
                    break
        return answer
Example #29
def _updateCache():
	global _removableCache

	while True:
		drives = []
		if platform.system() == "Windows":
			from ctypes import windll
			import ctypes
			bitmask = windll.kernel32.GetLogicalDrives()
			for letter in string.uppercase:
				if bitmask & 1 and windll.kernel32.GetDriveTypeA(letter + ':/') == 2:
					volumeName = ''
					nameBuffer = ctypes.create_unicode_buffer(1024)
					if windll.kernel32.GetVolumeInformationW(ctypes.c_wchar_p(letter + ':/'), nameBuffer, ctypes.sizeof(nameBuffer), None, None, None, None, 0) == 0:
						volumeName = nameBuffer.value
					if volumeName == '':
						volumeName = 'NO NAME'

					freeBytes = ctypes.c_longlong(0)
					if windll.kernel32.GetDiskFreeSpaceExA(letter + ':/', ctypes.byref(freeBytes), None, None) == 0:
						continue
					if freeBytes.value < 1:
						continue
					drives.append(('%s (%s:)' % (volumeName, letter), letter + ':/', volumeName))
				bitmask >>= 1
		elif platform.system() == "Darwin":
			p = subprocess.Popen(['system_profiler', 'SPUSBDataType', '-xml'], stdout=subprocess.PIPE)
			xml = ElementTree.fromstring(p.communicate()[0])
			p.wait()

			xml = _parseStupidPListXML(xml)
			for dev in _findInTree(xml, 'Mass Storage Device'):
				if 'removable_media' in dev and dev['removable_media'] == 'yes' and 'volumes' in dev and len(dev['volumes']) > 0:
					for vol in dev['volumes']:
						if 'mount_point' in vol:
							volume = vol['mount_point']
							drives.append((os.path.basename(volume), volume + '/', os.path.basename(volume)))

			p = subprocess.Popen(['system_profiler', 'SPCardReaderDataType', '-xml'], stdout=subprocess.PIPE)
			xml = ElementTree.fromstring(p.communicate()[0])
			p.wait()

			xml = _parseStupidPListXML(xml)
			for entry in xml:
				if '_items' in entry:
					for item in entry['_items']:
						for dev in item['_items']:
							if 'removable_media' in dev and dev['removable_media'] == 'yes' and 'volumes' in dev and len(dev['volumes']) > 0:
								for vol in dev['volumes']:
									if 'mount_point' in vol:
										volume = vol['mount_point']
										drives.append((os.path.basename(volume), volume + '/', os.path.basename(volume)))
		else:
			for volume in glob.glob('/media/*'):
				drives.append((os.path.basename(volume), volume + '/', os.path.basename(volume)))

		_removableCache = drives
		time.sleep(1)
Example #30
    def __getitem__(self, value):
        '''
        Random access to spectra if the mzML file is indexed,
        not compressed and not truncated.

        Example:

        >>> spectrum_with_nativeID_100 = msrun[100]

        '''
        answer = None
        if self.info['seekable'] is True:
            if len(self.info['offsets']) == 0:
                raise IOError("File does support random access: index list missing...")

            if value in self.info['offsets']:
                startPos = self.info['offsets'][value]
                endPos_index = bisect.bisect_right(
                    self.info['offsetList'],
                    self.info['offsets'][value]
                )
                if endPos_index == len(self.info['offsetList']):
                    endPos = os.path.getsize(self.info['filename'])
                else:
                    endPos = self.info['offsetList'][endPos_index]

                self.seeker.seek(startPos, 0)
                data = self.seeker.read(endPos - self.info['offsets'][value])
                try:
                    self.spectrum.initFromTreeObject(cElementTree.fromstring(data))
                except SyntaxError:
                    # the chunk may carry trailing closing tags
                    # (</spectrumList>, </run> and/or </mzML>); cut them off
                    startingTag = data.split()[0]
                    stopIndex = data.index('</' + startingTag[1:] + '>')
                    self.spectrum.initFromTreeObject(
                        cElementTree.fromstring(data[:stopIndex + len(startingTag) + 2])
                    )
                answer = self.spectrum
        else:
            # Reopen the file from the beginning if possible
            force_seeking = self.info.get('force_seeking', False)
            if force_seeking is False:
                self.info['fileObject'].close()

                assert self.info['filename'], \
                    'Must specify either filename or index for random spectrum access'
                self.info['fileObject'], _ = self.__open_file(self.info['filename'])
                self.iter = self.__init_iter()

                for spec in self:
                    if spec['id'] == value:
                        answer = spec
                        break

        if answer is None:
            raise KeyError("Run does not contain spec with native ID {0}".format(value))
        else:
            return answer
Example #31
def ImportTrackmateXML(xml_path,
                       Segimage,
                       XYcalibration=1,
                       Zcalibration=1,
                       Tcalibration=1,
                       image=None,
                       Mask=None,
                       mintracklength=2):

    Name = os.path.basename(os.path.splitext(xml_path)[0])
    savedir = os.path.dirname(xml_path)
    with open(xml_path) as xml_file:
        root = et.fromstring(xml_file.read())

    filtered_track_ids = [
        int(track.get('TRACK_ID')) for track in root.find('Model').find(
            'FilteredTracks').findall('TrackID')
    ]

    #Extract the tracks from xml
    tracks = root.find('Model').find('AllTracks')
    #Extract the cell objects from xml
    Spotobjects = root.find('Model').find('AllSpots')

    #Make a dictionary of the unique cell objects with their properties
    Uniqueobjects = {}
    Uniqueproperties = {}

    if Mask is not None:
        if len(Mask.shape) < len(Segimage.shape):
            # T Z Y X
            UpdateMask = np.zeros([
                Segimage.shape[0], Segimage.shape[1], Segimage.shape[2],
                Segimage.shape[3]
            ])
            for i in range(0, UpdateMask.shape[0]):
                for j in range(0, UpdateMask.shape[1]):

                    UpdateMask[i, j, :, :] = Mask[i, :, :]
        else:
            UpdateMask = Mask
        Mask = UpdateMask.astype('uint16')
        TimedMask = BoundaryPoints(Mask, XYcalibration, Zcalibration)

    for frame in Spotobjects.findall('SpotsInFrame'):

        for Spotobject in frame.findall('Spot'):
            #Create object with unique cell ID
            cell_id = int(Spotobject.get("ID"))
            Uniqueobjects[cell_id] = [cell_id]
            Uniqueproperties[cell_id] = [cell_id]
            #Get the TZYX location of the cells in that frame
            Uniqueobjects[cell_id].append([
                Spotobject.get('POSITION_T'),
                Spotobject.get('POSITION_Z'),
                Spotobject.get('POSITION_Y'),
                Spotobject.get('POSITION_X')
            ])
            #Get other properties associated with the Spotobject
            Uniqueproperties[cell_id].append([
                Spotobject.get('MAX_INTENSITY'),
                Spotobject.get('ESTIMATED_DIAMETER'),
                Spotobject.get('ESTIMATED_DIAMETER'),
                Spotobject.get('ESTIMATED_DIAMETER')
            ])

    Tracks = []
    for track in tracks.findall('Track'):

        track_id = int(track.get("TRACK_ID"))
        SpotobjectSourceTarget = []
        if track_id in filtered_track_ids:
            print('Creating Tracklets of TrackID', track_id)
            for edge in track.findall('Edge'):

                sourceID = edge.get('SPOT_SOURCE_ID')
                targetID = edge.get('SPOT_TARGET_ID')
                sourceTime = edge.get('EDGE_TIME')

                SpotobjectSourceTarget.append([sourceID, targetID, sourceTime])

            #Sort the tracks by edge time
            SpotobjectSourceTarget = sorted(SpotobjectSourceTarget,
                                            key=sortTracks,
                                            reverse=False)

            # Get all the IDs, uniquesource, targets attached, leaf, root, splitpoint IDs
            Sources, MultiTargets, RootLeaf, SplitPoints = Multiplicity(
                SpotobjectSourceTarget)

            if len(SplitPoints) > 0:
                SplitPoints = SplitPoints[::-1]
                DividingTrajectory = True

            else:

                DividingTrajectory = False

            # Remove dangling tracklets
            SplitPoints, RootLeaf = PurgeTracklets(
                RootLeaf,
                SplitPoints,
                SpotobjectSourceTarget,
                DividingTrajectory,
                mintracklength=mintracklength)

            tstart = 0
            for sourceID, targetID, EdgeTime in SpotobjectSourceTarget:
                if RootLeaf[0] == sourceID:
                    Source = Uniqueobjects[int(sourceID)][1]
                    tstart = int(float(Source[0]))
                    break

            Tracklets = []
            if DividingTrajectory:
                print("Dividing Trajectory")
                #Make tracklets
                Root = RootLeaf[0]

                Visited = []
                #For the root we need to go forward
                tracklet = []
                tracklet.append(Root)
                trackletid = 0
                RootCopy = Root
                Visited.append(Root)
                while (RootCopy not in SplitPoints
                       and RootCopy not in RootLeaf[1:]):
                    for sourceID, targetID, EdgeTime in SpotobjectSourceTarget:
                        # Search for the target id corresponding to leaf
                        if RootCopy == sourceID:

                            #Once we find the leaf we move a step fwd to its target to find its target
                            RootCopy = targetID
                            if RootCopy in SplitPoints:
                                break
                            if RootCopy in Visited:
                                break
                            Visited.append(targetID)
                            tracklet.append(targetID)

                Tracklets.append([trackletid, tracklet])

                trackletid = 1
                for i in range(1, len(RootLeaf)):
                    Leaf = RootLeaf[i]
                    #For leaf we need to go backward
                    tracklet = []
                    tracklet.append(Leaf)
                    while (Leaf not in SplitPoints and Leaf != Root):
                        for sourceID, targetID, EdgeTime in SpotobjectSourceTarget:
                            # Search for the target id corresponding to leaf
                            if Leaf == targetID:
                                # Include the split points here

                                #Once we find the leaf we move a step back to its source to find its source
                                Leaf = sourceID
                                if Leaf in SplitPoints:
                                    break
                                if Leaf in Visited:
                                    break
                                tracklet.append(sourceID)
                                Visited.append(sourceID)
                    Tracklets.append([trackletid, tracklet])
                    trackletid = trackletid + 1

                # Exclude the split point near root
                for i in range(0, len(SplitPoints) - 1):
                    Start = SplitPoints[i]
                    tracklet = []
                    tracklet.append(Start)
                    OtherSplitPoints = SplitPoints.copy()
                    OtherSplitPoints.pop(i)
                    while Start != Root:  # compare IDs by value, not identity
                        for sourceID, targetID, EdgeTime in SpotobjectSourceTarget:

                            if Start == targetID:

                                Start = sourceID
                                if Start in Visited:
                                    break
                                tracklet.append(sourceID)
                                Visited.append(sourceID)
                                if Start in OtherSplitPoints:
                                    break

                    Tracklets.append([trackletid, tracklet])
                    trackletid = trackletid + 1

            if not DividingTrajectory:
                print('Not Dividing Trajectory')
                if len(RootLeaf) > 0:
                    Root = RootLeaf[0]
                    Leaf = RootLeaf[-1]
                    tracklet = []
                    trackletid = 0
                    tracklet.append(Root)
                    #For non dividing trajectories iterate from Root to the only Leaf
                    while (Root != Leaf):
                        for sourceID, targetID, EdgeTime in SpotobjectSourceTarget:
                            if Root == sourceID:
                                tracklet.append(sourceID)
                                Root = targetID
                                if Root == Leaf:
                                    break
                            else:
                                break
                    Tracklets.append([trackletid, tracklet])

            # Sort the Tracklets in time

            SortedTracklets = TrackletSorter(Tracklets, SpotobjectSourceTarget)
            # Create object trackID, T, Z, Y, X, speed, generationID, trackletID

            #For each tracklet create Track and Speed objects
            DictTrackobjects, DictSpeedobjects, Trackobjects, Trackletobjects = TrackobjectCreator(
                SortedTracklets, Uniqueobjects, XYcalibration, Zcalibration,
                Tcalibration)
            Tracks.append([
                track_id, DictTrackobjects, DictSpeedobjects, Trackobjects,
                Trackletobjects, SortedTracklets, tstart
            ])

    #Sort tracks by their ID
    Tracks = sorted(Tracks, key=sortID, reverse=False)

    # Write all tracks to csv file as ID, T, Z, Y, X
    ID = []
    StartID = {}

    RegionID = {}
    VolumeID = {}
    locationID = {}

    for trackid, DictTrackobjects, DictSpeedobjects, Trackobjects, Trackletobjects, SortedTracklets, tstart in Tracks:

        print('Computing Tracklets for TrackID:', trackid)
        RegionID[trackid] = [trackid]
        VolumeID[trackid] = [trackid]
        locationID[trackid] = [trackid]
        StartID[trackid] = [trackid]
        ID.append(trackid)
        TrackletRegionID = {}
        TrackletVolumeID = {}
        TrackletlocationID = {}

        StartID[trackid].append(tstart)

        Tloc = []
        Zloc = []
        Yloc = []
        Xloc = []
        Speedloc = []
        DistanceBoundary = []
        ProbabilityInside = []
        SlocZ = []
        SlocY = []
        SlocX = []
        Vloc = []
        Iloc = []
        for j in tqdm(range(0, len(Trackletobjects))):

            Spottrackletid = Trackletobjects[j]
            TrackletRegionID[Spottrackletid] = [Spottrackletid]
            TrackletVolumeID[Spottrackletid] = [Spottrackletid]
            TrackletlocationID[Spottrackletid] = [Spottrackletid]
            TrackletLocation = []
            TrackletRegion = []
            TrackletVolume = []

            DictSpotobject = DictTrackobjects[Spottrackletid][1]
            DictVelocitySpotobject = DictSpeedobjects[Spottrackletid][1]

            for i in range(0, len(DictSpotobject)):

                Spotobject = DictSpotobject[i]
                VelocitySpotobject = DictVelocitySpotobject[i]
                t = int(float(Spotobject[0]))
                z = int(float(Spotobject[1]))
                y = int(float(Spotobject[2]))
                x = int(float(Spotobject[3]))

                speed = (float(VelocitySpotobject))
                Tloc.append(t)
                Zloc.append(z)
                Yloc.append(y)
                Xloc.append(x)
                Speedloc.append(speed)
                if t < Segimage.shape[0]:
                    CurrentSegimage = Segimage[t, :]
                    if image is not None:
                        Currentimage = image[t, :]
                        properties = measure.regionprops(
                            CurrentSegimage, Currentimage)
                    if image is None:
                        properties = measure.regionprops(
                            CurrentSegimage, CurrentSegimage)

                    TwoDCoordinates = [(prop.centroid[1], prop.centroid[2])
                                       for prop in properties]
                    TwoDtree = spatial.cKDTree(TwoDCoordinates)
                    TwoDLocation = (y, x)
                    closestpoint = TwoDtree.query(TwoDLocation)
                    for prop in properties:

                        if int(prop.centroid[1]) == int(
                                TwoDCoordinates[closestpoint[1]][0]) and int(
                                    prop.centroid[2]) == int(
                                        TwoDCoordinates[closestpoint[1]][1]):

                            sizeZ = abs(prop.bbox[0] -
                                        prop.bbox[3]) * Zcalibration
                            sizeY = abs(prop.bbox[1] -
                                        prop.bbox[4]) * XYcalibration
                            sizeX = abs(prop.bbox[2] -
                                        prop.bbox[5]) * XYcalibration
                            Area = prop.area
                            intensity = np.sum(prop.image)
                            Vloc.append(Area)
                            SlocZ.append(sizeZ)
                            SlocY.append(sizeY)
                            SlocX.append(sizeX)
                            Iloc.append(intensity)
                            TrackletRegion.append([1, sizeZ, sizeY, sizeX])

                            # Compute distance to the boundary
                            if Mask is not None:

                                testlocation = (z * Zcalibration,
                                                y * XYcalibration,
                                                x * XYcalibration)
                                tree, indices, masklabel, masklabelvolume = TimedMask[
                                    str(int(t))]

                                cellradius = math.sqrt(sizeX * sizeX +
                                                       sizeY * sizeY) / 4

                                Regionlabel = Mask[int(t),
                                                   int(z),
                                                   int(y),
                                                   int(x)]
                                for k in range(0, len(masklabel)):
                                    currentlabel = masklabel[k]
                                    currentvolume = masklabelvolume[k]
                                    currenttree = tree[k]
                                    #Get the location and distance to the nearest boundary point
                                    distance, location = currenttree.query(
                                        testlocation)
                                    distance = max(0, distance - cellradius)
                                    if currentlabel == Regionlabel and Regionlabel > 0:
                                        probabilityInside = max(
                                            0, (distance) / currentvolume)
                                    else:

                                        probabilityInside = 0
                            else:
                                distance = 0
                                probabilityInside = 0

                            DistanceBoundary.append(distance)
                            ProbabilityInside.append(probabilityInside)
                            TrackletVolume.append([
                                Area, intensity, speed, distance,
                                probabilityInside
                            ])
                            TrackletLocation.append([t, z, y, x])

                TrackletlocationID[Spottrackletid].append(TrackletLocation)
                TrackletVolumeID[Spottrackletid].append(TrackletVolume)
                TrackletRegionID[Spottrackletid].append(TrackletRegion)

        locationID[trackid].append(TrackletlocationID)
        RegionID[trackid].append(TrackletRegionID)
        VolumeID[trackid].append(TrackletVolumeID)

    df = pd.DataFrame(list(
        zip(ID, Tloc, Zloc, Yloc, Xloc, DistanceBoundary, ProbabilityInside,
            SlocZ, SlocY, SlocX, Vloc, Iloc, Speedloc)),
                      index=None,
                      columns=[
                          'ID', 't', 'z', 'y', 'x', 'distBoundary',
                          'probInside', 'sizeZ', 'sizeY', 'sizeX', 'volume',
                          'intensity', 'speed'
                      ])

    df.to_csv(savedir + '/' + 'Extra' + Name + '.csv')

    # create the final data array: track_id, T, Z, Y, X

    df = pd.DataFrame(list(zip(ID, Tloc, Zloc, Yloc, Xloc)),
                      index=None,
                      columns=['ID', 't', 'z', 'y', 'x'])

    df.to_csv(savedir + '/' + 'TrackMate' + Name + '.csv')

    return RegionID, VolumeID, locationID, Tracks, ID, StartID
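
A note on the boundary-distance step above: it queries a per-timepoint KD-tree of mask boundary points for the nearest neighbour and subtracts an estimated cell radius. A minimal standalone sketch of that query using scipy's cKDTree, with illustrative data in place of the original TimedMask structures:

# Minimal sketch of the boundary-distance query, assuming scipy is
# available; the coordinates below are illustrative only.
import numpy as np
from scipy.spatial import cKDTree

boundary_points = np.array([[0.0, 0.0], [0.0, 10.0], [10.0, 0.0], [10.0, 10.0]])
tree = cKDTree(boundary_points)

cell_centroid = (4.0, 5.0)
cell_radius = 1.5

distance, index = tree.query(cell_centroid)  # nearest boundary point
distance = max(0, distance - cell_radius)    # clamp so an overlapping cell reports 0
print(distance, boundary_points[index])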
Example #32
    os.makedirs(pickledir)

#pd = dict()
num_files = 0
for fname in os.listdir(sourcedir):
    if fname.endswith('xml'):
        #		print(fname)
        nfname = re.sub('xml$', 'pkl', fname)
        if os.path.exists(os.path.join(pickledir, nfname)):
            print('Already have ' + nfname)
            continue
        with open(os.path.join(sourcedir, fname), 'rb') as myfile:
            #			string = myfile.read().decode('iso-8859-1').encode('utf8')
            string = myfile.read()
        try:
            root = ET.fromstring(string)
        except ET.ParseError:
            continue

        node = root.find('.//SourceDesc/type')
        if node is not None and node.text == 'vers':
            #		if root.find('.//SourceDesc/type').text == 'vers':
            num_files = num_files + 1
            print(str(num_files) + ' ' + fname)
            string = str()
            work = list()
            ln = 1
            for line in root.findall('.//body//l'):
                verse = str(line.text)
                id = int(line.attrib['id'])
                #				print(str(id))
Example #33
    def test_parse_volume_no_source(self):
        with self.make_env() as env:
            disk_xml = etree.fromstring("""<disk/>""")

            chain = env.drive.parse_volume_chain(disk_xml)
            self.assertIsNone(chain)
Example #34
import csv
import xml.etree.cElementTree as et
from os.path import join

data_dir = '/var/www/FlaskApp/FlaskApp/static/data/community_health/'

nan = float('NaN')

data = {}
with open(join(data_dir, 'US_Counties.csv')) as f:
    next(f)
    reader = csv.reader(f, delimiter=',', quotechar='"')
    for row in reader:
        name, dummy, state, dummy, geometry, dummy, dummy, dummy, dummy, state_id, county_id, dummy, dummy = row
        xml = et.fromstring(geometry)
        lats = []
        lons = []
        for i, poly in enumerate(
                xml.findall('.//outerBoundaryIs/LinearRing/coordinates')):
            if i > 0:
                lats.append(nan)
                lons.append(nan)
            coords = (c.split(',')[:2] for c in poly.text.split())
            lat, lon = list(
                zip(*[(float(lat), float(lon)) for lon, lat in coords]))
            lats.extend(lat)
            lons.extend(lon)
        data[(int(state_id), int(county_id))] = {
            'name': name,
            'state': state,
Example #35
def deprecated_synclog_id_from_restore_payload(restore_payload):
    """DEPRECATED use <MockDevice>.sync().restore_id"""
    element = ElementTree.fromstring(restore_payload)
    return element.findall('{%s}Sync' % SYNC_XMLNS)[0].findall(
        '{%s}restore_id' % SYNC_XMLNS)[0].text
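
ElementTree has no default-namespace registry, so the helper above spells out the namespace in Clark notation ('{namespace-uri}tag') on every lookup. A small self-contained sketch of that idiom; the namespace URI and payload here are made up, not the real SYNC_XMLNS schema:

# Sketch of namespaced lookups with Clark notation (made-up namespace).
from xml.etree import ElementTree

NS = 'http://example.com/sync'
payload = '<root xmlns="%s"><Sync><restore_id>abc123</restore_id></Sync></root>' % NS

element = ElementTree.fromstring(payload)
sync = element.findall('{%s}Sync' % NS)[0]
print(sync.findall('{%s}restore_id' % NS)[0].text)  # -> abc123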
Example #36
def get_valid_to_time_from_idf_string(idf_string):
    tree_root = eTree.fromstring(idf_string)
    valid_to_date = tree_root.attrib["valid-to"]
    return DateAndTime(valid_to_date)
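
The helper above only reads an attribute off the document root; DateAndTime is a type from the surrounding project, so a stdlib-only sketch of the parsing step looks like this (the instrument XML is illustrative):

# Stdlib-only sketch of reading a root attribute from an IDF-style string.
import xml.etree.cElementTree as eTree

idf_string = '<instrument name="demo" valid-to="2100-01-31 23:59:59"/>'
tree_root = eTree.fromstring(idf_string)
print(tree_root.attrib["valid-to"])  # -> 2100-01-31 23:59:59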
Example #37
 def parse_line(self, response):
     "解析班车"
     province_list = ('吉林', '辽宁', '河北', '黑龙江', '广东', "云南", '山西', '山东',
                      '广西壮族自治', '江西', '河南', '浙江', '安徽', '湖北', '湖南', "贵州",
                      '陕西', '江苏', '内蒙古自治', "四川", '海南', '山东', '甘肃', '青海',
                      '宁夏回族自治', "新疆维吾尔自治", '西藏自治', '贵州', '广西')
     start = response.meta["start"]
     end = response.meta["end"]
     sdate = response.meta["sdate"]
     res = json.loads(response.body)
     self.logger.info("finish %s ==> %s" %
                      (start["station_name"], end["zdmc"]))
     self.mark_done(start['station_name'], end["zdmc"], sdate)
     xml_text = re.findall(
         r"<getScheduledBusResult>(.*)</getScheduledBusResult>",
         res.get('msg', ''), re.S)[0]
     root = ET.fromstring(xml_text)
     node_find = root.find('Body')
     if node_find.attrib['size'] == '0':
         return
     res = node_find.findall('ScheduledBus')
     for d in res:
         s_sta_name = start['station_name']
         s_sta_id = start['czbh']
         d_city_name = end['zdmc']
         if len(d_city_name) >= 4:
             if d_city_name.startswith(province_list):
                 for j in province_list:
                     if d_city_name.startswith(j):
                         d_city_name = d_city_name.replace(j, '')
                         break
         d_sta_name = d.find('MDZMC').text
         drv_time = d.find('FCSJ').text
         distance = d.find('LC').text
         seat_type = d.find('CXMC').text
         bus_num = d.find('CCBH').text
         full_price = d.find('PJ').text
         left_tickets = d.find('YPZS').text
         d_city_id = d.find('MDZBH').text
         attrs = dict(
             s_province='海南',
             s_city_name=start['city_name'],
             s_city_id='',
             s_city_code=get_pinyin_first_litter(unicode(
                 start['city_name'])),
             s_sta_name=s_sta_name,
             s_sta_id=s_sta_id,
             d_city_name=d_city_name,
             d_city_code=get_pinyin_first_litter(d_city_name),
             d_city_id=d_city_id,
             d_sta_name=d_sta_name,
             d_sta_id='',
             drv_date=sdate,
             drv_time=drv_time,
             drv_datetime=dte.strptime("%s %s" % (sdate, drv_time),
                                       "%Y-%m-%d %H:%M"),
             distance=distance,
             vehicle_type="",
             seat_type=seat_type,
             bus_num=bus_num,
             full_price=float(full_price),
             half_price=float(full_price) / 2,
             fee=0,
             crawl_datetime=dte.now(),
             extra_info={},
             left_tickets=int(left_tickets),
             crawl_source="hainky",
             shift_id='',
         )
         yield LineItem(**attrs)
Example #38
 def parse_xml(self, fileReport):
     try:
         reportTree = ET.fromstring(fileReport)
     except SyntaxError, err:
         print('SyntaxError: %s. %s' % (err, fileReport))
         return None
Example #39
def directory_number_pull_ph():
    login_url = 'https://0.0.0.0/axl/'
    cucm_version_actions = 'CUCM:DB ver=11.5 listLine'
    username = '******'
    password = r'password'
    soap_data = '<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" ' \
                'xmlns:ns="http://www.cisco.com/AXL/API/8.5"><soapenv:Header/><soapenv:Body>' \
                '<ns:listLine><searchCriteria><description>%%</description></searchCriteria><returnedTags>' \
                '<pattern></pattern><description></description></returnedTags></ns' \
                ':listLine></soapenv:Body></soapenv:Envelope> '
    soap_headers = {
        'Content-type': 'text/xml',
        'SOAPAction': cucm_version_actions
    }

    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += 'HIGH:!DH:!aNULL'

    try:
        requests.packages.urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHERS_LIST += 'HIGH:!DH:!aNULL'
    except AttributeError:
        # pyopenssl support is not used / needed / available
        pass

    try:
        axl_request = requests.post(login_url,
                                    data=soap_data,
                                    headers=soap_headers,
                                    verify=False,
                                    auth=(username, password))
    except ConnectionError:
        print("A VPN connection is required to list free extensions")
        return None
    plain_txt = axl_request.text
    root = ET.fromstring(plain_txt)

    list_numbers = []
    extension_range_zagreb = list(range(8500, 8599)) + list(range(
        8700, 8800)) + [4111, 4801, 4804]
    shared_line_exstension_zagreb = []
    reserved_line_exstenion_zagreb = [
        8570, 8579, 8581, 8583, 8590, 8595, 8598, 8700, 8740, 8750, 8760, 8770,
        8780, 8790
    ]

    extension_range_split = list(range(8600, 8700))
    shared_line_exstension_split = [8603, 8607, 8615]

    for device in root.iter('line'):
        list_numbers.append(device.find('pattern').text)
        # To show which user a number belongs to
        # list_description.append(device.find('description').text)

    list_without_quotes = []

    for line in list_numbers:
        line = line.replace("'", "").replace("*", "")
        list_without_quotes.append(line)

    clean_list_numbers = [int(i) for i in list_without_quotes]

    # there are no reserved numbers in Split
    free_agent_extensions_split = difference_list(extension_range_split,
                                                  clean_list_numbers)
    shared_line_numbers_split = shared_line(free_agent_extensions_split,
                                            shared_line_exstension_split)

    free_agent_extensions_zagreb = difference_list(extension_range_zagreb,
                                                   clean_list_numbers)
    shared_line_numbers_zagreb = shared_line(free_agent_extensions_zagreb,
                                             shared_line_exstension_zagreb)
    reserver_line_numbers_zagreb = reserved_line(
        shared_line_numbers_zagreb, reserved_line_exstenion_zagreb)

    return reserver_line_numbers_zagreb + ' Split: ' + str(shared_line_numbers_split)
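
difference_list, shared_line and reserved_line are project helpers that this example does not show. Judging by how they are used, difference_list presumably keeps the extensions in a range that are not already assigned; a hypothetical sketch under that assumption:

# Hypothetical sketch of the difference_list helper (the real
# implementation is not shown in this example).
def difference_list(extension_range, assigned_numbers):
    assigned = set(assigned_numbers)
    return [ext for ext in extension_range if ext not in assigned]

print(difference_list([8600, 8601, 8602], [8601]))  # -> [8600, 8602]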
Example #40
 def fromstring(cls, data, transform=None):
     self = cls(transform=transform)
     self.root = ElementTree.fromstring(data)
     return self
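
This is the usual alternate-constructor pattern: a classmethod builds an instance and attaches the parsed tree. A self-contained sketch with an illustrative class (not the original one):

# Sketch of the alternate-constructor pattern; Doc is illustrative.
from xml.etree import ElementTree

class Doc(object):
    def __init__(self, transform=None):
        self.transform = transform
        self.root = None

    @classmethod
    def fromstring(cls, data, transform=None):
        self = cls(transform=transform)
        self.root = ElementTree.fromstring(data)
        return self

doc = Doc.fromstring('<config version="1"/>')
print(doc.root.get('version'))  # -> 1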
Example #41
    def check_one_package(self, package):
        lproject = self.lookup.get(package, None)
        if package not in self.packages[self.from_prj]:
            logger.info("{} vanished".format(package))
            if self.lookup.get(package):
                del self.lookup[package]
                self.lookup_changes += 1
            return

        root = ET.fromstring(self._get_source_package(self.from_prj, package, None))
        linked = root.find('linked')
        if linked is not None and linked.get('package') != package:
            lstring = 'subpackage of {}'.format(linked.get('package'))
            if lstring != lproject:
                logger.warn("{} links to {} (was {})".format(package, linked.get('package'), lproject))
                self.lookup[package] = lstring
                self.lookup_changes += 1
            else:
                logger.debug("{} correctly marked as subpackage of {}".format(package, linked.get('package')))
            return

        pm = self.package_metas[package]
        devel = pm.find('devel')
        if devel is not None or (lproject is not None and lproject.startswith('Devel;')):
            develprj = None
            develpkg = None
            if devel is None:
                (dummy, develprj, develpkg) = lproject.split(';')
                logger.warn('{} lacks devel project setting {}/{}'.format(package, develprj, develpkg))
            else:
                develprj = devel.get('project')
                develpkg = devel.get('package')
            srcmd5, rev = self.check_source_in_project(develprj, develpkg,
                                                       root.get('verifymd5'))
            if srcmd5:
                lstring = 'Devel;{};{}'.format(develprj, develpkg)
                if package not in self.lookup or lstring != self.lookup[package]:
                    logger.debug("{} from devel {}/{} (was {})".format(package, develprj, develpkg, lproject))
                    self.lookup[package] = lstring
                    self.lookup_changes += 1
                else:
                    logger.debug("{} lookup from {}/{} is correct".format(package, develprj, develpkg))
                return

        if lproject and lproject != 'FORK' and not lproject.startswith('subpackage '):
            srcmd5, rev = self.check_source_in_project(lproject, package, root.get('verifymd5'))
            if srcmd5:
                logger.debug("{} lookup from {} is correct".format(package, lproject))
                # if it's from Factory we check if the package can be found elsewhere meanwhile
                if lproject != 'openSUSE:Factory':
                    return
            elif lproject == 'openSUSE:Factory' and package not in self.packages[lproject]:
                his = self.get_package_history(lproject, package, deleted=True)
                if his:
                    logger.debug("{} got dropped from {}".format(package, lproject))

        logger.debug("check where %s came from", package)
        foundit = False
        for project in self.project_preference_order:
            srcmd5, rev = self.check_source_in_project(project, package, root.get('verifymd5'))
            if srcmd5:
                if project != lproject:
                    if project == 'openSUSE:Leap:42.2:SLE-workarounds':
                        logger.info('{} is from {} but should come from {}'.format(package, project, lproject))
                    else:
                        logger.info('{} -> {} (was {})'.format(package, project, lproject))
                        self.lookup[package] = project
                        self.lookup_changes += 1
                else:
                    logger.debug('{} still coming from {}'.format(package, project))
                foundit = True
                break

        if not foundit:
            if lproject == 'FORK':
                logger.debug("{}: lookup is correctly marked as fork".format(package))
            else:
                logger.info('{} is a fork (was {})'.format(package, lproject))
                self.lookup[package] = 'FORK'
                self.lookup_changes += 1
Example #42
def _convert_xml_to_service_properties(response):
    '''
    <?xml version="1.0" encoding="utf-8"?>
    <StorageServiceProperties>
        <Logging>
            <Version>version-number</Version>
            <Delete>true|false</Delete>
            <Read>true|false</Read>
            <Write>true|false</Write>
            <RetentionPolicy>
                <Enabled>true|false</Enabled>
                <Days>number-of-days</Days>
            </RetentionPolicy>
        </Logging>
        <HourMetrics>
            <Version>version-number</Version>
            <Enabled>true|false</Enabled>
            <IncludeAPIs>true|false</IncludeAPIs>
            <RetentionPolicy>
                <Enabled>true|false</Enabled>
                <Days>number-of-days</Days>
            </RetentionPolicy>
        </HourMetrics>
        <MinuteMetrics>
            <Version>version-number</Version>
            <Enabled>true|false</Enabled>
            <IncludeAPIs>true|false</IncludeAPIs>
            <RetentionPolicy>
                <Enabled>true|false</Enabled>
                <Days>number-of-days</Days>
            </RetentionPolicy>
        </MinuteMetrics>
        <Cors>
            <CorsRule>
                <AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins>
                <AllowedMethods>comma-separated-list-of-HTTP-verb</AllowedMethods>
                <MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds>
                <ExposedHeaders>comma-separated-list-of-response-headers</ExposedHeaders>
                <AllowedHeaders>comma-separated-list-of-request-headers</AllowedHeaders>
            </CorsRule>
        </Cors>
    </StorageServiceProperties>
    '''
    if response is None or response.body is None:
        return None

    service_properties_element = ETree.fromstring(response.body)
    service_properties = ServiceProperties()

    # Logging
    logging = service_properties_element.find('Logging')
    if logging is not None:
        service_properties.logging = Logging()
        service_properties.logging.version = logging.find('Version').text
        service_properties.logging.delete = _bool(logging.find('Delete').text)
        service_properties.logging.read = _bool(logging.find('Read').text)
        service_properties.logging.write = _bool(logging.find('Write').text)

        _convert_xml_to_retention_policy(
            logging.find('RetentionPolicy'),
            service_properties.logging.retention_policy)
    # HourMetrics
    hour_metrics_element = service_properties_element.find('HourMetrics')
    if hour_metrics_element is not None:
        service_properties.hour_metrics = Metrics()
        _convert_xml_to_metrics(hour_metrics_element,
                                service_properties.hour_metrics)

    # MinuteMetrics
    minute_metrics_element = service_properties_element.find('MinuteMetrics')
    if minute_metrics_element is not None:
        service_properties.minute_metrics = Metrics()
        _convert_xml_to_metrics(minute_metrics_element,
                                service_properties.minute_metrics)

    # CORS
    cors = service_properties_element.find('Cors')
    if cors is not None:
        service_properties.cors = list()
        for rule in cors.findall('CorsRule'):
            allowed_origins = rule.find('AllowedOrigins').text.split(',')

            allowed_methods = rule.find('AllowedMethods').text.split(',')

            max_age_in_seconds = int(rule.find('MaxAgeInSeconds').text)

            cors_rule = CorsRule(allowed_origins, allowed_methods,
                                 max_age_in_seconds)

            exposed_headers = rule.find('ExposedHeaders').text
            if exposed_headers is not None:
                cors_rule.exposed_headers = exposed_headers.split(',')

            allowed_headers = rule.find('AllowedHeaders').text
            if allowed_headers is not None:
                cors_rule.allowed_headers = allowed_headers.split(',')

            service_properties.cors.append(cors_rule)

    # Target version
    target_version = service_properties_element.find('DefaultServiceVersion')
    if target_version is not None:
        service_properties.target_version = target_version.text

    return service_properties
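
Each branch above follows the same pattern: find a child element, read its .text, and coerce the string. A stdlib-only sketch of the Logging branch over a minimal document; the _bool stand-in below is an assumption, not the SDK's helper:

# Stdlib-only sketch of the Logging branch; _bool here is a stand-in.
from xml.etree import ElementTree as ETree

body = '''<StorageServiceProperties>
    <Logging>
        <Version>1.0</Version>
        <Delete>true</Delete>
        <Read>false</Read>
        <Write>true</Write>
    </Logging>
</StorageServiceProperties>'''

def _bool(value):
    return value.lower() == 'true'

root = ETree.fromstring(body)
logging_element = root.find('Logging')
if logging_element is not None:
    print(logging_element.find('Version').text)        # -> 1.0
    print(_bool(logging_element.find('Delete').text))  # -> True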
Example #43
def get_serial():
    try:
        data = dispatch(registration.Serial.rpc_serial())
        return registration.Serial.from_element(ET.fromstring(data.xml))
    except (RPCError, TimeoutExpiredError):
        return None
Example #44
def get_registration():
    try:
        data = dispatch(registration.RegNum.rpc_get())
        return registration.RegNum.from_element(ET.fromstring(data.xml))
    except (RPCError, TimeoutExpiredError):
        return None
Example #45
 def __init__(self, output):
     self.xml = ET.fromstring(output)
     self.issues = self.xml.findall("Issues/Issue")
Example #46
    def fetch(self, xml=None, sequence=None, **kwargs):
        """Get Blast record from url or file.

        :arg sequence: an object with an associated sequence string 
            or a sequence string itself
        :type sequence: :class:`.Atomic`, :class:`.Sequence`, str

        :arg xml: blast search results in XML format or an XML file that
            contains the results or a filename for saving the results or None
        :type xml: str

        :arg timeout: amount of time until the query times out in seconds
            default value is 120
        :type timeout: int
        """
        if self.isSuccess:
            LOGGER.warn("The record already exists so no further search is performed")
            return True
            
        if sequence is None:
            sequence = self._sequence

        if xml is None:
            xml = self._xml

        import xml.etree.cElementTree as ET
        have_xml = False
        filename = None
        if xml is not None:
            if len(xml) < 100:
                # xml likely contains a filename
                if os.path.isfile(xml):
                    # read the contents
                    try:
                        xml = ET.parse(xml)
                        root = xml.getroot()
                        have_xml = True
                    except Exception:
                        raise ValueError('could not parse xml from xml file')
                else:
                    # xml contains a filename for writing
                    filename = xml
            else:
                try:
                    if isinstance(xml, list):
                        root = ET.fromstringlist(xml)
                    elif isinstance(xml, str):
                        root = ET.fromstring(xml)
                except Exception:
                    raise ValueError('xml is not a filename and does not look like'
                                    ' a valid XML string')
                else:
                    have_xml = True

        if have_xml is False:
            # we still need to run a blast
            headers = {'User-agent': 'ProDy'}
            query = [('DATABASE', 'pdb'), ('ENTREZ_QUERY', '(none)'),
                    ('PROGRAM', 'blastp'),]

            expect = float(kwargs.pop('expect', 10e-10))
            if expect <= 0:
                raise ValueError('expect must be a positive number')
            query.append(('EXPECT', expect))
            hitlist_size = int(kwargs.pop('hitlist_size', 250))
            if hitlist_size <= 0:
                raise ValueError('hitlist_size must be a positive integer')
            query.append(('HITLIST_SIZE', hitlist_size))
            query.append(('QUERY', sequence))
            query.append(('CMD', 'Put'))

            sleep = float(kwargs.pop('sleep', 2))
            timeout = float(kwargs.pop('timeout', self._timeout))
            self._timeout = timeout

            try:
                import urllib.parse
                urlencode = lambda data: bytes(urllib.parse.urlencode(data), 'utf-8')
            except ImportError:
                from urllib import urlencode

            url = 'https://blast.ncbi.nlm.nih.gov/Blast.cgi'

            data = urlencode(query)
            LOGGER.timeit('_prody_blast')
            LOGGER.info('Blast searching NCBI PDB database for "{0}..."'
                        .format(sequence[:5]))
            handle = openURL(url, data=data, headers=headers)

            html = handle.read()
            index = html.find(b'RID =')
            if index == -1:
                raise Exception('NCBI did not return expected response.')
            else:
                last = html.find(b'\n', index)
                rid = html[index + len('RID ='):last].strip()

            query = [('ALIGNMENTS', 500), ('DESCRIPTIONS', 500),
                    ('FORMAT_TYPE', 'XML'), ('RID', rid), ('CMD', 'Get')]
            data = urlencode(query)

            while True:
                LOGGER.sleep(int(sleep), 'to reconnect to NCBI for search results.')
                LOGGER.write('Connecting to NCBI for search results...')
                handle = openURL(url, data=data, headers=headers)
                results = handle.read()
                index = results.find(b'Status=')
                LOGGER.clear()
                if index < 0:
                    break
                last = results.index(b'\n', index)
                status = results[index+len('Status='):last].strip()
                if status.upper() == b'READY':
                    break
                sleep = int(sleep * 1.5)
                if LOGGER.timing('_prody_blast') > timeout:
                    LOGGER.warn('Blast search time out.')
                    return False
            
            LOGGER.clear()
            LOGGER.report('Blast search completed in %.1fs.', '_prody_blast')

            root = ET.XML(results)
            try:
                ext_xml = filename.lower().endswith('.xml')
            except AttributeError:
                pass
            else:
                if not ext_xml:
                    filename += '.xml'
                out = open(filename, 'w')
                if PY3K:
                    out.write(results.decode())
                else:
                    out.write(results)
                out.close()
                LOGGER.info('Results are saved as {0}.'.format(repr(filename)))

        root = dictElement(root, 'BlastOutput_')

        if root['db'] != 'pdb':
            raise ValueError('blast search database in xml must be "pdb"')
        if root['program'] != 'blastp':
            raise ValueError('blast search program in xml must be "blastp"')
        self._param = dictElement(root['param'][0], 'Parameters_')

        query_len = int(root['query-len'])
        if sequence and len(sequence) != query_len:
            raise ValueError('query-len and the length of the sequence do not '
                            'match, xml data may not be for given sequence')
        hits = []
        for iteration in root['iterations']:
            for hit in dictElement(iteration, 'Iteration_')['hits']:
                hit = dictElement(hit, 'Hit_')
                data = dictElement(hit['hsps'][0], 'Hsp_')
                for key in ['align-len', 'gaps', 'hit-frame', 'hit-from',
                            'hit-to', 'identity', 'positive', 'query-frame',
                            'query-from', 'query-to']:
                    data[key] = int(data[key])
                data['query-len'] = query_len
                for key in ['evalue', 'bit-score', 'score']:
                    data[key] = float(data[key])
                p_identity = 100.0 * data['identity'] / (data['query-to'] -
                                                    data['query-from'] + 1)
                data['percent_identity'] = p_identity
                p_overlap = (100.0 * (data['align-len'] - data['gaps']) /
                            query_len)
                data['percent_coverage'] = p_overlap
                
                for item in (hit['id'] + hit['def']).split('>gi'):
                    head, title = item.split(None, 1)
                    head = head.split('|')
                    pdb_id = head[-2].lower()
                    chain_id = head[-1][:1]
                    pdbch = dict(data)
                    pdbch['pdb_id'] = pdb_id
                    pdbch['chain_id'] = chain_id
                    pdbch['title'] = (head[-1][1:] + title).strip()
                    hits.append((p_identity, p_overlap, pdbch))
        hits.sort(key=lambda hit: hit[0], reverse=True)
        self._hits = hits
        
        return True
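
The NCBI round trip above is: submit with CMD=Put, scrape the request ID ('RID =') out of the returned HTML, then poll with CMD=Get until the response carries Status=READY. A short sketch of just the scraping, on canned bytes instead of a live response:

# Sketch of the RID/Status scraping on canned bytes.
html = b'...\n    RID = ABC123XYZ\n...'
index = html.find(b'RID =')
if index != -1:
    last = html.find(b'\n', index)
    rid = html[index + len('RID ='):last].strip()
    print(rid)  # -> b'ABC123XYZ'

results = b'...\nStatus=READY\n...'
index = results.find(b'Status=')
last = results.index(b'\n', index)
status = results[index + len('Status='):last].strip()
print(status.upper() == b'READY')  # -> True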
Example #47
def doit(args):

    fields = [
        "copyright",
        "openTypeNameDescription",
        "openTypeNameDesigner",
        "openTypeNameDesignerURL",
        "openTypeNameLicense",  # General feilds
        "openTypeNameLicenseURL",
        "openTypeNameManufacturer",
        "openTypeNameManufacturerURL",
        "openTypeOS2CodePageRanges",
        "openTypeOS2UnicodeRanges",
        "openTypeOS2VendorID",
        "trademark",
        "year",
        "openTypeNameVersion",
        "versionMajor",
        "versionMinor",  # Version fields
        "ascender",
        "descender",
        "openTypeHheaAscender",
        "openTypeHheaDescender",
        "openTypeHheaLineGap",  # Design fields
        "openTypeOS2TypoAscender",
        "openTypeOS2TypoDescender",
        "openTypeOS2TypoLineGap",
        "openTypeOS2WinAscent",
        "openTypeOS2WinDescent"
    ]

    fromfont = args.fromfont
    tofont = args.tofont
    logger = args.logger
    reportonly = args.reportonly

    ffi = fromfont.fontinfo
    tfi = tofont.fontinfo
    updatemessage = " to be updated: " if reportonly else " updated: "
    precision = fromfont.paramset["precision"]
    # Increase screen logging level to W unless specific level supplied on command-line
    if not (args.quiet or "scrlevel" in args.paramsobj.sets["command line"]):
        logger.scrlevel = "W"

    updated = False
    for field in fields:
        if field in ffi:
            felem = ffi[field][1]
            ftag = felem.tag
            ftext = felem.text
            if ftag == 'real': ftext = processnum(ftext, precision)
            message = field + updatemessage

            if field in tfi:  # Need to compare values to see if update is needed
                telem = tfi[field][1]
                ttag = telem.tag
                ttext = telem.text
                if ttag == 'real': ttext = processnum(ttext, precision)

                if ftag in ("real", "integer", "string"):
                    if ftext != ttext:
                        if field == "openTypeNameLicense":  # Too long to display all
                            addmess = " Old: '" + ttext[
                                0:80] + "...' New: '" + ftext[0:80] + "...'"
                        else:
                            addmess = " Old: '" + ttext + "' New: '" + str(
                                ftext) + "'"
                        telem.text = ftext
                        logger.log(message + addmess, "W")
                        updated = True
                elif ftag in ("true", "false"):
                    if ftag != ttag:
                        tfi.setelem(field, ET.fromstring("<" + ftag + "/>"))
                        logger.log(
                            message + " Old: '" + ttag + "' New: '" +
                            str(ftag) + "'", "W")
                        updated = True
                elif ftag == "array":  # Assume simple array with just values to compare
                    farray = []
                    for subelem in felem:
                        farray.append(subelem.text)
                    tarray = []
                    for subelem in telem:
                        tarray.append(subelem.text)
                    if farray != tarray:
                        tfi.setelem(field, ET.fromstring(ET.tostring(felem)))
                        logger.log(
                            message + "Some values different Old: " +
                            str(tarray) + " New: " + str(farray), "W")
                        updated = True
                else:
                    logger.log(
                        "Non-standard fontinfo field type: " + ftag + " in " +
                        field, "X")
            else:
                tfi.addelem(field, ET.fromstring(ET.tostring(felem)))
                logger.log(
                    message +
                    "is missing from destination font so will be copied from source font",
                    "W")
                updated = True
        else:  # Field not in from font
            if field in tfi:
                logger.log(
                    field +
                    " is missing from source font but present in destination font",
                    "E")
            else:
                logger.log(field + " is in neither font", "W")

    # Now update on disk
    if not reportonly and updated:
        logger.log("Writing updated fontinfo.plist", "P")
        UFO.writeXMLobject(tfi,
                           tofont,
                           tofont.ufodir,
                           "fontinfo.plist",
                           True,
                           fobject=True)

    return
Example #48
            "arg2": "background",
            "arg3": ""}
        params = urllib.urlencode(params)
        updateUrl = UPDATE_URL % {"host": host, "params": params}

        try:
            req = urllib2.Request(updateUrl)
            logger.log(u"Sending NMJ scan update command via url: %s" % (updateUrl), logger.DEBUG)
            handle = urllib2.urlopen(req)
            response = handle.read()
        except IOError, e:
            logger.log(u"Warning: Couldn't contact Popcorn Hour on host %s: %s" % (host, e))
            return False

        try:
            et = etree.fromstring(response)
            result = et.findtext("returnValue")
        except SyntaxError, e:
            logger.log(u"Unable to parse XML returned from the Popcorn Hour: %s" % (e), logger.ERROR)
            return False
        
        if int(result) > 0:
            logger.log(u"Popcorn Hour returned an errorcode: %s" % (result))
            return False
        else:
            logger.log(u"NMJ started background scan")
            return True

    def _notifyNMJ(self, host=None, database=None, mount=None, force=False):
        if not sickbeard.USE_NMJ and not force:
            logger.log("Notification for NMJ scan update not enabled, skipping this notification", logger.DEBUG)
Example #49
def closestServers(client, all=False):
    """Determine the 5 closest speedtest.net servers based on geographic
    distance
    """

    urls = [
        '://www.speedtest.net/speedtest-servers-static.php',
        '://c.speedtest.net/speedtest-servers-static.php',
        '://www.speedtest.net/speedtest-servers.php',
        '://c.speedtest.net/speedtest-servers.php',
    ]
    errors = []
    servers = {}
    for url in urls:
        try:
            request = build_request(url)
            uh, e = catch_request(request)
            if e:
                errors.append('%s' % e)
                raise SpeedtestCliServerListError
            serversxml = []
            while 1:
                serversxml.append(uh.read(10240))
                if len(serversxml[-1]) == 0:
                    break
            if int(uh.code) != 200:
                uh.close()
                raise SpeedtestCliServerListError
            uh.close()
            try:
                try:
                    root = ET.fromstring(''.encode().join(serversxml))
                    elements = root.getiterator('server')
                except AttributeError:  # Python3 branch
                    root = DOM.parseString(''.join(serversxml))
                    elements = root.getElementsByTagName('server')
            except SyntaxError:
                raise SpeedtestCliServerListError
            for server in elements:
                try:
                    attrib = server.attrib
                except AttributeError:
                    attrib = dict(list(server.attributes.items()))
                d = distance(
                    [float(client['lat']),
                     float(client['lon'])],
                    [float(attrib.get('lat')),
                     float(attrib.get('lon'))])
                attrib['d'] = d
                if d not in servers:
                    servers[d] = [attrib]
                else:
                    servers[d].append(attrib)
            del root
            del serversxml
            del elements
        except SpeedtestCliServerListError:
            continue

        # We were able to fetch and parse the list of speedtest.net servers
        if servers:
            break

    if not servers:
        print_('Failed to retrieve list of speedtest.net servers:\n\n %s' %
               '\n'.join(errors))
        sys.exit(1)

    closest = []
    for d in sorted(servers.keys()):
        for s in servers[d]:
            closest.append(s)
            if len(closest) == 5 and not all:
                break
        else:
            continue
        break

    del servers
    return closest
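
distance() is a helper defined elsewhere in the same script. Given that it takes two (lat, lon) pairs, it is presumably a haversine great-circle distance; a plausible sketch under that assumption:

# Plausible sketch of the distance() helper (assumed haversine, in km).
import math

def distance(origin, destination):
    lat1, lon1 = origin
    lat2, lon2 = destination
    radius = 6371  # mean Earth radius in km
    dlat = math.radians(lat2 - lat1)
    dlon = math.radians(lon2 - lon1)
    a = (math.sin(dlat / 2) ** 2 +
         math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) *
         math.sin(dlon / 2) ** 2)
    return radius * 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))

print(round(distance([52.52, 13.40], [48.85, 2.35])))  # Berlin -> Paris, ~878 km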
Example #50
    def retrieve(self, proxies=None):
        '''
        Trying to fill the BioPAX and SBGN-PD data for certain pathway.

        :param proxies: if not None, proxies used by while requesting data.
        :return: None, but fill self's pathway's json file and SBGN if it is empty
        '''
        # print(self.id)
        query = []
        error = Queue()
        query.append(
            MultiThreadNetworkRequest(
                "http://www.reactome.org/ReactomeRESTfulAPI/RESTfulWS/sbgnExporter/{}"
                .format(
                    # re.findall(r"\d+", self.id.split("/")[-1])[0]
                    self.db_id),
                NetworkMethod.GET,
                self,
                "SBGN",
                proxy=proxies,
                error_queue=error))
        # print('http://reactome.org/ReactomeRESTfulAPI/RESTfulWS/pathwayHierarchy/{}'.format(
        #         self.species.name.replace(' ', '+')))
        query.append(
            MultiThreadNetworkRequest(
                'http://reactome.org/ReactomeRESTfulAPI/RESTfulWS/pathwayHierarchy/{}'
                .format(self.species.name.replace(' ', '+')),
                NetworkMethod.GET,
                self,
                'tree',
                proxy=proxies,
                error_queue=error))
        self.threads.extend(query)
        try:
            [x.start() for x in self.threads]
            [x.join() for x in self.threads]
        except Exception as e:
            # print("catched!")
            self.threads = []
            raise e
        finally:
            self.threads = []
            if self.tree:
                self.tree = ET.ElementTree(ET.fromstring(self.tree))
                tgt = None
                r = self.tree.getroot()
                for t in r.iter():
                    if t.attrib.get('dbId') == self.db_id:
                        tgt = t
                        # print(t.tag)
                while not tgt.attrib.get('hasDiagram'):
                    # get parent
                    for t in r.iter():
                        if tgt in t.getchildren():
                            tgt = t
                            break
                # print(tgt.attrib)
                if self.species.name != 'Homo sapiens':
                    # need one more step:
                    r = NetworkRequest(
                        'http://www.reactome.org/ReactomeRESTfulAPI/RESTfulWS/detailedView/DatabaseObject/{}'
                        .format(tgt.attrib['dbId']),
                        NetworkMethod.GET,
                        proxy=proxies)
                    jd = json.loads(r.text)
                    # print(jd['stableIdentifier']['displayName'])
                    db_id = jd['stableIdentifier']['displayName'].split('.')[0]
                else:
                    db_id = 'R-HSA-{}'.format(tgt.attrib['dbId'])
                # print(db_id)
                res = NetworkRequest(
                    'http://www.reactome.org/download/current/diagram/{}.json?v={}'
                    .format(db_id, str(round(time.time()))),
                    NetworkMethod.GET,
                    proxy=proxies)
                # print(res.text)
                self.json_data = json.loads(res.text)
            if self.data:
                self.data = self.data.encode("utf-8")
        while not error.empty():
            er = error.get()
            if er:
                raise NetworkException(er[0], er[1])
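
ElementTree elements carry no parent pointers, which is why the code above rescans r.iter() on every upward step. A common alternative is to build a child-to-parent map once; a small sketch on an illustrative tree:

# Sketch of the child->parent map idiom for walking up an ElementTree.
import xml.etree.cElementTree as ET

tree = ET.ElementTree(ET.fromstring(
    '<node dbId="1" hasDiagram="true"><node dbId="2"><node dbId="3"/></node></node>'))
root = tree.getroot()

# One pass instead of one full scan per upward step
parent_map = {child: parent for parent in root.iter() for child in parent}

tgt = next(t for t in root.iter() if t.attrib.get('dbId') == '3')
while not tgt.attrib.get('hasDiagram'):
    tgt = parent_map[tgt]
print(tgt.attrib['dbId'])  # -> 1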
Example #51
    def get_servers(self, servers=None):
        """Retrieve a the list of speedtest.net servers, optionally filtered
        to servers matching those specified in the ``servers`` argument
        """
        if servers is None:
            servers = []

        self.servers.clear()

        for i, s in enumerate(servers):
            try:
                servers[i] = int(s)
            except ValueError:
                raise InvalidServerIDType('%s is an invalid server type, must '
                                          'be int' % s)

        urls = [
            '://www.speedtest.net/speedtest-servers-static.php',
            'http://c.speedtest.net/speedtest-servers-static.php',
            '://www.speedtest.net/speedtest-servers.php',
            'http://c.speedtest.net/speedtest-servers.php',
        ]

        headers = {}
        if gzip:
            headers['Accept-Encoding'] = 'gzip'

        errors = []
        for url in urls:
            try:
                request = build_request(
                    '%s?threads=%s' %
                    (url, self.config['threads']['download']),
                    headers=headers)
                uh, e = catch_request(request)
                if e:
                    errors.append('%s' % e)
                    raise ServersRetrievalError()

                stream = get_response_stream(uh)

                serversxml = []
                while 1:
                    serversxml.append(stream.read(1024))
                    if len(serversxml[-1]) == 0:
                        break

                stream.close()
                uh.close()

                if int(uh.code) != 200:
                    raise ServersRetrievalError()

                printer(''.encode().join(serversxml), debug=True)

                try:
                    try:
                        root = ET.fromstring(''.encode().join(serversxml))
                        elements = root.getiterator('server')
                    except AttributeError:
                        root = DOM.parseString(''.join(serversxml))
                        elements = root.getElementsByTagName('server')
                except (SyntaxError, xml.parsers.expat.ExpatError):
                    raise ServersRetrievalError()

                for server in elements:
                    try:
                        attrib = server.attrib
                    except AttributeError:
                        attrib = dict(list(server.attributes.items()))

                    if servers and int(attrib.get('id')) not in servers:
                        continue

                    if int(attrib.get('id')) in self.config['ignore_servers']:
                        continue

                    try:
                        d = distance(self.lat_lon, (float(
                            attrib.get('lat')), float(attrib.get('lon'))))
                    except (TypeError, ValueError):
                        continue

                    attrib['d'] = d

                    try:
                        self.servers[d].append(attrib)
                    except KeyError:
                        self.servers[d] = [attrib]

                break

            except ServersRetrievalError:
                continue

        if servers and not self.servers:
            raise NoMatchedServers()

        return self.servers
Example #52
 def read_revision_from_string(self, xml_string):
     return self._unpack_revision(fromstring(xml_string))
Example #53
import logging
import subprocess
import sys
import xml.etree.cElementTree as etree

import libvirt

logging.basicConfig(format='%(levelname)s:%(funcName)s:%(message)s',
                    level=logging.INFO)

parser = etree.XMLParser()

conn = libvirt.openReadOnly(None)
if conn is None:
    logging.error('Failed to open connection to the hypervisor')
    sys.exit(1)

domain = conn.lookupByName(sys.argv[1])
desc = etree.fromstring(domain.XMLDesc(0))
macAddr = desc.find("devices/interface[@type='network']/mac"
                    ).attrib["address"].lower().strip()
logging.debug("XMLDesc = %s", macAddr)

output = subprocess.Popen(["arp", "-n"],
                          stdout=subprocess.PIPE).communicate()[0]
lines = [line.split() for line in output.split("\n")[1:]]
logging.debug(lines)

IPaddr = [line[0] for line in lines if line and (line[2] == macAddr)]
if IPaddr:
    print(IPaddr[0])
Example #54
    def get_config(self):
        """Download the speedtest.net configuration and return only the data
        we are interested in
        """

        headers = {}
        if gzip:
            headers['Accept-Encoding'] = 'gzip'
        request = build_request('://www.speedtest.net/speedtest-config.php',
                                headers=headers)
        uh, e = catch_request(request)
        if e:
            raise ConfigRetrievalError(e)
        configxml = []

        stream = get_response_stream(uh)

        while 1:
            configxml.append(stream.read(1024))
            if len(configxml[-1]) == 0:
                break
        stream.close()
        uh.close()

        if int(uh.code) != 200:
            return None

        printer(''.encode().join(configxml), debug=True)

        try:
            root = ET.fromstring(''.encode().join(configxml))
            server_config = root.find('server-config').attrib
            download = root.find('download').attrib
            upload = root.find('upload').attrib
            # times = root.find('times').attrib
            client = root.find('client').attrib

        except AttributeError:
            root = DOM.parseString(''.join(configxml))
            server_config = get_attributes_by_tag_name(root, 'server-config')
            download = get_attributes_by_tag_name(root, 'download')
            upload = get_attributes_by_tag_name(root, 'upload')
            # times = get_attributes_by_tag_name(root, 'times')
            client = get_attributes_by_tag_name(root, 'client')

        ignore_servers = list(map(int, server_config['ignoreids'].split(',')))

        ratio = int(upload['ratio'])
        upload_max = int(upload['maxchunkcount'])
        up_sizes = [32768, 65536, 131072, 262144, 524288, 1048576, 7340032]
        sizes = {
            'upload': up_sizes[ratio - 1:],
            'download':
            [350, 500, 750, 1000, 1500, 2000, 2500, 3000, 3500, 4000]
        }

        size_count = len(sizes['upload'])

        upload_count = int(math.ceil(upload_max / size_count))

        counts = {
            'upload': upload_count,
            'download': int(download['threadsperurl'])
        }

        threads = {
            'upload': int(upload['threads']),
            'download': int(server_config['threadcount']) * 2
        }

        length = {
            'upload': int(upload['testlength']),
            'download': int(download['testlength'])
        }

        self.config.update({
            'client': client,
            'ignore_servers': ignore_servers,
            'sizes': sizes,
            'counts': counts,
            'threads': threads,
            'length': length,
            'upload_max': upload_count * size_count
        })

        self.lat_lon = (float(client['lat']), float(client['lon']))

        printer(self.config, debug=True)

        return self.config
Example #55
    def read_block(self, stream, tagspec=None, elt_handler=None):
        """
        Read from ``stream`` until we find at least one element that
        matches ``tagspec``, and return the result of applying
        ``elt_handler`` to each element found.
        """
        if tagspec is None:
            tagspec = self._tagspec
        if elt_handler is None:
            elt_handler = self.handle_elt

        # Use a stack of strings to keep track of our context:
        context = list(self._tag_context.get(stream.tell()))
        assert context is not None  # check this -- could it ever happen?

        elts = []

        elt_start = None  # where does the elt start
        elt_depth = None  # what context depth
        elt_text = ""

        while elts == [] or elt_start is not None:
            if isinstance(stream, SeekableUnicodeStreamReader):
                startpos = stream.tell()
            xml_fragment = self._read_xml_fragment(stream)

            # End of file.
            if not xml_fragment:
                if elt_start is None:
                    break
                else:
                    raise ValueError("Unexpected end of file")

            # Process each <tag> in the xml fragment.
            for piece in self._XML_PIECE.finditer(xml_fragment):
                if self._DEBUG:
                    print("%25s %s" % ("/".join(context)[-20:], piece.group()))

                if piece.group("START_TAG"):
                    name = self._XML_TAG_NAME.match(piece.group()).group(1)
                    # Keep context up-to-date.
                    context.append(name)
                    # Is this one of the elts we're looking for?
                    if elt_start is None:
                        if re.match(tagspec, "/".join(context)):
                            elt_start = piece.start()
                            elt_depth = len(context)

                elif piece.group("END_TAG"):
                    name = self._XML_TAG_NAME.match(piece.group()).group(1)
                    # sanity checks:
                    if not context:
                        raise ValueError("Unmatched tag </%s>" % name)
                    if name != context[-1]:
                        raise ValueError("Unmatched tag <%s>...</%s>" %
                                         (context[-1], name))
                    # Is this the end of an element?
                    if elt_start is not None and elt_depth == len(context):
                        elt_text += xml_fragment[elt_start:piece.end()]
                        elts.append((elt_text, "/".join(context)))
                        elt_start = elt_depth = None
                        elt_text = ""
                    # Keep context up-to-date
                    context.pop()

                elif piece.group("EMPTY_ELT_TAG"):
                    name = self._XML_TAG_NAME.match(piece.group()).group(1)
                    if elt_start is None:
                        if re.match(tagspec, "/".join(context) + "/" + name):
                            elts.append((piece.group(),
                                         "/".join(context) + "/" + name))

            if elt_start is not None:
                # If we haven't found any elements yet, then keep
                # looping until we do.
                if elts == []:
                    elt_text += xml_fragment[elt_start:]
                    elt_start = 0

                # If we've found at least one element, then try
                # backtracking to the start of the element that we're
                # inside of.
                else:
                    # take back the last start-tag, and return what
                    # we've gotten so far (elts is non-empty).
                    if self._DEBUG:
                        print(" " * 36 + "(backtrack)")
                    if isinstance(stream, SeekableUnicodeStreamReader):
                        stream.seek(startpos)
                        stream.char_seek_forward(elt_start)
                    else:
                        stream.seek(-(len(xml_fragment) - elt_start), 1)
                    context = context[:elt_depth - 1]
                    elt_start = elt_depth = None
                    elt_text = ""

        # Update the _tag_context dict.
        pos = stream.tell()
        if pos in self._tag_context:
            assert tuple(context) == self._tag_context[pos]
        else:
            self._tag_context[pos] = tuple(context)

        return [
            elt_handler(
                ElementTree.fromstring(elt.encode("ascii",
                                                  "xmlcharrefreplace")),
                context,
            ) for (elt, context) in elts
        ]
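
The return statement above encodes each collected fragment with the 'xmlcharrefreplace' error handler so that non-ASCII text survives the trip through an ASCII byte string into the parser. A tiny demonstration of that round trip:

# Round trip of non-ASCII text through xmlcharrefreplace.
from xml.etree import ElementTree

fragment = u'<w pos="noun">caf\u00e9</w>'
encoded = fragment.encode('ascii', 'xmlcharrefreplace')
print(encoded)  # b'<w pos="noun">caf&#233;</w>'

elt = ElementTree.fromstring(encoded)
print(elt.text)  # 'cafe' with an accented e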
Example #56
 def as_xml(self):
     return ElementTree.fromstring(self.serialize())
Example #57
def result(request):
    
    query=""
    if request.method == 'GET':
        # create a form instance and populate it with data from the request:
        form = SearchForm(request.GET)
        # check whether it's valid:
        if form.is_valid():
            # process the data in form.cleaned_data as required
            # ...
            buffer = BytesIO()
            result_list=[]
            query=form.cleaned_data['search_query']
            query=query.replace(" ","+")
             
            c = pycurl.Curl()
            def_url="http://*****:*****@45.79.170.4:8983/solr/courselibrary/select?hl.fl=content&hl=on&indent=on&wt=xml&q="
            calling_url=def_url+query
            c.setopt(c.URL,calling_url)
            c.setopt(c.WRITEDATA, buffer)
            c.perform()
            body = buffer.getvalue()
            xmlstr = body.decode("iso-8859-1")
           
            #for fetching content and id 
            root = ET.fromstring(xmlstr)
            result=root.findall("result")
            documents=result[0].findall("doc")
            for i in documents:
                result_object=result1()
                string=i.findall("str")
                for j in string:
                    name=j.get("name")
                    if name=="content":
                        result_object.content=j.text
                        print(result_object.content)
                    if name=="id":
                        result_object.id=j.text
                        print(result_object.id)
                    if name=="file123":
                        result_object.name=j.text
                        print(result_object.name)
                result_list.append(result_object)
                
            #for fetching highlighting snippet
            result123=root.findall("lst")
            for k in result123:
                highlight_element=k.get("name")
                if highlight_element=="highlighting":
                    list_unhigh=k.findall("lst")
                    for l in list_unhigh:
                        for obj in result_list:
                            if obj.id==l.get("name"):
                                
                                attr=l.findall("arr")
                                
                                for m in attr:
                                    highlighting_strelement=m.findall("str")
                                    for n in highlighting_strelement:
                                        obj.snippet=n.text
                                        print(obj.snippet)
            
            display_list=[]
            for w in result_list:
                w.id = w.id.replace("/var/www/coursewiki/search/media/", " ")
                w.id = w.id.strip(" ")
                # strip(".txt") would remove characters, not the suffix
                if w.id.endswith(".txt"):
                    w.id = w.id[:-len(".txt")]
                #w.id=w.id.replace(" ","%20")
                
                display_list.append(w)
            query=query.replace("+"," ")    
            context={'query_result':display_list,
                     'url_called':calling_url,'query':query}
            # redirect to a new URL:
            template=loader.get_template('search/result.html')
            return HttpResponse(template.render(context, request))
    # if a GET (or any other method) we'll create a blank form
    else:
        form = SearchForm()
        template = loader.get_template('search/index.html')
        context={'form': form}
    return HttpResponseRedirect('/search')
Example #58
logger.addHandler(console_log)
logger.addHandler(file_log)

ar = parser.parse_args()

logger.info('=' * 40)
logger.info('Start process ' + str(datetime.datetime.utcnow()))

# Open input XML-file (if exist)
if os.path.exists(ar.xml):
    with open(ar.xml, 'r') as inpfile:
        # read content of xml file as text
        rawcontent = inpfile.read()
        try:
            # Convert text content to XML tree
            xml = etree.fromstring(rawcontent)
            # Get attribute 'run' from <iperf run="iperf">
            baserun = xml.get('run', None)
            # If the attribute isn't found, exit the script
            if not baserun:
                logger.error("Parameter 'run' isn't set")
                raise SystemExit

            # Get attribute 'timeout' from <iperf timeout="50">
            timeout = xml.get('timeout', 50)
            # If the attribute isn't found, exit the script
            if not timeout:
                logger.error("Parameter 'timeout' isn't set")
                raise SystemExit

            # Pretty-print the XML for writing to the log file (add indents, etc.)
Example #59
    # sign in, get MyPlex response
    try:
        response = urlopener.open(request).read()
    except urllib2.HTTPError, e:
        if e.code==401:
            dprint(__name__, 0, 'Authentication failed')
            return ('', '')
        else:
            raise
    
    dprint(__name__, 1, "====== MyPlex sign in XML ======")
    dprint(__name__, 1, response)
    dprint(__name__, 1, "====== MyPlex sign in XML finished ======")
    
    # analyse response
    XMLTree = etree.ElementTree(etree.fromstring(response))
    
    el_username = XMLTree.find('username')
    el_authtoken = XMLTree.find('authentication-token')    
    if el_username is None or \
       el_authtoken is None:
        username = ''
        authtoken = ''
        dprint(__name__, 0, 'MyPlex Sign In failed')
    else:
        username = el_username.text
        authtoken = el_authtoken.text
        dprint(__name__, 0, 'MyPlex Sign In successful')
    
    return (username, authtoken)
Example #60
#!/usr/bin/env python

import xml.etree.cElementTree as ET
import sys, time

ctr = 1
delay = 1
while (ctr < 5):
    qos = '{http://www.cisco.com/nxos:1.0:qosmgr}'
    inraw = cli(
        'sh policy-map interface ethernet 4/23 input type queuing | xml | exclude "]]>]]>"'
    )
    tree = ET.ElementTree(ET.fromstring(inraw))
    root = tree.getroot()
    print "-", "Iteration %i" % (ctr), "-" * 55
    for i in root.iter(qos + 'ROW_cmap'):
        try:
            if i.find(qos + 'cmap-key').text == "2q4t-8e-in-q-default":
                drop_pkts = int(i.find(qos + 'que-dropped-pkts').text)
                print "Dropped Packets = ", drop_pkts
        except AttributeError:
            pass
    ctr += 1
    time.sleep(delay)