def __init__(self, **kwargs):
    """Populate license fields from XML.

    Expects either ``xml_node`` (an already-parsed element) or ``xml``
    (a raw XML string) in *kwargs*.  Known attributes missing from the
    XML become None; nested <feature> elements become Feature objects
    collected in ``self.features``.
    """
    self._issue_date_utc = None
    self._expiration_date_utc = None
    # Default; NOTE(review): the attribute loop below overwrites this
    # with None whenever "is_trial" is absent from the XML.
    self.is_trial = "false"
    if "xml_node" in kwargs:
        node = kwargs["xml_node"]
    else:
        node = XML(kwargs["xml"])
    # Maps our attribute names to the XML attribute names.
    attribute_map = {
        "id": "licenseId",
        "user_id": "userId",
        "payment_plan_id": "paymentPlanId",
        "issue_date_utc": "issueDateUTC",
        "expiration_date_utc": "expirationDateUTC",
        "is_trial": "is_trial",
    }
    xml_attributes = node.attrib
    for attr_name, xml_name in attribute_map.items():
        setattr(self, attr_name, xml_attributes.get(xml_name))
    # Wrap every nested <feature> element, at any depth.
    self.features = [Feature(xml_node=child) for child in node.findall(".//feature")]
    super().__init__(**kwargs)
Exemple #2
0
def load(text, match=None):
    """Read a string containing the XML of an Atom Feed and return the
    data as a native Python structure (a ``dict`` or ``list``).

    :param text: The XML text to load.
    :type text: ``string``
    :param match: A tag name or path to match (optional).  When given,
        only the matching sub-elements are loaded.
    :type match: ``string``
    :return: None for empty input or no matches, a single structure for
        one match, or a list of structures for several matches.
    """
    if text is None:
        return None
    stripped = text.strip()
    if not stripped:
        return None
    nametable = {'namespaces': [], 'names': {}}
    root = XML(stripped)
    matched = root.findall(match) if match is not None else [root]
    if not matched:
        return None
    if len(matched) == 1:
        return load_root(matched[0], nametable)
    return [load_root(node, nametable) for node in matched]
    def getNotifications(self, rurl):
        """
        Get a list of L{Notification} objects for the specified notification collection.

        @param rurl: a user's notification collection URL
        @type rurl: L{URL}
        """

        assert(isinstance(rurl, URL))

        # List all children of the notification collection
        results = self.getPropertiesOnHierarchy(rurl, (davxml.getcontenttype,))
        items = results.keys()
        items.sort()  # NOTE: Python 2 idiom -- keys() must return a list here
        notifications = []
        for path in items:
            path = urllib.unquote(path)
            nurl = URL(url=path)
            if rurl == nurl:
                continue  # skip the collection resource itself
            props = results[path]
            # Only XML resources can carry notification payloads.
            if props.get(davxml.getcontenttype, "none").split(";")[0] in ("text/xml", "application/xml"):
                data, _ignore_etag = self.readData(URL(url=path))
                node = XML(data)
                if node.tag == str(csxml.notification):
                    # Each child of <notification> describes one event.
                    for child in node.getchildren():
                        if child.tag == str(csxml.invite_notification):
                            notifications.append(InviteNotification().parseFromNotification(nurl, child))
                        elif child.tag == str(csxml.invite_reply):
                            notifications.append(InviteReply().parseFromNotification(nurl, child))

        return notifications
Exemple #4
0
    def updatequotewithparameters(self, quote_id,ExternalReference,Grossvalue,netvalue,postingDate,RefDate,SoldToParty,SoldToPartyAdd,Status,TaxAmt,ValidFrm,ValidTo):
        # Relay a quote (with its full parameter set) from SAP to Salesforce:
        # log in via the SOAP partner API to obtain a session id, then hand
        # everything to quote_id_to_sf() for the actual upload.  Returns "OK"
        # on success, or the raw login response body on failure.
        logging.info("SAP is sending quote with more parameters")
        logging.info(locals())
        logging.info("CONNECTING TO SALESFORCE PARTNER WSDL FOR SESSION ID")
        url = "https://login.salesforce.com/services/Soap/u/28.0"

        # SOAP login envelope; {{username}}/{{password}} are substituted by
        # the Django template render below.
        data = """<?xml version="1.0" encoding="UTF-8"?>
                <soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:urn="urn:partner.soap.sforce.com">
                   <soapenv:Header>
                        <urn:CallOptions>
                            <urn:client></urn:client>
                            <urn:defaultNamespace></urn:defaultNamespace>
                        </urn:CallOptions>
                        <urn:LoginScopeHeader>
                            <urn:organizationId></urn:organizationId>
                            <urn:portalId></urn:portalId>
                        </urn:LoginScopeHeader>
                   </soapenv:Header>
                   <soapenv:Body>
                      <urn:login>
                          <urn:username>{{username}}</urn:username>
                          <urn:password>{{password}}</urn:password>
                      </urn:login>
                   </soapenv:Body>
                </soapenv:Envelope>"""
        t = Template(data)
        c = Context({
#            "username": "******",
#            "password": "******"
#            "username": "******",
#            "password": "******"
            "username": "******",
            "password": "******"
        })
        data = t.render(c)

        logging.info("SENDING:")
        logging.info(data)

        headers = {
            'Content-Type': 'text/xml; charset=utf-8',
            'SOAPAction' : 'https://login.salesforce.com/services/Soap/u/28.0'
        }
#        httplib2.debuglevel = 1 
        
        head = httplib2.Http()
    #    head.follow_all_redirects = True
        response, content = head.request(url, "POST", smart_str(data), headers)
        logging.info("########### SESSION ID response ###############%s"%response)
        logging.info("########## SESSION ID content ############## \n %s"%pretty(content))
        if response.get('status') == '200':
            logging.info("GOT THE SESSION ID FROM SALESFORCE")
            xml = XML(content)
            # First child of the SOAP Body is the loginResponse; the session
            # id is read by positional index -- NOTE(review): brittle against
            # schema changes, confirm index [0][4] against the partner WSDL.
            session_response=xml.find("{http://schemas.xmlsoap.org/soap/envelope/}Body").getchildren()[0]
            session_id = session_response[0][4].text
            quote_id_to_sf(session_id,quote_id,ExternalReference,Grossvalue,netvalue,postingDate,RefDate,SoldToParty,SoldToPartyAdd,Status,TaxAmt,ValidFrm,ValidTo)
        else:
            return content

        return "OK"
	def findSha1Artifacts(self, strFileName, strGroupID, strArtifactID):
		"""Return the sorted versions of every deployed artifact whose SHA1
		matches the local file *strFileName* and whose coordinates match
		*strGroupID*/*strArtifactID*.
		"""
		# The SHA1 of the local file is the Lucene search key.
		strFileSha1 = self.generate_sha1_from_file(strFileName)
		strPath = self.strUrlLuceneSearchSha1 % strFileSha1
		aucContent = self.tRestDriver.get(self.tServerBaseUrl, strPath)
		tSearchResult = XML(aucContent)

		# Refuse a truncated result set: it could hide matches.
		if tSearchResult.findtext('tooManyResults') != 'false':
			raise Exception("Received a truncated search result!")

		atVersions = []
		for tNode in tSearchResult.findall('data/artifact'):
			strG = tNode.findtext('groupId')
			strA = tNode.findtext('artifactId')
			strVersion = tNode.findtext('version')

			# All three fields must be present (findtext yields None otherwise).
			if isinstance(strG, basestring) and isinstance(strA, basestring) and isinstance(strVersion, basestring):
				if strGroupID == strG.strip() and strArtifactID == strA.strip():
					strVersion = strVersion.strip()
					# 'SNAPSHOT' is mapped to the 0.0.0 sentinel version.
					if strVersion == 'SNAPSHOT':
						tVersion = deploy_version.version(0, 0, 0)
					else:
						tVersion = deploy_version.version(strVersion)
					atVersions.append(tVersion)

		atVersions.sort()

		return atVersions
    def __init__(self, host='localhost', port=2812, username=None, password='', https=False):
        """Connect to a monit HTTP interface and load its service status.

        Fetches ``/_status?format=xml`` from ``host:port`` (HTTPS when
        *https* is true, with HTTP basic auth when *username* is given)
        and registers one ``MonitConn.Service`` per <service> element,
        keyed by service name.

        Raises Exception when the request, the read, or the XML parse
        fails.
        """
        if not port:
            port = 2812

        port = int(port)
        self.baseurl = (https and 'https://%s:%i' or 'http://%s:%i') % (host, port)
        url = self.baseurl + '/_status?format=xml'

        req = urllib2.Request(url)

        if username:
            # encodestring appends a trailing newline; strip it.
            base64string = base64.encodestring('%s:%s' % (username, password))[:-1]
            authheader = "Basic %s" % base64string
            req.add_header("Authorization", authheader)

        try:
            handle = urllib2.urlopen(req)
        except urllib2.URLError as e:
            raise Exception(e.reason)

        try:
            response = handle.read()
        except Exception:
            # narrowed from a bare "except:" so SystemExit and
            # KeyboardInterrupt are no longer swallowed
            raise Exception("Error while reading")

        try:
            from xml.etree.ElementTree import XML
            root = XML(response)
        except Exception:
            # narrowed from a bare "except:" (see above)
            raise Exception("Error while converting to XML")

        for serv_el in root.iter('service'):
            serv = MonitConn.Service(self, serv_el)
            self[serv.name] = serv
Exemple #7
0
    def __init__(self, args):
        # Fetch the CI job description (api/xml) for args.build and expose
        # matching artifact download URLs as attributes named by pattern key.

        self.args = args
        url = opener.open(args.build+"api/xml")
        try:
            log.debug('Fetching xml from %s code:%d', url.url, url.code)
            if url.code != 200:
                log.error('Failed to get CI XML from %s (code %d)',
                          url.url, url.code)
                raise Stop(20, 'Job lookup failed, is the job name correct?')
            ci_xml = url.read()
        finally:
            url.close()

        root = XML(ci_xml)

        artifacts = root.findall("./artifact")
        base_url = args.build+"artifact/"
        if len(artifacts) <= 0:
            raise AttributeError(
                "No artifacts, please check build on the CI server.")

        patterns = self.get_artifacts_list()
        for artifact in artifacts:
            filename = artifact.find("fileName").text

            # Bind each pattern key to the download URL of an artifact whose
            # file name matches (later matches overwrite earlier ones).
            for key, value in patterns.iteritems():
                if re.compile(value).match(filename):
                    rel_path = base_url + artifact.find("relativePath").text
                    setattr(self, key, rel_path)
                    pass
Exemple #8
0
 def _dispatchSoapRequest(self, request):
     """Dispatch a parsed SOAP request to the matching UPnP action and
     return the action's SOAP response envelope."""
     try:
         try:
             envelope = XML(request.soap_data)
             body = envelope.find("{http://schemas.xmlsoap.org/soap/envelope/}Body")
             # determine UPnP action
             action = body.find("{%s}%s" % (request.soap_ns, request.soap_action))
             # look up the action in the service
             upnp_action = self.service._actions[request.soap_action]
             # build a list of the action arguments
             in_args = {}
             for arg in action:
                 in_args[arg.tag] = arg.text
             # execute the UPnP action
             logger.log_debug("executing %s#%s" % (self.service.serviceID, request.soap_action))
             out_args = upnp_action(request, self.service, in_args)
             # return the action response
             env = Element("s:Envelope")
             env.attrib['xmlns:s'] = "http://schemas.xmlsoap.org/soap/envelope/"
             env.attrib['s:encodingStyle'] = "http://schemas.xmlsoap.org/soap/encoding/"
             env.attrib['xmlns:i'] = "http://www.w3.org/1999/XMLSchema-instance"
             body = SubElement(env, "s:Body")
             resp = SubElement(body, "u:%sResponse" % request.soap_action)
             resp.attrib['xmlns:u'] = request.soap_ns
             # each out argument becomes a typed child element of the response
             for (name,type,value) in out_args:
                 arg = SubElement(resp, name)
                 arg.attrib["i:type"] = type
                 arg.text = value
             output = xmlprint(env)
             return HttpResponse(200, headers={'EXT': ''}, stream=output)
         except UPNPError, e:
             # UPnP errors propagate unchanged to the caller
             raise e
         except Exception, e:
             # anything else becomes an opaque 500 to the client
             logger.log_error("caught unhandled exception: %s" % e)
             raise UPNPError(500, "Internal server error")
Exemple #9
0
    def fields(self, year, flat=False):
        """Fetch the field definitions for *year* of this dataset.

        :param year: census year to look up in DEFINITIONS.
        :param flat: when true, return {variable: "concept: text"} instead
            of the nested {concept: {variable: text}} mapping.
        :raises CensusException: when the dataset has no URL for *year*.
        """
        fields_url = DEFINITIONS[self.dataset].get(str(year))
        if not fields_url:
            raise CensusException('%s is not available for %s' % (self.dataset, year))

        doc = XML(requests.get(fields_url).text)

        data = {}
        if flat:
            for variable in doc.iter('variable'):
                data[variable.attrib['name']] = "%s: %s" % (variable.attrib['concept'], variable.text)
        else:
            for concept_node in doc.iter('concept'):
                data[concept_node.attrib['name']] = {
                    var.attrib['name']: var.text
                    for var in concept_node.iter('variable')
                }
        return data
Exemple #10
0
def _request(method, data=None):
    """Make the raw request to the Prowl API.

    Returns a ``(status, data, text)`` tuple: *status* is 'success' or
    'error', *data* is the response node's attributes with values
    coerced to int, *text* is the node's text (the error message, if
    any).  Raises Error when the response is not a well-formed <prowl>
    document.
    """
    # Catch HTTP errors and treat them just like the normal response:
    # the API returns its XML payload on error statuses too.
    try:
        res = urlopen(API_URL_BASE + method, urlencode(data) if data else None)
    except HTTPError as err:
        # Was "except HTTPError as res: pass", which is broken on
        # Python 3: the as-name is unbound when the except block exits,
        # so the read below raised NameError.  HTTPError doubles as a
        # response object, so keep it explicitly.
        res = err

    xml = XML(res.read())
    if xml.tag != 'prowl':
        raise Error('malformed response: unexpected tag %r' % xml.tag)
    children = list(xml)  # Element.getchildren() was removed in Python 3.9
    if len(children) != 1:
        raise Error('malformed response: too many children')
    node = children[0]
    status, data, text = node.tag, node.attrib, node.text

    if status not in ('success', 'error'):
        raise Error('malformed response: unknown status %r' % node.tag)

    if 'code' not in node.attrib:
        raise Error('malformed response: no response code')

    if status == 'error' and not text:
        # attrib values are still strings here, so use %s -- the old %d
        # raised a TypeError and masked the real error.
        raise Error('malformed response: no error message with code %s' % data['code'])

    data = dict((k, int(v)) for k, v in data.items())
    _last_meta_data.update(data)

    return status, data, text
def pmc_article_search():
    """Search PubMed for electrophysiology-related articles.

    Pages through Entrez esearch results (retmax ids per request) and
    returns the collected PubMed IDs as a list of ints.
    """
    id_list = []
    search_string = '((neuron electrophysiology) OR (neurophysiology) OR ("input resistance") OR ("resting potential" OR "resting membrane potential") OR "LTP" OR "synaptic plasticity" OR "LTD")'
    search_string_quoted = quote(search_string, safeChars)
    #print search_string_quoted
    retstart = 0
    retmax = 20
    link = esearch % (search_string_quoted, retstart, retmax)
    req = Request(link)
    handle = urlopen(req)
    data = handle.read()
    xml = XML(data)
    # Hard-coded for testing; the commented expression reads the real count.
    num_found_articles = 20  # int(xml.find('.//Count').text)

    while retstart < num_found_articles:
        link = esearch % (search_string_quoted, retstart, retmax)
        req = Request(link)
        handle = urlopen(req)
        data = handle.read()
        #print data
        xml = XML(data)
        id_list_temp = xml.findall(".//Id")
        if len(id_list_temp) == 0:
            # Was "int(xml.findall('.//Id'))" -- a guaranteed TypeError.
            # An empty page means there are no further results: stop paging.
            break
        for id_elem in id_list_temp:
            id_list.append(int(id_elem.text))
        retstart += retmax
    return id_list
def protocolParser(protocol,game,conn):
    # Dispatch an incoming XML protocol message (CLOGIN / CPLAY / CDICE)
    # to the matching game handler.  The username is read from the "usr"
    # attribute of the <username> element(s).
    print protocol
    xmlelement=XML(protocol)
    print xmlelement.tag
    if(xmlelement.tag=="CLOGIN"):
        a_lst = xmlelement.findall("username")
        val=''
        for node in a_lst:
            val=node.attrib["usr"]
            print val
        # NOTE(review): srvau(...) is *called* here and its return value is
        # passed as the Thread target -- the handler likely runs in this
        # thread, not the new one.  Same pattern below for splay/sdice.
        threading.Thread(target=srvau(val,game,conn)).start()
        
    elif(xmlelement.tag=="CPLAY"):
        a_lst = xmlelement.findall("username")
        print 'a_lst'
        val=''
        for node in a_lst:
            val=node.attrib["usr"]
            print val
       
        threading.Thread(target=splay(val,game,conn)).start()
        
        
    elif(xmlelement.tag=="CDICE"):
       
        a_lst = xmlelement.findall("username")
        
        val=''
        for node in a_lst:
            val=node.attrib["usr"]
            print val
       
        threading.Thread(target=sdice(val,game,conn)).start()
Exemple #13
0
    def _parseIssueFeed(self, value, rd):
        try:
            feed = XML(value)
        except:
            rd.errback()
            return

        data = []

        for item in feed.findall('channel/item'):
            timeStruct = time.strptime(item.find('updated').text, '%a, %d %b %Y %H:%M:%S +0000')
            component  = item.find('component')

            if component is None:
                component = 'n/a'
            else:
                component = component.text

            data.append({
                'key':       item.find('key').text,
                'link':      item.find('link').text,
                'summary':   item.find('summary').text,
                'type':      item.find('type').text,
                'status':    item.find('status').text,
                'component': component,
                'updated':   timeStruct,
            })

        rd.callback(data)
def download(job, regex):
    """Grabs platform specific distribution targets from Hudson"""
    url = urllib.urlopen("/".join([HUDSON_ROOT, job, HUDSON_XML_SUFFIX]))
    hudson_xml = url.read()
    # presumably neutralizes "origin/" branch prefixes in the XML before
    # parsing -- TODO confirm why this rewrite is needed
    hudson_xml = hudson_xml.replace('origin/', 'origin_')
    url.close()
    root = XML(hudson_xml)

    # Never download from a build that is still in progress.
    building = root.findtext("./building")
    if building == 'true':
        print '%s build in progress, exiting...' % job
        sys.exit(1)
    revision = root.findtext("./changeSet/revision/revision")
    artifacts = root.findall("./artifact")
    print "Retrieving %s job artifacts from revision: %s" % (job, revision)
    base_url = "/".join([HUDSON_ROOT, job, 'lastSuccessfulBuild/artifact'])
    new_artifacts = list()
    for artifact in artifacts:
        filename = artifact.findtext("fileName")
        # Only fetch artifacts whose file name matches the caller's regex.
        if not regex.match(filename):
            continue
        artifact_url = "/".join([base_url, artifact.findtext("relativePath")])
        print "Downloading %s from URL %s" % (filename, artifact_url)
        urllib.urlretrieve(artifact_url , filename)
        new_artifacts.append(filename)
    return [revision, new_artifacts]
Exemple #15
0
 def _process_response(self, response, returntype=dict):
   """ big ugly function.. slowly improving. """
   # Parse an XML API response into either a dict (returntype=dict) or a
   # list of dicts (returntype=list), raising RemoteError when the
   # payload carries an "error" entry.
   if DEBUG:
     print response
   # Parse XML
   root = XML(response)
   # Dict repsonse
   if returntype==dict:
       response_data = xmldict.XmlDictConfig(root)
       if response_data.has_key("error"):
           raise RemoteError(response_data)
   # List response
   elif returntype==list:
       response_data = []
       return_dict = {}
       for elem in root.getiterator():
           # a repeated root_tag starts a new record; flush the previous one
           if elem.tag == root_tag and return_dict.has_key(elem.tag):
               response_data.append(return_dict)
               return_dict = {elem.tag: elem.text}
           else:
               return_dict[elem.tag] = elem.text
           if return_dict.has_key("error"):
               raise RemoteError(return_dict)
       # add final dict to the list
       response_data.append(return_dict)
   else:
       raise InvalidParameterError("unkown datatype: %s" % (returntype))
   if DEBUG:
     print response_data
   return response_data
def get_pubmed_id_from_doi(doi):
    """Look up the PubMed ID(s) for *doi* via the Entrez esearch service.

    Returns a list of ID strings (usually zero or one entry).
    """
    search_url = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&term=%s[aid]" % (doi)
    response = urlopen(search_url)
    # Every <Id> element in the result document is a matching PubMed ID.
    document = XML(response.read())
    return [node.text for node in document.findall(".//Id")]
Exemple #17
0
    def get_version(self):
        '''obtain the version or just 2.2.x if < 2.3.x
        Raises:
            FailedRequestError: If the request fails.
        '''
        # Return the cached value from a previous call, if any.
        if self._version:
            return self._version

        url = "{}/about/version.xml".format(self.service_url)
        resp = self.http_request(url)
        version = None
        if resp.status_code == 200:
            dom = XML(resp.content)
            for resource in dom.findall("resource"):
                if resource.attrib["name"] == "GeoServer":
                    node = resource.find("Version")
                    # A missing <Version> element just means keep looking.
                    if node is not None:
                        version = node.text
                        break

        # This will raise an exception if the catalog is not available
        # If the catalog is available but could not return version information,
        # it is an old version that does not support that
        if version is None:
            # just to inform that version < 2.3.x
            version = "2.2.x"
        self._version = version
        return version
def get_journal(pmid):
    # Fetch the full journal name for a PubMed id via Entrez esummary,
    # retrying transient network failures up to MAXURLTRIES times.
    # Returns None when the request or the parse ultimately fails.
    MAXURLTRIES = 5
    numTries = 0
    success = False
    link = esummary % (pmid)
    req = Request(link)
    while numTries < MAXURLTRIES and success == False: 
        try: 
            handle = urlopen(req)
            success = True
        except (URLError, HTTPError, BadStatusLine, ParseError):
            print ' failed %d times' % numTries 
            numTries += 1
    if numTries == MAXURLTRIES:
        # exhausted the retries without a successful fetch
        journal_title = None 
        return journal_title
    try:                        
        data = handle.read()
        xml = XML(data)    
        journalXML = xml.find('.//FullJournalName')
        if journalXML is not None:
            journal_title = journalXML.text
        else:
            journal_title = None
        return journal_title
    except Exception:
        # any read/parse failure degrades to "unknown journal"
        journal_title = None 
        return journal_title
Exemple #19
0
	def get_cell_group(self):
		"""Return the cell-group name from this project's NCX file.

		Reads the NeuroML/NCX file reported by ``get_ncx_file`` and
		extracts the first cell-group <string> from the simulation
		configuration tree.
		"""
		with open(self.get_ncx_file(), 'r') as ncx:
			document = XML(ncx.read())
		# Walk the java-beans persistence layout down to the group name.
		return document.find("object/void[@property='allSimConfigs']/void/object/void[@property='cellGroups']/void/string").text
Exemple #20
0
        def updated(downloader, path, _):
            # Completion callback for the Internet Archive file-list
            # download: choose the listed file whose format maps to the
            # requested content_type (a closure variable) and pass its
            # full URL to download_cb on the GLib main loop.
            if path is None:
                logging.error('internet archive file list get fail')
                # FIXME: report to user a failure to download
                return

            from xml.etree.ElementTree import XML
            xml = XML(open(path, 'r').read())
            os.remove(path)  # the downloaded list file is no longer needed

            # Maps archive.org format labels to our content-type strings.
            table = {
                'text pdf': u'application/pdf',
                'grayscale luratech pdf': u'application/pdf-bw',
                'image container pdf': u'application/pdf',
                'djvu': u'image/x.djvu',
                'epub': u'application/epub+zip',
            }

            chosen = None
            for element in xml.findall('file'):
                fmt = element.find('format').text.lower()
                if fmt in table:
                    if table[fmt] == content_type:
                        chosen = element.get('name')
                        break

            if chosen is None:
                logging.error('internet archive file list omits content type')
                # FIXME: report to user a failure to find matching content
                return

            url = os.path.join(url_base, chosen)
            GObject.idle_add(download_cb, url)
	def getAllArtifactVersions(self, strGroupID, strArtifactID):
		"""Return the sorted list of all deployed versions of the artifact
		identified by *strGroupID* and *strArtifactID*.
		"""
		strPath = self.strUrlLuceneSearchGA % (strGroupID, strArtifactID)
		aucContent = self.tRestDriver.get(self.tServerBaseUrl, strPath)
		tSearchResult = XML(aucContent)

		# Refuse a truncated result set: it could hide versions.
		if tSearchResult.findtext('tooManyResults') != 'false':
			raise Exception("Received a truncated search result!")

		atVersions = []
		for tNode in tSearchResult.findall('data/artifact'):
			strVersion = tNode.findtext('version')
			# A missing <version> element (findtext -> None) is skipped.
			if isinstance(strVersion, basestring):
				strVersion = strVersion.strip()
				# 'SNAPSHOT' is mapped to the 0.0.0 sentinel version.
				if strVersion == 'SNAPSHOT':
					tVersion = deploy_version.version(0, 0, 0)
				else:
					tVersion = deploy_version.version(strVersion)
				atVersions.append(tVersion)

		atVersions.sort()

		return atVersions
Exemple #22
0
    def gsversion(self):
        '''obtain the version or just 2.2.x if < 2.3.x
        Raises:
            FailedRequestError: If the request fails.
        '''
        # Return the cached value from a previous call, if any.
        if self._version: return self._version
        about_url = self.service_url + "/about/version.xml"
        response, content = self.http.request(about_url, "GET")
        version = None
        if response.status == 200:
            dom = XML(content)
            resources = dom.findall("resource")
            for resource in resources:
                if resource.attrib["name"] == "GeoServer":
                    try:
                        version = resource.find("Version").text
                        break
                    except AttributeError:
                        # narrowed from a bare "except:": only a missing
                        # <Version> element (find() -> None, .text raises)
                        # is expected here; keep scanning the resources
                        pass

        #This will raise an exception if the catalog is not available
        #If the catalog is available but could not return version information,
        #it is an old version that does not support that
        if version is None:
            self.get_workspaces()
            # just to inform that version < 2.3.x
            version = "2.2.x"
        self._version = version
        return version
Exemple #23
0
    def _process_result(self):
        # Parse the update-server reply in self._xml_data and report
        # (bundle, version, link, size, None) via self._completion_cb.
        document = XML(self._xml_data)

        if document.find(_FIND_DESCRIPTION) is None:
            # No description element means the server has no bundle for us.
            logging.debug('Bundle %s not available in the server for the '
                'version %s', self._bundle.get_bundle_id(), config.version)
            version = None
            link = None
            size = None
        else:
            try:
                version = NormalizedVersion(document.find(_FIND_VERSION).text)
            except InvalidVersionError:
                logging.exception('Exception occured while parsing version')
                version = '0'

            link = document.find(_FIND_LINK).text

            try:
                # size element is in KiB; convert to bytes (Python 2 long)
                size = long(document.find(_FIND_SIZE).text) * 1024
            except ValueError:
                logging.exception('Exception occured while parsing size')
                size = 0

        global _fetcher
        _fetcher = None  # this fetch is finished; allow a new one
        self._completion_cb(self._bundle, version, link, size, None)
Exemple #24
0
    def fields(self, flat=False):
        """Fetch this dataset's field definitions from ``self.fields_url``.

        :param flat: when true, return {variable: "concept: text"} instead
            of the nested {concept: {variable: text}} mapping.
        """
        doc = XML(requests.get(self.fields_url).text)

        data = {}
        if flat:
            for variable in doc.iter('variable'):
                data[variable.attrib['name']] = "%s: %s" % (variable.attrib['concept'], variable.text)
        else:
            for concept_node in doc.iter('concept'):
                data[concept_node.attrib['name']] = {
                    var.attrib['name']: var.text
                    for var in concept_node.iter('variable')
                }
        return data
Exemple #25
0
 def identify(self, geosGeometry, geometryFieldName, layers, url, username, password):
     """Run a WFS intersection query and return matching feature attributes.

         Assuming :
         Url like http://localhost:8080/geoserver/wfs
         layers like geonode:capa
         geosGeometry is in Lambert72
     """
     # TODO input checking
     gml = geosGeometry.ogr.gml
     request_body = self._buildWfsIntersectRequest(gml, layers, geometryFieldName)
     # verify=False to avoid certificate-not-trusted problems
     reply = requests.post(url, data=request_body, auth=(username, password), verify=False)
     document = XML(reply.text)
     if document.tag == "{http://www.opengis.net/ogc}ServiceExceptionReport":
         # OGC error: surface the root cause as a plain Exception.
         fault = document.find('{http://www.opengis.net/ogc}ServiceException')
         raise Exception(str(fault.text).strip())
     results = []
     for member in document.findall('{http://www.opengis.net/gml}featureMember'):
         attributes = {}
         for feature in member:
             for field in feature:
                 tag_name = field.tag.split('}')[-1]  # strip the namespace
                 attributes[tag_name] = field.text if field.text is not None else ""
         results.append(attributes)
     return results
def sanitize_kml(kmlstring):
    """Sanitize the description of every Placemark in a KML document.

    Returns the cleaned document serialized as UTF-8 bytes.
    """
    document = XML(kmlstring)
    # Derive the namespace prefix from the root tag: stripping the literal
    # characters 'k', 'm', 'l' from "{uri}kml" leaves "{uri}".
    namespace = document.tag.strip('kml')
    for placemark in document.findall('.//%sPlacemark' % namespace):
        descriptions = placemark.findall('%sdescription' % namespace)
        descriptions[0].text = sanitize_kml_description(descriptions)
    return tostring(document, 'utf-8')
def fetch_snp_records(ids):
    # Fetch dbSNP records for the given ids via Entrez efetch and return
    # them as remapped dicts, one per <Rs> element in the reply.
    #
    # 2010-07-12 11:57 Reece Hart <*****@*****.**> Most (all other?)
    # Entrez facilities use DTDs.  dbSNP uses XSD (with namespaces), which
    # isn't supported by Entrez.read.  Use xml.elementtree instead.
    xml = Entrez.efetch(db='snp',id=','.join(ids), retmode='xml').read()
    d = XML(xml)
    return map(_remap_dict_keys, map( _rs_elem_as_dict,
		d.findall('{http://www.ncbi.nlm.nih.gov/SNP/docsum}Rs')))
def isatom(body):
    """Answers if the given response body looks like ATOM."""
    root = XML(body)
    if root.tag != XNAME_FEED:
        return False
    # An Atom feed must carry author, id and title elements.
    required = (XNAME_AUTHOR, XNAME_ID, XNAME_TITLE)
    return all(root.find(name) is not None for name in required)
def get_pubmed_id_from_title(titleEncoded):
    """Search PubMed for an article title and return the matching IDs.

    The title is wrapped in parentheses and percent-encoded (keeping the
    parentheses literal) before being sent to Entrez esearch.
    """
    query = quote("(%s)" % titleEncoded, '()')
    search_url = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&term=%s' % (query)
    # Every <Id> element in the result document is a matching PubMed ID.
    document = XML(urlopen(search_url).read())
    return [node.text for node in document.findall('.//Id')]
    def _get_sys_info(self):
        """
        Returns sysinfo of host system in the following format::
            {'memory': [{'bank_locator': 'BANK 0',
                         'form_factor': 'SODIMM',
                         'locator': 'ChannelA-DIMM0',
                         'manufacturer': 'Samsung',
                         'part_number': 'M471B5273DH0-CK0',
                         'serial_number': '9760E90B',
                         'size': '4096 MB',
                         'speed': '1600 MHz',
                         'type': 'DDR3',
                         'type_detail': 'Synchronous'},
                        {'bank_locator': 'BANK 2',
                         'form_factor': 'SODIMM',
                         'locator': 'ChannelB-DIMM0',
                         'manufacturer': 'Micron',
                         'part_number': '16KTF51264HZ-1G6M1',
                         'serial_number': '3255C613',
                         'size': '4096 MB',
                         'speed': '1600 MHz',
                         'type': 'DDR3',
                         'type_detail': 'Synchronous'}],
             'processor': {'external_clock': '100 MHz',
                           'family': 'Core i5',
                           'manufacturer': 'Intel(R) Corporation',
                           'max_speed': '2600 MHz',
                           'part_number': 'None',
                           'serial_number': 'None',
                           'signature': 'Type 0, Family 6, Model 58, Stepping 9',
                           'socket_destination': 'CPU Socket - U3E1',
                           'status': 'Populated, Enabled',
                           'type': 'Central Processor',
                           'version': 'Intel(R) Core(TM) i5-3320M CPU @ 2.60GHz'},
             'system': {'family': 'ThinkPad T430',
                        'manufacturer': 'LENOVO',
                        'product': '234455G',
                        'serial': 'PBKVYA6',
                        'sku': 'LENOVO_MT_2344',
                        'uuid': 'D6A27701-51F5-11CB-963F-F8A34AA11505',
                        'version': 'ThinkPad T430'}}

        """
        xml = XML(self.conn.getSysinfo(0))
        sysinfo = {}
        keys = ['system', 'processor']
        for key in keys:
            sysinfo[key] = {}
            for element in xml.findall(key+'/entry'):
                sysinfo[key][element.attrib['name']] = element.text

        sysinfo['memory'] = []
        for memorydevs in xml.findall('memory_device'):
            x = {}
            for entry in memorydevs.findall('entry'):
                x[entry.attrib['name']] = entry.text
            sysinfo['memory'].append(x)
        return sysinfo
def collect_session(apigee_connection, xml_utils, organization, environment,
                    proxy, revision, timeout, epsilon):
    """Run an Apigee debug (trace) session and dump the traces to an XML file.

    Creates a debug session on the given proxy/revision, sleeps while traffic
    is traced, downloads all trace data, and writes it to
    ``<org>-<env>-<proxy>-<rev>_<session>.xml`` wrapped in the header/footer
    needed for display in the Edge UI.

    :param apigee_connection: client exposing ``create_debug_session`` and
        ``get_all_trace_data``.
    :param xml_utils: helper exposing ``get_xml_header``, ``trace_header``,
        ``trace_footer`` and ``get_xml_footer``.
    :param organization: Apigee organization name.
    :param environment: Apigee environment name.
    :param proxy: API proxy name.
    :param revision: proxy revision to trace.
    :param timeout: debug session length in seconds.
    :param epsilon: seconds reserved at the end of the session for
        downloading/processing trace data.
    :return: None.  Side effect: writes the XML file (unless no traces).
    """
    # Trace data collection/download interval: sleep for the session length
    # minus the time reserved for download/processing.
    sleep = timeout - epsilon

    # Start the debug session
    print("Creating the debug session...")
    debug_session_id = apigee_connection.create_debug_session(
        proxy, revision, timeout)

    print("Debug session %s created..." % debug_session_id)

    # Wait for session traces to collect
    print("Collecting traces for %s seconds...." % sleep)
    print("This represents a session length of %s seconds" % timeout)
    print("_minus_ a pre-defined interval of %s seconds during which trace data is downloaded and processed" % epsilon)
    time.sleep(sleep)

    # Collect trace data
    print("Collecting trace data...")
    trace_data = apigee_connection.get_all_trace_data(proxy, revision,
                                                      debug_session_id)

    if not trace_data:
        print("No traces collected!")
        print("This could be because there was no traffic to the proxy %s during the debug session." % proxy)
        return

    # Process trace data, parse xml, output trace data
    xml_file_header = xml_utils.get_xml_header(organization, environment,
                                               proxy, revision)

    filename = "%s-%s-%s-%s_%s.xml" % (organization, environment, proxy,
                                       revision, debug_session_id)

    # XMLDump() writes to sys.stdout, so redirect stdout to the output file
    # while emitting the XML.  try/finally guarantees stdout is restored and
    # the file closed even if trace parsing fails (the original leaked both).
    # Text mode ("w", not "wb"): we write str via print().
    orig_stdout = sys.stdout
    output_file = open(filename, "w")
    try:
        sys.stdout = output_file

        # Output XML File header
        print(xml_file_header)

        # Loop through the traces and write them to the file, including the
        # per-trace header and footer needed for display in the Edge UI.
        for trace in trace_data:
            trace_id = trace['trace_id']
            trace_xml_text = trace['trace_xml']

            # Convert raw text to parsed XML Object
            trace_xml = XML(trace_xml_text)

            # Output trace header, including the trace_id
            print(xml_utils.trace_header(trace_id))

            # Output each trace, omitting trace-by-trace <?xml/> tag.
            # Check for None BEFORE calling len() (the original order would
            # raise TypeError on a None child).
            for elem in trace_xml:
                if elem is not None and len(elem) > 0:
                    XMLDump(elem)

            # For each trace output the trace footer
            print(xml_utils.trace_footer())

        # Finally output the XML File footer
        print(xml_utils.get_xml_footer())
    finally:
        output_file.close()
        sys.stdout = orig_stdout

    print("Finished! Debug session written to file: %s" % filename)
Exemple #32
0
def parse_card_title(cards):
    """Extract the ``<name>`` text from each card's XML content.

    :param cards: iterable of objects whose ``content`` attribute holds an
        XML document containing a ``<name>`` element.
    :return: list of title strings, in input order.
    """
    return [XML(card.content).find("name").text for card in cards]
Exemple #33
0
def mobikwik_wallet_response(req):
    """Handle the MobiKwik wallet payment-gateway callback (Django view).

    Flow: (1) recompute and verify the checksum of the POSTed response,
    (2) verify the paid amount against the expected order amount,
    (3) re-confirm the transaction server-to-server via MobiKwik's
    checkstatus endpoint and verify that response's checksum too, before
    declaring the transaction successful.

    :param req: Django HttpRequest whose POST carries the gateway fields
        statuscode, orderid, amount, statusmessage and checksum.
    :return: HttpResponse describing the outcome.
    """
    if req.POST :
        val = req.POST
        # First line of defence: recompute the checksum over the POSTed
        # fields and compare with the one the gateway sent.
        responseChecksum = calculate_response_checksum(val['statuscode'], val['orderid'], val['amount'], val['statusmessage'])
        if(responseChecksum == val['checksum']):
            # statuscode "0" means the gateway reports success.
            if val['statuscode'] == "0" or val['statuscode'] == 0 :
                orderid = val['orderid']
                ACTUAL_AMOUNT = 200  # provide here the actual amount of the current orderid from your database or session
                if float(ACTUAL_AMOUNT) == float(val['amount']) :
                    # Second line of defence: ask MobiKwik's checkstatus API
                    # for the authoritative status of this order.
                    csumstring = send_checksum_string(orderid)
                    sndchecksum = make_checksum(csumstring)
                    if(TEST_MODE == False):
                        MOBIKWIK_CHECK_STATUS_URL = "https://www.mobikwik.com/checkstatus"
                    else: 
                        MOBIKWIK_CHECK_STATUS_URL = "https://test.mobikwik.com/mobikwik/checkstatus"
                    MOBIKWIK_CHECK_STATUS_URL += "?mid=%s&checksum=%s&orderid=%s" % (MOBIKWIK_MERCHANT_ID,sndchecksum,orderid)
                    data = remote_call(MOBIKWIK_CHECK_STATUS_URL)
                    try : 
                        tree = XML(data)
                        # NOTE(review): each field below is looked up
                        # independently; if a lookup fails, the matching *2
                        # variable stays undefined and its later use raises
                        # NameError, which the outer "except Exception"
                        # swallows and reports as a generic error.
                        try :
                            amount2 = tree.find('.//amount').text 
                        except Exception as e :
                            print "amount not found"
                        try : 
                            statuscode2 = tree.find('.//statuscode').text 
                        except Exception as e :
                            print "statuscode not found"
                        try : 
                            orderid2 = tree.find('.//orderid').text 
                        except Exception as e :
                            print "orderid not found"
                        try : 
                            refid2 = tree.find('.//refid').text 
                        except Exception as e :
                            print "refid not found"                    
                        try : 
                            statusmessage2 = tree.find('.//statusmessage').text 
                        except Exception as e :
                            print "statusmessage not found"
                        try : 
                            ordertype2 = tree.find('.//ordertype').text 
                        except Exception as e :
                            print "ordertype not found"
                        try : 
                            checksum2 = tree.find('.//checksum').text 
                        except Exception as e :
                            print "checksum not found"
                        # Checksum input wraps every field in single quotes —
                        # presumably the format make_checksum() expects; must
                        # match MobiKwik's documented scheme (verify).
                        ckstring2 = "'%s''%s''%s''%s''%s''%s'" % (statuscode2,orderid2,refid2,amount2,statusmessage2,ordertype2)
                        if statuscode2 == "0" or statuscode2 == 0 :
                            cksum2 = make_checksum(ckstring2)    
                            # Accept only if checksum, amount and orderid all
                            # agree with what we expect.
                            if cksum2 == checksum2 and float(amount2) == float(ACTUAL_AMOUNT) and orderid2 == orderid :
                                # now mark a txns as paid 
                                return HttpResponse("Transaction Successful")
                            else: 
                                return HttpResponse("Fraud Detected")
                        else : 
                            return HttpResponse("Transaction failed because of reason = %s" % statusmessage2)
                    except Exception as e: 
                        return HttpResponse("Error Occur  = %s" % e )
                                
                else : 
                    return HttpResponse("Txn Failed ! Fraud Detected")            
            else : 
                return HttpResponse("Txn Failed Because of reason : %s" % val['statusmessage'])
        else:
            return HttpResponse("Txn Failed! Fraud Detected. Response Checksum did not match")        
    else : 
        return HttpResponse("No Parameters received")
 def __init__(self, str):
     """Parse *str* as XML, keeping the root element and its namespace URI."""
     root = XML(str)
     self._rootElement = root
     # A namespaced tag looks like "{namespace}local": drop the leading "{"
     # and keep everything before the closing "}".
     self._namespace = root.tag[1:].split("}")[0]
Exemple #35
0
class _EtreeFrameParser(_XMLFrameParser):
    """
    Internal class to parse XML into DataFrames with the Python
    standard library XML module: `xml.etree.ElementTree`.
    """
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    def parse_data(self) -> List[Dict[str, Optional[str]]]:
        """
        Parse the XML document and return a list of row dicts.

        Raises
        ------
        ValueError
            If a stylesheet was supplied (only the lxml parser supports
            stylesheets) or if the xpath matches no usable nodes.
        """
        from xml.etree.ElementTree import XML

        if self.stylesheet is not None:
            raise ValueError(
                "To use stylesheet, you need lxml installed and selected as parser."
            )

        self.xml_doc = XML(self._parse_doc(self.path_or_buffer))

        self._validate_path()
        self._validate_names()

        return self._parse_nodes()

    def _parse_nodes(self) -> List[Dict[str, Optional[str]]]:
        """
        Build one dict per matched node.

        Depending on ``elems_only``/``attrs_only``, rows are built from child
        element text, from attributes, or from both; ``names`` (if given)
        relabels the child columns.  All rows are then normalized to share
        the same key set (missing values become None).
        """
        elems = self.xml_doc.findall(self.xpath, namespaces=self.namespaces)
        dicts: List[Dict[str, Optional[str]]]

        if self.elems_only and self.attrs_only:
            raise ValueError(
                "Either element or attributes can be parsed not both.")
        elif self.elems_only:
            if self.names:
                dicts = [{
                    **({
                        el.tag: el.text.strip()
                    } if el.text and not el.text.isspace() else {}),
                    **{
                        nm: ch.text.strip() if ch.text else None
                        for nm, ch in zip(self.names, el.findall("*"))
                    },
                } for el in elems]
            else:
                dicts = [{
                    ch.tag: ch.text.strip() if ch.text else None
                    for ch in el.findall("*")
                } for el in elems]

        elif self.attrs_only:
            dicts = [{
                k: v.strip() if v else None
                for k, v in el.attrib.items()
            } for el in elems]

        else:
            if self.names:
                dicts = [{
                    **el.attrib,
                    **({
                        el.tag: el.text.strip()
                    } if el.text and not el.text.isspace() else {}),
                    **{
                        nm: ch.text.strip() if ch.text else None
                        for nm, ch in zip(self.names, el.findall("*"))
                    },
                } for el in elems]

            else:
                dicts = [{
                    **el.attrib,
                    **({
                        el.tag: el.text.strip()
                    } if el.text and not el.text.isspace() else {}),
                    **{
                        ch.tag: ch.text.strip() if ch.text else None
                        for ch in el.findall("*")
                    },
                } for el in elems]

        # Strip any "{namespace}" prefix from keys.
        dicts = [{k.split("}")[1] if "}" in k else k: v
                  for k, v in d.items()} for d in dicts]

        # Normalize all rows to the union of keys, preserving first-seen order.
        keys = list(dict.fromkeys([k for d in dicts for k in d.keys()]))
        dicts = [{k: d[k] if k in d.keys() else None
                  for k in keys} for d in dicts]

        if self.names:
            dicts = [{nm: v
                      for nm, (k, v) in zip(self.names, d.items())}
                     for d in dicts]

        return dicts

    def _validate_path(self) -> None:
        """
        Check that the xpath matches at least one node carrying data.

        Notes
        -----
        `etree` supports limited XPath. If user attempts a more complex
        expression syntax error will raise.
        """

        msg = ("xpath does not return any nodes. "
               "If document uses namespaces denoted with "
               "xmlns, be sure to define namespaces and "
               "use them in xpath.")
        try:
            elems = self.xml_doc.find(self.xpath, namespaces=self.namespaces)
            if elems is None:
                raise ValueError(msg)

            # BUG FIX: ``elems.attrib`` is a dict and is never None, so the
            # original ``elems.attrib is None`` made this check unreachable.
            # A matched element with neither children nor attributes yields
            # no parsable data, so reject it.
            if elems.find("*") is None and not elems.attrib:
                raise ValueError(msg)

        except (KeyError, SyntaxError) as err:
            # Chain the original error so the cause is not lost.
            raise SyntaxError(
                "You have used an incorrect or unsupported XPath "
                "expression for etree library or you used an "
                "undeclared namespace prefix.") from err

    def _validate_names(self) -> None:
        """
        Validate that ``names`` is list-like and covers every child element
        of the first matched node.
        """
        if self.names:
            parent = self.xml_doc.find(self.xpath, namespaces=self.namespaces)
            # BUG FIX: an Element with no children is falsy, so the original
            # ``if parent`` silently skipped validation for childless nodes;
            # compare against None explicitly.
            children = parent.findall("*") if parent is not None else []

            if is_list_like(self.names):
                if len(self.names) < len(children):
                    raise ValueError(
                        "names does not match length of child elements in xpath."
                    )
            else:
                raise TypeError(
                    f"{type(self.names).__name__} is not a valid type for names"
                )

    def _parse_doc(self, raw_doc) -> bytes:
        """
        Read ``raw_doc`` (path/buffer, possibly compressed), parse it with
        the configured encoding, and return the document re-serialized as
        bytes rooted at its root element.
        """
        from xml.etree.ElementTree import (
            XMLParser,
            parse,
            tostring,
        )

        handle_data = get_data_from_filepath(
            filepath_or_buffer=raw_doc,
            encoding=self.encoding,
            compression=self.compression,
            storage_options=self.storage_options,
        )

        with preprocess_data(handle_data) as xml_data:
            curr_parser = XMLParser(encoding=self.encoding)
            r = parse(xml_data, parser=curr_parser)

        return tostring(r.getroot())
    def test_myschema_xsd01(self):
        """Check the XSD rendered by the builder for a 'create_contact' backend.

        Renders ``_render_myschema_xsd`` and asserts that:
        - the schema's targetNamespace (and the ``xmlns:my`` declaration,
          checked via regex) equal the builder's namespace;
        - every body_map key (plus ``language_value``) appears as a ``ref``
          in the root complex type's sequence;
        - each top-level ``xsd:element`` node carries exactly the expected
          attribute set and values.
        """
        body_map = {'user_id':     1,
                    'first_name':  '',
                    'last_name':   '',
                    'email':       '*****@*****.**',
                    'description': '',
                    'birthday':    '',
                    'created':     '',  # TODO: ignore this (editable=False)
                    'url_site':    '',
                    'image':       '',
                    'language':    '',
                   }
        backend = self._get_backend(ContactFakeBackend, subject='create_contact',
                                    body_map=body_map, model=Contact,
                                   )
        builder = self._get_builder(backend)
        # XML Schema namespace prefix used in findall() queries below.
        xsd = '{http://www.w3.org/2001/XMLSchema}'

        content = builder._render_myschema_xsd(self.request)
        xml = XML(content)

        self.assertEqual(builder.namespace, xml.get('targetNamespace'))
        self.assertEqual(builder.namespace,
                         re.search(r'xmlns:my="(?P<ns>[\w\d\-:/\.]*)"', content).groupdict()['ns']
                        )  # Can't be got with ElementTree, because it's a namespace

        ref_attrs = {node.get('ref')
                      for node in xml.findall('{xsd}element/{xsd}complexType/{xsd}sequence/{xsd}element'.format(xsd=xsd))
                    }
        # chain() because language_value is not declared in body_map, only language has to (m2m)
        expected_ref_attrs = {'my:{}'.format(key) for key in chain(body_map, ['language_value'])}
        self.assertEqual(expected_ref_attrs, ref_attrs)

        # Expected attributes for each top-level <xsd:element>, keyed by name.
        xsd_elements = {
            'CremeCRMCrudity': {'name': 'CremeCRMCrudity'},

            # <xsd:element name="user_id" type="xsd:integer"/>
            'user_id': {'name': 'user_id', 'type': 'xsd:integer'},

            # # <xsd:element name="is_actived" type="xsd:boolean"/>
            # 'is_actived': {'name': 'is_actived', 'type': 'xsd:boolean'},

            # TODO: check if my:requiredString accepts empty strings
            # # <xsd:element name="first_name" type="xsd:string"/>
            # 'first_name': {'name': 'first_name', 'type': 'xsd:string'},
            # <xsd:element name="first_name" type="my:requiredString"/>
            'first_name': {'name': 'first_name', 'type': 'my:requiredString'},

            # <xsd:element name="last_name" type="xsd:requiredString"/>
            'last_name': {'name': 'last_name', 'type': 'my:requiredString'},

            # TODO: check if my:requiredString accepts empty strings
            # # <xsd:element name="email" type="xsd:string"/>
            # 'email': {'name': 'email', 'type': 'xsd:string'},
            # <xsd:element name="email" type="my:requiredString"/>
            'email': {'name': 'email', 'type': 'my:requiredString'},

            # <xsd:element name="description">
            #   <xsd:complexType mixed="true">
            #     <xsd:sequence>
            #       <xsd:any minOccurs="0" maxOccurs="unbounded"
            #                namespace="http://www.w3.org/1999/xhtml" processContents="lax"/>
            #     </xsd:sequence>
            #   </xsd:complexType>
            # </xsd:element>
            'description': {'name': 'description'},

            # <xsd:element name="birthday" nillable="true" type="xsd:date"/>
            'birthday': {'name': 'birthday', 'type': 'xsd:date', 'nillable': 'true'},

            # <xsd:element name="created" type="xsd:dateTime"/>
            'created': {'name': 'created', 'type': 'xsd:dateTime'},

            # TODO: check if my:requiredAnyURI accepts empty strings
            # 'url_site':       {'name': 'url_site', 'type': 'xsd:anyURI'},
            'url_site':       {'name': 'url_site', 'type': 'my:requiredAnyURI'},

            'image':          {'name': 'image', 'type': 'xsd:base64Binary', 'nillable': 'true'},
            'language':       {'name': 'language'},
            'language_value': {'name': 'language_value', 'type': 'xsd:integer', 'nillable': 'true'},
        }

        # Every rendered element must be expected, carry exactly the expected
        # attribute keys, and each attribute must have the expected value.
        for element_node in xml.findall('{}element'.format(xsd)):
            name = element_node.get('name')
            xsd_element_attrs = xsd_elements.get(name)

            if xsd_element_attrs is None:
                self.fail('There is at least an extra node named: {}'.format(name))

            self.assertEqual(set(xsd_element_attrs.keys()), set(element_node.keys()))

            for attr in element_node.keys():
                # self.assertEqual(xsd_element_attrs[attr], element_node.get(attr))
                # TODO: factorise
                expected = xsd_element_attrs[attr]
                value = element_node.get(attr)

                if expected != value:
                    self.fail('Value of attribute "{}" in node "{}" is wrong: expected "{}", got "{}".'.format(
                                    attr, name, expected, value,
                                )
                             )
Exemple #37
0
    def _interpol_search(self,
                         target_index,
                         chunk_size=8,
                         fallback_cutoff=100):
        """
        Use linear interpolation search to find spectra faster.

        The file is probed at guessed byte offsets; the spectrum id found at
        each probe is compared with ``target_index`` and the next probe
        position is scaled proportionally.  Once the current id is within
        ``fallback_cutoff`` of the target, the search falls back to a linear
        scan via ``self._search_linear``.

        Arguments:
            target_index (str or int) : native id of the item to access

        Keyword Arguments:
            chunk_size (int)        : size of the chunk to read in one go in kb
            fallback_cutoff (int)   : id distance below which the search
                switches to a linear scan

        Returns:
            Spectrum: the parsed spectrum for ``target_index``.
        """
        # print('target ', target_index)
        seeker = self.get_binary_file_handler()
        # Start the probe at the middle of the file.
        seeker.seek(0, 2)
        # NOTE(review): docstring says kb, but this multiplies by 512, not
        # 1024 — confirm the intended unit.
        chunk_size = chunk_size * 512
        lower_bound = 0
        upper_bound = seeker.tell()
        mid = int(upper_bound / 2)
        seeker.seek(mid, 0)
        current_position = seeker.tell()
        used_indices = set()
        spectrum_found = False
        spectrum = None
        while spectrum_found is False:
            # NOTE(review): reset on every iteration, so the +/-0.1
            # adjustments below never persist across probes — verify intent.
            jumper_scaling = 1
            file_pointer = seeker.tell()
            data = seeker.read(chunk_size)
            spec_start = self.spec_open.search(data)
            if spec_start is not None:
                # Found a spectrum open tag in this chunk; record its offset
                # and extract the trailing numeric part of its native id.
                spec_start_offset = file_pointer + spec_start.start()
                seeker.seek(spec_start_offset)
                current_index = int(
                    re.search(b"[0-9]*$", spec_start.group("id")).group())

                self.offset_dict[current_index] = (spec_start_offset, )
                if current_index in used_indices:
                    # seeker.close()
                    # Revisiting an id means we are oscillating: damp or
                    # boost the jump size.
                    if current_index > target_index:
                        jumper_scaling -= 0.1
                    else:
                        jumper_scaling += 0.1

                used_indices.add(current_index)

                dist = current_index - target_index
                # Close enough below the target: scan forward linearly.
                if dist < -1 and dist > -(fallback_cutoff):
                    spectrum = self._search_linear(seeker, target_index)
                    # seeker.close()
                    spectrum_found = True
                    break
                # Close enough above the target: step backwards chunk by
                # chunk until we pass it, then scan forward linearly.
                elif dist > 0 and dist < fallback_cutoff:
                    while current_index > target_index:
                        offset = int(current_position - chunk_size)
                        seeker.seek(offset if offset > 0 else 0)
                        lower_bound = current_position
                        current_position = seeker.tell()
                        data = seeker.read(chunk_size)
                        if self.spec_open.search(data):
                            spec_start = self.spec_open.search(data)
                            current_index = int(
                                re.search(b"[0-9]*$",
                                          spec_start.group("id")).group())
                    seeker.seek(current_position)
                    spectrum = self._search_linear(seeker, target_index)
                    # seeker.close()
                    spectrum_found = True
                    break

                if int(current_index) == target_index:
                    # Exact hit: read this spectrum's byte span and parse it.
                    seeker.seek(spec_start_offset)
                    start, end = self._read_to_spec_end(seeker)
                    seeker.seek(start)
                    self.offset_dict[current_index] = (start, end)
                    xml_string = seeker.read(end - start)
                    # seeker.close()
                    spectrum = spec.Spectrum(XML(xml_string),
                                             measured_precision=5e-6)
                    spectrum_found = True
                    break

                elif int(current_index) > target_index:
                    # Overshot: interpolate a new (smaller) probe position.
                    scaling = target_index / current_index
                    seeker.seek(
                        int(current_position * scaling * jumper_scaling))
                    upper_bound = current_position
                    current_position = seeker.tell()
                elif int(current_index) < target_index:
                    # Undershot: interpolate a new (larger) probe position.
                    scaling = target_index / current_index
                    seeker.seek(
                        int(current_position * scaling * jumper_scaling))
                    lower_bound = current_position
                    current_position = seeker.tell()

            elif len(data) == 0:
                # Ran off the end of the file: restart a linear scan from the
                # closest known offset below the target.
                sorted_keys = sorted(self.offset_dict.keys())
                pos = (bisect.bisect_left(sorted_keys, target_index) - 2
                       )  # dat magic number :)
                # NOTE(review): the except branch repeats the try body
                # verbatim, so a genuine IndexError/KeyError would simply
                # re-raise here — dead fallback.
                try:
                    key = sorted_keys[pos]
                    spec_start_offset = self.offset_dict[key][0]
                except:
                    key = sorted_keys[pos]
                    spec_start_offset = self.offset_dict[key][0]
                seeker = self.get_binary_file_handler()
                seeker.seek(spec_start_offset)
                spectrum = self._search_linear(seeker, target_index)
                # seeker.close()
                spectrum_found = True
                break

        return spectrum
Exemple #38
0
    def _binary_search(self, target_index):
        """
        Retrieve spectrum for a given spectrum ID using binary jumps

        The seek list of known (scan id, byte offset) pairs brackets the
        target; repeated proportional jumps narrow the bracket until the
        target's offset is known, then the spectrum's bytes are read and
        parsed.

        Args:
            target_index (int): native id of the spectrum to access

        Returns:
            Spectrum (pymzml.spec.Spectrum): pymzML spectrum


        """
        chunk_size = 12800
        offset_scale = 1
        jump_history = {'forwards': 0, 'backwards': 0}
        # This will be used if no spec was found at all during a jump
        # self._average_bytes_per_spec *= 10
        # print(f"self.seek_list : {self.seek_list}")
        with open(self.path, "rb") as seeker:
            if target_index not in self.offset_dict.keys():
                # Up to 40 interpolation jumps before giving up.
                for jump in range(40):
                    scan = None
                    insert_position = bisect.bisect_left(
                        self.seek_list, (target_index, 0))
                    if (target_index < self.seek_list[0][0]
                            or target_index > self.seek_list[-1][0]):
                        raise Exception("Spectrum ID should be between"
                                        " {0} and {1}".format(
                                            self.seek_list[0][0],
                                            self.seek_list[-1][0]))

                    # Bracketing entries around the target in the seek list.
                    element_before = self.seek_list[insert_position - 1]
                    spec_offset_m1 = target_index - element_before[0]

                    element_after = self.seek_list[insert_position]
                    spec_offset_p1 = element_after[0] - target_index

                    byte_diff_m1_p1 = element_after[1] - element_before[1]
                    scan_diff_m1_p1 = element_after[0] - element_before[0]

                    # Average bytes per spectrum between the two brackets.
                    average_spec_between_m1_p1 = int(
                        round(byte_diff_m1_p1 / scan_diff_m1_p1))
                    # print("\n------------")
                    # print(f"jump {jump}")
                    # print(f"insert_pos {insert_position}")
                    # print(f"average_spec_between_m1_p1 {average_spec_between_m1_p1}")
                    # print(f"diff target to m1 / spec_offset_m1 {spec_offset_m1}")
                    # print(f"diff target to p1 / spec_offset_p1 {spec_offset_p1}")

                    # which side are we closer to ...
                    if spec_offset_m1 < spec_offset_p1:
                        # print("Closer to m1 - jumping forward")
                        jump_direction = 'forwards'
                        jump_history['backwards'] = 0
                        jump_history['forwards'] += 1
                        byte_offset = element_before[
                            1] + jump_history['forwards'] * (
                                offset_scale * average_spec_between_m1_p1 *
                                spec_offset_m1)
                        if (target_index - element_before[0]) < 10:
                            # quite close to target, stat at element before
                            # and read chunks until found
                            byte_offset = element_before[1]
                    else:
                        # print("Closer to p1 - jumping backwards")
                        jump_direction = 'backwards'
                        jump_history['forwards'] = 0
                        jump_history['backwards'] += 1
                        byte_offset = element_after[
                            1] - jump_history['backwards'] * (
                                offset_scale * average_spec_between_m1_p1 *
                                spec_offset_p1)
                    byte_offset = int(byte_offset)
                    # print(f"jump_history {jump_history}")
                    # print(f"bytes offset {byte_offset}")
                    # print(f"offset_scale {offset_scale}")
                    # print(f"seek_list: {min(self.seek_list)} - {max(self.seek_list)} .. n: {len(self.seek_list)}")
                    # print(f"seek_list[:-10]: {self.seek_list[:10]}")
                    found_scan = False
                    chunk = b""
                    break_outer = False

                    # Read up to 100 contiguous chunks starting at the jump
                    # target and scan them for spectrum open tags.
                    for x in range(100):
                        seeker.seek(
                            max([
                                os.SEEK_SET + byte_offset + x * chunk_size, 1
                            ]))
                        chunk += seeker.read(chunk_size)
                    # print(f'read {len(chunk)}')
                    matches = re.finditer(regex_patterns.SPECTRUM_OPEN_PATTERN,
                                          chunk)
                    for _match_number, match in enumerate(matches):
                        if match is not None:
                            scan = int(
                                re.search(b"[0-9]*$",
                                          match.group("id")).group())
                            # print(">>", _match_number, scan)
                            # Overshooting in the jump direction shrinks the
                            # next jump by 10x and resets the jump counter.
                            if jump_direction == 'forwards':
                                if scan > target_index:
                                    # we went to far ...
                                    offset_scale = 0.1
                                    jump_history['forwards'] = 0
                                else:
                                    offset_scale = 1
                            if jump_direction == 'backwards':
                                if scan < target_index:
                                    offset_scale = 0.1
                                    jump_history['backwards'] = 0
                                else:
                                    offset_scale = 1

                            if scan in self.offset_dict.keys():
                                # print("Have seen this scan {scan} already")
                                continue
                            found_scan = True
                            # Record the newly discovered scan in both the
                            # seek list (kept sorted) and the offset dict.
                            new_entry = (
                                scan,
                                byte_offset + match.start(),
                            )
                            new_pos = bisect.bisect_left(
                                self.seek_list, new_entry)
                            self.seek_list.insert(new_pos, new_entry)
                            self.offset_dict[scan] = (byte_offset +
                                                      match.start(), )
                            if int(scan) == int(target_index):
                                # maybe jump from the other border
                                break_outer = True
                                break
                    if break_outer:
                        break
                        # NOTE(review): the lines below are unreachable —
                        # they follow the break above, so the found_scan
                        # based offset_scale adjustment never runs.
                        if found_scan:
                            offset_scale = 1
                        else:
                            offset_scale += 1
                    if int(target_index) in self.offset_dict.keys():
                        break

            # Target offset is now known: read from it until the closing
            # </spectrum> tag and parse that byte span.
            start = self.offset_dict[target_index]
            # print(f"reading spec at pos {start}")
            seeker.seek(start[0])
            match = None
            data = b""
            while b"</spectrum>" not in data:
                # print("reading to end")
                data += seeker.read(chunk_size)
            end = data.find(b"</spectrum>")
            seeker.seek(start[0])
            spec_string = seeker.read(end + len("</spectrum>"))
            spec_string = spec_string.decode("utf-8")
            spectrum = spec.Spectrum(XML(spec_string), measured_precision=5e-6)
            return spectrum
        def parse_result(status, data, result):
            (callback, user_data) = data
            if status != 0:
                callback(status, user_data, result)

            try:
                # filter out invalid UTF-8 to avoid breaking the XML parser
                result = result.decode('UTF-8',
                                       errors='replace').encode('UTF-8')
                root = XML(result)
                drivers = {}
                # We store the drivers as a dict of:
                # foomatic_id:
                #   { 'name': name,
                #     'url': url,
                #     'supplier': supplier,
                #     'license': short license string e.g. GPLv2,
                #     'licensetext': license text (Plain text),
                #     'nonfreesoftware': Boolean,
                #     'thirdpartysupplied': Boolean,
                #     'manufacturersupplied': Boolean,
                #     'patents': Boolean,
                #     'supportcontacts' (optional):
                #       list of { 'name',
                #                 'url',
                #                 'level',
                #               }
                #     'shortdescription': short description,
                #     'recommended': Boolean,
                #     'functionality':
                #       { 'text': integer percentage,
                #         'lineart': integer percentage,
                #         'graphics': integer percentage,
                #         'photo': integer percentage,
                #         'speed': integer percentage,
                #       }
                #     'packages' (optional):
                #       { arch:
                #         { file:
                #           { 'url': url,
                #             'fingerprint': signature key fingerprint URL
                #             'realversion': upstream version string,
                #             'version': packaged version string,
                #             'release': package release string
                #           }
                #         }
                #       }
                #     'ppds' (optional):
                #       URL string list
                #   }
                # There is more information in the raw XML, but this
                # can be added to the Python structure as needed.

                for driver in root.findall('driver'):
                    id = driver.attrib.get('id')
                    if id == None:
                        continue

                    dict = {}
                    for attribute in [
                            'name', 'url', 'supplier', 'license',
                            'shortdescription'
                    ]:
                        element = driver.find(attribute)
                        if element != None and element.text != None:
                            dict[attribute] = _normalize_space(element.text)

                    element = driver.find('licensetext')
                    if element != None and element.text != None:
                        dict['licensetext'] = element.text
                    if not 'licensetext' in dict or \
                       dict['licensetext'] == None:
                        element = driver.find('licenselink')
                        if element != None:
                            license_url = element.text
                            if license_url != None:
                                try:
                                    req = requests.get(license_url,
                                                       verify=True)
                                    dict['licensetext'] = \
                                        req.content.decode("utf-8")
                                except:
                                    _debugprint('Cannot retrieve %s' % url)

                    for boolean in [
                            'nonfreesoftware', 'recommended', 'patents',
                            'thirdpartysupplied', 'manufacturersupplied'
                    ]:
                        dict[boolean] = driver.find(boolean) != None

                    # Make a 'freesoftware' tag for compatibility with
                    # how the OpenPrinting API used to work (see trac
                    # #74).
                    dict['freesoftware'] = not dict['nonfreesoftware']

                    supportcontacts = []
                    container = driver.find('supportcontacts')
                    if container != None:
                        for sc in container.findall('supportcontact'):
                            supportcontact = {}
                            if sc.text != None:
                                supportcontact['name'] = \
                                    _normalize_space (sc.text)
                            else:
                                supportcontact['name'] = ""
                            supportcontact['url'] = sc.attrib.get('url')
                            supportcontact['level'] = sc.attrib.get('level')
                            supportcontacts.append(supportcontact)

                    if supportcontacts:
                        dict['supportcontacts'] = supportcontacts

                    if 'name' not in dict or 'url' not in dict:
                        continue

                    container = driver.find('functionality')
                    if container != None:
                        functionality = {}
                        for attribute in [
                                'text', 'lineart', 'graphics', 'photo', 'speed'
                        ]:
                            element = container.find(attribute)
                            if element != None:
                                functionality[attribute] = element.text
                        if functionality:
                            dict[container.tag] = functionality

                    packages = {}
                    container = driver.find('packages')
                    if container != None:
                        for arch in container.getchildren():
                            rpms = {}
                            for package in arch.findall('package'):
                                rpm = {}
                                for attribute in [
                                        'realversion', 'version', 'release',
                                        'url', 'pkgsys', 'fingerprint'
                                ]:
                                    element = package.find(attribute)
                                    if element != None:
                                        rpm[attribute] = element.text

                                repositories = package.find('repositories')
                                if repositories != None:
                                    for pkgsys in repositories.getchildren():
                                        rpm.setdefault(
                                            'repositories',
                                            {})[pkgsys.tag] = pkgsys.text

                                rpms[package.attrib['file']] = rpm
                            packages[arch.tag] = rpms

                    if packages:
                        dict['packages'] = packages

                    ppds = []
                    container = driver.find('ppds')
                    if container != None:
                        for each in container.getchildren():
                            ppds.append(each.text)

                    if ppds:
                        dict['ppds'] = ppds

                    drivers[id] = dict
                    _debugprint(
                        "listDrivers/parse_result: OpenPrinting entries: %s" %
                        repr(drivers))
                callback(0, user_data, drivers)
            except:
                callback(1, user_data, sys.exc_info())
Exemple #40
0
class _LxmlFrameParser(_XMLFrameParser):
    """
    Internal class to parse XML into DataFrames with third-party
    full-featured XML library, `lxml`, that supports
    XPath 1.0 and XSLT 1.0.
    """
    def __init__(self, *args, **kwargs) -> None:
        # All configuration (xpath, names, namespaces, stylesheet,
        # elems_only/attrs_only flags, ...) is stored by the base class.
        super().__init__(*args, **kwargs)

    def parse_data(self) -> list[dict[str, str | None]]:
        """
        Parse xml data.

        This method will call the other internal methods to
        validate xpath, names, optionally parse and run XSLT,
        and parse original or transformed XML and return specific nodes.
        """
        from lxml.etree import XML

        self.xml_doc = XML(self._parse_doc(self.path_or_buffer))

        if self.stylesheet is not None:
            # Apply the XSLT transform and re-parse its output so the
            # XPath queries below run against the transformed document.
            self.xsl_doc = XML(self._parse_doc(self.stylesheet))
            self.xml_doc = XML(self._transform_doc())

        self._validate_path()
        self._validate_names()

        return self._parse_nodes()

    def _parse_nodes(self) -> list[dict[str, str | None]]:
        """
        Build one flat dict per row-level node selected by ``self.xpath``.

        Depending on ``elems_only`` / ``attrs_only`` a row dict is built
        from the node's child elements, its attributes, or both; when
        ``self.names`` is given, columns are renamed positionally.
        """
        elems = self.xml_doc.xpath(self.xpath, namespaces=self.namespaces)
        dicts: list[dict[str, str | None]]

        if self.elems_only and self.attrs_only:
            raise ValueError(
                "Either element or attributes can be parsed not both.")

        elif self.elems_only:
            if self.names:
                dicts = [{
                    **({
                        el.tag: el.text.strip()
                    } if el.text and not el.text.isspace() else {}),
                    **{
                        nm: ch.text.strip() if ch.text else None
                        for nm, ch in zip(self.names, el.xpath("*"))
                    },
                } for el in elems]
            else:
                dicts = [{
                    ch.tag: ch.text.strip() if ch.text else None
                    for ch in el.xpath("*")
                } for el in elems]

        elif self.attrs_only:
            dicts = [el.attrib for el in elems]

        else:
            # Default: merge the node's attributes, its own text (if any)
            # and all child-element texts into a single record.
            if self.names:
                dicts = [{
                    **el.attrib,
                    **({
                        el.tag: el.text.strip()
                    } if el.text and not el.text.isspace() else {}),
                    **{
                        nm: ch.text.strip() if ch.text else None
                        for nm, ch in zip(self.names, el.xpath("*"))
                    },
                } for el in elems]
            else:
                dicts = [{
                    **el.attrib,
                    **({
                        el.tag: el.text.strip()
                    } if el.text and not el.text.isspace() else {}),
                    **{
                        ch.tag: ch.text.strip() if ch.text else None
                        for ch in el.xpath("*")
                    },
                } for el in elems]

        # Strip "{uri}" namespace prefixes so column names are the bare
        # tag/attribute names.  (dicts is non-empty here: _validate_path
        # already raised if the xpath matched nothing.)
        if self.namespaces or "}" in list(dicts[0].keys())[0]:
            dicts = [{
                k.split("}")[1] if "}" in k else k: v
                for k, v in d.items()
            } for d in dicts]

        # Align every row on the union of keys (first-seen order),
        # padding missing values with None.
        keys = list(dict.fromkeys([k for d in dicts for k in d.keys()]))
        dicts = [{k: d[k] if k in d.keys() else None
                  for k in keys} for d in dicts]

        # Positionally rename columns to the user-supplied names.
        if self.names:
            dicts = [{nm: v
                      for nm, (k, v) in zip(self.names, d.items())}
                     for d in dicts]

        return dicts

    def _validate_path(self) -> None:
        """
        Validate that ``self.xpath`` selects at least one node carrying
        children or attributes; raise ``ValueError`` otherwise.
        """
        msg = ("xpath does not return any nodes. "
               "Be sure row level nodes are in xpath. "
               "If document uses namespaces denoted with "
               "xmlns, be sure to define namespaces and "
               "use them in xpath.")

        elems = self.xml_doc.xpath(self.xpath, namespaces=self.namespaces)
        children = self.xml_doc.xpath(self.xpath + "/*",
                                      namespaces=self.namespaces)
        attrs = self.xml_doc.xpath(self.xpath + "/@*",
                                   namespaces=self.namespaces)

        if elems == []:
            raise ValueError(msg)

        # Nodes matched, but none has children or attributes: nothing
        # parseable at row level.
        if elems != [] and attrs == [] and children == []:
            raise ValueError(msg)

    def _validate_names(self) -> None:
        """
        Validate names.

        This method will check if names is a list and aligns with
        length of parse nodes.

        Raises
        ------
        ValueError
            * If value is not a list and less than length of nodes.
        """
        if self.names:
            # Inspect only the first matched row; all rows are assumed to
            # share its child layout.
            children = self.xml_doc.xpath(self.xpath + "[1]/*",
                                          namespaces=self.namespaces)

            if is_list_like(self.names):
                if len(self.names) < len(children):
                    raise ValueError(
                        "names does not match length of child elements in xpath."
                    )
            else:
                raise TypeError(
                    f"{type(self.names).__name__} is not a valid type for names"
                )

    def _parse_doc(self, raw_doc) -> bytes:
        """
        Read *raw_doc* (path, URL or buffer), decompress/decode as
        configured, parse it with lxml and return the re-serialized bytes.
        """
        from lxml.etree import (
            XMLParser,
            fromstring,
            parse,
            tostring,
        )

        handle_data = get_data_from_filepath(
            filepath_or_buffer=raw_doc,
            encoding=self.encoding,
            compression=self.compression,
            storage_options=self.storage_options,
        )

        with preprocess_data(handle_data) as xml_data:
            curr_parser = XMLParser(encoding=self.encoding)

            if isinstance(xml_data, io.StringIO):
                # lxml refuses str input with an encoding declaration, so
                # feed it the encoded bytes instead.
                doc = fromstring(xml_data.getvalue().encode(self.encoding),
                                 parser=curr_parser)
            else:
                doc = parse(xml_data, parser=curr_parser)

        return tostring(doc)

    def _transform_doc(self) -> bytes:
        """
        Transform original tree using stylesheet.

        This method will transform original xml using XSLT script into
        an ideally flatter xml document for easier parsing and migration
        to Data Frame.
        """
        from lxml.etree import XSLT

        transformer = XSLT(self.xsl_doc)
        new_doc = transformer(self.xml_doc)

        return bytes(new_doc)
Exemple #41
0
    def updatequotewithparameters(self, quote_id, ExternalReference,
                                  Grossvalue, netvalue, postingDate, RefDate,
                                  SoldToParty, SoldToPartyAdd, Status, TaxAmt,
                                  ValidFrm, ValidTo):
        """
        Receive a quote from SAP and forward it to Salesforce.

        Logs into the Salesforce partner SOAP endpoint to obtain a session
        id, then hands the quote fields to ``quote_id_to_sf``.

        Returns "OK" on success, or the raw SOAP response body when the
        login request did not return HTTP 200.

        NOTE(review): the credential values below appear scrubbed
        ("******") by whatever published this source — they must be
        restored (ideally from configuration, not hard-coded) before this
        code can work.
        """
        logging.info("SAP is sending quote with more parameters")
        logging.info(locals())
        logging.info("CONNECTING TO SALESFORCE PARTNER WSDL FOR SESSION ID")
        url = "https://login.salesforce.com/services/Soap/u/28.0"

        # SOAP login envelope for the partner API; username/password are
        # injected via the template context below.
        data = """<?xml version="1.0" encoding="UTF-8"?>
                <soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:urn="urn:partner.soap.sforce.com">
                   <soapenv:Header>
                        <urn:CallOptions>
                            <urn:client></urn:client>
                            <urn:defaultNamespace></urn:defaultNamespace>
                        </urn:CallOptions>
                        <urn:LoginScopeHeader>
                            <urn:organizationId></urn:organizationId>
                            <urn:portalId></urn:portalId>
                        </urn:LoginScopeHeader>
                   </soapenv:Header>
                   <soapenv:Body>
                      <urn:login>
                          <urn:username>{{username}}</urn:username>
                          <urn:password>{{password}}</urn:password>
                      </urn:login>
                   </soapenv:Body>
                </soapenv:Envelope>"""
        t = Template(data)
        c = Context({
            #            "username": "******",
            #            "password": "******"
            #            "username": "******",
            #            "password": "******"
            "username": "******",
            "password": "******"
        })
        data = t.render(c)

        logging.info("SENDING:")
        logging.info(data)

        headers = {
            'Content-Type': 'text/xml; charset=utf-8',
            'SOAPAction': 'https://login.salesforce.com/services/Soap/u/28.0'
        }
        #        httplib2.debuglevel = 1

        head = httplib2.Http()
        #    head.follow_all_redirects = True
        response, content = head.request(url, "POST", smart_str(data), headers)
        logging.info("########### SESSION ID response ###############%s" %
                     response)
        logging.info("########## SESSION ID content ############## \n %s" %
                     pretty(content))
        if response.get('status') == '200':
            logging.info("GOT THE SESSION ID FROM SALESFORCE")
            xml = XML(content)
            session_response = xml.find(
                "{http://schemas.xmlsoap.org/soap/envelope/}Body").getchildren(
                )[0]
            # NOTE(review): position [0][4] presumably is the <sessionId>
            # element of the loginResponse — confirm against the partner
            # WSDL; a namespaced find would be far less brittle.  Also,
            # Element.getchildren() above was removed in Python 3.9.
            session_id = session_response[0][4].text
            quote_id_to_sf(session_id, quote_id, ExternalReference, Grossvalue,
                           netvalue, postingDate, RefDate, SoldToParty,
                           SoldToPartyAdd, Status, TaxAmt, ValidFrm, ValidTo)
        else:
            return content

        return "OK"
Exemple #42
0
def quote_id_to_sf(session_id, quote_id, ExternalReference, Grossvalue,
                   netvalue, postingDate, RefDate, SoldToParty, SoldToPartyAdd,
                   Status, TaxAmt, ValidFrm, ValidTo):
    """
    Push a SAP quote to the Salesforce QuoteClass SOAP endpoint.

    :param session_id: Salesforce session id from the partner login call.
    :param quote_id: SAP quote identifier to insert.
    :param ExternalReference, Grossvalue, netvalue, SoldToParty, Status,
        TaxAmt, ValidFrm, ValidTo: quote fields rendered into the SOAP
        body; the monetary fields fall back to 0.00 when falsy.
    :param postingDate, RefDate, SoldToPartyAdd: accepted and placed in the
        template context, but the SOAP template has no placeholders for
        them, so they are currently not transmitted.
    :returns: ``None``.  The HTTP response is only logged; a non-200
        status is silently ignored.
    """
    logging.info(
        "############## CONNECTING TO SALESFORCE QUOTE WSDL ##############")
    url = "https://na15.salesforce.com/services/Soap/class/QuoteClass"

    data = """<?xml version="1.0" encoding="UTF-8"?>
    <soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:quot="http://soap.sforce.com/schemas/class/QuoteClass">
       <soapenv:Header>
          <quot:SessionHeader>
             <quot:sessionId>{{session_id}}</quot:sessionId>
          </quot:SessionHeader>
       </soapenv:Header>
       <soapenv:Body>
          <quot:insertQuote>
             <quot:quoteId>{{quote_id}}</quot:quoteId>
             <quot:ExternalReference>{{external}}</quot:ExternalReference>
             <quot:Grossvalue>{{gross}}</quot:Grossvalue>
             <quot:netvalue>{{netvalue}}</quot:netvalue>
             <quot:SoldToParty>{{SoldToParty}}</quot:SoldToParty>
             <quot:Status>{{Status}}</quot:Status>
             <quot:TaxAmt>{{TaxAmt}}</quot:TaxAmt>
             <quot:ValidFrm>{{ValidFrm}}</quot:ValidFrm>
             <quot:ValidTo>{{ValidTo}}</quot:ValidTo>
          </quot:insertQuote>
       </soapenv:Body>
    </soapenv:Envelope>"""
    t = Template(data)
    c = Context({
        "session_id": session_id,
        "quote_id": quote_id,
        "external": ExternalReference,
        "gross": Grossvalue if Grossvalue else 0.00,
        "netvalue": netvalue if netvalue else 0.00,
        "postingDate": postingDate,
        "RefDate": RefDate,
        "SoldToParty": SoldToParty,
        "SoldToPartyAdd": SoldToPartyAdd,
        "Status": Status,
        "TaxAmt": TaxAmt if TaxAmt else 0.00,
        "ValidFrm": ValidFrm,
        "ValidTo": ValidTo
    })
    data = t.render(c)

    logging.info("SENDING:")
    logging.info(data)

    headers = {
        'Content-Type': 'text/xml; charset=utf-8',
        'SOAPAction':
        'https://na15.salesforce.com/services/Soap/class/QuoteClass'
    }
    #        httplib2.debuglevel = 1

    head = httplib2.Http()
    #    head.follow_all_redirects = True
    response, content = head.request(url, "POST", smart_str(data), headers)
    logging.info("######################### QUOTE response ############## %s" %
                 response)
    logging.info(
        "###################### QUOTE content ################# \n%s" %
        pretty(content))
    if response.get('status') == "200":
        # Parse the envelope so a malformed 200 response still fails
        # loudly here.  ``Element.getchildren()`` (used previously) was
        # removed in Python 3.9 -- index the Body element directly.  The
        # first child itself was never used, so it is not bound.
        xml = XML(content)
        xml.find("{http://schemas.xmlsoap.org/soap/envelope/}Body")[0]
        return
def main():
    """
    Main function to handle scripted usage of the module

    Flow: parse/validate CLI args, detect proxy settings, authenticate to
    the SAML IdP, let the user pick an AWS role, exchange the SAML
    assertion for temporary STS credentials, and write them into the AWS
    credentials file under the chosen profile.

    NOTE(review): this source was mangled by credential scrubbing (see the
    ``input('Username: '******...`` line below) — the IdP request code that
    defined ``response_text`` has been destroyed and must be restored
    before this function can run.
    """
    # Parse command-line arguments
    args = parse_arguments(sys.argv[1:])

    # Validate that our args are safe to proceed
    validate_arguments(args)

    # Expand home directory alias in the credential file path
    args.aws_credential_file = os.path.expanduser(args.aws_credential_file)

    # Enable debug logging if set by user
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)

    # Set SSL verification; User prompt is a negative assertion so we apply the inverse
    # session.verify = not args.no_verify_ssl

    # Set proxies for request to AWS STS service
    # First Get local proxy settings via env variables
    proxies = urllib.request.getproxies()

    # Then, Determine if in GE LAN &
    # Check if https_proxy was set as a local environment variable
    logging.debug('Proxy settings found in environment: {}'.format(proxies))
    try:
        # If in the GE LAN and the https proxy is not set
        # (223.255.255.255 presumably is the LAN's DNS block-page address
        # for external hosts — confirm with network team)
        if (socket.gethostbyname('sts.amazonaws.com')
                == '223.255.255.255') and ('https' not in proxies):
            logging.warning(
                "GE LAN detected and proxy missing, adding default proxy information"
            )

            # Set https_proxy
            print('Setting https_proxy to http://am.proxy.ge.com:80/.')
            proxies = {'https': 'http://am.proxy.ge.com:80/', 'no': '.ge.com'}
    except OSError as e:
        print(e)

        # errno 8 (EAI_NONAME-style resolution failure): name lookup
        # itself failed, so no network path exists.
        if e.errno == 8:
            print("The network is not routable. Please fix and try again.")

        raise SystemExit(1)

    # Create a handler & opener with the proxies set above
    proxy_handler = urllib.request.ProxyHandler(proxies)
    opener = urllib.request.build_opener(proxy_handler)

    # Prompt for missing SAML credentials
    # NOTE(review): the next line is corrupted source — several statements
    # (password prompt, IdP POST, and the read/decode that produced
    # ``response_text``) were fused together when secrets were scrubbed.
    while not args.username:
        args.username = input('Username: '******'Password: '******'utf8')
    logging.debug('Decoded IDP response: {}'.format(response_text))

    # Extract the SAML assertion
    try:
        saml_xml = XML(response_text)
        assertion = saml_xml.find(".//*[@name='SAMLResponse']").attrib['value']
        logging.debug('Decoded assertion:\n{}'.format(
            base64.b64decode(assertion)))
    except:
        # TODO: Improve error checking/handling
        # NOTE(review): bare except also hides KeyboardInterrupt/SystemExit;
        # narrow to (ParseError, AttributeError, KeyError).
        print('Response did not contain a valid SAML assertion')
        raise SystemExit(1)

    # Parse the returned assertion and extract the authorized roles
    aws_roles = []
    assertion_xml = XML(base64.b64decode(assertion))

    # Each AttributeValue under the Role attribute is "role_arn,principal_arn".
    for saml2attributevalue in assertion_xml.findall(
            ".//*[@Name='https://aws.amazon.com/SAML/Attributes/Role']/"):
        logging.debug(saml2attributevalue.text)
        aws_role = {}
        aws_role['role_arn'], aws_role[
            'principal_arn'] = saml2attributevalue.text.split(',')
        # 'name' is the ARN portion after "arn:aws:iam::", i.e. "acct:role/...".
        aws_role['name'] = aws_role['role_arn'].split('::')[1]
        aws_roles.append(aws_role)
    logging.debug(aws_roles)

    # If we're in interactive mode, have the user select the role from a list of available roles
    while not args.role:
        print('\nPlease choose the AWS account role you would like to assume:')
        for index, aws_role in enumerate(aws_roles, 1):
            print('[{}]: {}'.format(index, aws_role['name']))
        selection = input('Selection: ')

        if selection.isdigit():
            # NOTE(review): ``1 > x > len(aws_roles)`` can never be true —
            # the bounds check is broken; out-of-range numbers raise
            # IndexError below instead of re-prompting.  Should be
            # ``not 1 <= int(selection) <= len(aws_roles)``.
            if 1 > int(selection) > len(aws_roles):
                print('\nInvalid Selection\n')
                continue
            else:
                args.role = aws_roles[int(selection) - 1]['name']
        else:
            print('\nSelection must be an integer')

    # Find the role specified by the user
    found_roles = [r for r in aws_roles if r['name'] == args.role]
    if len(found_roles) != 1:
        print(
            'Role "{}" not found. Run program in interactive mode for a list of available roles.'
            .format(args.role))
        raise SystemExit(1)
    aws_role = found_roles[0]

    # Use the SAML assertion to get an AWS token from STS
    sts_request = {
        'Action': 'AssumeRoleWithSAML',
        'Version': '2011-06-15',
        'RoleArn': aws_role['role_arn'],
        'PrincipalArn': aws_role['principal_arn'],
        'SAMLAssertion': assertion,
    }
    sts_request_data = urllib.parse.urlencode(sts_request)
    logging.debug(
        'STS Authentication request body: {}'.format(sts_request_data))
    sts_request_data = sts_request_data.encode('ascii')

    # Submit the authentication request
    try:
        with opener.open('https://sts.amazonaws.com/',
                         sts_request_data) as response:
            sts_response = response.read().decode('utf8')
    except HTTPError as e:
        print(e)
        if e.code == 403:
            print(
                'Try using the --mfa flag and provide your RSA token when prompted for a password.'
            )
        else:
            print('Failed to assume role with SAML assertion')
        raise SystemExit(1)
    logging.debug('AWS STS Response: {}'.format(sts_response))

    # Parse the STS response
    sts_response = XML(sts_response)

    # Read in the existing config file
    config = configparser.RawConfigParser()
    config.read(args.aws_credential_file)

    # Create the requested profile if it doesn't exist
    if not config.has_section(args.profile):
        config.add_section(args.profile)

    # Update the profile with the temporary credentials from STS
    config.set(args.profile, 'output', args.cli_output_format)
    config.set(args.profile, 'region', args.region)
    config.set(
        args.profile, 'aws_access_key_id',
        sts_response.findtext(
            './/{https://sts.amazonaws.com/doc/2011-06-15/}AccessKeyId'))
    config.set(
        args.profile, 'aws_secret_access_key',
        sts_response.findtext(
            './/{https://sts.amazonaws.com/doc/2011-06-15/}SecretAccessKey'))
    config.set(
        args.profile, 'aws_session_token',
        sts_response.findtext(
            './/{https://sts.amazonaws.com/doc/2011-06-15/}SessionToken'))

    # Write the updated config file, creating its directory on first run
    try:
        configfile = open(args.aws_credential_file, 'w+')
    except FileNotFoundError:
        os.makedirs(os.path.dirname(args.aws_credential_file))
        configfile = open(args.aws_credential_file, 'w+')
    config.write(configfile)
    configfile.close()
Exemple #44
0
def readfeedurl(feedurl, date=None):
    """
    Read an RSS feed URL and return the concatenated titles of the items
    published on a given date.

    :param feedurl: URL of the RSS feed to fetch.
    :param date: ``datetime.date`` to filter items by; defaults to today.
    :returns: newline-terminated item titles joined into one string, or
        ``''`` when the feed cannot be fetched or parsed.
    """
    date = date or datetime.date.today()
    # Get raw feed string from feed url
    try:
        r = requests.get(feedurl)
    except Exception:
        logger.error('Error reading feed url: %s' % feedurl)
        return ''

    # Guess the payload encoding; anything that is not UTF-8 is decoded
    # as latin-1, which never raises on arbitrary bytes.
    encoding = chardet.detect(r.content)['encoding']
    if encoding != 'utf-8':
        r.encoding = 'latin-1'
    else:
        r.encoding = 'utf-8'

    # Parse raw feed string to xml
    try:
        tree = XML(r.text.strip())
    except ParseError:
        logger.error('Error reading feed: %s' % feedurl)
        return ''

    feedtext = ''

    # Read rss items
    for node in tree.iter('item'):
        # Items without a pubDate cannot be filtered by date: skip them
        # instead of crashing on .text of a missing element.
        pubdate_node = node.find('pubDate')
        if pubdate_node is None or not pubdate_node.text:
            continue

        # Translate localized day/month names so dateutil can parse them.
        node_date_pieces = pubdate_node.text.split(" ")
        node_date_pieces = [
            DAYS_MAP.get(piece, piece) for piece in node_date_pieces
        ]
        node_date_pieces = [
            MONTHS_MAP.get(piece, piece) for piece in node_date_pieces
        ]
        node_date = " ".join(node_date_pieces)

        try:
            parsed_date = parse(node_date)
        except Exception:
            # Was a bare ``except:``; narrowed so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            print(node_date)
            continue

        if str(parsed_date.date()) != str(date):
            continue

        # Get title text from the item node; skip malformed items rather
        # than crash on a missing/empty <title>.
        title_node = node.find('title')
        if title_node is None or title_node.text is None:
            continue
        feedtext += title_node.text.strip() + '\n'

    return feedtext
Exemple #45
0
 def query(self, path):
     """Execute *path* and return the response parsed as an XML element."""
     raw_response = self.execute(path)
     return XML(raw_response.encode("utf-8"))
Exemple #46
0
 def testBasic(self):
     """A freshly created config file must match the initial XML."""
     path = create_path()
     cfg = ConfigXml(filename=str(path))
     cfg.close()
     self.assertXml(self.initial(), XML(path.text()))
Exemple #47
0
    raise SystemExit('Usage: nextbus.py route stopid')

# Positional CLI arguments: bus route number and stop id
# (argument count was checked just above).
route = sys.argv[1]
stopid = sys.argv[2]

# Fetch the CTA bus tracker stop-prediction page for this route/stop.
u = urllib.request.urlopen(
    'http://ctabustracker.com/bustime/map/getStopPredictions.jsp?route={}&stop={}'
    .format(route, stopid))
data = u.read()
'''
b'<?xml version="1.0" encoding="UTF-8"?>
...
<pt>3 MIN</pt>
<pt>11 MIN</pt>
<pt>21 MIN</pt>
'''
# print(data)

# Parse the raw XML bytes into an Element tree.
doc = XML(data)

# import pdb; pdb.set_trace()    # Launch debugger

# Each <pt> element holds one arrival prediction (e.g. "3 MIN").
for pt in doc.findall('.//pt'):
    print(pt.text)
'''
Output:
3 MIN
16 MIN
24 MIN
'''
Exemple #48
0
 def testWithEnv(self):
     """A config created with env_config="FOO" must match the initial XML."""
     path = create_path()
     cfg = ConfigXml(filename=str(path), env_config="FOO")
     cfg.close()
     self.assertXml(self.initial("FOO"), XML(path.text()))
 def test_thresh_param(self):
     """Build an IntegerToolParameter for a 'thresh' param with value 5."""
     param_elem = XML('<param name="thresh" type="integer" value="5" />')
     return IntegerToolParameter(self, param_elem)
Exemple #50
0
 def make_tuples(xml):
     """Pair each (deep_change, element) from the iterator with its tag name."""
     pairs = []
     for deep_change, element in _element_iterator(XML(xml)):
         pairs.append((deep_change, element.tag))
     return pairs
Exemple #51
0
def read_error_message(response):
    """Read the full response body and pull out its first error message.

    Returns a (raw_body, message_text_or_None) tuple.
    """
    payload = response.body.read()
    message = XML(payload).findtext("./messages/msg")
    return payload, message
Exemple #52
0
    def get_nightly_binary_path(self, nightly_date):
        """Download, extract and locate a nightly build for *nightly_date*.

        Returns None when no date is given, otherwise the path of the
        executable inside the extracted nightly folder. Exits the process
        (sys.exit(1)) when the date is empty or the repository lookup fails.
        NOTE(review): uses urllib2, so this code path is Python 2 only.
        """
        if nightly_date is None:
            return
        if not nightly_date:
            print(
                "No nightly date has been provided although the --nightly or -n flag has been passed.")
            sys.exit(1)
        # Will allow us to fetch the relevant builds from the nightly repository
        os_prefix = "linux"
        if is_windows():
            os_prefix = "windows-msvc"
        if is_macosx():
            os_prefix = "mac"
        nightly_date = nightly_date.strip()
        # Fetch the filename to download from the build list
        repository_index = NIGHTLY_REPOSITORY_URL + "?list-type=2&prefix=nightly"
        req = urllib2.Request(
            "{}/{}/{}".format(repository_index, os_prefix, nightly_date))
        try:
            response = urllib2.urlopen(req).read()
            tree = XML(response)
            # The S3-style listing is namespaced; extract the namespace URI
            # from the root tag ("{uri}tag") to qualify the find() calls.
            namespaces = {'ns': tree.tag[1:tree.tag.index('}')]}
            file_to_download = tree.find('ns:Contents', namespaces).find(
                'ns:Key', namespaces).text
        except urllib2.URLError as e:
            print("Could not fetch the available nightly versions from the repository : {}".format(
                e.reason))
            sys.exit(1)
        except AttributeError as e:
            # find() returned None: no build listed for that date/platform
            print("Could not fetch a nightly version for date {} and platform {}".format(
                nightly_date, os_prefix))
            sys.exit(1)

        nightly_target_directory = path.join(self.context.topdir, "target")
        # ':' is not an authorized character for a file name on Windows
        # make sure the OS specific separator is used
        target_file_path = file_to_download.replace(':', '-').split('/')
        destination_file = os.path.join(
            nightly_target_directory, os.path.join(*target_file_path))
        # Once extracted, the nightly folder name is the tar name without the extension
        # (eg /foo/bar/baz.tar.gz extracts to /foo/bar/baz)
        destination_folder = os.path.splitext(destination_file)[0]
        nightlies_folder = path.join(
            nightly_target_directory, 'nightly', os_prefix)

        # Make sure the target directory exists
        if not os.path.isdir(nightlies_folder):
            print("The nightly folder for the target does not exist yet. Creating {}".format(
                nightlies_folder))
            os.makedirs(nightlies_folder)

        # Download the nightly version
        # NOTE(review): destination_file is already absolute, so this join
        # collapses to destination_file itself — confirm that is intended.
        if os.path.isfile(path.join(nightlies_folder, destination_file)):
            print("The nightly file {} has already been downloaded.".format(
                destination_file))
        else:
            print("The nightly {} does not exist yet, downloading it.".format(
                destination_file))
            download_file(destination_file, NIGHTLY_REPOSITORY_URL +
                          file_to_download, destination_file)

        # Extract the downloaded nightly version
        if os.path.isdir(destination_folder):
            print("The nightly folder {} has already been extracted.".format(
                destination_folder))
        else:
            self.extract_nightly(nightlies_folder, destination_folder, destination_file)

        return self.get_executable(destination_folder)
Exemple #53
0
 def unzip_theme(self, file_name, directory):
     """
     Unzip the theme, remove the preview file if stored. Generate a new preview file. Check the XML theme version
     and upgrade if necessary.

     :param file_name: path of the theme zip archive to import
     :param directory: target directory the theme is extracted into
     """
     self.log_debug('Unzipping theme %s' % file_name)
     theme_zip = None
     out_file = None
     file_xml = None
     # assume the import is aborted until the overwrite check passes
     abort_import = True
     try:
         theme_zip = zipfile.ZipFile(file_name)
         # a valid theme archive must contain exactly one XML descriptor
         xml_file = [
             name for name in theme_zip.namelist()
             if os.path.splitext(name)[1].lower() == '.xml'
         ]
         if len(xml_file) != 1:
             self.log_error('Theme contains "%s" XML files' % len(xml_file))
             raise ValidationError
         xml_tree = ElementTree(
             element=XML(theme_zip.read(xml_file[0]))).getroot()
         # only themes with a version attribute >= 2.0 are importable
         theme_version = xml_tree.get('version', default=None)
         if not theme_version or float(theme_version) < 2.0:
             self.log_error('Theme version is less than 2.0')
             raise ValidationError
         theme_name = xml_tree.find('name').text.strip()
         theme_folder = os.path.join(directory, theme_name)
         theme_exists = os.path.exists(theme_folder)
         # ask before clobbering an existing theme of the same name
         if theme_exists and not self.over_write_message_box(theme_name):
             abort_import = True
             return
         else:
             abort_import = False
         for name in theme_zip.namelist():
             out_name = name.replace('/', os.path.sep)
             split_name = out_name.split(os.path.sep)
             if split_name[-1] == '' or len(split_name) == 1:
                 # is directory or preview file
                 continue
             full_name = os.path.join(directory, out_name)
             check_directory_exists(os.path.dirname(full_name))
             # XML members are re-encoded as UTF-8 text; others copied as bytes
             if os.path.splitext(name)[1].lower() == '.xml':
                 file_xml = str(theme_zip.read(name), 'utf-8')
                 out_file = open(full_name, 'w', encoding='utf-8')
                 out_file.write(file_xml)
             else:
                 out_file = open(full_name, 'wb')
                 out_file.write(theme_zip.read(name))
             out_file.close()
     except (IOError, zipfile.BadZipfile):
         self.log_exception('Importing theme from zip failed %s' %
                            file_name)
         raise ValidationError
     except ValidationError:
         critical_error_message_box(
             translate('OpenLP.ThemeManager', 'Validation Error'),
             translate('OpenLP.ThemeManager', 'File is not a valid theme.'))
     finally:
         # Close the files, to be able to continue creating the theme.
         if theme_zip:
             theme_zip.close()
         if out_file:
             out_file.close()
         if not abort_import:
             # As all files are closed, we can create the Theme.
             if file_xml:
                 theme = self._create_theme_from_xml(file_xml, self.path)
                 self.generate_and_save_image(theme_name, theme)
             # Only show the error message, when IOError was not raised (in
             # this case the error message has already been shown).
             elif theme_zip is not None:
                 critical_error_message_box(
                     translate('OpenLP.ThemeManager', 'Validation Error'),
                     translate('OpenLP.ThemeManager',
                               'File is not a valid theme.'))
                 self.log_error('Theme file does not contain XML data %s' %
                                file_name)
Exemple #54
0
    def list_pd(self):
        """Return the local pd device paths backing the dg's symdevs.

        Runs syminq to map symdev names to pd paths, then symdg to list
        the dg members, and returns the pd paths of the members.
        Raises ex.Error when either symcli command fails.
        """
        # syminq xml_e sample: Inquiry/Dev_Info holds <pd_name> and <dev_name>
        inq = {}
        cmd = ["syminq", "-identifier", "device_name", "-output", "xml_e"]
        ret, out, err = self.call(cmd)
        if ret != 0:
            raise ex.Error("Failed to run command %s" % ' '.join(cmd))
        for info in XML(out).findall("Inquiry/Dev_Info"):
            inq.setdefault(info.find("dev_name").text, []).append(
                info.find("pd_name").text)
        # symdg xml_e sample: DG/Device/Dev_Info holds the member <dev_name>
        cmd = [
            'symdg', '-g', self.symdg, 'list', 'ld', '-output', 'xml_e', '-i',
            '15', '-c', '4'
        ]
        ret, out, err = self.call(cmd)
        if ret != 0:
            raise ex.Error("Failed to run command %s" % ' '.join(cmd))
        devs = []
        for info in XML(out).findall("DG/Device/Dev_Info"):
            devs.extend(inq.get(info.find("dev_name").text, []))
        return devs
Exemple #55
0
def generate_urdf(parts_num, save_ind):
    """Generate a random multi-link block URDF and write it to disk.

    Writes a full model ('syn.urdf') plus one per-link variant
    ('syn_p{i}.urdf') where only that link keeps its visual nodes.
    Link heights are random (normalized to sum to 0.3); shapes and joint
    types are drawn from the first entry of their candidate lists
    (randint(0, 0) always yields 0).
    NOTE(review): output path is hard-coded to a user-specific directory.
    """
    robot = urdf_obj(parts_num, [1, 1, 1])
    num   = robot.link_num
    root = Element('robot', name="block")

    links_name = ["base_link"] + [str(i+1) for i in range(num)]
    links_h_raw  = np.random.rand(num)
    links_w      = [2, 1.5]
    # normalize heights so they sum to 0.3, then sort descending in place
    links_h_raw = links_h_raw / sum(links_h_raw) * 0.3
    links_h_raw[::-1].sort()
    links_h = links_h_raw
    # links_h = np.flip(links_h_raw)
    all_kinds_shape = ["box", "cylinder"]
    links_shape = [all_kinds_shape[randint(0, 0)] for i in range(num)]
    joints_name = ["{}_j_{}".format(i, i+1) for i in range(num-1)]
    all_kinds_joints = ["revolute", "fixed", "prismatic", "continuous", "planar"]
    joints_type = [all_kinds_joints[randint(0, 0)] for i in range(num-1)]
    #
    children = [
        Element('link', name=links_name[i])
        for i in range(num)
        ]
    joints = [
        Element('joint', name=joints_name[i], type=joints_type[i])
        for i in range(num-1)
        ]
    # add inertial component
    node_inertial = XML('''<inertial><origin rpy="0 0 0" xyz="0 0 0"/><mass value="1.0"/><inertia ixx="0.9" ixy="0.9" ixz="0.9" iyy="0.9" iyz="0" izz="0.9"/></inertial>''')
    # add color components
    mat_blue      = SubElement(root, 'material', name="blue")
    color_blue    = SubElement(mat_blue, "color", rgba="0 0 0.8 1")
    mat_black     = SubElement(root, 'material', name="black")
    color_blue    = SubElement(mat_black, "color", rgba="0 0 0 1")
    mat_white     = SubElement(root, 'material', name="white")
    color_white   = SubElement(mat_white, "color", rgba="1 1 1 1")
    material_lib  =['color_blue', 'color_white']
    colors_val  = ["1 1 0", "1 0 1", "0 1 1", "1 0 0", "0 1 0", "0 0 1"]
    colors_name = ["yellow", "magenta", "cyan", "red", "green", "blue"]
    for i in range(len(colors_val)):
        mat_any        = SubElement(root, 'material', name=colors_name[i])
        color_any      = SubElement(mat_any, "color", rgba="{} 1".format(colors_val[i]))
        material_lib.append(colors_name[i])
    # randomize which material each link gets
    random.shuffle(material_lib)
    #>>>>>>>>>>>>>>>>> links properties
    for i in range(num):
        if i == 0:
            # base link: a single visual at the origin
            visual   = SubElement(children[i], 'visual')
            origin   = SubElement(visual, 'origin', rpy="0.0 0 0", xyz="0 0 {}".format(0))
            geometry = SubElement(visual, 'geometry')
            if links_shape[i] == "cylinder":
                shape    = SubElement(geometry, 'cylinder', length=str(links_h[i]), radius=str(links_h[i] / 4))
            elif links_shape[i] == "box":
                shape    = SubElement(geometry, 'box', size="{} {} {}".format(links_w[0], links_w[1], links_h[i]))
            material = SubElement(visual, 'material', name=material_lib[i])
        else:
            # other links: one visual for the link body, one for the joint
            visual = [Element('visual') for j in range(2)]
            # visual for link 
            origin   = SubElement(visual[0], 'origin', rpy="0.0 0 0", xyz="0 {} {}".format(links_w[1]/2, 0)) #links_h[i]/2
            geometry = SubElement(visual[0], 'geometry')
            if links_shape[i] == "cylinder":
                shape    = SubElement(geometry, 'cylinder', length=str(links_h[i]), radius=str(links_h[i] / 4))
            elif links_shape[i] == "box":
                shape    = SubElement(geometry, 'box', size="{} {} {}".format(links_w[0], links_w[1], links_h[i]))
            material = SubElement(visual[0], 'material', name=material_lib[i])
            # visual for joint
            origin_joint    = SubElement(visual[1], 'origin', rpy="0.0 1.5707 0", xyz="0 0 0")
            geometry_joint  = SubElement(visual[1], 'geometry')
            shape_joint     = SubElement(geometry_joint, 'cylinder', length=str(links_w[0]), radius="{}".format(links_h[i]/4))
            material_joint  = SubElement(visual[1], 'material', name=material_lib[i])
            children[i].extend(visual)

        inertial = SubElement(children[i], 'inertial')
        node_inertial = XML('''<inertial><origin rpy="0 0 0" xyz="0 0 0"/><mass value="1.0"/><inertia ixx="0.9" ixy="0.9" ixz="0.9" iyy="0.9" iyz="0" izz="0.9"/></inertial>''')
        inertial.extend(node_inertial)
        # the base link gets zero mass/inertia
        if i == 0:
            for mass in inertial.iter('mass'):
                mass.set('value', "0.0")
            for inertia in inertial.iter('inertia'):
                inertia.set('ixx', "0.0")
                inertia.set('ixy', "0.0")
                inertia.set('ixz', "0.0")
                inertia.set('iyy', "0.0")
                inertia.set('iyz', "0.0")
                inertia.set('izz', "0.0")
    #>>>>>>>>>>>>>> joint features
    for i in range(num-1):
        parent = SubElement(joints[i], "parent", link=links_name[i])
        child  = SubElement(joints[i], "child",  link=links_name[i+1])
        if i==0:
            if links_shape[i] == "box":
                origin = SubElement(joints[i], "origin", xyz="0 {} {}".format(links_w[1]/2, (links_h[i])/2 ), rpy="0 0 0")
            elif links_shape[i] == "cylinder":
                origin = SubElement(joints[i], "origin", xyz="0 0 {}".format((links_h[i])/2 - 0.005), rpy="0 0 0")
        else:
            if links_shape[i] == "box":
                origin = SubElement(joints[i], "origin", xyz="0 {} {}".format(links_w[1], links_h[i]), rpy="0 0 0")
            elif links_shape[i] == "cylinder":
                origin = SubElement(joints[i], "origin", xyz="0 0 {}".format((links_h[i])/2- 0.005), rpy="0 0 0")
        if joints_type[i] == "revolute":
            axis  = SubElement(joints[i], "axis", xyz="1 0 0")
            limit = SubElement(joints[i], "limit", effort="1000.0", lower="-3.14", upper="3.14", velocity="0.5")
    # extend from list with different names
    root.extend(children)
    root.extend(joints)
    xml_string = xml.dom.minidom.parseString(tostring(root))
    xml_pretty_str = xml_string.toprettyxml()
    # print(xml_pretty_str)
    tree = ET.ElementTree(root)
    save_dir = '/Users/DragonX/Downloads/ARC/DATA/{:04d}'.format(save_ind)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    with open(save_dir + '/syn.urdf', "w") as f:
        f.write(xml_pretty_str)
    # Create a copy
    for i in range(num):
        member_part = copy.deepcopy(root)
        # remove all visual nodes directly
        for link in member_part.findall('link'):
            if link.attrib['name']!=links_name[i]:
                for visual in link.findall('visual'):
                    link.remove(visual)

        xml_string = xml.dom.minidom.parseString(tostring(member_part))
        xml_pretty_str = xml_string.toprettyxml()
        tree = ET.ElementTree(member_part)
        with open(save_dir + '/syn_p{}.urdf'.format(i), "w") as f:
            f.write(xml_pretty_str)
    #>>>>>>>>>>> modify each components here to get multiple separate URDF files
    # we could keep all the joints, but every file should only keep one visual features
    # tree.write(open('./data/example.urdf', 'w'), encoding='unicode')
    # >>>>>>> only for debug use <<<<<<<<<< #
    print("links_h: ", links_h)
    print("links_h_raw: ", links_h_raw)
Exemple #56
0
 def get_dg_rdf_type(self):
     """Parse the rdf query output and return the dg type (e.g. RDF1)."""
     self.xmldg = XML(self.rdf_query)
     return self.xmldg.find('DG/DG_Info/type').text
def printXml(text):
    # Parse a quote element and print its name, price, offer and up/down
    # direction as one tab-separated line.
    # NOTE(review): Python 2 only (print statement); eval() on XML-provided
    # text is unsafe if the input is untrusted.
    xml = XML(text)

    print "%s\t%.2f\t%.2f\t%s" % (xml.get("name"), eval(
        xml.find("price").text), eval(xml.find("offer").text), "up"
                                  if xml.find("up").text == "True" else "down")
Exemple #58
0
class SyncSymsrdfs(Sync):
    def __init__(self,
                 symid=None,
                 symdg=None,
                 rdfg=None,
                 symdevs=None,
                 precopy_timeout=300,
                 **kwargs):
        """Initialize the srdf/s sync resource state for one device group."""
        super(SyncSymsrdfs, self).__init__(type="sync.symsrdfs", **kwargs)
        self.pausable = False
        self.label = "srdf/s symdg %s" % (symdg)
        self.symid = symid
        self.symdg = symdg
        self.rdfg = rdfg
        # never share a mutable default between instances
        self.symdevs = [] if symdevs is None else symdevs
        self.precopy_timeout = precopy_timeout
        # per-device / per-pair working state, filled lazily
        self.symdev = {}
        self.pdevs = {}
        self.svcstatus = {}
        self.symld = {}
        self.pairs = []
        self._pairs = []
        self.active_pairs = []
        self.last = None

    def __str__(self):
        """Extend the base representation with the srdf-specific settings."""
        base = super(SyncSymsrdfs, self).__str__()
        return "%s symdg=%s symdevs=%s rdfg=%s" % (base, self.symdg,
                                                   self.symdevs, self.rdfg)

    def list_pd(self):
        """Return the local pd device paths backing the dg's symdevs.

        Runs syminq to map symdev names to pd paths, then symdg to list
        the dg members, and returns the pd paths of the members.
        Raises ex.Error when either symcli command fails.
        """
        # syminq xml_e sample: Inquiry/Dev_Info holds <pd_name> and <dev_name>
        inq = {}
        cmd = ["syminq", "-identifier", "device_name", "-output", "xml_e"]
        ret, out, err = self.call(cmd)
        if ret != 0:
            raise ex.Error("Failed to run command %s" % ' '.join(cmd))
        for info in XML(out).findall("Inquiry/Dev_Info"):
            inq.setdefault(info.find("dev_name").text, []).append(
                info.find("pd_name").text)
        # symdg xml_e sample: DG/Device/Dev_Info holds the member <dev_name>
        cmd = [
            'symdg', '-g', self.symdg, 'list', 'ld', '-output', 'xml_e', '-i',
            '15', '-c', '4'
        ]
        ret, out, err = self.call(cmd)
        if ret != 0:
            raise ex.Error("Failed to run command %s" % ' '.join(cmd))
        devs = []
        for info in XML(out).findall("DG/Device/Dev_Info"):
            devs.extend(inq.get(info.find("dev_name").text, []))
        return devs

    def promote_devs_rw(self):
        """Switch the dg's mapper/rdsk devices to read-write (Linux only)."""
        if Env.sysname != "Linux":
            return
        for dev in self.list_pd():
            if dev.startswith(("/dev/mapper/", "/dev/rdsk/")):
                self.promote_dev_rw(dev)

    def promote_dev_rw(self, dev):
        """Force the Linux block device read-write, logging through self.log."""
        utilities.devices.linux.promote_dev_rw(dev, log=self.log)

    def get_symid_from_export(self, cf):
        """Return the symid recorded as the last word of the export file's first line."""
        with open(cf, 'r') as f:
            first_line = f.readline()
        return first_line.split()[-1]

    def postsync(self):
        """Import the dg from whichever export file matches our symid."""
        if self.get_symid_from_export(self.dgfile_local_name) == self.symid:
            return self.do_dgimport(self.dgfile_local_name)
        if self.get_symid_from_export(self.dgfile_rdf_name) == self.symid:
            self.do_dgimport(self.dgfile_rdf_name)

    def presync(self):
        """Refresh export files and the wwn map while the service is up (or forced)."""
        status = self.svc.group_status(
            excluded_groups=set(["app", "sync", "task", "disk.scsireserv"]))
        if self.svc.options.force or status['avail'].status == core.status.UP:
            self.do_rdf_dgexport()
            self.do_local_dgexport()
            self.do_dg_wwn_map()

    def files_to_sync(self):
        """List the files replicated to peer nodes by the sync framework."""
        return [self.dgfile_rdf_name, self.dgfile_local_name, self.wwn_map_fpath]

    @lazy
    def wwn_map_fpath(self):
        """Path of the json file holding the (local wwn, remote wwn) pairs."""
        return os.path.join(self.var_d, "wwn_map")

    def do_dg_wwn_map(self):
        """Persist the dg devices' (local wwn, remote wwn) pairs as json."""
        # export lines look like "DEV001 003AD ..." ; keep the dev names
        with open(self.dgfile_local_name, "r") as filep:
            devs = [line.split()[1] for line in filep if "DEV" in line]
        cmd = [
            "/usr/symcli/bin/symdev", "list", "-output", "xml_e", "-sid",
            self.symid, "-devs", ",".join(devs), "-v"
        ]
        ret, out, err = self.call(cmd)
        if ret != 0:
            raise ex.Error("Failed to run command %s" % ' '.join(cmd))
        mapping = []
        for dev in XML(out).findall("Symmetrix/Device"):
            try:
                pair = (dev.find('Product/wwn').text,
                        dev.find('RDF/Remote/wwn').text)
            except Exception as exc:
                # device without a remote pairing: log and skip
                self.log.warning(str(exc))
            else:
                mapping.append(pair)
        with open(self.wwn_map_fpath, 'w') as filep:
            json.dump(mapping, filep)
            filep.write("\n")

    def do_local_dgexport(self, fpath=None):
        """Export the local dg definition to fpath and return the command output.

        fpath defaults to self.dgfile_local_name.
        Raises ex.Error if the symdg export command fails.
        """
        if fpath is None:
            fpath = self.dgfile_local_name
        # drop any stale export file; a missing file is not an error.
        # Was a bare except, which also swallowed SystemExit/KeyboardInterrupt.
        try:
            os.unlink(fpath)
        except OSError:
            pass
        cmd = [
            '/usr/symcli/bin/symdg', 'export', self.symdg, '-f', fpath, '-i',
            '15', '-c', '4'
        ]
        (ret, out, err) = self.call(cmd)
        if ret != 0:
            raise ex.Error("Failed to run command %s" % ' '.join(cmd))
        return out

    def do_rdf_dgexport(self):
        """Export the dg definition with -rdf to self.dgfile_rdf_name.

        Returns the command output. Raises ex.Error on command failure.
        """
        fpath = self.dgfile_rdf_name
        # drop any stale export file; a missing file is not an error.
        # Was a bare except, which also swallowed SystemExit/KeyboardInterrupt.
        try:
            os.unlink(fpath)
        except OSError:
            pass
        cmd = [
            '/usr/symcli/bin/symdg', 'export', self.symdg, '-f', fpath, '-rdf',
            '-i', '15', '-c', '4'
        ]
        (ret, out, err) = self.call(cmd)
        if ret != 0:
            raise ex.Error("Failed to run command %s" % ' '.join(cmd))
        return out

    def do_dgremove(self):
        """Force-delete the device group definition; return the command output."""
        cmd = ['/usr/symcli/bin/symdg', 'delete', self.symdg, '-force', '-i',
               '15', '-c', '4']
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ex.Error("Failed to run command %s" % ' '.join(cmd))
        return out

    def is_dgimport_needed(self):
        """Return True when a fresh local export differs from the rdf export."""
        import filecmp
        self.do_local_dgexport(fpath=self.dgfile_tmp_local_name)
        return not filecmp.cmp(self.dgfile_tmp_local_name,
                               self.dgfile_rdf_name,
                               shallow=False)

    def do_dgimport(self, ef):
        """(Re)import the dg from export file *ef* when the local copy is stale."""
        if self.symdg in self.get_dg_list():
            if not self.is_dgimport_needed():
                self.log.info("symrdf dg %s is already up to date" %
                              self.symdg)
                return
            # stale definition: drop it before re-importing
            self.do_dgremove()
        self.log.info("symrdf dg %s will be imported from file" % self.symdg)
        cmd = ['symdg', 'import', self.symdg, '-f', ef, '-i', '15', '-c', '4']
        ret, out, err = self.vcall(cmd)
        if ret != 0:
            raise ex.Error("Failed to run command %s" % ' '.join(cmd))
        return out

    @lazy
    def dgfile_tmp_local_name(self):
        """Scratch path for the freshness-check export (is_dgimport_needed)."""
        return os.path.join(self.var_d,
                            'symrdf_' + self.symdg + '.dg.tmp.local')

    @lazy
    def dgfile_local_name(self):
        """Path of the local view dg export file."""
        return os.path.join(self.var_d, 'symrdf_' + self.symdg + '.dg.local')

    @lazy
    def dgfile_rdf_name(self):
        """Path of the rdf view dg export file."""
        return os.path.join(self.var_d, 'symrdf_' + self.symdg + '.dg.rdf')

    def flush_cache(self):
        """Drop the cached rdf_query output so the next access re-runs symrdf."""
        self.unset_lazy("rdf_query")

    def get_symdevs(self):
        """Parse the 'symid:symdev' strings of self.symdevs into self.symdev."""
        for symdev in self.symdevs:
            parts = symdev.split(':')
            if len(parts) != 2:
                self.log.error("symdevs must be in symid:symdev ... format")
                raise ex.Error
            sid, dev = parts
            self.symdev[sid, dev] = dict(symid=sid, symdev=dev)

    @lazy
    def rdf_query(self):
        """Raw xml_e output of 'symrdf ... query', cached by @lazy.

        flush_cache() unsets the lazy value so the next access re-runs
        the command. Raises ex.Error on command failure.
        """
        cmd = [
            '/usr/symcli/bin/symrdf', '-g', self.symdg, '-rdfg',
            str(self.rdfg), 'query', '-output', 'xml_e'
        ]
        ret, out, err = self.call(cmd)
        if ret != 0:
            raise ex.Error("Failed to run command %s" % ' '.join(cmd))
        return out

    def dg_query(self):
        """Return the xml_e output of 'symdg list' for all local device groups."""
        cmd = ['/usr/symcli/bin/symdg', 'list', '-output', 'xml_e', '-i', '15',
               '-c', '4']
        ret, out, err = self.call(cmd)
        if ret != 0:
            raise ex.Error("Failed to run command %s" % ' '.join(cmd))
        return out

    # browse the local device groups and expose them as a {name: None} dict
    def get_dg_list(self):
        """Return {dg_name: None} for the device groups visible locally.

        Best-effort: a failing symdg query yields an empty dict.
        Side effects: caches the parsed tree on self.xmldg and the dict
        on self.dglist.
        """
        try:
            rdf_query = self.dg_query()
        # was a bare except, which also swallowed SystemExit/KeyboardInterrupt
        except Exception:
            return {}
        self.xmldg = XML(rdf_query)
        self.dglist = {}
        for dg in self.xmldg.findall("DG/DG_Info"):
            name = dg.find('name').text
            self.dglist[name] = None
        return self.dglist

    def get_dg_rdf_type(self):
        """Parse the rdf query output and return the dg type (e.g. RDF1)."""
        self.xmldg = XML(self.rdf_query)
        return self.xmldg.find('DG/DG_Info/type').text

    def is_rdf1_dg(self):
        """True when the device group type is RDF1."""
        return self.get_dg_rdf_type() == "RDF1"

    def is_rdf2_dg(self):
        """True when the device group type is RDF2."""
        return self.get_dg_rdf_type() == "RDF2"

    def is_rdf21_dg(self):
        """True when the device group type is RDF21."""
        return self.get_dg_rdf_type() == "RDF21"

    def get_dg_state(self):
        """Summarize pair states as 'mode/state' if uniform, else a mixed marker."""
        states = set()
        for pair in self.xmldg.findall("DG/RDF_Pair"):
            states.add(pair.find('mode').text + "/" +
                       pair.find('pair_state').text)
        if len(states) == 1:
            return states.pop()
        return "mixed srdf pairs state"

    def get_rdfpairs_from_dg(self):
        """Build self.rdfpairs, mapping source to target dev names for the dg."""
        cmd = ['symrdf', '-g', self.symdg, '-rdfg',
               str(self.rdfg), 'query', '-output', 'xml_e']
        ret, out, err = self.call(cmd)
        if ret != 0:
            raise ex.Error

        self.rdfpairs = {}  # remote_symm;remote_dev;rdfg
        self.xmldg = XML(out)
        for pair in self.xmldg.findall("DG/RDF_Pair"):
            src = pair.find('Source/dev_name').text
            dst = pair.find('Target/dev_name').text
            self.rdfpairs[src] = dst
        self.log.debug("rdfpairs from dg %s", str(self.rdfpairs))

    def is_synchronous_mode(self):
        """True if the rdf group verifies as running in synchronous mode."""
        cmd = ['/usr/symcli/bin/symrdf', '-g', self.symdg, '-rdfg',
               str(self.rdfg), 'verify', '-synchronous', '-i', '15', '-c', '4']
        ret = self.call(cmd)[0]
        return ret == 0

    def is_asynchronous_mode(self):
        """True if the rdf group verifies as running in asynchronous mode."""
        cmd = ['/usr/symcli/bin/symrdf', '-g', self.symdg, '-rdfg',
               str(self.rdfg), 'verify', '-asynchronous', '-i', '15', '-c', '4']
        ret = self.call(cmd)[0]
        return ret == 0

    def is_acp_disk_mode(self):
        """True if the rdf group verifies as running in acp_disk mode."""
        cmd = ['/usr/symcli/bin/symrdf', '-g', self.symdg, '-rdfg',
               str(self.rdfg), 'verify', '-acp_disk', '-i', '15', '-c', '4']
        ret = self.call(cmd)[0]
        return ret == 0

    def is_synchronized_state(self):
        """True if the rdf pairs verify as being in the synchronized state."""
        cmd = ['/usr/symcli/bin/symrdf', '-g', self.symdg, '-rdfg',
               str(self.rdfg), 'verify', '-synchronized', '-i', '15', '-c', '4']
        ret = self.call(cmd)[0]
        return ret == 0

    def is_synchronous_and_synchronized_state(self):
        """True if the rdf group verifies as both synchronous and synchronized."""
        cmd = ['/usr/symcli/bin/symrdf', '-g', self.symdg, '-rdfg',
               str(self.rdfg), 'verify', '-synchronous', '-synchronized', '-i',
               '15', '-c', '4']
        ret = self.call(cmd)[0]
        return ret == 0

    def is_syncinprog_state(self):
        """True if the rdf pairs verify as being in the syncinprog state."""
        cmd = ['/usr/symcli/bin/symrdf', '-g', self.symdg, '-rdfg',
               str(self.rdfg), 'verify', '-syncinprog', '-i', '15', '-c', '4']
        ret = self.call(cmd)[0]
        return ret == 0

    def is_suspend_state(self):
        """True if the rdf pairs verify as being in the suspended state."""
        cmd = ['/usr/symcli/bin/symrdf', '-g', self.symdg, '-rdfg',
               str(self.rdfg), 'verify', '-suspended', '-i', '15', '-c', '4']
        ret = self.call(cmd)[0]
        return ret == 0

    def is_split_state(self):
        """True if the rdf pairs verify as being in the split state."""
        cmd = ['/usr/symcli/bin/symrdf', '-g', self.symdg, '-rdfg',
               str(self.rdfg), 'verify', '-split', '-i', '15', '-c', '4']
        ret = self.call(cmd)[0]
        return ret == 0

    def is_failedover_state(self):
        """True if the rdf pairs verify as being in the failedover state."""
        cmd = ['/usr/symcli/bin/symrdf', '-g', self.symdg, '-rdfg',
               str(self.rdfg), 'verify', '-failedover', '-i', '15', '-c', '4']
        ret = self.call(cmd)[0]
        return ret == 0

    def is_partitioned_state(self):
        """True if the rdf pairs verify as being in the partitioned state."""
        cmd = ['/usr/symcli/bin/symrdf', '-g', self.symdg, '-rdfg',
               str(self.rdfg), 'verify', '-partitioned', '-i', '15', '-c', '4']
        ret = self.call(cmd)[0]
        return ret == 0

    # SRDF/A expected state is consistent AND enabled
    def is_consistent_state(self):
        """True if the rdf pairs verify as being in the consistent state."""
        cmd = ['/usr/symcli/bin/symrdf', '-g', self.symdg, '-rdfg',
               str(self.rdfg), 'verify', '-consistent', '-i', '15', '-c', '4']
        ret = self.call(cmd)[0]
        return ret == 0

    def is_enabled_state(self):
        """True if the rdf pairs verify as being in the enabled state."""
        cmd = ['/usr/symcli/bin/symrdf', '-g', self.symdg, '-rdfg',
               str(self.rdfg), 'verify', '-enabled', '-i', '15', '-c', '4']
        ret = self.call(cmd)[0]
        return ret == 0

    def can_sync(self, target=None):
        """This resource type reports no sync constraint: always True."""
        return True

    def resume(self):
        """Resume replication for the dg, then invalidate the cached query."""
        cmd = ['/usr/symcli/bin/symrdf', '-g', self.symdg, '-rdfg',
               str(self.rdfg), '-noprompt', 'resume', '-i', '15', '-c', '4']
        ret = self.vcall(cmd)[0]
        if ret != 0:
            raise ex.Error("Failed to run command %s" % ' '.join(cmd))
        self.flush_cache()

    def suspend(self):
        """Suspend replication for the dg, then invalidate the cached query."""
        cmd = ['/usr/symcli/bin/symrdf', '-g', self.symdg, '-rdfg',
               str(self.rdfg), '-noprompt', 'suspend', '-i', '15', '-c', '4']
        ret = self.vcall(cmd)[0]
        if ret != 0:
            raise ex.Error("Failed to run command %s" % ' '.join(cmd))
        self.flush_cache()

    def establish(self):
        """Establish (resync R1 -> R2) the device group (symrdf establish).

        Raises:
            ex.Error: when the symrdf command exits non-zero.
        """
        cmd = [
            '/usr/symcli/bin/symrdf', '-g', self.symdg,
            '-rdfg', str(self.rdfg),
            '-noprompt', 'establish', '-i', '15', '-c', '4',
        ]
        ret = self.vcall(cmd)[0]
        if ret != 0:
            raise ex.Error("Failed to run command %s" % ' '.join(cmd))
        # invalidate cached symrdf query results after a state change
        self.flush_cache()

    def failover(self):
        """Fail the device group over to the R2 side (symrdf failover).

        Raises:
            ex.Error: when the symrdf command exits non-zero.
        """
        cmd = [
            '/usr/symcli/bin/symrdf', '-g', self.symdg,
            '-rdfg', str(self.rdfg),
            '-noprompt', 'failover', '-i', '15', '-c', '4',
        ]
        ret = self.vcall(cmd)[0]
        if ret != 0:
            raise ex.Error("Failed to run command %s" % ' '.join(cmd))
        # invalidate cached symrdf query results after a state change
        self.flush_cache()

    def failoverestablish(self):
        """Fail over then re-establish replication in the reverse direction
        (symrdf failover -establish).

        Raises:
            ex.Error: when the symrdf command exits non-zero.
        """
        cmd = [
            '/usr/symcli/bin/symrdf', '-g', self.symdg,
            '-rdfg', str(self.rdfg),
            '-noprompt', 'failover', '-establish', '-i', '15', '-c', '4',
        ]
        ret = self.vcall(cmd)[0]
        if ret != 0:
            raise ex.Error("Failed to run command %s" % ' '.join(cmd))
        # invalidate cached symrdf query results after a state change
        self.flush_cache()

    def split(self):
        """Split the SRDF pairs of the device group (symrdf split).

        Raises:
            ex.Error: when the symrdf command exits non-zero.
        """
        cmd = [
            '/usr/symcli/bin/symrdf', '-g', self.symdg, '-rdfg',
            str(self.rdfg), '-noprompt', 'split', '-i', '15', '-c', '4'
        ]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            # fix: was a bare ``raise ex.Error`` with no message; include the
            # failed command for consistency with resume()/suspend()/establish()
            raise ex.Error("Failed to run command %s" % ' '.join(cmd))
        self.flush_cache()

    def swap(self):
        """Swap the R1/R2 personalities of the device group (symrdf swap).

        Raises:
            ex.Error: when the symrdf command exits non-zero.
        """
        cmd = [
            '/usr/symcli/bin/symrdf', '-g', self.symdg, '-rdfg',
            str(self.rdfg), '-noprompt', 'swap', '-i', '15', '-c', '4'
        ]
        (ret, out, err) = self.vcall(cmd)
        if ret != 0:
            # fix: was a bare ``raise ex.Error`` with no message; include the
            # failed command for consistency with resume()/suspend()/establish()
            raise ex.Error("Failed to run command %s" % ' '.join(cmd))
        self.flush_cache()

    def get_syminfo(self):
        """Refresh symmetrix information by probing the device group RDF type."""
        self.get_dg_rdf_type()

    def get_last(self):
        """Cache in self.last the most recent clone action timestamp found
        across all symdevs of the resource. No-op if already computed.
        """
        if self.last is not None:
            return
        # clone_lastaction format example: "Thu Feb 25 10:20:56 2010"
        timestamps = [
            datetime.datetime.strptime(
                self.symld[symid, symdev]['clone_lastaction'],
                "%a %b %d %H:%M:%S %Y")
            for symid, symdev in self.symdev
        ]
        if timestamps:
            self.last = max(timestamps)

    def sync_status(self, verbose=False):
        """Return the resource status: UP when the device group is
        Synchronous/Synchronized, WARN otherwise or on probe failure.
        """
        try:
            self.get_syminfo()
        except ex.Error as err:
            self.status_log(str(err))
            return core.status.WARN
        current = self.get_dg_state()
        self.status_log("current state %s" % current, "info")
        if not self.is_synchronous_and_synchronized_state():
            self.status_log("expecting Synchronous/Synchronized")
            return core.status.WARN
        return core.status.UP

    # SRDF split
    def sync_split(self):
        """Generic sync_split action: delegates to symrdf split."""
        self.split()

    # SRDF suspend
    def sync_quiesce(self):
        """Generic sync_quiesce action: delegates to symrdf suspend."""
        self.suspend()

    # SRDF swap
    def sync_swap(self):
        """Generic sync_swap action: delegates to symrdf swap."""
        self.swap()

    def sync_break(self):
        """Generic sync_break action: delegates to symrdf split."""
        self.split()

    # SRDF establish
    def sync_resync(self):
        """Generic sync_resync action: delegates to symrdf establish."""
        self.establish()

    def sync_establish(self):
        """Generic sync_establish action: delegates to symrdf establish."""
        self.establish()

    def start(self):
        """Bring the SRDF device group to a locally-writable state, then
        promote the local devices read-write.

        The action depends on where we run and on the RDF role/state:

        * drp node + RDF2: split or failover so R2 becomes writable
        * drp node + RDF1: only acceptable if synchronous/synchronized
        * prod node + RDF1: log the state; refuse write-protected states
        * prod node + RDF2: metrocluster passive node; failover(+establish)

        Raises:
            ex.Error: when the group is in a state that requires a manual
                recovery decision.
        """
        if Env.nodename in self.svc.drpnodes:
            if self.is_rdf2_dg():
                if self.is_synchronous_and_synchronized_state():
                    self.split()
                elif self.is_partitioned_state():
                    self.log.warning(
                        "symrdf dg %s is RDF2 and partitioned. failover is preferred action."
                        % self.symdg)
                    self.failover()
                elif self.is_failedover_state():
                    self.log.info(
                        "symrdf dg %s is already RDF2 and FailedOver." %
                        self.symdg)
                elif self.is_suspend_state():
                    self.log.warning(
                        "symrdf dg %s is RDF2 and suspended: R2 data may be outdated"
                        % self.symdg)
                    self.split()
                elif self.is_split_state():
                    self.log.info(
                        "symrdf dg %s is RDF2 and already splitted." %
                        self.symdg)
                else:
                    # fix: the %s placeholder was never filled (missing
                    # ``% self.symdg``), so the message printed a literal "%s"
                    raise ex.Error(
                        "symrdf dg %s is RDF2 on drp node and unexpected SRDF state, you have to manually return to a sane SRDF status."
                        % self.symdg)
            elif self.is_rdf1_dg():
                if self.is_synchronous_and_synchronized_state():
                    pass
                else:
                    # fix: missing ``% self.symdg`` formatting argument
                    raise ex.Error(
                        "symrdf dg %s is RDF1 on drp node, you have to manually return to a sane SRDF status."
                        % self.symdg)
        elif Env.nodename in self.svc.nodes:
            if self.is_rdf1_dg():
                if self.is_synchronous_and_synchronized_state():
                    self.log.info(
                        "symrdf dg %s is RDF1 and synchronous/synchronized." %
                        self.symdg)
                elif self.is_partitioned_state():
                    self.log.warning("symrdf dg %s is RDF1 and partitioned." %
                                     self.symdg)
                elif self.is_failedover_state():
                    raise ex.Error(
                        "symrdf dg %s is RDF1 and write protected, you have to manually run either sync_split+sync_establish (ie losing R2 data), or syncfailback (ie losing R1 data)"
                        % self.symdg)
                elif self.is_suspend_state():
                    self.log.warning("symrdf dg %s is RDF1 and suspended." %
                                     self.symdg)
                elif self.is_split_state():
                    self.log.warning("symrdf dg %s is RDF1 and splitted." %
                                     self.symdg)
                else:
                    # fix: missing ``% self.symdg`` formatting argument
                    raise ex.Error(
                        "symrdf dg %s is RDF1 on primary node and unexpected SRDF state, you have to manually return to a sane SRDF status."
                        % self.symdg)
            elif self.is_rdf2_dg():  # start on metrocluster passive node
                if self.is_synchronous_and_synchronized_state():
                    self.failoverestablish()
                elif self.is_partitioned_state():
                    self.log.warning(
                        "symrdf dg %s is RDF2 and partitioned, failover is preferred action."
                        % self.symdg)
                    self.failover()
                else:
                    # fix: missing ``% self.symdg`` formatting argument
                    raise ex.Error(
                        "symrdf dg %s is RDF2 on primary node, you have to manually return to a sane SRDF status."
                        % self.symdg)
        self.promote_devs_rw()

    def refresh_svcstatus(self):
        """Recompute and cache the service aggregate status, ignoring the
        app/sync/task/scsireserv resource groups.
        """
        ignored = set(["app", "sync", "task", "disk.scsireserv"])
        self.svcstatus = self.svc.group_status(excluded_groups=ignored)

    def get_svcstatus(self):
        """Lazily populate the cached service status (no-op if already set)."""
        if len(self.svcstatus) != 0:
            return
        self.refresh_svcstatus()
Exemple #59
0
#!/usr/bin/python3 -dd
"""Print the stop name elements for CTA bus route 22, stop 14787."""

import urllib.request
from xml.etree.ElementTree import XML

PREDICTION_URL = ('http://ctabustracker.com/bustime/map/getStopPrediction.jsp'
                  '?route=22&stop=14787')
STOP_URL = 'http://ctabustracker.com/bustime/map/getStop.jsp?route=22&stop=14787'

# NOTE(review): the prediction response was fetched but never read or parsed
# in the original script — presumably a leftover; the request is kept to
# preserve behavior, but the connection is now properly closed. TODO confirm
# whether this fetch can be dropped entirely.
with urllib.request.urlopen(PREDICTION_URL):
    pass

# fix: responses are now closed via context managers (original leaked both)
with urllib.request.urlopen(STOP_URL) as stop:
    name = stop.read()

doc = XML(name)

for pt in doc.findall('.//name'):
    print(pt.text)
Exemple #60
0
from xml.etree.ElementTree import Element, SubElement, XML

from ElementTree_pretty import prettify

# Build a document with a single <parent> under the <top> root.
top = Element('top')
parent = SubElement(top, 'parent')

# Parse three <child> elements from a literal document and graft them
# under <parent> with extend().
child_doc = XML('<root><child num="0"/><child num="1"/>'
                '<child num="2"/></root>')
parent.extend(child_doc)

print(prettify(top))