def getSETIXML(url, projAbbr):
    """Download a BOINC project's user list and stats tables into ../Data/."""
    import gzip
    import urllibRequests as ur
    # The user list is served gzip-compressed; decompress before saving.
    data = ur.get(url + '/stats/user.gz', codec='raw')
    with open('../Data/' + projAbbr + 'User.xml', 'wb') as outFile:
        outFile.write(gzip.decompress(data))
    # tables.xml is plain XML and carries the server-side update timestamp.
    data = ur.get(url + '/stats/tables.xml', codec='raw')
    with open('../Data/' + projAbbr + 'Info.xml', 'wb') as outFile:
        outFile.write(data)
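# Usage sketch (values are illustrative, not from the original source; any
# BOINC project that publishes /stats/user.gz and /stats/tables.xml fits):
#   getSETIXML('http://setiathome.berkeley.edu', 'SETI')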
def getSETIXML():
    """Hardcoded SETI@home variant of the downloader above."""
    import gzip
    import urllibRequests as ur
    data = ur.get('http://setiathome.berkeley.edu/stats/user.gz', codec='raw')
    with open('../Data/SETIUser.xml', 'wb') as outFile:
        outFile.write(gzip.decompress(data))
    data = ur.get('http://setiathome.berkeley.edu/stats/tables.xml', codec='raw')
    with open('../Data/SETIInfo.xml', 'wb') as outFile:
        outFile.write(data)
def main():
    """Print the current weather glance for the configured ZIP code."""
    import datetime
    import time
    import urllibRequests as ur
    import xml.etree.ElementTree as ET
    import parseConf  # local config module (not shown here)
    conf = parseConf.parse('weather.conf')
    now = datetime.datetime.fromtimestamp(int(time.time()))
    data = {
        'whichClient': 'NDFDgenMultiZipCode',
        'zipCodeList': conf['zipCode'],
        'product': 'glance',
        'begin': now.isoformat(),
        # Request a 12-hour (0.5-day) forecast window.
        'end': (now + datetime.timedelta(hours=12)).isoformat(),
        'Unit': 'e',
        'icons': 'icons',
        'Submit': 'Submit'
    }
    req = ur.get(
        'http://graphical.weather.gov/xml/SOAP_server/ndfdXMLclient.php', data)
    tree = ET.fromstring(req.text)
    info = tree.find('data').find('parameters').find('conditions-icon').find(
        'icon-link')
    # Slice the icon's base name (e.g. 'sct') out of the full icon URL.
    print(info.text[39:-4])
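# The XML walk above assumes an NDFD DWML response shaped roughly like:
#   <dwml><data><parameters>
#     <conditions-icon><icon-link>http://.../sct.jpg</icon-link></conditions-icon>
#   </parameters></data></dwml>
# The final slice keeps only the icon's base name, which names the condition.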
def lookup(self): """Lookup word in dict.cn""" # Fetch data from the server response = requests.get(self.base_url + self.word_text) # Parse the webpage soup = BeautifulSoup(response, "lxml") div_word = soup.find("div", attrs={"class": "word"}) if (len(div_word.find_all("div")) is 0): self.valid = False else: self._store_info(div_word)
def makeDB(fileName):
    """Build the SETI user SQLite DB from the downloaded users XML."""
    import base64
    try:
        with open('../Data/SETIInfo.xml', 'r') as localInfo:
            # The fifth token is <update_time>...</update_time>; strip the tags.
            localTime = localInfo.read().split()[4][13:-14]
    except Exception:
        # No local copy yet: fetch a fresh one.
        getSETIXML()
    else:
        import urllibRequests as ur
        remoteTime = ur.get('http://setiathome.berkeley.edu/stats/tables.xml'
                            ).split()[4][13:-14]
        if int(localTime) < int(remoteTime):
            print('The local SETI users.xml is old, updating...')
            getSETIXML()
    # Now it is safe to assume that the ../Data/SETIUser.xml is up to date.
    import sqlfile as sqldb
    try:
        db = sqldb.sqliteDB(fileName, 'seti')
    except Exception:
        sqldb.createSQLiteDB(fileName, ['CPID', 'Nickname', 'Country'], 'seti')
        db = sqldb.sqliteDB(fileName, 'seti')
    print('Creating the SETI User SQLite DB...')
    # Time to parse the whole XML file...
    import xml.etree.ElementTree as ET
    tree = ET.iterparse('../Data/SETIUser.xml')
    for (event, elem) in tree:
        if elem.tag == 'user':
            cpid = elem.find('cpid').text
            uid = elem.find('id').text
            if int(uid) % 10000 == 0:
                print('Processing uid', uid)
            try:
                uname = base64.b64encode(
                    elem.find('name').text.encode('utf-8')).decode('ascii')
            except Exception:
                # Some users have an empty <name>; store a placeholder instead.
                uname = base64.b64encode(b'NoName').decode('ascii')
                print(uid, cpid, elem.find('name').text)
            country = elem.find('country').text
            if not country:
                country = 'International'
            db.addItem([uid, cpid, uname, country])
        # Free the parsed element to keep memory flat on huge files.
        elem.clear()
    db.updateDB()
    print('SETI User DB was created successfully.')
def makeDB(fileName, projAbbr, webUrl):
    """Build a BOINC project's user SQLite DB from the downloaded users XML."""
    import base64
    try:
        with open('../Data/' + projAbbr + 'Info.xml', 'r') as localInfo:
            # The fifth token is <update_time>...</update_time>; strip the tags.
            localTime = localInfo.read().split()[4][13:-14]
    except Exception:
        # No local copy yet: fetch a fresh one.
        getSETIXML(webUrl, projAbbr)
    else:
        import urllibRequests as ur
        remoteTime = ur.get(webUrl + '/stats/tables.xml').split()[4][13:-14]
        if int(localTime) < int(remoteTime):
            print('The local ' + projAbbr + ' users.xml is old, updating...')
            getSETIXML(webUrl, projAbbr)
            print('The local ' + projAbbr + ' users.xml is fresh now.')
    # Now it is safe to assume that the local users XML is up to date.
    import sqldb
    try:
        db = sqldb.sqliteDB(fileName, projAbbr.lower())
    except Exception:
        sqldb.createSQLiteDB(fileName, ['CPID', 'Nickname', 'Country', 'Score'],
                             projAbbr.lower())
        db = sqldb.sqliteDB(fileName, projAbbr.lower())
    print('Creating the ' + projAbbr + ' User SQLite DB...')
    # Time to parse the whole XML file...
    import xml.etree.ElementTree as ET
    tree = ET.iterparse('../Data/' + projAbbr + 'User.xml')
    for (event, elem) in tree:
        if elem.tag == 'user':
            cpid = elem.find('cpid').text
            uid = elem.find('id').text
            if int(uid) % 10000 == 0:
                print('Processing uid', uid)
            score = elem.find('total_credit').text
            uname = elem.find('name').text
            if not uname:
                uname = "NoName"
            # Base64-encode the name so arbitrary Unicode survives storage.
            uname = base64.b64encode(uname.encode('utf-8')).decode('ascii')
            country = elem.find('country').text
            if not country:
                country = 'International'
            db.addItem([uid, cpid, uname, country, score])
            db.updateDB()
        # Free the parsed element to keep memory flat on huge files.
        elem.clear()
    print(projAbbr + ' User DB was created successfully.')
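# Usage sketch (illustrative values, not from the original source):
#   makeDB('users.db', 'SETI', 'http://setiathome.berkeley.edu')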
def makeDB(fileName):
    """SETI variant with a one-off resume hack: skip users up to uid 8703526."""
    import base64
    try:
        with open('../Data/SETIInfo.xml', 'r') as localInfo:
            localTime = localInfo.read().split()[4][13:-14]
    except Exception:
        getSETIXML()
    else:
        import urllibRequests as ur
        remoteTime = ur.get('http://setiathome.berkeley.edu/stats/tables.xml'
                            ).split()[4][13:-14]
        if int(localTime) < int(remoteTime):
            print('The local SETI users.xml is old, updating...')
            # getSETIXML()
    # Now it is safe to assume that the ../Data/SETIUser.xml is up to date.
    import sqldb
    try:
        db = sqldb.sqliteDB(fileName, 'seti')
    except Exception:
        sqldb.createSQLiteDB(fileName, ['CPID', 'Nickname', 'Country'], 'seti')
        db = sqldb.sqliteDB(fileName, 'seti')
    print('Creating the SETI User SQLite DB...')
    # Time to parse the whole XML file...
    import xml.etree.ElementTree as ET
    tree = ET.iterparse('../Data/SETIUser.xml')
    flag = False  # Remove this later.
    for (event, elem) in tree:
        if elem.tag == 'user':
            cpid = elem.find('cpid').text
            uid = elem.find('id').text
            if int(uid) % 10000 == 0:
                print('Processing uid', uid)
            uname = base64.b64encode(
                elem.find('name').text.encode('utf-8')).decode('ascii')
            country = elem.find('country').text
            if not country:
                country = 'International'
            if flag:  # Remove this later
                db.addItem([uid, cpid, uname, country])
                db.updateDB()
            elif uid == '8703526':
                # Resume point: start inserting after this uid.
                flag = True
        elem.clear()
    print('SETI User DB was created successfully.')
def lookup(self): """Lookup word in Merriam Webster dictionary""" # Fetch data from the server response = requests.get(self.base_url + self.word_text, params={"key": self.key}) # Parse the xml fetched soup = BeautifulSoup(response, "lxml") entries = soup.find_all("entry") if (entries): # If the word is valid # The word only has one entry if (entries[0]["id"] == self.word_text): # Store entry info into result self.entries.append(self._store_info(entries[0])) # The word has more then one entry else: for entry in entries: # If the entry is indeed one of the entries of the word if (entry["id"].startswith(self.word_text + "[")): self.entries.append(self._store_info(entry)) else: # If the word is no valid self.valid = False
def getScore(userID, dateStr):
    """Scrape a user's points for a given date from extremeoverclocking.com."""
    import urllibRequests
    data = urllibRequests.get(
        'http://folding.extremeoverclocking.com/user_summary.php?s=&u=' + userID,
        codec='iso-8859-1').splitlines()
    # The points cell is the line after the date cell; strip the <td> markup
    # and the thousands separators.
    return data[data.index(' <td>' + dateStr + '</td>') + 1][20:-5].replace(',', '')
def getUID(nName):
    """Resolve a folding@home user name to its numeric UserID."""
    import urllibRequests
    import xml.etree.ElementTree as ET
    data = urllibRequests.get(
        'http://folding.extremeoverclocking.com/xml/user_summary.php?un=' +
        nName + '&t=3213')
    return ET.fromstring(data)[1].find('UserID').text
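# Usage sketch combining the two scrapers above (the user name and the date
# format are illustrative assumptions, not from the original source):
#   uid = getUID('someUser')
#   print(getScore(uid, '2015-01-01'))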
def getNewScore(url, pid):
    """Fetch a user's total credit from a BOINC project's XML user page."""
    import urllibRequests as UR
    import xml.etree.ElementTree as ET
    req = UR.get(url + '?format=xml&userid=' + pid)
    parse = ET.fromstring(req)
    score = parse.find('total_credit').text
    # Drop the fractional part of the credit value.
    return score[:score.index('.')]
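# Usage sketch (illustrative endpoint and id; BOINC projects typically expose
# a show_user.php page that accepts format=xml, but verify for your project):
#   print(getNewScore('http://setiathome.berkeley.edu/show_user.php', '12345'))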