def write_data(open_file):
    # start a fresh log file if the current one has expired
    # (note: this rebinding is local -- the caller's handle is not replaced)
    if file_has_expired(open_file):
        open_file = open(date_arr[2] + date_arr[3], 'a')
    # write a timestamp column if the config asks for one
    if sensorinfo["time"]["refreshurl"] != "null":
        open_file.write(str(int(time.time())))
        open_file.write(",")
    for i in range(len(sensorinfo["sensors"])):
        # poke the refresh URL (if any) so the sensor serves a fresh reading
        if sensorinfo["sensors"][i]["refreshurl"] != "null":
            uopen(sensorinfo["sensors"][i]["refreshurl"])
        url = uopen(sensorinfo["sensors"][i]["dataurl"]).read()
        dataArray = url.split('\n')
        val = dataArray[1].strip()
        # TODO: still have to deal with string/int/etc. data -- can't graph a string
        if "bool" in sensorinfo["sensors"][i]["interpret"]:
            if "true" in val:
                open_file.write("1")
            else:
                open_file.write("0")
        elif "double" in sensorinfo["sensors"][i]["interpret"]:
            rounded = '{0:.2f}'.format(float(val))
            open_file.write(rounded)
        else:
            # otherwise treat "interpret" as a literal value to match against
            matchkey = str(sensorinfo["sensors"][i]["interpret"]).strip()
            if val == matchkey:
                open_file.write("1")
            else:
                open_file.write("0")
        open_file.write(',')
    open_file.write('\n')
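# A minimal sketch of a polling loop that drives write_data; not part of the
# original code. The hypothetical `log_forever` helper assumes the same
# module-level `date_arr` naming scheme used inside write_data, and the
# 5-second period is purely illustrative.
import time

def log_forever():
    logfile = open(date_arr[2] + date_arr[3], 'a')
    while True:
        write_data(logfile)  # appends one comma-separated sensor row
        time.sleep(5)        # hypothetical sample period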
def __init__(self, url):
    '''Takes the URL of a ClickVenture and creates an Adventure object.
    Call the `graph` method to build and show the graph.'''
    self.url = url
    page = uopen(url)
    self.soup = BeautifulSoup(page)
    self.G = None  # will be replaced by the graph later
    # get the page title from the og:title meta tag, if present
    try:
        titletag = self.soup.find('meta', attrs={'property': "og:title"})
        self.title = titletag.attrMap['content']
    except Exception:  # tag missing or lacking a content attribute
        self.title = 'no title found'
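# A minimal usage sketch for the constructor above; the URL is hypothetical,
# and `graph` is assumed to exist elsewhere in the class, as the docstring says.
adv = Adventure('http://www.clickhole.com/clickventure/example-adventure')
print adv.title  # falls back to 'no title found' if the og:title tag is missing
adv.graph()      # build and show the graph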
def data_gen():
    t = data_gen.t
    while True:
        newdata = []
        t += 0.05
        newdata.append(t)
        # refresh urls so each sensor serves a fresh reading
        for i in range(len(sensorinfo["sensors"])):
            if sensorinfo["sensors"][i]["refreshurl"] != "null":
                uopen(sensorinfo["sensors"][i]["refreshurl"])
        # append data
        for i in range(len(sensorinfo["sensors"])):
            url = uopen(sensorinfo["sensors"][i]["dataurl"]).read()
            dataArray = url.split('\n')
            val = dataArray[1]
            if "bool" in sensorinfo["sensors"][i]["interpret"]:
                if "true" in val:
                    newdata.append(int(sensorinfo["sensors"][i]["graphto"]))
                else:
                    newdata.append(0)
            elif "double" in sensorinfo["sensors"][i]["interpret"]:
                newdata.append(float(val))  # convert from string so it can be plotted
            elif "string" in sensorinfo["sensors"][i]["interpret"]:
                # "string:<matchkey>" -- graph `graphto` when the reading matches
                matchkey = sensorinfo["sensors"][i]["interpret"][7:]
                val = val.replace("\r", "")
                if val == matchkey:
                    newdata.append(int(sensorinfo["sensors"][i]["graphto"]))
                else:
                    newdata.append(0)
            else:
                # fall back to treating "interpret" as a literal value to match
                matchkey = str(sensorinfo["sensors"][i]["interpret"]).strip()
                if val == matchkey:
                    newdata.append(int(sensorinfo["sensors"][i]["graphto"]))
                else:
                    newdata.append(0)
        yield newdata
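# How data_gen is consumed is not shown here; the `data_gen.t` attribute it
# reads on entry suggests the matplotlib FuncAnimation idiom, so below is a
# minimal sketch under that assumption. The figure setup, the choice to plot
# only the first sensor column, and the 100 ms interval are all illustrative.
import matplotlib.pyplot as plt
import matplotlib.animation as animation

data_gen.t = 0  # the generator reads its starting time from this attribute

fig, ax = plt.subplots()
line, = ax.plot([], [], lw=2)
xdata, ydata = [], []

def run(newdata):
    # plot the first sensor column against the time column
    xdata.append(newdata[0])
    ydata.append(newdata[1])
    line.set_data(xdata, ydata)
    ax.relim()
    ax.autoscale_view()
    return line,

ani = animation.FuncAnimation(fig, run, data_gen, interval=100)
plt.show()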
def get_articles():
    '''Collects article URLs from the master article list on the Clickhole website.'''
    articles = []  # list to collect article URLs in
    for pageno in [1, 2, 3, 4, 5]:
        list_URL = r'http://www.clickhole.com/features/clickventure/?page={0}'.format(pageno)
        list_page = uopen(list_URL)
        list_soup = BeautifulSoup(list_page)
        for article_soup in list_soup.findAll('article'):
            link_tag = article_soup.findAll('a')[0]  # the link tag within the HTML article
            article_url = r'http://clickhole.com' + link_tag.attrs[1][1]  # the href component of the link attributes
            articles.append(article_url)  # add the URL to the master list
    print 'Found ' + str(len(articles)) + ' articles.'
    return articles
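# Note (a sketch, not a change to the function above): link_tag.attrs[1][1]
# depends on the order of attributes in the markup. BeautifulSoup also allows
# dictionary-style access by name, which is sturdier:
#
#     article_url = r'http://clickhole.com' + link_tag['href']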
def fetchresults():
    import re  # better placed at module level; kept here to match the original scope
    page = uopen(gradcafe)
    soup = BeautifulSoup(page)
    for postrow in soup('tr', {'class': re.compile('row*')}):
        sn = str(postrow('td', {'class': 'instcol'})[0].contents[0])
        m = str(postrow('td')[1].contents[0])
        try:
            r = (str(postrow('td')[2]('span', {'class': re.compile('d*')})[0].contents[0])
                 + str(postrow('td')[2].contents[1]))
        except Exception:  # no decision span -- take the cell's plain text
            r = str(postrow('td')[2].contents[0])
        d = str(postrow('td', {'class': 'datecol'})[0].contents[0])
        try:
            n = str(postrow('ul', {'class': 'control'})[0]('li')[1].contents[0])
        except Exception:  # no notes on this row
            n = ""
        yield " | ".join([sn, m, r, d, n])
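# A minimal usage sketch for fetchresults; `gradcafe` is the module-level
# results-page URL the generator assumes. Each yielded item is one
# pipe-delimited row of scraped fields.
for row in fetchresults():
    print row  # e.g. "<school> | <major> | <result> | <date> | <notes>"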
def getRanking(isbn):
    page = uopen('%s%s' % (AMZN, isbn))  # or str.format()
    data = page.read()
    page.close()
    return REGEX.findall(data)[0]
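# A minimal usage sketch for getRanking; AMZN (a product-page URL prefix) and
# REGEX (a compiled pattern that captures the rank) are module-level constants
# not shown here, and the ISBN below is purely illustrative.
if __name__ == '__main__':
    print getRanking('0123456789')  # hypothetical ISBN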