def parse(self,response):
    """Scrapy callback: yield one CeshiItem per result cell on the page.

    NOTE(review): Python 2 code -- urllib.unquote and str.encode('utf8')
    indicate py2 byte-string handling.
    """
    n = 0
    # Each result row lives in a <td class="f"> cell.
    for sel in response.xpath('//td[@class="f"]'):
        # Query string / class are pulled from the request URL by private helpers.
        query = urllib.unquote(self.__get_url_query(response.url))
        CLASS = urllib.unquote(self.__get_url_class(response.url))
        item = CeshiItem()
        # Strip all HTML tags from the title markup.
        title = re.sub('<[^>]*?>','',sel.xpath('.//a/font[@size="3"]').extract()[0])
        lading = sel.xpath('.//a[1]/@href').extract()[0]  # landing-page link
        # Date and size both live in the green <font> footer text.
        time = sel.xpath('.//font[@color="#008000"]/text()').re('(\d{4}-\d{1,2}-\d{1,2})')[0]
        size = sel.xpath('.//font[@color="#008000"]/text()').re('(\d+K)')[0]
        n += 1  # 1-based rank of the result within this page
        item['rank'] = n
        item['title'] = title.encode('utf8')
        item['lading'] = lading.encode('utf8')
        item['time'] = time.encode('utf8')
        item['size'] = size.encode('utf8')
        item['query'] = query
        item['update'] = current_date  # module-level crawl date -- TODO confirm
        item['CLASS'] = CLASS
        yield item
def Schedule(url):
    """Fetch a schedule page and emit one Kodi list entry per day/event.

    Scrapes vBulletin-style markup: one <h2 class="blockhead"> month
    header, then per-day <h3> blocks each followed by an
    <ul class="blockrow eventlist"> of events.
    """
    setCookie(cookie_file)
    response = net().http_GET(url)
    link = response.content
    # Flatten the markup so the regexes below can match across lines.
    link = link.replace('\r', '').replace('\n', '').replace('\t', '').replace( ' ', '').replace(' ', '')
    # NOTE(review): month[0] below will raise IndexError if the header
    # regex finds nothing -- confirm the page always carries one.
    month = re.findall('<h2 class="blockhead">([^<]+?)</h2>', link)
    match = re.findall(
        '<h3><span class=".+?">([^<]+?)</span><span class="daynum" style=".+?" onclick=".+?">(\d+)</span></h3><ul class="blockrow eventlist">(.+?)</ul>',
        link)
    addLink(
        '[COLOR red][I]Times are E.S.T / GMT -5 | Follow us on Twitter for latest channel news, updates + more.[/I][/COLOR]',
        '', '', icon, fanart)
    for day, num, data in match:
        # Round-trip through ASCII to drop any non-ASCII characters.
        day = day.encode('ascii', 'ignore').decode('ascii')
        num = num.encode('ascii', 'ignore').decode('ascii')
        data = data.encode('ascii', 'ignore').decode('ascii')
        addLink(
            '[COLOR blue][B]' + day + ' ' + num + ' ' + month[0] + '[/B][/COLOR]',
            '', '', icon, fanart)
        # Individual events: time span followed by the title anchor.
        match2 = re.findall(
            '<span class="eventtime">(.+?)</span><a href=".+?" title="">(.+?)</a>',
            data)
        for time, title in match2:
            time = time.encode('ascii', 'ignore').decode('ascii')
            title = title.encode('ascii', 'ignore').decode('ascii')
            addLink('[COLOR yellow]' + time + '[/COLOR] ' + title, 'url', '',
                    icon, fanart)
def getmoodjson():
    """Bottle endpoint: return one page of mood records as a JSON string.

    Form fields: `itemnumber` (rows per page) and `page` (1-based page
    index). Rows come back newest first. NOTE(review): Python 2 code --
    on py3, str(value.encode("utf-8")) would produce "b'...'" strings.
    """
    itemnumber = request.forms.get("itemnumber")
    page = request.forms.get("page")
    itemnumber=int(itemnumber)
    page=int(page)
    result=[]
    dbpath='db/mood.db'
    conn = sqlite3.connect(dbpath)
    c = conn.cursor()
    SqlSentence="SELECT * FROM statics Order By time desc"
    i=0
    j=0
    # Manual pagination: i counts all rows scanned, j counts rows taken;
    # rows are taken once i reaches the page's start offset.
    for row in c.execute(SqlSentence):
        if i==(page-1)*itemnumber+j and j<itemnumber:
            j=j+1
            username=row[0]
            time=row[1]
            mood=row[2]
            words=row[3]
            single={"username":str(username.encode("utf-8")),
                    "time":str(time.encode("utf-8")),
                    "mood":str(mood.encode("utf-8")),
                    "words":str(words.encode("utf-8"))}
            result.append(single)
            if j==itemnumber:
                break
        i=i+1
    conn.commit()
    c.close()
    conn.close()
    out=json.dumps(result, ensure_ascii=False)
    return str(out)
def run(self) -> None:
    """Serve the current time over UDP until the peer signs off.

    Binds to (self.IP, self.PORT) and, for every datagram received,
    replies to the sender with the UTF-8 encoded result of
    self.forge_time(). An empty datagram ends the loop. Exits the
    process if the bind fails.
    """
    server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    server.setblocking(True)
    print('Server created')
    # Bind socket to local host and port
    try:
        server.bind((self.IP, self.PORT))
    except socket.error as msg:
        print(f'Bind failed. Error: {str(msg)}')
        sys.exit()
    print('Server bind complete')
    print(f'Server now listening @ {self.IP}:{self.PORT}')
    try:
        while True:
            try:
                # BUG FIX: recvfrom returns a (payload, address) tuple, so
                # the original `if not data:` was always false -- unpack
                # and test the payload itself.
                payload, client = server.recvfrom(8)
            except socket.timeout:
                break
            if not payload:
                # Empty datagram: client is done.
                break
            print(f'Server got the data from {client}')
            time = self.forge_time()
            server.sendto(time.encode('utf-8'), client)
            print(f'Server sent {time} to {client}')
    finally:
        # BUG FIX: close the socket even when the loop exits via an
        # unexpected exception (the original leaked it in that case).
        server.close()
    print('Server closed')
def getmoodjson():
    """Bottle handler: serve one page of mood records as a JSON string.

    Form fields: `itemnumber` -- rows per page, `page` -- 1-based page
    index. Records are returned newest first.
    """
    per_page = int(request.forms.get("itemnumber"))
    page_no = int(request.forms.get("page"))
    first = (page_no - 1) * per_page
    rows_out = []
    conn = sqlite3.connect('db/mood.db')
    cursor = conn.cursor()
    query = "SELECT * FROM statics Order By time desc"
    # Walk the full result set, skipping rows before the page start and
    # stopping once the page is full.
    for position, row in enumerate(cursor.execute(query)):
        if position < first:
            continue
        if len(rows_out) >= per_page:
            break
        rows_out.append({
            "username": str(row[0].encode("utf-8")),
            "time": str(row[1].encode("utf-8")),
            "mood": str(row[2].encode("utf-8")),
            "words": str(row[3].encode("utf-8")),
        })
    conn.commit()
    cursor.close()
    conn.close()
    return str(json.dumps(rows_out, ensure_ascii=False))
def parse(self, response):
    """Scrapy callback: turn every <td class="f"> result cell on the
    page into a CeshiItem, ranked in page order."""
    for rank, cell in enumerate(response.xpath('//td[@class="f"]'), start=1):
        item = CeshiItem()
        item['rank'] = rank
        item['query'] = urllib.unquote(self.__get_url_query(response.url))
        item['CLASS'] = urllib.unquote(self.__get_url_class(response.url))
        # Title markup with every HTML tag stripped out.
        raw_title = cell.xpath('.//a/font[@size="3"]').extract()[0]
        item['title'] = re.sub('<[^>]*?>', '', raw_title).encode('utf8')
        item['lading'] = cell.xpath('.//a[1]/@href').extract()[0].encode('utf8')
        # The green footer text carries both the date and the size.
        footer = cell.xpath('.//font[@color="#008000"]/text()')
        item['time'] = footer.re('(\d{4}-\d{1,2}-\d{1,2})')[0].encode('utf8')
        item['size'] = footer.re('(\d+K)')[0].encode('utf8')
        item['update'] = current_date
        yield item
def parse(url, outfile):
    """Scrape one page of mobile-Weibo posts and append TSV rows to outfile.

    Python 2 code (print statements, byte-string .encode()). Row layout:
    id, content, like, repost, comment, time, then the original-post
    fields (ori_*) which stay '' for non-repost entries.
    """
    content = ""
    like = ""
    repost = ""
    comment = ""
    time = ""
    ori_user = ""
    ori_userID = ""
    ori_text = ""
    ori_like = ""
    ori_repost = ""
    ori_comment = ""
    req = s.get(url, headers=param)  # `s`: module-level requests.Session -- TODO confirm
    soup = BeautifulSoup(req.text, "html.parser", from_encoding="utf8")
    contents = soup.find_all("div", {"class": "c"})
    for c in contents:
        try:
            id = c.get("id")
            divs = c.find_all("div")
            # Posts whose footer <span class="ct"> holds an <a> carry one
            # extra trailing text node -- hence the -6 vs -5 offsets.
            if divs[-1].find("span", {"class": "ct"}).find("a"):
                texts = divs[-1].find_all(text=True)
                texts = [t.strip() for t in texts if t.strip() != ""]
                content = "".join(texts[:-6])
                like = texts[-6]
                repost = texts[-5]
                comment = texts[-4]
                # NOTE(review): this branch leaves `time` as a LIST of text
                # nodes, so time.encode() below raises and the row is
                # silently dropped by the bare except -- verify intent.
                time = divs[-1].find("span", {
                    "class": "ct"
                }).find_all(text=True)
            else:
                texts = divs[-1].find_all(text=True)
                texts = [t.strip() for t in texts if t.strip() != ""]
                content = "".join(texts[:-5])
                like = texts[-5]
                repost = texts[-4]
                comment = texts[-3]
                time = texts[-1]
            # Three <div>s means a repost: div[0] holds the original
            # author/text, div[1] its like/repost counts.
            if len(divs) == 3:
                ori_user = divs[0].find("span", {
                    "class": "cmt"
                }).find("a").text
                ori_userID = divs[0].find("span", {
                    "class": "cmt"
                }).find("a").get("href")
                ori_text = divs[0].find("span", {"class": "ctt"}).text
                spans = divs[1].find_all("span", {"class": "cmt"})
                ori_like = spans[0].text
                ori_repost = spans[1].text
                ori_comment = divs[1].find("a", {"class": "cc"}).text
            # NOTE(review): `code` is not defined here -- presumably a
            # module-level console encoding name; confirm.
            print id, content.encode(code, "ignore")
            line = "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" %(id,content.encode("utf8"),\
                like.encode("utf8"), repost.encode("utf8"), comment.encode("utf8"), time.encode("utf8"),\
                ori_user.encode("utf8"), ori_userID, ori_text.encode("utf8"), ori_like.encode("utf8")\
                , ori_repost.encode("utf8"), ori_comment.encode("utf8"))
            outfile.write(line)
        except Exception, e:
            #print e
            pass
def generate_hash(prev_hash, public_key, amount, to_whom_public, time):
    """Return the SHA-256 hex digest of a transaction's fields.

    All five arguments must be strings; each is UTF-8 encoded and fed
    into the digest in a fixed order (prev_hash, public_key, amount,
    to_whom_public, time), which is equivalent to hashing their
    concatenation. Returns a 64-character lowercase hex string.
    """
    # Renamed local: the original shadowed the builtin `hash`.
    digest = hashlib.sha256()
    for field in (prev_hash, public_key, amount, to_whom_public, time):
        digest.update(field.encode('utf-8'))
    return digest.hexdigest()
def get_infor(url):
    """Fetch a Zhihu Live listing (JSON) and return rows of GBK-encoded fields.

    Each returned row: [live_name, speaker_name, href, time, join,
    introduct, fee, selfurl, score, like_num, answer_count].
    NOTE(review): Python 2 code -- unicode.encode('gbk', 'ignore') yields
    byte strings for Excel/GBK output.
    """
    data = get_json(url)['data']
    mlist = []
    for li in data:
        list=[]  # NOTE(review): shadows the builtin `list`
        speaker_name = li['speaker']['member']['name']
        introduct = li['description']
        live_name = li['subject']
        time = get_time(li['starts_at'])
        join= str(li['seats']['taken'])
        # NOTE(review): on py2 amount/100 is integer division -- confirm
        # fee amounts are always whole-yuan multiples of 100.
        fee = str(li['fee']['amount']/100)+ li['fee']['unit']
        href = 'https://www.zhihu.com/lives/'+li['id']
        selfurl= 'https://www.zhihu.com/people/'+li['speaker']['member']['url_token']
        # Optional fields: fall back to placeholder strings when absent.
        try:
            score = str(li['feedback_score'])
        except:
            score = 'No score now'
        try:
            like_num = str(li['liked_num'])
        except:
            like_num = 'No like_num now'
        try:
            answer_count = str(li['speaker_message_count'])
        except:
            answer_count = 'No answer_account now'
        # ============== selfurl is used to crawl personal info =======
        list.append(live_name.encode('gbk','ignore'))
        list.append(speaker_name.encode('gbk','ignore'))
        list.append(href.encode('gbk','ignore'))
        list.append(time.encode('gbk','ignore'))
        list.append(join.encode('gbk','ignore'))
        list.append(introduct.encode('gbk','ignore'))
        list.append(fee.encode('gbk','ignore'))
        list.append(selfurl.encode('gbk','ignore'))
        list.append(score.encode('gbk','ignore'))
        list.append(like_num.encode('gbk','ignore'))
        list .append(answer_count.encode('gbk','ignore'))
        mlist.append(list)
    return mlist
def getg(time):
    """Compute Zhihu's login signature: HMAC-SHA1 over a fixed message.

    The signed message is "password" + client_id + "com.zhihu.web" +
    time (a string timestamp), keyed with the hard-coded signing key.
    Returns the lowercase hex digest.
    """
    key = 'd1b964811afb40118a12068ff74a12f4'
    signer = hmac.new(key.encode('utf-8'), ''.encode('utf-8'), sha1)
    signer.update("password".encode('utf-8'))
    signer.update("c3cef7c66a1843f8b3a9e6a1e3160e20".encode('utf-8'))  # client_id
    signer.update("com.zhihu.web".encode('utf-8'))
    signer.update(time.encode('utf-8'))
    # BUG FIX: the original called hexdigest() twice and discarded the
    # first result; a single call is sufficient (hexdigest is pure).
    return signer.hexdigest()
def login(self, uname, pw):
    """Authenticate `uname`/`pw` against self.user and start a session.

    On success, stamps the user with an md5 session token (username +
    password + current timestamp) and a random map location, then
    returns the user object. Raises Fail("incorrect password") for a
    bad password and Fail("username not found") when no user matches.
    """
    for usr in self.user:
        if usr.name == uname:
            if usr.pw == pw:
                time = str(datetime.datetime.now())
                # md5 is used here as a session-token generator, not a
                # password hash. NOTE(review): tokens built from known
                # inputs are predictable -- consider secrets.token_hex.
                usr.token = hashlib.md5(uname.encode()+pw.encode()+time.encode()).hexdigest()
                # NOTE(review): concatenating the two random ints as bare
                # strings is ambiguous ("1"+"12" == "11"+"2") -- a
                # separator is probably intended; confirm.
                usr.location = str(random.randint(0, MAP.width-1)) + str(random.randint(0, MAP.height-1))
                return usr
            else:
                raise Fail("incorrect password")
    raise Fail("username not found")
def leaveRoom(room_no, time):
    """Send a 'leave room' (opcode 105) frame and wait for the reply code.

    Wire format: 2-byte big-endian command, 4-byte big-endian room
    number, then the ASCII-encoded timestamp. Busy-waits on the globals
    written by the receiver thread until a 305 (success) or 451 (error)
    reply arrives, re-arms the flag, and returns the code.
    """
    global ReturnCode, ReturnCodeFlag
    command = int.to_bytes(105, 2, byteorder='big')
    room_number = int.to_bytes(room_no, 4, byteorder='big')
    msg = command + room_number + time.encode('ascii')
    client.sendall(msg)
    # NOTE(review): pure spin-wait burns a CPU core; a threading.Event
    # or Condition would be kinder.
    while ReturnCodeFlag or ReturnCode not in {305, 451}:
        continue
    code = ReturnCode
    ReturnCodeFlag = True  # mark the code as consumed
    return code
def attendance_main(self):
    """Fetch today's attendance records and precompute each lecture's
    start time as a datetime for later comparison.

    Populates self.lectures_array (raw records from the server) and
    self.lectures_array_edited (index -> lecture start datetime).
    Refetches the records when the stored day no longer matches today.
    """
    if not self.start:  # BUG FIX: was `if !self.start:` (invalid Python)
        self.start = 1
        # get the time in alphabetical form
        self.today = datetime.datetime.now()
        ###############################
        # please edit this part for saturdays and sundays
        # js = {"day": today.strftime("%a"), "location": "CMPE001"}
        js = {"day": "MON", "location": "CMPE001"}
        ###############################
        # The following is the request to get attendance data for today
        self.current_attendance = requests.post(self.url, json.dumps(js))
        self.current_attendance = json.loads(self.current_attendance.content)
        self.lectures_array = self.current_attendance["records"]
    # BUG FIX: the original compared a datetime against an int day
    # (`self.today != datetime.datetime.now().day`), which never matches.
    if self.today.day != datetime.datetime.now().day:
        js = {"day": "MON", "location": "CMPE001"}
        self.current_attendance = requests.post(self.url, json.dumps(js))
        self.current_attendance = json.loads(self.current_attendance.content)
        self.lectures_array = self.current_attendance["records"]
    self.current_time = datetime.datetime.now()
    ###############################
    i = 0
    for lecture in self.lectures_array:
        self.current_time = datetime.datetime.now()
        for key in lecture.keys():
            if key != u"instrcutors":  # (sic) server-side key spelling
                # Key is a lecture start time such as u"08:30"; strip the
                # colon and coerce the unicode string to ascii.
                time = key.replace(":", "")
                time = time.encode('ascii', 'ignore')
                # BUG FIX: after removing the colon the minutes sit at
                # [2:4], not [3:5] (the original read only one digit).
                self.lecture_time = self.current_time.replace(
                    hour=int(time[0:2]), minute=int(time[2:4]))
                self.lectures_array_edited[i] = self.lecture_time
def joinInRoom(room_number, time):
    """Send a 'join room' (opcode 104) frame and wait for the reply code.

    Wire format: 2-byte big-endian command, 4-byte big-endian room
    number, then the ASCII-encoded timestamp. Busy-waits on the globals
    written by the receiver thread until a 304 (success) or 441 (error)
    reply arrives, re-arms the flag, and returns the code.
    """
    global ReturnCodeFlag, ReturnCode
    command = int.to_bytes(104, 2, byteorder='big')
    room_number = int.to_bytes(room_number, 4, byteorder='big')
    msg = command + room_number + time.encode('ascii')
    client.sendall(msg)
    # NOTE(review): spin-wait -- same pattern as leaveRoom; an Event
    # would avoid pegging a core.
    while ReturnCodeFlag or ReturnCode not in {304, 441}:
        continue
    code = ReturnCode
    ReturnCodeFlag = True  # mark the code as consumed
    return code
def getAllData(conn):
    """Purge invalid and state-2 rows, then return the remaining records
    as (ascii-encoded time, id, "DATA") tuples.

    Note: the DELETEs are not committed here -- the caller owns the
    transaction on `conn`.
    """
    cursor = conn.cursor()
    # Drop rows flagged invalid or already in state 2 before reading.
    for statement in ("DELETE FROM data WHERE Valid='F'",
                      "DELETE FROM data WHERE State=2"):
        cursor.execute(statement)
    cursor.execute("SELECT time,id FROM data")
    return [(stamp.encode('ascii'), row_id, "DATA")
            for (stamp, row_id) in cursor.fetchall()]
def parse(url,outfile):
    """Scrape one page of mobile-Weibo posts and append TSV rows to outfile.

    Python 2 code (print statement, byte-string .encode()). Row layout:
    id, content, like, repost, comment, time, then ori_* fields that
    stay '' for posts which are not reposts. Rows that fail to parse are
    silently skipped by the bare except.
    """
    content = ""
    like = ""
    repost = ""
    comment = ""
    time = ""
    ori_user = ""
    ori_userID = ""
    ori_text = ""
    ori_like = ""
    ori_repost = ""
    ori_comment = ""
    req = s.get(url,headers=param)  # `s`: module-level requests.Session -- TODO confirm
    soup = BeautifulSoup(req.text,"html.parser",from_encoding="utf8")
    contents = soup.find_all("div",{"class":"c"})
    for c in contents:
        try:
            id = c.get("id")
            divs = c.find_all("div")
            # Footer with an <a> inside <span class="ct"> means one extra
            # trailing text node -- hence -6 offsets instead of -5.
            if divs[-1].find("span",{"class":"ct"}).find("a"):
                texts = divs[-1].find_all(text=True)
                texts = [t.strip() for t in texts if t.strip() != ""]
                content = "".join(texts[:-6])
                like = texts[-6]
                repost = texts[-5]
                comment = texts[-4]
                # NOTE(review): `time` becomes a LIST here, so the
                # time.encode() below raises and the row is dropped by the
                # bare except -- verify intent.
                time = divs[-1].find("span",{"class":"ct"}).find_all(text=True)
            else:
                texts = divs[-1].find_all(text=True)
                texts = [t.strip() for t in texts if t.strip() != ""]
                content = "".join(texts[:-5])
                like = texts[-5]
                repost = texts[-4]
                comment = texts[-3]
                time = texts[-1]
            # Three <div>s == repost: div[0] original author/text,
            # div[1] its like/repost counts.
            if len(divs) == 3:
                ori_user = divs[0].find("span",{"class":"cmt"}).find("a").text
                ori_userID = divs[0].find("span",{"class":"cmt"}).find("a").get("href")
                ori_text = divs[0].find("span",{"class":"ctt"}).text
                spans = divs[1].find_all("span",{"class":"cmt"})
                ori_like = spans[0].text
                ori_repost = spans[1].text
                ori_comment = divs[1].find("a",{"class":"cc"}).text
            # NOTE(review): `code` is defined elsewhere (console
            # encoding?) -- confirm.
            print id,content.encode(code,"ignore")
            line = "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" %(id,content.encode("utf8"),\
                like.encode("utf8"), repost.encode("utf8"), comment.encode("utf8"), time.encode("utf8"),\
                ori_user.encode("utf8"), ori_userID, ori_text.encode("utf8"), ori_like.encode("utf8")\
                , ori_repost.encode("utf8"), ori_comment.encode("utf8"))
            outfile.write(line)
        except Exception,e:
            #print e
            pass
def getMonitorLatestEvent(self, monitorID):
    # Returns the latest ID and max score frame ID event for a monitor. If
    # connection error occurs or there are no events for the monitor, both
    # will be 0.
    #
    # ZoneMinder paginates the events index with the newest events on the
    # later pages, so the pages are walked backwards and every event is
    # compared on StartTime.

    # First need to determine the number of pages
    monitor_url = self._server \
        + "/zm/api/events/index/MonitorID:{:d}.json?page=1"\
        .format(monitorID)
    response = requests.get(url=monitor_url, cookies=self._cookies,
                            verify=self._verify_ssl)
    latest_eventid = 0
    maxscore_frameid = 0
    if not response.ok:
        self.debug(1, "Connection error in getMonitorLatestEvent", "stderr")
        return latest_eventid, maxscore_frameid
    # Loop through all events and get most recent one based on start time
    # (loop backwards because latest events are on later pages)
    npages = response.json()['pagination']['pageCount']
    latest_eventtime = datetime.strptime('1970-01-01 00:00:00',
                                         '%Y-%m-%d %H:%M:%S')
    for i in range(npages, 0, -1):
        monitor_url = self._server \
            + "/zm/api/events/index/MonitorID:{:d}.json?page={:d}"\
            .format(monitorID, i)
        response = requests.get(url=monitor_url, cookies=self._cookies,
                                verify=self._verify_ssl)
        data = response.json()
        try:
            for event in data['events']:
                ID = int(event['Event']['Id'])
                time = event['Event']['StartTime']
                if time is not None:
                    # NOTE(review): .encode('ascii') before strptime is a
                    # py2 idiom; py3's strptime rejects bytes -- confirm
                    # the target interpreter.
                    time_obj = datetime.strptime(time.encode('ascii'),
                                                 '%Y-%m-%d %H:%M:%S')
                    if time_obj > latest_eventtime:
                        latest_eventtime = time_obj
                        latest_eventid = ID
                        maxscore_frameid = \
                            int(event['Event']['MaxScoreFrameId'])
        except KeyError:
            # Page without an 'events' list: log and keep scanning.
            self.debug(1, "No events list present", "stderr")
            continue
    return latest_eventid, maxscore_frameid
def one_sub_page_parser(self):
    """Parse one craigslist-style listing page and append a CSV row.

    Pops the next link from self.total_links, downloads it, scrapes
    title / rent / bedrooms / square footage / address / post time,
    UTF-8 encodes each field and writes
    [time, rent, br, square, address, title] via self.writer.
    Any field that fails to parse defaults to ''.
    """
    try:
        sub_link = self.total_links.pop()
    except:
        print('total_links are empty')
        # NOTE(review): download(None) below will likely fail -- confirm
        # the caller never invokes this with an empty queue.
        sub_link = None
    html = download(sub_link)
    print(sub_link, ': downloading and parsing...')
    soup = BeautifulSoup(html, 'html.parser')
    postingtitle = soup.find_all('span', 'postingtitletext')[0]
    try:
        title = postingtitle.find_all('span', id='titletextonly')[0].text
    except:
        title = ''
    try:
        rent = postingtitle.find_all('span', 'price')[0].text
    except:
        rent = ''
    try:
        # e.g. "2br - 900ft2": bedrooms before the dash, area after it.
        br_and_square = postingtitle.find_all('span', 'housing')[0].text
        br = correct_data(br_and_square.split('-')[0])
        square = br_and_square.split('-')[1]
    except:
        br = ''
        square = ''
    try:
        address = correct_data(postingtitle.find_all('small')[0].text)
    except:
        address = ''
    try:
        time = soup.find_all('p', 'postinginfo')[0].find_all(
            'time', 'timeago')[0]['datetime']
        # correct_time presumably returns a sequence of components which
        # are joined with spaces -- TODO confirm.
        time = correct_time(time)
        time = ' '.join(time)
    except:
        time = ''
    # Encode every field for the CSV writer (py2-style byte output).
    time = time.encode('utf-8')
    rent = rent.encode('utf-8')
    br = br.encode('utf-8')
    square = square.encode('utf-8')
    address = address.encode('utf-8')
    title = title.encode('utf-8')
    row = [time, rent, br, square, address, title]
    self.writer.writerow(row)
def login(self, uname, pw):
    """Validate credentials, mint a session token and a spawn location.

    Returns the matching user object on success. Raises
    Fail("incorrect password") when the password does not match and
    Fail("username not found") when no user carries that name.
    """
    for candidate in self.user:
        if candidate.name != uname:
            continue
        if candidate.pw != pw:
            raise Fail("incorrect password")
        # Session token: md5 over username, password and the login time.
        stamp = str(datetime.datetime.now())
        token_source = uname.encode() + pw.encode() + stamp.encode()
        candidate.token = hashlib.md5(token_source).hexdigest()
        # Random spawn point, encoded as concatenated coordinates.
        col = random.randint(0, MAP.width - 1)
        row = random.randint(0, MAP.height - 1)
        candidate.location = str(col) + str(row)
        return candidate
    raise Fail("username not found")
def make_custom_table(data):
    """Generate a new HTML table from custom datasets.

    Renders the dict produced by joinTables, iterateTable or
    filterLectures as raw HTML: a header row of weekday names, then one
    row per time slot holding each weekday's lectures (Mon-Fri).
    Returns the assembled HTML as a single newline-joined string.
    """
    days = ["Montag", "Dienstag", "Mittwoch", "Donnerstag", "Freitag"]
    sp = " "
    slots = data['order']
    table = data['data']
    html = ['<table border="1", cellpadding="2">', '<tr>']
    # Header row: empty corner cell, then the five weekday names.
    for col in range(0, 6):
        html.append('<td>')
        html.append(sp if col == 0 else days[col - 1])
        html.append('</td>')
    html.append('</tr>')
    for slot in slots:
        html.append('<tr>')
        html.append('<td>')
        html.append(slot.encode('utf-8'))
        html.append('</td>')
        for day_idx in range(0, 5):
            html.append('<td>')
            lectures = table[day_idx][slot]
            if len(lectures) == 0:
                html.append(sp)
            rendered = []
            for lecture in lectures:
                cell = (u'%(name)s<br>%(short)s %(typ)s<br>%(room)s'
                        % lecture)
                # Imported entries get their source's colour.
                if "source" in lecture:
                    cell = (u'<span style="color: %s;">%s</span>'
                            % (get_color(lecture['source']), cell))
                rendered.append(cell)
            html.append(u'<br><br>'.join(rendered).encode('utf-8'))
            html.append('</td>')
        html.append('</tr>')
    html.append("</table>")
    return '\n'.join(html)
def _newBlock():
    """Create a new block on the blockchain.

    Mines by brute-forcing a nonce until SHA-256(index + time + data +
    prevHash + nonce) starts with 14 zero hex digits; per the assignment
    instructions it gives up -- and writes the block anyway -- once the
    nonce reaches 50 000. All block fields come from module-level
    helpers; the result is persisted via _writeBlock.
    """
    nonce = 0
    # Get the last index from the blockchain filename
    index = _getNewIndex()
    # Generate a timestamp for this block
    time = _timestamp()
    # Gather the last transaction as the block data
    data = _getNewData()
    # SHA256 of previous block contents
    prevHash = _hashPrevBlock()
    prevHashString = prevHash.hexdigest()
    # First convert the index to a string (rest are strings)
    indexString = str(index)
    # Encode the strings to bytes for hashing
    indexBytes = indexString.encode()
    timeBytes = time.encode()
    dataBytes = data.encode()
    hashBytes = prevHashString.encode()
    # Create a block variable to hold the whole block
    block = indexBytes + timeBytes + dataBytes + hashBytes
    while True:
        # Reset the hash variable
        newhash = None
        # Start the hash variable
        newhash = hashlib.sha256()
        # Add the block to be hashed
        newhash.update(block)
        # Convert the nonce to a string
        nonceString = str(nonce)
        # Prepare the nonce for adding to the hash
        nonceBytes = nonceString.encode()
        # Update block by concatenating the nonceBytes
        newhash.update(nonceBytes)
        # Create a string of the hash
        digest = newhash.hexdigest()
        # Get the first 14 characters of the hash digest
        first14 = digest[0:14]
        # Fourteen zeroes for comparison
        fourteenZeros = "00000000000000"
        if (first14 == fourteenZeros):
            _writeBlock(digest, index, time, data, prevHashString, nonce)
            # Break out of the infinite while loop
            break
        # Per instructions call it quits at nonce value of 50 000
        if (nonce >= 50000):
            _writeBlock(digest, index, time, data, prevHashString, nonce)
            # Break out of the infinite while loop
            break
        nonce += 1
def parseA(url,outfile):
    """Scrape user entries from one mobile-Weibo page into a TSV file.

    Python 2 code (print statement, byte-string encodes). For each
    <div class="c"> writes "user\tuser_url\ttime\n" to outfile; rows
    that fail to parse are silently skipped by the bare except.
    """
    req = s.get(url,headers=param)  # `s`: module-level requests.Session -- TODO confirm
    soup = BeautifulSoup(req.text,"html.parser",from_encoding="utf8")
    contents = soup.find_all("div",{"class":"c"})
    for c in contents:
        try:
            user = c.find("a").text
            user_url = c.find("a").get("href")
            time = c.find("span",{"class":"ct"}).text.strip()
            # NOTE(review): `code` is defined elsewhere (console
            # encoding?) -- confirm.
            print user.encode(code,"ignore")
            line = "%s\t%s\t%s\n" %(user.encode("utf8","ignore"),user_url,time.encode("utf8","ignore"))
            outfile.write(line)
        except Exception,e:
            #print e
            pass
def timeexchange(time):
    """Normalize a date string like 'on Jan 3, 2020' to 'YYYY/MM/DD'.

    Any input that does not parse falls back to the sentinel 'N/A'.
    Either way the result is returned UTF-8 encoded (legacy py2-style
    byte-string contract preserved for existing callers).
    """
    try:
        parts = time.split()[1:]           # drop the leading word ("on")
        year = parts[-1]
        month, day = parts[0], parts[1]
        day = day.replace(',', '')         # strip the trailing comma
        if len(day) == 1:
            day = '0' + day                # zero-pad single-digit days
        month = monthexchange(str(month))  # month name -> two-digit number
        time = '/'.join([year, month, day])  # assemble YYYY/MM/DD
    except Exception:
        # BUG FIX: narrowed the bare `except:` so KeyboardInterrupt /
        # SystemExit are no longer swallowed; any parse failure still
        # yields the sentinel.
        time = 'N/A'
    return time.encode('utf-8')
def parseA(url, outfile):
    """Scrape user entries from one mobile-Weibo page into a TSV file.

    Python 2 code (print statement, byte-string encodes). For each
    <div class="c"> writes "user\tuser_url\ttime\n" to outfile; rows
    that fail to parse are silently skipped by the bare except.
    """
    req = s.get(url, headers=param)  # `s`: module-level requests.Session -- TODO confirm
    soup = BeautifulSoup(req.text, "html.parser", from_encoding="utf8")
    contents = soup.find_all("div", {"class": "c"})
    for c in contents:
        try:
            user = c.find("a").text
            user_url = c.find("a").get("href")
            time = c.find("span", {"class": "ct"}).text.strip()
            # NOTE(review): `code` is defined elsewhere (console
            # encoding?) -- confirm.
            print user.encode(code, "ignore")
            line = "%s\t%s\t%s\n" % (user.encode(
                "utf8", "ignore"), user_url, time.encode("utf8", "ignore"))
            outfile.write(line)
        except Exception, e:
            #print e
            pass
def write_txt(self,uid,time_list,weibo_list):
    # One user's id plus their parallel (timestamp, post) lists.
    """Write one user's posts to '<text_dir>uid=<uid>.txt' as
    'uid,time,post' lines; falls back to the default title file when
    uid is None. Python 2 code (print statements, utf-8 byte encoding).
    """
    zipper=zip(time_list,weibo_list)
    print uid,' 正在写入博文'
    if uid is not None:
        # Keep digits only -- strips any 'uid=' prefix or stray markup.
        not_number=re.compile('\D')
        uid=re.sub(not_number,'', str(uid))
        file=open(self.text_dir+'uid='+str(uid)+'.txt','w+')  # NOTE(review): `file` shadows the builtin
    else:
        file=open(self.text_dir+self.default_title +'.txt','w+')
    for i in zipper:
        time=i[0]
        weibo=i[1]
        time=time.encode('utf-8','ignore')
        file.write(uid+','+time+','+weibo+'\n')
    file.close()
    print uid,' 完成输入博文'
def searchm(req):
    """Django view: find strangers matching the POSTed filter.

    Filters all users by gender, school, age range [low, high], and
    recency of last login (`time` is a Chinese recency label mapped to a
    window in seconds), excluding the requester and existing friends.
    Renders find_friends.html with the matches.
    """
    #try:
    useremail= req.COOKIES.get('useremail')
    follows=Followship.objects.filter(fans__email=useremail)
    friends=[]
    for follow in follows:
        friend=follow.followed
        friends.append(friend)
    user=User.objects.get(email=useremail)
    if(req.POST['gender']=='True'):
        gender=True
    else:
        gender=False
    school=req.POST['school']
    time=req.POST['time']
    num=req.POST['num']
    low=int(req.POST['low'])
    high=int(req.POST['high'])
    now=datetime.datetime.now()
    day=datetime.date.today()
    list=[]  # NOTE(review): shadows the builtin `list`
    time=time.encode('utf-8')  # py2: compare as a utf-8 byte string below
    # Map the recency label ("one minute ago" ... "one week ago") to a
    # seconds window; anything else means "within ~30 days".
    if time=='一分钟前':
        a=60
    elif time=='一个小时前':
        a=3600
    elif time=='一天前':
        a=86400
    elif time=='三天前':
        a=259200
    elif time=='一周前':
        a=604800
    else:
        a=2592000
    for i in User.objects.all() :
        old=((day-i.birthday).days)/365  # integer age in years (py2 division)
        b=(i.last_login).replace(tzinfo=None)
        # NOTE(review): .seconds ignores whole days of the delta (a login
        # 2 days + 10s ago yields 10) -- total_seconds() is probably
        # intended; confirm.
        cha=(now-b).seconds
        if(i.is_boy==gender and i.school==school and old>=low and old<=high and cha<=a and i!=user and (i not in friends)):
            list.append(i)
    if(num=='越多越好'):
        list=find_friends(list,useremail,friends)
    return render_to_response('find_friends.html',{'friends':list,'username':user.name})
    #except:
    # Unreachable: the try/except wrapper above is commented out.
    return HttpResponseRedirect('/quanzi/login/')
def collectData(self):
    """Run one data-collection session: capture N camera frames with
    timestamps and control values, then stream everything to the peer.

    Protocol after collection: every exchange is recv (ack) -> send;
    the raw frame bytes and the TIME/ACCELERATION/STEERING metadata
    string alternate for each sample.
    NOTE(review): self.sock.recv() takes no size argument -- this looks
    like a ZeroMQ socket rather than a raw socket; confirm.
    """
    count = 0
    frames = []
    timestamps = []
    accelerations = []
    steerings = []
    print("Starting to collect data")
    while (count < self.numberOfTrials):
        # capture image and timestamp
        frame = self.camera.captureImage()
        frames.append(frame)
        if (count % 5 == 0):
            print(count)  # progress heartbeat every 5 samples
        # Millisecond-precision UTC timestamp (microseconds truncated).
        timestamp = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
        timestamps.append(timestamp)
        # get acceleration and steering values
        accelerations.append(str(self.acceleration))
        steerings.append(str(self.steering))
        count = count + 1
    # Neutral duty cycle: stop the vehicle once collection ends.
    self.pwm.changeDutyCycle(15.0, 15.0)
    print("Completed data collection of %d samples\n" % self.numberOfTrials)
    self.mutex.acquire()
    self.sock.recv()  # ignore message
    message = "STATUS=COLLECTIONDONE;TRIALS=" + str(self.numberOfTrials)
    self.sock.send(message.encode())
    # send frames
    for i in range(self.numberOfTrials):
        self.sock.recv()
        frame = np.array(frames[i])
        image_data = frame.tostring()
        self.sock.send(image_data)
        self.sock.recv()
        time = ("TIME=" + timestamps[i])
        time = time.encode()
        acc = (";ACCELERATION=" + accelerations[i])
        acc = acc.encode()
        steer = (";STEERING=" + steerings[i])
        steer = steer.encode()
        message = time + acc + steer
        self.sock.send(message)
    print("All data sent.")
    self.mutex.release()
def getdailyjson():
    """Bottle handler: serve one JSON page of a day's praise/comment
    counters from that day's counting database.

    Form fields: `itemnumber` -- rows per page, `page` -- 1-based page
    index, `date` -- which day's database to open.
    """
    per_page = int(request.forms.get("itemnumber"))
    page_no = int(request.forms.get("page"))
    date = request.forms.get("date")
    first = (page_no - 1) * per_page
    rows_out = []
    # NOTE(review): `date` comes straight from the client and is spliced
    # into the filesystem path -- sanitize upstream.
    conn = sqlite3.connect('db/' + date + '-count.db')
    cursor = conn.cursor()
    # Walk the result set, skipping rows before the page start and
    # stopping once the page is full.
    for position, row in enumerate(cursor.execute("SELECT * FROM statics")):
        if position < first:
            continue
        if len(rows_out) >= per_page:
            break
        rows_out.append({
            "isbn": str(row[0].encode("utf-8")),
            "praise": str(row[1].encode("utf-8")),
            "comment": str(row[2].encode("utf-8")),
            "bookname": str(row[3].encode("utf-8")),
            "username": str(row[4].encode("utf-8")),
            "time": str(row[5].encode("utf-8")),
        })
    conn.commit()
    cursor.close()
    conn.close()
    return str(json.dumps(rows_out, ensure_ascii=False))
def write_txt(self, uid, time_list, weibo_list):
    # One user's id plus their parallel (timestamp, post) lists.
    """Write one user's posts to '<text_dir>uid=<uid>.txt' as
    'uid,time,post' lines; falls back to the default title file when
    uid is None. Python 2 code (print statements, utf-8 byte encoding);
    the extra prints are debug output.
    """
    zipper = zip(time_list, weibo_list)
    print "正在写入博文"
    if uid is not None:
        # Keep digits only -- strips any 'uid=' prefix or stray markup.
        not_number = re.compile("\D")
        uid = re.sub(not_number, "", str(uid))
        file = open(self.text_dir + "uid=" + str(uid) + ".txt", "w+")  # NOTE(review): `file` shadows the builtin
    else:
        file = open(self.text_dir + self.default_title + ".txt", "w+")
    for i in zipper:
        time = i[0]
        weibo = i[1]
        print time
        print type(weibo)
        time = time.encode("utf-8", "ignore")
        print type(time), type(weibo), type(uid)
        file.write(uid + "," + time + "," + weibo + "\n")
    file.close()
    print "完成输入博文"
def write_txt(self, uid, time_list, weibo_list):
    # One user's id plus their parallel (timestamp, post) lists.
    """Write one user's posts to '<text_dir>uid=<uid>.txt' as
    'uid,time,post' lines; falls back to the default title file when
    uid is None. Python 2 code (print statements, utf-8 byte encoding);
    the extra prints are debug output.
    """
    zipper = zip(time_list, weibo_list)
    print '正在写入博文'
    if uid is not None:
        # Keep digits only -- strips any 'uid=' prefix or stray markup.
        not_number = re.compile('\D')
        uid = re.sub(not_number, '', str(uid))
        file = open(self.text_dir + 'uid=' + str(uid) + '.txt', 'w+')  # NOTE(review): `file` shadows the builtin
    else:
        file = open(self.text_dir + self.default_title + '.txt', 'w+')
    for i in zipper:
        time = i[0]
        weibo = i[1]
        print time
        print type(weibo)
        time = time.encode('utf-8', 'ignore')
        print type(time), type(weibo), type(uid)
        file.write(uid + ',' + time + ',' + weibo + '\n')
    file.close()
    print '完成输入博文'
def getdailyjson():
    """Bottle endpoint: return one JSON page of a day's praise/comment
    counters.

    Form fields: `itemnumber` (rows per page), `page` (1-based index)
    and `date` (which day's counting database to open).
    NOTE(review): `date` comes straight from the client and is spliced
    into the filesystem path -- sanitize upstream. Python 2 code: on
    py3, str(value.encode("utf-8")) would yield "b'...'" strings.
    """
    itemnumber = request.forms.get("itemnumber")
    page = request.forms.get("page")
    date = request.forms.get("date")
    itemnumber=int(itemnumber)
    page=int(page)
    result=[]
    dbpath='db/'+date+'-count.db'
    conn = sqlite3.connect(dbpath)
    c = conn.cursor()
    SqlSentence="SELECT * FROM statics"
    i=0
    j=0
    # Manual pagination: i counts all rows scanned, j counts rows taken;
    # rows are taken once i reaches the page's start offset.
    for row in c.execute(SqlSentence):
        if i==(page-1)*itemnumber+j and j<itemnumber:
            j=j+1
            isbn=row[0]
            praise=row[1]
            comment=row[2]
            bookname=row[3]
            username=row[4]
            time=row[5]
            single={"isbn":str(isbn.encode("utf-8")),
                    "praise":str(praise.encode("utf-8")),
                    "comment":str(comment.encode("utf-8")),
                    "bookname":str(bookname.encode("utf-8")),
                    "username":str(username.encode("utf-8")),
                    "time":str(time.encode("utf-8"))}
            result.append(single)
            if j==itemnumber:
                break
        i=i+1
    conn.commit()
    c.close()
    conn.close()
    #print result
    out=json.dumps(result, ensure_ascii=False)
    #print i
    return str(out)
def parseRecallData(self):
    # Return a dictionary of values
    # Parse data when we know we are receiving a 'Recall Data' message
    # We assume the header has already been read out of the input buffer to be identified (the hard part)
    """Read one stored-result record from the instrument's serial stream.

    Fixed-width protocol: skip the 116-byte header remainder, then pull
    the record ID, reading value, date and time fields from their known
    offsets. Appends the parsed record to self.readings and returns it.
    NOTE(review): serial.read() returns bytes; bytes.encode('ascii') is
    a py2 idiom and would fail on py3 -- confirm the interpreter.
    """
    # Trash a space
    self.readings.clear()
    # TODO: Double check this header size is correct
    self.serialObj.read(116)
    # Assuming a result is in the input buffer
    # This could also be sanity checked to make sure it is 65 bytes wide
    self.serialObj.read(2)
    ID = self.serialObj.read(3)
    ID = int(ID.encode('ascii'))
    self.serialObj.read(2)
    reading = self.serialObj.read(3)
    reading = int(reading.encode('ascii'))
    self.serialObj.read(30)
    date = self.serialObj.read(10)
    # At the end of the day, the date will always be read by a human. No need to int()
    date = date.encode('ascii')
    self.serialObj.read(1)
    time = self.serialObj.read(11)
    time = time.encode('ascii')
    # XXX: Gives an error in pylinter. readings is not an array
    self.readings.append({
        'IDNum': ID,
        'reading': reading,
        'date': date,
        'time': time
    })
    return self.readings
def parseResultReportHeader(self):
    # Return a dictionary of values
    # Parse data when we know we are receiving a 'Result Reporting' message
    # We assume the header has already been read out of the input buffer to be identified (the hard part)
    """Read the fixed-width Result Reporting header from the serial
    stream into self.readings (operatorID, serialNumber, date, time)
    and flag result-reporting mode.

    NOTE(review): serial.read() returns bytes; bytes.encode('ascii') is
    a py2 idiom and would fail on py3 -- confirm the interpreter.
    """
    self.readings = {}
    self.flags["resultReportingFlag"] = True
    # Clear some unneeded characters
    self.serialObj.read(13)
    # Begin parsing
    # Operator ID
    # Nb: If this field is unused, it will be a row of underscorees
    operatorID = self.serialObj.read(19)
    operatorID = operatorID.encode('ascii')
    # Clear some unneeded characters
    self.serialObj.read(5)
    serialNumber = self.serialObj.read(9)
    serialNumber = serialNumber.encode('ascii')
    # Clear some unneeded characters
    self.serialObj.read(1)
    date = self.serialObj.read(10)
    date = date.encode('ascii')
    # Clear some unneeded characters
    self.serialObj.read(1)
    time = self.serialObj.read(11)
    time = time.encode('ascii')
    # Drop whatever trails the header so the next message starts clean.
    self.serialObj.reset_input_buffer()
    # The 'readings' object acts as an intermediate buffer between the object going onto the stack, and multiple funciton
    self.readings["operatorID"] = operatorID
    self.readings["serialNumber"] = serialNumber
    self.readings["date"] = date
    self.readings["time"] = time
def main(day):
    """Poll Qunar's station-to-station API for trains on 2018-02-<day>
    and flag any train under 12 hours with seats available.

    Side effects only: sets the module globals `have`, `train_num`,
    `count` and `day_url`. Python 2 code (byte-string regex over
    result.content, unicode split on '小时').
    """
    global have
    global train_num
    global count
    global day_url
    # NOTE(review): "User - Agent" (with spaces) is not the standard
    # User-Agent header name -- confirm this is intentional.
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Referer": "https://train.qunar.com/stationToStation.htm?fromStation=%E5%93%88%E5%B0%94%E6%BB%A8&toStation=%E5%8C%97%E4%BA%AC&date=2018-02-12&drainage=",
        "User - Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML,like Gecko) Version/10.1.2 Safari/603.3.8"
    }
    # %% escapes keep the URL-encoded station names literal; only the
    # day number is substituted.
    day_url = "https://train.qunar.com/dict/open/s2s.do?callback=jQuery17207765959610044582_1516718746460&dptStation=%%E5%%8C%%97%%E4%%BA%%AC&arrStation=%%E5%%93%%88%%E5%%B0%%94%%E6%%BB%%A8&date=2018-02-%s&type=normal&user=neibu&source=site&start=1&num=500&sort=3&_=1516718746689" % (
        str(day))
    result = requests.get(day_url, headers=headers)
    result_all = result.content
    # result_re = re.findall(r"({(.+?)})",result_all)
    # print result_all
    # The response is JSONP: strip the jQuery...(...) wrapper by
    # grabbing the outermost {...} (greedy match).
    p = r"{.*}"
    m = re.search(p, result_all)
    result_re = m.group(0)
    result_json = json.loads(result_re)
    sta_station = result_json["data"]["dptStation"]
    arr_station = result_json["data"]["arrStation"]
    date = result_json["data"]["dptDate"]
    train_list = result_json["data"]["s2sBeanList"]
    for key in train_list:
        # `interval` looks like u"11小时23分" -- keep the hour part.
        time = key["extraBeanMap"]["interval"]
        time = time.encode("utf-8")
        time_long = time.split("小时")
        times = time_long[0]
        for seats in key["seats"]:
            if key["seats"][seats]["count"] > 0 and int(times) < 12:
                #print times,"+++++++",key["seats"][seats]["count"]
                have = 1
                train_num = key["trainNo"].encode("utf-8")
                count = str(key["seats"][seats]["count"])
global LASTID LASTID = _id except Exception, e: print "错误发生在prase item解析" print e conn.test.fails.insert({"url": target_url}) break print "load to db....." dic = { "askMan": askMan.encode("utf8"), "askContent": askContent.encode("utf8"), "answerMan": answerMan.encode("utf8"), "answerContent": answerContent.encode("utf8"), "time": time.encode("utf8"), } try: db = conn.test collections = db.DATA collections.insert(dic) except Exception as e: print "prase 数据库录入失败" conn.test.fails.insert({"url": tartget_url}) print e print "数据录入mongo..." def worker(): """
while True: opening() choice = input("\t\t\t Enter Your selection : ") s.send(choice.encode()) if choice == '1': loop = 1 while loop == 1: _ = system('clear') time = timezone() s.send(time.encode()) if time == '1': # America / New York Time zone temp = s.recv(1024) print("\n\t\t\t Time in the New york : " + temp.decode()) input("\n\t\t\t Press Enter to proceed .....") _ = system('clear') elif time == '2': # Europe / London temp = s.recv(1024) print("\n\t\t\t Time in the London : " + temp.decode()) input("\n\t\t\t Press Enter to proceed .....") _ = system('clear') elif time == '9': loop = 0 input(
def getjobid(self, user):
    """Derive a job id for `user`: the SHA-1 hex digest of the current
    timestamp joined to the user name with a dash."""
    stamp = str(datetime.datetime.now()) + '-' + user
    return hashlib.sha1(stamp.encode('utf-8')).hexdigest()
def open_KKT(self,data,time):
    """Issue fiscal-printer command '10' with the given date and time,
    both encoded to cp866 (the Russian DOS codepage the KKT protocol
    uses). `razd` is presumably the module-level field separator --
    TODO confirm.
    """
    self.comanda('10',data.encode('cp866'),razd,time.encode('cp866'),razd)
# Classify payment messages by emoji content and fan them out to three
# CSV files; unparseable lines are uncounted and stashed in `bf`.
# NOTE(review): py2 code -- the counters (total, emojim, emojit) and the
# open file handles (f, m, mn, r, bf) plus the regexes colouredRE/ree
# are defined earlier in the file; confirm. pickle.dump to a text-mode
# "w" file is also a py2 idiom.
mod = csv.writer(m)       # messages containing coloured emoji
modnon = csv.writer(mn)   # messages containing other emoji
rest = csv.writer(r)      # everything else
for line in f:
    total += 1
    try:
        j = json.loads(line)
        message = j[u'message']
        time = j[u'created_time']
        payment_id = str(j[u'payment_id'])
        what_type = j[u'type']
        if(len(colouredRE.findall(message)) > 0):
            emojim+=1
            mod.writerow([payment_id.encode("utf-8"), what_type.encode("utf-8"), message.encode("utf-8"), time.encode("utf-8")])
        elif(len(ree.findall(message)) > 0):
            emojit += 1
            modnon.writerow([payment_id.encode("utf-8"), what_type.encode("utf-8"), message.encode("utf-8"), time.encode("utf-8")])
        else:
            rest.writerow([payment_id.encode("utf-8"), what_type.encode("utf-8"), message.encode("utf-8"), time.encode("utf-8")])
    except:
        # Bad line: undo the count and keep the raw text for inspection.
        total -= 1
        bf.write(line)
# Persist the summary counts for later analysis.
te = [total, emojim, emojit]
pickle.dump(te, open("stats.pkl","w"))
def spider(url):
    """Scrape one bilibili video page and persist its stats to MySQL.

    Given a video URL ending in the av number, this:
      1. parses title / category / uploader info from the page HTML,
      2. extracts the cid and queries the player interface for
         click/danmaku/coin/favourite counters,
      3. fetches the comment count from the reply API,
      4. inserts one row into the ``bilibili.video`` table.

    NOTE(review): indentation was reconstructed from a collapsed source;
    the nesting of the reply-API/DB section inside the ``video_each`` loop
    should be confirmed against the original file.
    Relies on module globals: ``head`` (HTTP headers), ``requests``,
    ``etree`` (lxml), ``re``, ``json``, ``MySQLdb``.
    """
    html = requests.get(url, headers=head)
    selector = etree.HTML(html.text)
    content = selector.xpath("//html")
    for each in content:
        # No v-title element means the video page is gone (404/removed).
        title = each.xpath('//div[@class="v-title"]/h1/@title')
        if title:
            # av number is whatever follows the fixed URL prefix.
            av = url.replace("http://bilibili.com/video/av", "")
            title = title[0]
            # Category breadcrumb pieces; joined as "a-b-c" below.
            tminfo1_log = each.xpath('//div[@class="tminfo"]/a/text()')
            tminfo2_log = each.xpath('//div[@class="tminfo"]/span[1]/a/text()')
            tminfo3_log = each.xpath('//div[@class="tminfo"]/span[2]/a/text()')
            if tminfo1_log:
                tminfo1 = tminfo1_log[0]
            else:
                tminfo1 = ""
            if tminfo2_log:
                tminfo2 = tminfo2_log[0]
            else:
                tminfo2 = ""
            if tminfo3_log:
                tminfo3 = tminfo3_log[0]
            else:
                tminfo3 = ""
            tminfo = tminfo1 + '-' + tminfo2 + '-' + tminfo3
            # Upload time, uploader id/name, submissions and fan counts.
            time_log = each.xpath('//div[@class="tminfo"]/time/i/text()')
            mid_log = each.xpath('//div[@class="b-btn f hide"]/@mid')
            name_log = each.xpath('//div[@class="usname"]/a/@title')
            article_log = each.xpath(
                '//div[@class="up-video-message"]/div[1]/text()')
            fans_log = each.xpath(
                '//div[@class="up-video-message"]/div[2]/text()')
            if time_log:
                time = time_log[0]
            else:
                time = ""
            if mid_log:
                mid = mid_log[0]
            else:
                mid = ""
            if name_log:
                name = name_log[0]
            else:
                name = ""
            # Strip the Chinese labels ("投稿:" = submissions,
            # "粉丝:" = fans); "-1" marks "not found".
            if article_log:
                article = article_log[0].replace(u"投稿:", "")
            else:
                article = "-1"
            if fans_log:
                fans = fans_log[0].replace(u"粉丝:", "")
            else:
                fans = "-1"
            # Up to three content tags.
            tag1_log = each.xpath('//ul[@class="tag-list"]/li[1]/a/text()')
            tag2_log = each.xpath('//ul[@class="tag-list"]/li[2]/a/text()')
            tag3_log = each.xpath('//ul[@class="tag-list"]/li[3]/a/text()')
            if tag1_log:
                tag1 = tag1_log[0]
            else:
                tag1 = ""
            if tag2_log:
                tag2 = tag2_log[0]
            else:
                tag2 = ""
            if tag3_log:
                tag3 = tag3_log[0]
            else:
                tag3 = ""
            # The cid is embedded either in the player iframe src or in an
            # inline script; both are searched for "cid=...&aid".
            cid_html_1 = each.xpath('//div[@class="scontent"]/iframe/@src')
            cid_html_2 = each.xpath('//div[@class="scontent"]/script/text()')
            if cid_html_1 or cid_html_2:
                if cid_html_1:
                    cid_html = cid_html_1[0]
                else:
                    cid_html = cid_html_2[0]
                cids = re.findall(r'cid=.+&aid', cid_html)
                cid = cids[0].replace("cid=", "").replace("&aid", "")
                # Player interface returns an XML document of counters.
                info_url = "http://interface.bilibili.com/player?id=cid:" + str(
                    cid) + "&aid=" + av
                video_info = requests.get(info_url)
                video_selector = etree.HTML(video_info.text)
                for video_each in video_selector:
                    click_log = video_each.xpath('//click/text()')
                    danmu_log = video_each.xpath('//danmu/text()')
                    coins_log = video_each.xpath('//coins/text()')
                    favourites_log = video_each.xpath('//favourites/text()')
                    duration_log = video_each.xpath('//duration/text()')
                    honor_click_log = video_each.xpath(
                        '//honor[@t="click"]/text()')
                    honor_coins_log = video_each.xpath(
                        '//honor[@t="coins"]/text()')
                    honor_favourites_log = video_each.xpath(
                        '//honor[@t="favourites"]/text()')
                    # honor_* default to 0, counters default to -1,
                    # duration defaults to "".
                    if honor_click_log:
                        honor_click = honor_click_log[0]
                    else:
                        honor_click = 0
                    if honor_coins_log:
                        honor_coins = honor_coins_log[0]
                    else:
                        honor_coins = 0
                    if honor_favourites_log:
                        honor_favourites = honor_favourites_log[0]
                    else:
                        honor_favourites = 0
                    if click_log:
                        click = click_log[0]
                    else:
                        click = -1
                    if danmu_log:
                        danmu = danmu_log[0]
                    else:
                        danmu = -1
                    if coins_log:
                        coins = coins_log[0]
                    else:
                        coins = -1
                    if favourites_log:
                        favourites = favourites_log[0]
                    else:
                        favourites = -1
                    if duration_log:
                        duration = duration_log[0]
                    else:
                        duration = ""
                    # Comment count ('acount') from the reply API.
                    json_url = "http://api.bilibili.com/x/reply?jsonp=jsonp&type=1&sort=0&pn=1&nohot=1&oid=" + av
                    jsoncontent = requests.get(json_url, headers=head).content
                    jsDict = json.loads(jsoncontent)
                    if jsDict['code'] == 0:
                        jsData = jsDict['data']
                        jsPages = jsData['page']
                        common = jsPages['acount']
                        try:
                            # NOTE(review): no conn.commit()/close() here —
                            # rows may be lost unless autocommit is enabled.
                            conn = MySQLdb.connect(host='localhost',
                                                   user='******',
                                                   passwd='',
                                                   port=3306,
                                                   charset='utf8')
                            cur = conn.cursor()
                            conn.select_db('bilibili')
                            # 22 columns; parameterized insert.
                            cur.execute(
                                'INSERT INTO video VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)',
                                [
                                    str(av), str(av), str(cid),
                                    title.encode('utf-8'),
                                    str(tminfo.encode('utf-8')),
                                    time.encode('utf-8'),
                                    click.encode('utf-8'), danmu, coins,
                                    favourites, duration.encode('utf-8'),
                                    mid.encode('utf-8'),
                                    name.encode('utf-8'),
                                    article.encode('utf-8'),
                                    fans.encode('utf-8'),
                                    tag1.encode('utf-8'),
                                    tag2.encode('utf-8'),
                                    tag3.encode('utf-8'), str(common),
                                    honor_click, honor_coins,
                                    honor_favourites
                                ])
                            print("Succeed: av" + str(av))
                        except MySQLdb.Error, e:
                            print("Mysql Error %d: %s" % (e.args[0], e.args[1]))
                    else:
                        print("Error_Json: " + url)
            else:
                print("Error_noCid:" + url)
        else:
            print("Error_404: " + url)
def attendance_main(self):
    """Run one pass of the attendance state machine.

    Three phases share near-identical loops:
      * first ever run (``not self.start``): POST for today's schedule;
      * a new calendar day: re-fetch the schedule;
      * otherwise: re-scan the cached ``self.lectures_array``.
    Each loop decides whether "now" falls inside a 2-hour lecture window
    and sets ``self.time_for_lecture``. If a lecture is active, admin
    fingerprints are loaded into the scanner and an admin scan gates the
    student-scan UI; otherwise the GUI is refreshed and the Qt event loop
    is pumped for ~3 seconds.

    NOTE(review): indentation reconstructed from a collapsed source —
    confirm loop/branch nesting against the original file.
    Uses module globals: ``requests``, ``json``, ``datetime``,
    ``literal_eval`` (ast), ``f`` (fingerprint scanner class — presumably),
    ``q`` (Qt module alias — presumably PyQt QtCore).
    """
    self.add_template_flag = 0
    self.time_for_lecture = 0
    # NOTE(review): stored as an int (.day) here but overwritten with a
    # full datetime in the branches below — the elif comparison against
    # ``.day`` only works on the very first pass; verify intent.
    self.today = datetime.datetime.now().day
    ###############################
    if not self.start:
        # First-ever run: fetch today's schedule once.
        self.start = 1
        #get the time in alphabetical form
        self.today = datetime.datetime.now()
        ###############################
        # please edit this part for saturdays and sundays
        #js = {"day":today.strftime("%a"),"location":"CMPE001"}
        js = {"day": "MON", "location": "CMPE001"}
        ###############################
        # try:
        #The following is the request to get attendance data for today
        self.current_attendance = requests.post(self.url, json.dumps(js))
        self.current_attendance = json.loads(
            self.current_attendance.content)
        # print self.current_attendance
        self.lectures_array = self.current_attendance["records"]
        for lecture in self.lectures_array:
            self.current_time = datetime.datetime.now()
            for key in lecture.keys():
                # Every key except u"instrcutors" (spelling matches the
                # API payload) is a lecture start-time string.
                if (key != u"instrcutors"):
                    time = key
                    self.unedited_lecture_time = time
                    time = time.replace(":", "")
                    time = time.encode('ascii', 'ignore')
                    # NOTE(review): after stripping ':' the minute slice
                    # [3:5] looks off for an "HH:MM" key (expected [2:4])
                    # — confirm the key format.
                    self.lecture_time = self.current_time.replace(
                        hour=int(time[0:2]), minute=int(time[3:5]))
                    self.current_time = datetime.datetime.now()
                    #remove the following line when finishing
                    self.current_time = self.current_time.replace(hour=22)
                    # Lecture window = start time + 2 hours.
                    if self.current_time >= self.lecture_time and self.current_time <= (
                            self.lecture_time + datetime.timedelta(hours=2)):
                        self.time_for_lecture = 1
                        # self.lecture_time= lecture
                        break
                    else:
                        self.time_for_lecture = 0
            if self.time_for_lecture:
                break
    ###############################
    elif self.today != datetime.datetime.now().day:
        # A new day: re-fetch the schedule and re-scan it.
        self.today = datetime.datetime.now()
        # js = {"day":self.today.strftime("%a"),"location":"CMPE001"}
        js = {"day": "MON", "location": "CMPE001"}
        self.current_attendance = requests.post(self.url, json.dumps(js))
        self.current_attendance = json.loads(
            self.current_attendance.content)
        self.lectures_array = self.current_attendance["records"]
        for lecture in self.lectures_array:
            self.current_time = datetime.datetime.now()
            for key in lecture.keys():
                if (key != u"instrcutors"):
                    time = key
                    self.unedited_lecture_time = time
                    time = time.replace(":", "")
                    time = time.encode('ascii', 'ignore')
                    self.lecture_time = self.current_time.replace(
                        hour=int(time[0:2]), minute=int(time[3:5]))
                    self.current_time = datetime.datetime.now()
                    #remove the following line when finishing
                    self.current_time = self.current_time.replace(hour=11)
                    if self.current_time >= self.lecture_time and self.current_time <= (
                            self.lecture_time + datetime.timedelta(hours=2)):
                        self.time_for_lecture = 1
                        # self.lecture_time= lecture
                        break
                    else:
                        self.time_for_lecture = 0
            if self.time_for_lecture:
                break
    else:
        # Same day, schedule already cached: just re-check the window.
        for lecture in self.lectures_array:
            self.current_time = datetime.datetime.now()
            for key in lecture.keys():
                if (key != u"instrcutors"):
                    time = key
                    self.unedited_lecture_time = time
                    time = time.replace(":", "")
                    time = time.encode('ascii', 'ignore')
                    self.lecture_time = self.current_time.replace(
                        hour=int(time[0:2]), minute=int(time[3:5]))
                    self.current_time = datetime.datetime.now()
                    #remove the following line when finishing
                    self.current_time = self.current_time.replace(hour=11)
                    if self.current_time >= self.lecture_time and self.current_time <= (
                            self.lecture_time + datetime.timedelta(hours=2)):
                        self.time_for_lecture = 1
                        # debug trace left in by the author
                        print "choco"
                        # self.lecture_time= lecture
                        break
                    else:
                        self.time_for_lecture = 0
            if self.time_for_lecture:
                break
    # self.current_time = datetime.datetime.now()
    # # for the time being
    # self.current_time= self.current_time.replace(hour=11)
    # for lecture in self.lectures_array_edited:
    # if self.current_time >= lecture and self.current_time <= (lecture + datetime.timedelta(hours=2) ):
    # self.time_for_lecture=1
    # self.lecture_time= lecture
    # break
    # else:
    # self.time_for_lecture=0
    if self.time_for_lecture:
        self.admins_fingers = []
        # x= str(self.lecture_time.hour) +":"+ str(self.lecture_time.minute)
        # print self.lectures_array
        # print self.current_attendance
        # get the instructors array from the lecture
        inst = ""
        i = 0
        # print self.unedited_lecture_time
        # print self.current_attendance["admins"]
        # Find the record matching the active lecture time and take its
        # instructors list.
        for lecture in self.current_attendance["records"]:
            for key in lecture:
                if key == self.unedited_lecture_time:
                    print key
                    inst = self.current_attendance["records"][i][
                        "instrcutors"]
            i += 1
        # print inst
        # get the admins array
        self.admins_array = self.current_attendance["admins"] + inst
        # Build the fingerprint template list; a missing print is padded
        # with 512 zeros so indices stay aligned with admins_array.
        for admin in self.admins_array:
            # print type(admin["fingerPrint"][0])
            if (admin["fingerPrint"] == None):
                temp = [0] * 512
                self.admins_fingers.append(temp)
            else:
                # stored as a stringified list; literal_eval parses it
                temp = literal_eval(admin["fingerPrint"])
                # print type(temp)
                # print type(temp[0])
                self.admins_fingers.append(temp)
                print temp
        # for finger in self.admins_fingers:
        # print finger
        if not self.admin_scan_flag:
            # Load templates into the scanner and wait for an admin scan.
            self.finger = f()
            self.finger.f.clearDatabase()
            temp = self.finger.template_add(self.admins_fingers)
            print temp
            self.admin_scan_flag = 1
            # admin_scan() returns the matched template index, or -1 for
            # no match (presumably — confirm against the scanner class).
            temp = self.finger.admin_scan()
            print temp
            print len(self.admins_fingers) - 1
            if temp >= 0 and temp <= len(self.admins_fingers) - 1:
                self.app_gui.template = ("./attendance/student_scan.html", {})
            elif temp == -1:
                self.app_gui.template = ("./attendance/admin_scan.html", {
                    "errors": ["No match for the fingerprint given "]
                })
            else:
                self.app_gui.template = ("./attendance/admin_scan.html", {})
        else:
            # Already scanned: refresh the page and pump Qt events ~3s.
            self.app_gui.evaluate_javascript("myFunction1();")
            self.admin_scan_flag = 0
            die_time = q.QTime.currentTime().addSecs(3)
            while q.QTime.currentTime() < die_time:
                q.QCoreApplication.processEvents(q.QEventLoop.AllEvents, 100)
    return