def __init__(self):
    """Create a solved cube: one uniformly-colored face per side."""
    # Side labels and their starting colors share an index: 0..5.
    self.sides = ['F', 'R', 'B', 'L', 'U', 'D']
    self.colors = ['R', 'B', 'O', 'G', 'W', 'Y']
    # One face object per color, in the same order as self.sides.
    self.faces_list = [face(color) for color in self.colors]
    # Side letter -> face object (same pairing as the two lists above).
    self.faces = dict(zip(self.sides, self.faces_list))
    # Side letter -> the member function that rotates that side.
    self.rotations = {'F': self.F, 'R': self.R, 'B': self.B,
                      'L': self.L, 'U': self.U, 'D': self.D}
    # Side letter -> its neighbouring sides as a (N, E, S, W) tuple.
    self.directions = {'F': ('U', 'R', 'D', 'L'),
                       'R': ('U', 'B', 'D', 'F'),
                       'B': ('U', 'L', 'D', 'R'),
                       'L': ('U', 'F', 'D', 'B'),
                       'U': ('B', 'R', 'F', 'L'),
                       'D': ('F', 'R', 'B', 'L')}
def s(self):
    # Second-stage face-recognition check of a two-trial login flow.
    #
    # NOTE(review): this method is corrupted in the source.  The line
    # "print('user:'******'Authorized'" contains a redacted span ('******')
    # and is not valid Python, and the two 'break' statements imply an
    # enclosing loop whose header is missing (presumably a `while True:`).
    # The code is reproduced verbatim; it cannot run until the redacted
    # part is restored.
    global trail_face  # module-level attempt counter (0-based)
    print(trail_face)
    if trail_face==1:
        self.txt2=('.......Trail 2.......')
    img=face()          # face-recognition helper
    record=database()   # stored user profiles
    record.connection()
    # Predict the user from the stored profile key pairs.
    prediction=img.prediction(keypairs(record.get_profile()))
    print('prediction:',prediction)
    self.txt2='Done'
    # --- redacted/corrupted section: presumably compared `prediction`
    # against the stored user and set txt2='Authorized' on success ---
    print('user:'******'Authorized'
    trail_face=0
    self.manager.current='tier2'      # recognized: advance to tier2 screen
    break
    else:
        self.txt2='Failed! Try Again'
        trail_face+=1
        self.manager.current='tier1'  # retry on the tier1 screen
        break
    if trail_face>1:
        # Second failure: deny access and return to the first screen.
        trail_face=0
        self.txt2='**No Access**'
        self.manager.current='first'
def process(self):
    """Resize the image at self.path, then detect and encode faces in it.

    resize.resize() is expected to write a '___temp.jpg' into the same
    directory as self.path (Windows '\\'-separated paths -- TODO confirm);
    every detected face is appended to self.faces as a face(...) record.
    """
    resize.resize(self.path)
    # Directory prefix of self.path, keeping the trailing backslash.
    # The original stripped characters one at a time and raised IndexError
    # when the path contained no backslash; rfind returns -1 then, so the
    # prefix becomes '' and the temp file is looked up in the cwd instead.
    sep_index = self.path.rfind('\\')
    temp_dir = self.path[:sep_index + 1]
    img = fr.api.load_image_file(temp_dir + '___temp.jpg')
    # HOG-based detection with one upsample pass; one jitter per encoding.
    location = fr.api.face_locations(img, number_of_times_to_upsample=1, model='hog')
    encoding = fr.api.face_encodings(img, known_face_locations=location, num_jitters=1)
    # Pair each bounding box with its encoding and record the face.
    for box, enc in zip(location, encoding):
        self.faces.append(face(self.path, box, enc))
def initFaces(self):
    """Build the six face objects of the box and set each edge's joint type.

    Top/bottom get slots on both edge directions; left/right get slots on
    height and tabs on width; front/back get tabs.

    NOTE(review): the original called setTabType(face.FWIDTH, TABS) twice
    for FRONT and BACK (copy/paste), leaving their FHEIGHT edge unset.
    This version sets FHEIGHT to TABS as presumably intended -- confirm
    against the mating edges of the side faces.
    """
    self.faces = [None] * 6

    def make(idx, dim_a, dim_b, height_type, width_type):
        # Create one face and configure both of its edge joint types.
        fc = face.face(dim_a, dim_b, self.Wall)
        fc.setTabType(face.FHEIGHT, height_type)
        fc.setTabType(face.FWIDTH, width_type)
        self.faces[idx] = fc

    make(FACE_TOP, self.Depth, self.Width, SLOTS, SLOTS)
    make(FACE_BOTTOM, self.Depth, self.Width, SLOTS, SLOTS)
    make(FACE_LEFT, self.Height, self.Depth, SLOTS, TABS)
    make(FACE_RIGHT, self.Height, self.Depth, SLOTS, TABS)
    make(FACE_FRONT, self.Height, self.Width, TABS, TABS)
    make(FACE_BACK, self.Height, self.Width, TABS, TABS)

    # No relief cuts on any face.
    for fc in self.faces:
        fc.setNoRelief()
def __init__(self):
    """Build a solved cube plus the lookup tables the solver relies on."""
    self.sides = ['F', 'R', 'B', 'L', 'U', 'D']   # index 0..5
    self.colors = ['R', 'B', 'O', 'G', 'W', 'Y']  # color of each side, same index
    self.faces_list = []
    for color in self.colors:
        self.faces_list.append(face(color))
    # Side letter -> face object.
    self.faces = {side: self.faces_list[i] for i, side in enumerate(self.sides)}
    # Side letter -> bound rotation method.
    self.rotations = {'F': self.F, 'R': self.R, 'B': self.B,
                      'L': self.L, 'U': self.U, 'D': self.D}
    # Side letter -> neighbours in N, E, S, W order.
    self.directions = {'F': ('U', 'R', 'D', 'L'), 'R': ('U', 'B', 'D', 'F'),
                       'B': ('U', 'L', 'D', 'R'), 'L': ('U', 'F', 'D', 'B'),
                       'U': ('B', 'R', 'F', 'L'), 'D': ('F', 'R', 'B', 'L')}
def collecting():
    """Scrape the Naver news front page.

    Gathers article links from the headline, main, wide-list and
    most-viewed sections, parses each article, saves it to the database,
    and finally hands all collected rows to panda().
    """
    data = urlopen(base_url).read()
    soup = BeautifulSoup(data, "html.parser")
    today = datetime.today()
    collect_time = str(today.year) + "," + str(today.month) + "," + str(today.day)
    links = []
    # Headline items that include a photo.
    for item in soup.find_all('div', {'class': 'hdline_flick_item'}):
        links.append(base_url + item.find('a').get('href'))
    # Main (representative) articles.
    for item in soup.find_all('dd'):
        links.append(item.find('a').get('href'))
    # Plain headlines.
    for item in soup.find_all('div', 'hdline_article_tit'):
        links.append(base_url + item.find('a').get('href'))
    # Remaining articles.
    for section in soup.find_all('div', 'mtype_list_wide'):
        try:
            for a in section.find_all('a'):
                links.append(a.get('href'))
        except OSError:
            # Preserved from the original; kept to avoid changing behavior.
            break
    # Most-viewed news ranking.
    for item in soup.find_all('ul', {'class': 'section_list_ranking'}):
        for a in item.find_all('a'):
            links.append(base_url + a.get('href'))

    rows = []
    for url in links:
        article_body, title = parse(url)
        press_1 = press(url)
        good, nice, sad, angry, wanted, recommand = face(url)
        row = ['naver_news', title, article_body, collect_time, url,
               good, nice, sad, angry, wanted, recommand, press_1]
        try:
            save('naver_news', title, article_body, collect_time, url,
                 good, nice, sad, angry, wanted, recommand, press_1)
        except Exception:
            # An article listed both in the headlines and in the ranking
            # would violate the primary key on insert; skip the duplicate.
            pass
        rows.append(row)
    panda(rows)
def train(self):
    """Train the face recognizer from the stored profile, then show the
    training screen with a completion message."""
    recognizer = face()
    record.connection()
    # Key pairs derived from the persisted user profile.
    profile_keys = keypairs(record.get_profile())
    recognizer.train2(profile_keys)
    self.label = '**Training Completed**'
    self.manager.current = 'training'
def get_frame(self):
    """Read one frame from self.cap, recognize faces in it, annotate the
    frame, refresh stream.jpg / result.html, and return the bytes read
    from the stream.jpg handle.

    NOTE(review): several suspicious spots are flagged inline; the code
    itself is preserved verbatim.
    """
    fn = None                       # path of the last cropped face image
    # NOTE(review): this 'wb+' open truncates stream.jpg, and the handle is
    # read at the very end while cv2.imwrite rewrites the same file through
    # its own handle -- fragile; confirm the intended interplay.
    self.frames = open("stream.jpg", 'wb+')
    self.ret, self.frame = self.cap.read()
    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(self.frame, (0, 0), fx=0.25, fy=0.25)
    # Only process every other frame of video to save time
    if self.process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(small_frame)
        face_encodings = face_recognition.face_encodings(
            small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            # results = face_recognition.compare_faces(self.known_faces_encodings, face_encoding)
            face_distances = face_recognition.face_distance(
                self.known_faces_encodings, face_encoding)
            # NOTE(review): min() raises ValueError when
            # self.known_faces_encodings is empty -- confirm it is seeded.
            if min(face_distances) < 0.5:
                # Closest known face within threshold: reuse its uid.
                # uid = uids[results.index(True)]
                uid = self.uids[np.argmin(face_distances)]
                name = uid
            else:
                # Unknown face: mint a new uid and remember the encoding.
                name = uuid.uuid4().hex
                self.uids.append(name)
                self.known_faces_encodings.append(face_encoding)
                # name = "Unknown"
            face_names.append(name)
    # NOTE(review): assigns a local, not self.process_this_frame, so the
    # every-other-frame throttle never actually toggles -- likely a bug.
    process_this_frame = not self.process_this_frame
    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        crop_face = self.frame[top:bottom, left:right]
        fn = 'data/new_faces/{}.png'.format(name)
        if not os.path.isfile(fn):
            # First sighting of this uid: persist the cropped face.
            cv2.imwrite(fn, crop_face)
            cv2.imwrite('static/{}.png'.format(name), crop_face)
        # Draw a box around the face
        cv2.rectangle(self.frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(self.frame, (left, bottom - 35), (right, bottom),
                      (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(self.frame, str(name), (left + 6, bottom - 6), font, 1.0,
                    (255, 255, 255), 1)
    if self.ret:
        # frame captures without errors...
        cv2.imwrite("stream.jpg", self.frame)  # Save image...
        with open('result.html', 'w') as f:
            result = pd.DataFrame()
            result['uid'] = face_names
            fns = []
            vks = []
            genders = []
            for uid in face_names:
                if uid in df.index:
                    # Known person: photo / vk link / gender come from df.
                    print("&&&&&&&&&&&&&&&&")
                    kind = 'familiar'
                    fns.append('/static/{}.jpg'.format(uid))
                    vks.append('https://vk.com/id{}'.format(uid))
                    # import ipdb; ipdb.set_trace()
                    genders.append(df.loc[uid]['sex'])
                else:
                    # Unknown person: query the face() service on the last
                    # cropped image (skipped when the crop is unchanged).
                    print(self.prev_fn)
                    print(fn)
                    res = {}
                    if self.prev_fn != fn:
                        res = face(fn)
                        print(res)
                        if 'error' not in res:
                            try:
                                res = res[0].get('faceAttributes')
                            except:
                                print(res)
                                res = {}
                        self.prev_fn = fn
                    kind = res.get('gender', 'None')
                    print(kind)
                    genders.append(kind)
                    fns.append('/static/{}.png'.format(uid))
                    vks.append(None)
                    if uid == '144144243':
                        # Hard-coded uid treated as a friend.
                        kind = 'friend'
                    print(kind)
            if face_names:
                create_html(face_names[0], kind)
            else:
                create_html('0', 'empty')
            result['photo'] = fns
            result['vk'] = vks
            result['gender'] = genders
            # print(result)
            f.write(template.render({'x': result}))
    return self.frames.read()
def take_photo(albumPath):
    """Interactively capture three face photos from the webcam and match
    them against the album stored at albumPath.

    Keys: 'c' captures the current frame (only when a face is visible);
    'q' aborts the capture loop.  After three captures the album is
    matched with threshold 0.8 and (path, distance) pairs are printed.
    """
    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    cv2.namedWindow('camera', cv2.WINDOW_NORMAL)
    cv2.setWindowProperty('camera', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    cv2.setWindowProperty('camera', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_NORMAL)
    # Blank panel shown in the slots that have no captured photo yet.
    white = cv2.imread('../../../photo_email/background.png')
    faces = []
    phase = 0  # number of photos captured so far (0..2)
    while True:
        # Capture frame-by-frame.
        ret, camera = cap.read()
        display = camera.copy()
        location = fr.api.face_locations(display)
        # Draw a box around every detected face (no-op when none found).
        for fc in location:
            display = cv2.rectangle(display, (fc[3], fc[0]), (fc[1], fc[2]),
                                    (0, 255, 0), 4)
        # Show already-captured shots beside the live view.  The crop is
        # hard-coded to rows 0:480, cols 140:500 -- assumes a 640x480
        # camera; TODO confirm.
        live = display[0:480, 140:500]
        if phase == 0:
            display = np.concatenate((live, white, white), axis=1)
        elif phase == 1:
            display = np.concatenate((faces[0], live, white), axis=1)
        else:
            display = np.concatenate((faces[0], faces[1], live), axis=1)
        cv2.imshow('camera', display)
        key = cv2.waitKey(1)
        if key == ord('q'):
            cap.release()
            cv2.destroyAllWindows()
            break
        elif key == ord('c') and len(location) > 0:
            faces.append(camera[0:480, 140:500])
            if phase < 2:
                phase += 1
            else:
                # Third photo taken: stop capturing.
                cap.release()
                cv2.destroyAllWindows()
                break
    # Encode each captured photo and match against the album.
    photo = []
    for f in faces:
        location = fr.api.face_locations(f)
        encoding = fr.api.face_encodings(f, known_face_locations=location)
        photo.append(face(0, location, encoding))
    alb = album.load(albumPath)
    ppp = alb.match(photo, 0.8)
    for p in ppp:
        print(p[0].path, p[1][0])
def process(frame, count=0):
    """Write the frame to donga.jpg, run the face check on it, and email
    the image when the check fails (count is accepted but unused)."""
    cv2.imwrite('donga.jpg', frame)
    verdict = face('donga.jpg')
    if verdict == 1:
        print("Good")
    else:
        # Face check failed: send the captured image by email.
        email('donga.jpg')
def post(self):
    """Tornado handler for WeChat (weixin) callback messages.

    Parses the incoming XML, dispatches on MsgType (text / image /
    location / event) and on keyword prefixes of text messages, and
    replies either via an early self.finish(...) or with the final text
    reply built from respContent.

    NOTE(review): the unicode/str handling (content.encode("utf-8"))
    indicates Python 2 era code -- confirm before porting.
    NOTE(review): branches that call self.finish(...) early still fall
    through to the final self.finish at the bottom, which Tornado rejects
    ("finish() called twice") -- confirm intended flow.
    """
    str_xml = self.request.body  # raw POST payload from WeChat
    xml = etree.fromstring(str_xml)  # parse the XML envelope
    msgType=xml.find("MsgType").text
    fromUser=xml.find("FromUserName").text
    toUser=xml.find("ToUserName").text
    respContent = ""
    if msgType == "text":
        content=xml.find("Content").text  # the text the user typed
        if content.startswith("翻译"):  # "translate" command prefix
            reinfo = re.compile("^翻译")
            # content is unicode, so convert to a utf-8 str before the
            # substitution; strip() removes surrounding whitespace.
            args = reinfo.sub('', content.encode("utf-8")).strip()
            if not args:
                respContent = promptMsg.getFanYiMsg()  # usage prompt
            else:
                respContent = translate.Translate().baiduFanYi(args)
        elif content.startswith("歌曲"):  # "song" command prefix
            reinfo = re.compile("^歌曲")
            args = reinfo.sub('', content.encode("utf-8")).strip()
            argslist = args.split("@")  # expected form: title@artist
            des = "来自百度音乐"
            if not argslist[0]:
                respContent = promptMsg.getMusicMsg()  # usage prompt
            elif len(argslist) == 1:
                respContent = music.Music().baiduMusic(argslist[0],"")
            else:
                des = argslist[1]
                respContent = music.Music().baiduMusic(argslist[0],argslist[1])
            if argslist[0]:
                # Reply with a music card and return early.
                self.finish(weixinResult.result_music(fromUser, toUser, int(time.time()), argslist[0],des,respContent[0],respContent[1]))
        elif content.startswith("天气"):  # "weather" command prefix
            reinfo = re.compile("^天气")
            args = reinfo.sub('', content.encode("utf-8")).strip()
            if not args:
                respContent = promptMsg.getWeather()  # usage prompt
            else:
                newsMsg = weather.Weather().baiduWeather(args, fromUser, toUser)
                self.finish(newsMsg)
        elif content.startswith("ip"):  # IP geolocation lookup
            reinfo = re.compile("^ip")
            args = reinfo.sub('', content.encode("utf-8")).strip()
            if not args:
                respContent = promptMsg.getIp()
            else:
                respContent = ip.IP().taobaoIP(args)
        elif content.startswith("手机"):  # phone-number lookup
            reinfo = re.compile("^手机")
            args = reinfo.sub('', content.encode("utf-8")).strip()
            if not args:
                respContent = promptMsg.getPhone()
            else:
                respContent = phone.Phone().tenpayPhone(args).encode("utf-8")
        elif content.startswith("苹果"):  # Apple IMEI lookup
            reinfo = re.compile("^苹果")
            args = reinfo.sub('', content.encode("utf-8")).strip()
            if not args:
                respContent = promptMsg.getApple()
            else:
                result, code = imei.IMEI().appleIMEI(args, toUser, fromUser)
                if code == 0:
                    self.finish(result)
                elif code == -1:
                    respContent = result
        elif content.encode("utf-8").strip() == "历史上的今天" or content.encode("utf-8").strip() == "lssdjt":
            # "today in history"
            respContent = todayonhistory.History().rijiben()
        elif len(content.replace("—","-").replace("-","-").split("-")) == 3:
            # "region-start-end" navigation query (dash variants normalized
            # first; the second replace looks like a no-op here but may have
            # targeted another dash codepoint originally -- TODO confirm).
            s = content.replace("—","-").replace("-","-").split("-")
            region = s[0]
            start = s[1]
            end = s[2]
            newsMsg = navigation.Navigation().baiduNavigation(toUser, fromUser, region, start, end)
            self.finish(newsMsg)
        else:
            respContent = promptMsg.getTextMsg()  # generic help text
    elif msgType == "image":
        # URL of the uploaded picture
        picUrl = xml.find("PicUrl").text
        respContent = face.face(picUrl)
    elif msgType == "location":
        respContent = xml.find("Location_X").text
    elif msgType == "event":
        # Event-type message.
        eventType = xml.find("Event").text
        # Subscribe event.
        if eventType == "subscribe":
            respContent = "谢谢您关注娟子服装。"
        # Unsubscribe:
        #elif eventType == "unsubscribe":
        # After unsubscribing the user no longer receives messages from the
        # official account, so no reply is needed.
        # Custom-menu click event.
        elif eventType == "CLICK":
            # The event KEY, matching the KEY set when the menu was created.
            eventKey = xml.find("EventKey").text
            #TODO
    self.finish(weixinResult.result_text(fromUser, toUser, int(time.time()), respContent))
# --- NOTE(review): incomplete fragment of a motion-detection loop ---
# The leading 'continue' belongs to an enclosing loop whose header is not
# in view, and the assignment target 'f**k' is a censored identifier that
# makes the findContours line invalid Python.  Reproduced verbatim.
continue
# frameDelta = cv2.absdiff(firstFrame, gray)
# Frame-to-frame difference -> binary motion mask.
consecDelta = cv2.absdiff(prevFrame, gray)
# thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
consecThresh = cv2.threshold(consecDelta, 25, 255, cv2.THRESH_BINARY)[1]
# dilate the thresholded image to fill in holes, then find contours
# on thresholded image
# pyautogui.confirm('Shall I confirm?')
# thresh = cv2.dilate(thresh, None, iterations=2)
consecThresh = cv2.dilate(consecThresh, None, iterations=2)
# _, cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
# NOTE(review): censored identifier below -- invalid as written.
_, f**k, _ = cv2.findContours(consecThresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
detect_face = face(frame)
for x in f**k:
    # Small contours count as motion only when a face was also detected.
    y = cv2.contourArea(x)
    if cv2.contourArea(x) < 3000 and not detect_face:
        text = "Unoccupied"
        continue
    else:
        text = "Occupied"
if text == "Occupied":
    if detect_face:
        # Recognized face: reset the consecutive-bad-frame counter.
        print("good")
        count = 0
    else:
        count += 1
# Dilate the thresholded image to fill in holes, then find contours
# on the thresholded image.
thresh = cv2.dilate(thresh, None, iterations=2)
_, cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                              cv2.CHAIN_APPROX_SIMPLE)
# Loop over the contours: any contour at least min-area big marks the
# room as occupied.
for c in cnts:
    # If the contour is too small, ignore it.
    if cv2.contourArea(c) < args["min_area"]:
        continue
    text = "Occupied"
if text == "Occupied":
    # The original compared with 'is 1', relying on CPython small-int
    # identity; equality is the intended test.
    if face(frame) == 1:
        print("good")
    else:
        # No recognized face: persist the frame and email it in the
        # background.
        print('bad')
        cv2.imwrite('donga.jpg', frame)
        Process(target=email, args=('donga.jpg', )).start()
def secret_cam():
    """Monitor the webcam (or a video file) for motion and faces.

    Motion without a recognized face counts as a 'bad' frame and is
    written to a timestamped .avi under videos/; after 50 consecutive bad
    frames the clip is uploaded via drive() in a separate process and a
    new clip is started.  Press 'q' to quit.  Returns the last frame read.
    """
    # Command-line options: optional video path, minimum contour area,
    # and a test flag.
    ap = argparse.ArgumentParser()
    ap.add_argument("-v", "--video", help="path to the video file")
    ap.add_argument("-a", "--min-area", type=int, default=500,
                    help="minimum area size")
    ap.add_argument("-t", default=False)
    args = vars(ap.parse_args())

    # Codec for the recorded clips.
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    #(h, w) = face_recognition.load_image_file('donga.jpg').shape[:2]

    # Webcam when no file is given, otherwise read the supplied video.
    if args.get("video", None) is None:
        cam = cv2.VideoCapture(0)
        time.sleep(0.25)
    else:
        cam = cv2.VideoCapture(args["video"])

    firstFrame = None
    count, vid_no = 0, 0
    # The original used a flag named 'x' here, which was then clobbered by
    # the boundingRect unpacking below; renamed to avoid the collision.
    need_new_video = True
    time_now = datetime.datetime.now()

    while True:
        w, h = cam.get(3), cam.get(4)
        if need_new_video:
            # Start a fresh timestamped clip.
            time_now = datetime.datetime.now()
            video = cv2.VideoWriter('videos/' + str(time_now) + '.avi',
                                    fourcc, 15.0, (int(w), int(h)), True)
            need_new_video = False

        # Grab the current frame and initialize the occupied/unoccupied text.
        grabbed, frame = cam.read()
        if not grabbed:
            # End of video file / camera failure: the original crashed in
            # cvtColor on a None frame here.
            break
        text = "Unoccupied"

        # Convert to grayscale and blur before differencing.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        # If the first frame is None, initialize it.
        if firstFrame is None:
            firstFrame = gray
            prevFrame = gray
            continue

        # Frame-to-frame difference -> binary motion mask, dilated to fill
        # in holes before contour extraction.
        consecDelta = cv2.absdiff(prevFrame, gray)
        consecThresh = cv2.threshold(consecDelta, 25, 255,
                                     cv2.THRESH_BINARY)[1]
        consecThresh = cv2.dilate(consecThresh, None, iterations=2)
        # NOTE(review): the 3-value unpacking matches the OpenCV 3.x
        # findContours signature; OpenCV 4.x returns 2 values.
        _, contours, _ = cv2.findContours(consecThresh.copy(),
                                          cv2.RETR_EXTERNAL,
                                          cv2.CHAIN_APPROX_SIMPLE)

        detect_face = face(frame)
        # Loop over the contours.
        for cnt in contours:
            # If the contour is too small and no face was seen, ignore it.
            if cv2.contourArea(cnt) < 11000 and not detect_face:
                continue
            (bx, by, bw, bh) = cv2.boundingRect(cnt)
            # cv2.rectangle(frame, (bx, by), (bx + bw, by + bh), (0, 255, 0), 2)
            text = "Occupied"

        if text == "Occupied":
            # The original compared with 'is 1' (identity); equality is the
            # intended test.
            if detect_face == 1:
                print("good")
                count = 0
            else:
                count += 1
                print('bad')
                video.write(frame)
            if count >= 50:
                # 50 consecutive bad frames: upload the clip in the
                # background and roll over to a new file.
                Process(target=drive,
                        args=('videos/' + str(time_now) + '.avi',)).start()
                vid_no += 1
                count = 0
                need_new_video = True

        prevFrame = gray
        cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        cv2.imshow("Security Feed", frame)
        # cv2.imshow('gray',gray)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break

    # Cleanup the cam and close any open windows.  'video' is unbound when
    # the loop exited before the first clip was created.
    try:
        video.release()
    except Exception:
        pass
    cam.release()
    cv2.destroyAllWindows()
    return frame