# -*- coding:utf-8 -*-
import os
import cv2
#import subprocess
#import serial
import time

from boss_train import Model
#from image_show import show_image


if __name__ == '__main__':
    cap = cv2.VideoCapture(0)
    #cap = cv2.VideoCapture('rtsp://192.168.43.209:554')
    cascade_path = "haarcascade_frontalface_alt.xml"

    model = Model()
    model.load()

    while True:
        _, frame = cap.read()

        # Convert the frame to grayscale for Haar cascade detection.
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Build the cascade classifier (could be constructed once, outside the loop).
        cascade = cv2.CascadeClassifier(cascade_path)

        #facerect = cascade.detectMultiScale(frame_gray, 1.03, 1)
        facerect = cascade.detectMultiScale(frame_gray, scaleFactor=1.4, minNeighbors=10, minSize=(10, 10))
        #facerect = cascade.detectMultiScale(frame_gray, scaleFactor=1.01, minNeighbors=3, minSize=(3, 3))

        # Draw a box and an overlay caption for every detected face.
        for (x, y, w, h) in facerect:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (127, 255, 0), 1)
            font = cv2.FONT_HERSHEY_TRIPLEX
            cv2.putText(frame, "Searching .....", (10, 100), font, 1, (255, 255, 0), 1, False)

        #cv2.imshow('Opencv', frame)
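        # --- Hedged sketch: the original file is truncated above this point. ---
        # A typical continuation shows the annotated frame and exits on a key
        # press; the window name and quit key below are assumptions, and the
        # loaded `model` is presumably used for prediction in the missing part.
        cv2.imshow('Opencv', frame)
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break

    # Assumed cleanup: release the camera and close the preview window.
    cap.release()
    cv2.destroyAllWindows()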
# -*- coding:utf-8 -*-
import cv2

from boss_train import Model
from image_show import show_image


if __name__ == '__main__':
    cap = cv2.VideoCapture(0)

    #cascade_path = "/usr/local/opt/opencv/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml"
    cascade_path = "/opt/anaconda3/envs/venv/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml"

    model = Model()
    model.load()

    print("Face Detect Start!")

    while True:
        _, frame = cap.read()

        # Convert the frame to grayscale.
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Load the cascade classifier's trained features.
        cascade = cv2.CascadeClassifier(cascade_path)

        # Run object detection (face detection).
        facerect = cascade.detectMultiScale(frame_gray, scaleFactor=1.2, minNeighbors=3, minSize=(10, 10))
        #facerect = cascade.detectMultiScale(frame_gray, scaleFactor=1.01, minNeighbors=3, minSize=(3, 3))
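        # --- Hedged sketch: the original file is truncated above this point. ---
        # Following the pattern of the other variants, each detected region is
        # cropped, classified with the trained model, and show_image() is called
        # when label 0 (assumed to mean "boss") is returned.  The crop and the
        # Esc-to-quit handling below are assumptions.
        for (x, y, w, h) in facerect:
            face = frame[y:y + h, x:x + w]
            result = model.predict(face)
            if result == 0:
                print('Boss is approaching')
                show_image()
            else:
                print('Not boss')

        if cv2.waitKey(100) == 27:  # assumed: Esc exits the loop
            break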
from __future__ import print_function

import cv2
import dlib

from boss_train import Model
from whois import who


if __name__ == '__main__':
    # dlib frontal-face detector
    detector = dlib.get_frontal_face_detector()

    cap = cv2.VideoCapture(0)
    # cap = cv2.VideoCapture("rtsp://*****:*****@192.168.1.127/h264.out")
    # cap = cv2.VideoCapture("d:/video/ooo.avi")
    # cap = cv2.VideoCapture("d:/video/www.mp4")

    # Load the trained model.
    model = Model()
    model.load()

    while True:
        ret, frame = cap.read()
        if ret == True:
            color = (0, 255, 255)

            # Convert to grayscale.
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            # Detect faces with the dlib detector (1 = upsample the image once).
            dets = detector(frame_gray, 1)

            for _, d in enumerate(dets):
                # Note: x1/y1/x2 here hold top/bottom/left, clamped to >= 0.
                x1 = d.top() if d.top() > 0 else 0
                y1 = d.bottom() if d.bottom() > 0 else 0
                x2 = d.left() if d.left() > 0 else 0
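                # --- Hedged sketch: the original file is truncated above this point. ---
                # The remaining corner, the crop/classification and the drawing
                # below follow the same pattern and are assumptions; `who` from
                # whois is assumed to map a predicted label to a display name.
                y2 = d.right() if d.right() > 0 else 0

                face = frame[x1:y1, x2:y2]  # rows = top:bottom, cols = left:right
                label = model.predict(face)
                cv2.rectangle(frame, (x2, x1), (y2, y1), color, 2)
                cv2.putText(frame, who(label), (x2, x1 - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 2)

            cv2.imshow('camera', frame)            # assumed window name
            if cv2.waitKey(1) & 0xFF == ord('q'):  # assumed quit key
                break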
    # if os.path.exists(save_dir):
    #     shutil.rmtree(save_dir)
    # os.mkdir(save_dir)
    count = 0
    for (x1, y1, x2, y2) in faces:
        file_name = os.path.join(
            save_dir,
            str(count) + image_name.split(".")[0].split("/")[-1] + ".jpg")
        Image.open(image_name).crop((x1, y1, x2, y2)).save(file_name)
        count += 1


if __name__ == '__main__':
    cap = cv2.VideoCapture(0)
    cascade_path = "/usr/local/opt/opencv/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml"

    model = Model()
    model.load()

    while True:
        _, frame = cap.read()

        # Save the current frame to disk and detect faces on the saved image.
        pic_name = "pic.jpg"
        plt.imsave(pic_name, frame)
        faces = detectFaces(pic_name)

        if faces:
            rs = 1
            for (x1, y1, x2, y2) in faces:
                # Crop each detected face and classify it with the trained model.
                face_name = "face.jpg"
                Image.open(pic_name).crop((x1, y1, x2, y2)).save(face_name)
                image = cv2.imread(face_name)
                rs = model.predict(image)
            if rs == 0:
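                # --- Hedged sketch: the original file is truncated above this point. ---
                # By analogy with the other variants, label 0 is assumed to mean
                # "boss"; the prints and the show_image() call (presumably imported
                # in the part of this file not shown here) are assumptions.
                print('Boss is approaching')
                show_image()
            else:
                print('Not boss')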
# -*- coding:utf-8 -*-
import cv2

from boss_train import Model
from image_show import show_image


if __name__ == '__main__':
    model = Model()
    model.load()

    frame = cv2.imread("./dog.jpg", 3)
    #cv2.imshow(frame)
    result = model.predict(frame)

    if result == 0:  # boss
        print('Boss is approaching')
        #show_image()
    else:
        print('Not boss')