def __init__(self):
    # Using OpenCV to capture from device 0. If you have trouble capturing
    # from a webcam, comment the line below out and use a video file
    # instead.
    self.camera = camera.VideoCamera()

    self.known_face_encodings = []
    self.known_face_names = []
    self.org_frame = []

    dirname = 'knowns'

    # Load sample pictures and learn how to recognize it.
    files = os.listdir(dirname)
    for filename in files:
        name, ext = os.path.splitext(filename)
        if ext == '.jpg':
            self.known_face_names.append(name)
            pathname = os.path.join(dirname, filename)
            img = face_recognition.load_image_file(pathname)
            face_encoding = face_recognition.face_encodings(img)[0]
            self.known_face_encodings.append(face_encoding)

    # Initialize some variables
    self.face_locations = []
    self.face_encodings = []
    self.face_names = []
    self.process_this_frame = True
    self.face_cascade = cv2.CascadeClassifier(
        'opencv_data/haarcascade_frontface.xml')
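For reference, a minimal sketch of how a Haar cascade loaded like the one above is typically applied to a frame; the detect_faces_haar helper, the BGR frame argument, and the scaleFactor/minNeighbors values are illustrative assumptions, not part of the snippet.

import cv2

face_cascade = cv2.CascadeClassifier('opencv_data/haarcascade_frontface.xml')

def detect_faces_haar(frame):
    # Haar cascades operate on single-channel images, so convert to grayscale first
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # scaleFactor / minNeighbors are typical starting values, not tuned ones
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    # Each detection is (x, y, w, h); draw it on the frame
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    return frame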
def __init__(self):
    # Using OpenCV to capture from device 0. If you have trouble capturing
    # from a webcam, comment the line below out and use a video file
    # instead.
    self.camera = camera.VideoCamera()

    self.known_face_encodings = []
    self.known_face_names = []

    # Load sample pictures and learn how to recognize it.
    dirname = 'knowns'
    files = os.listdir(dirname)  # return all image files from dirname
    for filename in files:
        name, ext = os.path.splitext(filename)
        if ext == '.jpg':
            self.known_face_names.append(name)
            pathname = os.path.join(dirname, filename)
            img = face_recognition.load_image_file(pathname)
            face_encoding = face_recognition.face_encodings(img)[0]  # not modified
            self.known_face_encodings.append(face_encoding)

    # Initialize some variables
    self.face_locations = []
    self.face_encodings = []
    self.face_names = []
    self.process_this_frame = True
def __init__(self):
    print("FaceRecog execute...")
    # Using OpenCV to capture from device 0. If you have trouble capturing
    # from a webcam, comment the line below out and use a video file
    # instead.
    self.camera = camera.VideoCamera()

    self.known_face_encodings = []
    self.known_face_names = []

    # Load sample pictures and learn how to recognize it.
    dirname = 'my sample/'
    files = os.listdir(dirname)
    for filename in files:
        name, ext = os.path.splitext(filename)
        if ext == '.jpg':
            self.known_face_names.append(name)
            pathname = os.path.join(dirname, filename)
            img = face_recognition.load_image_file(pathname)
            face_encoding = face_recognition.face_encodings(img)[0]
            # -> raises 'IndexError: list index out of range' when no face is found;
            # keep only a single photo with a detectable face in the 'my sample' folder.
            self.known_face_encodings.append(face_encoding)

    # Initialize some variables
    self.face_locations = []
    self.face_encodings = []
    self.face_names = []
    self.process_this_frame = True
def __init__(self):
    # Declare modules
    self.camera = camera.VideoCamera()
    self.known_face_encodings = []
    self.known_face_names = []
    self.staticData = static.staticVar
    self.etc_module = etc_module

    # Learn from the images found in this directory
    dirname = 'knowns'
    files = os.listdir(dirname)
    for filename in files:
        name, ext = os.path.splitext(filename)
        if ext == '.jpg':
            self.known_face_names.append(name)
            pathname = os.path.join(dirname, filename)
            img = face_recognition.load_image_file(pathname)
            face_encoding = face_recognition.face_encodings(img)[0]
            self.known_face_encodings.append(face_encoding)

    # Initialize some variables
    self.face_locations = []
    self.face_encodings = []
    self.face_names = []
    self.process_this_frame = True
def __init__(self):
    # Using OpenCV to capture from device 0. If you have trouble capturing
    # from a webcam, comment the line below out and use a video file
    # instead.
    self.camera = camera.VideoCamera()

    self.known_face_encodings = []
    self.known_face_names = []

    # Load sample pictures and learn how to recognize it.
    # Read the photo files in the 'knowns' directory; the person's name is
    # taken from the file name.
    dirname = 'knowns'
    files = os.listdir(dirname)
    for filename in files:
        name, ext = os.path.splitext(filename)
        if ext == '.jpg':
            self.known_face_names.append(name)
            pathname = os.path.join(dirname, filename)
            # Locate the face region in the photo, analyze the positions of the
            # 68 facial features known as face landmarks, and store the resulting
            # data in known_face_encodings.
            img = face_recognition.load_image_file(pathname)
            # face_encoding = face_recognition.face_encodings(img)[0]
            face_encodings = face_recognition.face_encodings(img)
            if len(face_encodings) > 0:
                face_encoding = face_encodings[0]
                self.known_face_encodings.append(face_encoding)

    # Initialize some variables
    self.face_locations = []
    self.face_encodings = []
    self.face_names = []
    self.process_this_frame = True
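As context for how encodings collected this way are usually consumed, a minimal sketch of matching the faces in an RGB frame against the known encodings using face_recognition's face_distance; the match_frame helper and the 0.6 tolerance are assumptions, not code from the snippet above.

import face_recognition
import numpy as np

def match_frame(frame_rgb, known_face_encodings, known_face_names, tolerance=0.6):
    # Return a name (or "Unknown") for every face found in an RGB frame
    face_locations = face_recognition.face_locations(frame_rgb)
    face_encodings = face_recognition.face_encodings(frame_rgb, face_locations)

    names = []
    for face_encoding in face_encodings:
        # Distance to every known encoding; smaller means more similar
        distances = face_recognition.face_distance(known_face_encodings, face_encoding)
        best = np.argmin(distances) if len(distances) else None
        if best is not None and distances[best] < tolerance:
            names.append(known_face_names[best])
        else:
            names.append("Unknown")
    return face_locations, names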
def __init__(self):
    self.camera = camera.VideoCamera()
    self.known_face_encodings = []
    self.known_face_names = []
    self.known_face_no = []

    # Load precomputed face encodings (plus names and IDs) from encoding.txt.
    file = open("encoding.txt", 'r')
    datas = file.read().strip().split("])]")
    for data in datas:
        if len(data) != 0:
            imgdata = data.split("[array([")
            user_data = imgdata[0].split("<<")
            print(user_data[1] + ":" + user_data[0])
            self.known_face_names.append(user_data[1])
            self.known_face_no.append(user_data[0])

            tmp = imgdata[1].split(',')
            self.known_face_val = []
            for fval in tmp:
                self.known_face_val.append(float(fval))
            face_encoding = self.known_face_val
            self.known_face_encodings.append(face_encoding)

    # Initialize some variables
    self.face_locations = []
    self.face_encodings = []
    self.face_names = []
    self.process_this_frame = True
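The string-splitting parser above is fragile: it depends on the exact repr of a numpy array written to encoding.txt. As a hedged alternative, a minimal sketch of persisting names and 128-dimensional encodings with numpy's savez/load; the file name and helper names are assumptions, not part of the original code.

import numpy as np

def save_encodings(path, names, encodings):
    # names: list of str, encodings: list of 128-d numpy arrays
    np.savez(path, names=np.array(names), encodings=np.array(encodings))

def load_encodings(path):
    # Load the arrays back; string arrays do not require pickle
    data = np.load(path, allow_pickle=False)
    return list(data['names']), list(data['encodings'])

# Example usage (file name is illustrative):
# save_encodings('known_faces.npz', known_face_names, known_face_encodings)
# names, encodings = load_encodings('known_faces.npz')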
def __init__(self):
    # Using OpenCV to capture from device 0. If you have trouble capturing
    # from a webcam, comment the line below out and use a video file
    # instead.
    self.camera = camera.VideoCamera()

    self.known_face_encodings = []
    self.known_face_names = []

    # Load sample pictures and learn how to recognize it.
    dirname = 'knowns'
    files = os.listdir(dirname)
    for filename in files:
        name, ext = os.path.splitext(filename)
        if ext == '.jpg':
            self.known_face_names.append(name)  # It may be better to move this into the if-block below
            pathname = os.path.join(dirname, filename)
            img = face_recognition.load_image_file(pathname)
            face_encoding = face_recognition.face_encodings(img)
            if len(face_encoding) > 0:
                face_encoding = face_encoding[0]
                self.known_face_encodings.append(face_encoding)
            else:
                print('No faces found in this image! "{}"'.format(name))

    # Initialize some variables
    self.face_locations = []
    self.face_encodings = []
    self.face_names = []
    self.process_this_frame = True
def __init__(self):
    # Using OpenCV to capture from device 0. If you have trouble capturing
    # from a webcam, comment the line below out and use a video file
    # instead.
    self.camera = camera.VideoCamera(0)
    self.currentFace = None
    self.known_face_encodings = []
    self.known_face_names = []
    self.countDf = pd.DataFrame(data={
        "user_name": [],
        "count": [],
        "timestamp": []
    })

    # Load sample pictures and learn how to recognize it.

    # Initialize some variables
    self.face_locations = []
    self.face_encodings = []
    self.face_names = []
    self.process_this_frame = True
    self.face_detected = False

    self.init_customer_face()
    # Compute the "likes" count
    self.cal_like()
def __init__(self):
    # Create objects
    self.camera = camera.VideoCamera()
    self.known_face_encodings = []
    self.known_face_names = []

    # Read the photo file, recognize the face, and extract its features
    dirname = './'
    files = os.listdir(dirname)
    for filename in files:
        name, ext = os.path.splitext(filename)
        if ext == '.jpg' and name == 'recognizeface':
            self.known_face_names.append(name)
            # The person's name is taken from the file name
            pathname = os.path.join(dirname, filename)
            img = face_recognition.load_image_file(pathname)

            # Feature extraction.
            # face_encodings(img)[0] raises an error when no facial features can be detected:
            ## face_encoding = face_recognition.face_encodings(img)[0]
            # Guarded version that handles that case:
            encodings = face_recognition.face_encodings(img)
            if len(encodings) > 0:
                face_encoding = encodings[0]
                self.known_face_encodings.append(face_encoding)
                print("Face recognized: {}".format(name))
            else:
                # Report the photo in which no facial features could be found
                print("Face recognition error: {}".format(name))
def __init__(self):
    # Using OpenCV to capture from device 0. If you have trouble capturing
    # from a webcam, comment the line below out and use a video file instead.
    self.camera = camera.VideoCamera()

    self.known_face_encodings = []
    self.known_face_names = []

    # Load the sample pictures and learn how to recognize them
    dirname = 'knowns'
    files = os.listdir(dirname)
    for filename in files:
        name, ext = os.path.splitext(filename)
        if ext == '.jpg':
            self.known_face_names.append(name)
            pathname = os.path.join(dirname, filename)
            img = face_recognition.load_image_file(pathname)
            face_encoding = face_recognition.face_encodings(img)[0]
            self.known_face_encodings.append(face_encoding)

    # Initialize some variables
    self.face_locations = []
    self.face_encodings = []
    self.face_names = []
    self.process_this_frame = True
def __init__(self):
    # Using OpenCV to capture from device 0. If you have trouble capturing
    # from a webcam, comment the line below out and use a video file
    # instead.
    self.camera = camera.VideoCamera(0)

    self.known_face_encodings = []
    self.known_face_names = []

    # Load sample pictures and learn how to recognize it.
    dirname = 'customers'
    files = os.listdir(dirname)
    for filename in files:
        name, ext = os.path.splitext(filename)
        name = name.split('_')[0]
        if ext == '.jpg' or ext == '.jpeg' or ext == '.png':
            self.known_face_names.append(name)
            pathname = os.path.join(dirname, filename)
            img = face_recognition.load_image_file(pathname)
            if len(face_recognition.face_encodings(img)) > 0:
                face_encoding = face_recognition.face_encodings(img)[0]
                self.known_face_encodings.append(face_encoding)
    print(self.known_face_names)

    # Initialize some variables
    self.face_locations = []
    self.face_encodings = []
    self.face_names = []
    self.process_this_frame = True
    self.face_detected = False
def gen(fr):
    detector = ObjectDetector('ssd_mobilenet_v1_coco_2017_11_17')
    #detector = ObjectDetector('mask_rcnn_inception_v2_coco_2018_01_28')
    #detector = ObjectDetector('pet', label_file='data/pet_label_map.pbtxt')
    cam = camera.VideoCamera()

    while True:
        frame = cam.get_frame()
        frame = detector.detect_objects(frame)

        ret, jpg = cv2.imencode('.jpg', frame)
        jpg_bytes = jpg.tobytes()

        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpg_bytes + b'\r\n\r\n')
def __init__(self):
    # Camera and known-face storage
    self.camera = camera.VideoCamera()
    self.known_face_encodings = []
    self.known_face_names = []

    # Build one encoding per .jpg file in 'knowns'; the file name is the person's name
    dirname = 'knowns'
    files = os.listdir(dirname)
    for filename in files:
        name, ext = os.path.splitext(filename)
        if ext == '.jpg':
            self.known_face_names.append(name)
            pathname = os.path.join(dirname, filename)
            img = face_recognition.load_image_file(pathname)
            face_encoding = face_recognition.face_encodings(img)[0]
            self.known_face_encodings.append(face_encoding)

    # Per-frame state
    self.face_locations = []
    self.face_encodings = []
    self.face_names = []
    self.process_this_frame = True
def __init__(self):
    # Create objects
    self.camera = camera.VideoCamera()
    self.known_face_encodings = []
    self.known_face_names = []

    # Read the photo files in 'knowns', detect faces, and extract their features
    dirname = 'knowns'
    files = os.listdir(dirname)
    for filename in files:
        name, ext = os.path.splitext(filename)
        if ext == '.jpg':
            self.known_face_names.append(name)
            # The person's name is taken from the file name in the knowns directory
            pathname = os.path.join(dirname, filename)
            img = face_recognition.load_image_file(pathname)

            # Feature extraction.
            # face_encodings(img)[0] raises an error when no facial features can be detected:
            ## face_encoding = face_recognition.face_encodings(img)[0]
            # Guarded version that avoids the error:
            encodings = face_recognition.face_encodings(img)
            if len(encodings) > 0:
                face_encoding = encodings[0]
                # Store the analyzed face feature data in self.known_face_encodings
                # (only when a face was actually found, so a stale encoding is never appended)
                self.known_face_encodings.append(face_encoding)
            else:
                # Report the photo in which no facial features could be found
                print("{} : No faces found in the image!".format(name))
                ## quit()

    # Initialize some variables
    self.face_locations = []
    self.face_encodings = []
    self.face_names = []
    self.process_this_frame = True
# Exception for overlapped folder name
try:
    os.mkdir(face_path)
    print('Face directory is created :: ', face_path)
except FileExistsError:
    print('Folder name exception :: already registered face')

# Path adequacy check
assert os.path.exists(face_path)

# Set DB
DBmanager = DBmanager.DBmanager("log.db")
DBmanager.ShowAllWorker()

# Set camera module
cam = camera.VideoCamera(camera_num)
# Camera loading assertion
assert cam is not None

# Camera loop
while True:
    # Grab a single frame of video
    frame = cam.get_frame()
    # Frame loading assertion
    assert frame is not None
    copy_frame = frame.copy()

    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]
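A minimal sketch of the step that typically follows this loop body: detecting faces on the quarter-size RGB frame and scaling the boxes back up for drawing on copy_frame. The annotate_faces helper is an assumption for illustration, not part of the original loop.

import cv2
import face_recognition

def annotate_faces(rgb_small_frame, copy_frame):
    # Detect faces on the quarter-size RGB frame
    face_locations = face_recognition.face_locations(rgb_small_frame)
    for (top, right, bottom, left) in face_locations:
        # Scale the box back up by 4 because detection ran on a 1/4-size frame
        top, right, bottom, left = top * 4, right * 4, bottom * 4, left * 4
        cv2.rectangle(copy_frame, (left, top), (right, bottom), (0, 0, 255), 2)
    return copy_frame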
def video_feed():
    return Response(gen(camera.VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
import threading
import time
import hashlib
import logging
import datetime
import ssl

import cloud

app = Flask(__name__)
conf = config.Configuration()
logging.basicConfig(filename='app.log', level=logging.DEBUG)
auth = flask_httpauth.HTTPBasicAuth()
app.secret_key = os.urandom(24)

user = None
online = None
cmra = camera.VideoCamera(conf)
drop = cloud.DropObj(conf)


@auth.get_password
def get_pw(username):
    global user
    user = username
    return conf.get('User')[username]


@auth.hash_password
def hash_pw(password):
    return hashlib.sha224(password).hexdigest()
def get_jpg_bytes(self):
    frame = self.get_frame()
    # We are using Motion JPEG, but OpenCV defaults to capture raw images,
    # so we must encode it into JPEG in order to correctly display the
    # video stream.
    ret, jpg = cv2.imencode('.jpg', frame)
    return jpg.tobytes()


if __name__ == '__main__':
    import camera
    import calibration

    #detector = ObjectDetector('ssd_mobilenet_v1_coco_2017_11_17')
    detector = ObjectDetector('mask_rcnn_inception_v2_coco_2018_01_28')

    cam = camera.VideoCamera()

    print("press `q` to quit")

    while True:
        frame = cam.get_frame()
        frame = detector.detect_objects(frame)

        # show the frame
        cv2.imshow("Frame", frame)

        # if the `q` key was pressed, break from the loop
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
            # q= False
    label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency())
    cv.putText(copy_frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))

    return copy_frame


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--path', default=0,
                        help='Input test video path or web-cam number.')
    args = parser.parse_args()

    cam = camera.VideoCamera(args.path)
    helmet_detection = helmet_detection()

    # Set window
    winName = 'Helmet detection'
    cv.namedWindow(winName, cv.WINDOW_NORMAL)

    while True:
        # Get frame from Camera module
        frame = cam.get_frame()
        frame = cv.resize(frame, dsize=(640, 480), interpolation=cv.INTER_AREA)
        frame = helmet_detection.get_detection(frame=frame, copy_frame=frame)

        # show the frame
        cv.imshow(winName, frame)
        key = cv.waitKey(1) & 0xFF
def __init__(self):
    # Using OpenCV to capture from device 0. If you have trouble capturing
    # from a webcam, comment the line below out and use a video file
    # instead.
    self.camera = camera.VideoCamera()
    self.facenet = cv2.dnn.readNet(
        'models/deploy.prototxt',
        'models/res10_300x300_ssd_iter_140000.caffemodel')

    self.known_face_encodings = []
    self.known_face_names = []
    self.face = []

    # Load sample pictures and learn how to recognize it.
    dirname = 'knowns'
    files = os.listdir(dirname)
    for filename in files:
        name, ext = os.path.splitext(filename)
        if ext == '.jpeg':
            self.known_face_names.append(name)
            pathname = os.path.join(dirname, filename)
            img = face_recognition.load_image_file(pathname)

            test1 = cv2.imread(pathname)
            # print(test1.shape[:2])
            h, w = test1.shape[:2]
            # print(test1.shape)  # (960, 721, 3)

            blob = cv2.dnn.blobFromImage(img, scalefactor=1., size=(300, 300),
                                         mean=(104., 177., 123.))
            self.facenet.setInput(blob)
            dets = self.facenet.forward()

            for i in range(dets.shape[2]):
                # print(name)
                # Confidence of this detection
                confidence = dets[0, 0, i, 2]
                # Threshold the confidence at 0.5
                if confidence < 0.5:
                    continue
                # print('confidence :: ', confidence * 100)

                # Compute the bounding box
                x1 = int(dets[0, 0, i, 3] * w)  # x coordinate of the box's start point
                y1 = int(dets[0, 0, i, 4] * h)  # y coordinate of the box's start point
                x2 = int(dets[0, 0, i, 5] * w)  # x coordinate of the box's end point
                y2 = int(dets[0, 0, i, 6] * h)  # y coordinate of the box's end point

                # load DB
                # dir_name = "result"
                # pdb = PersonDB()
                # pdb.load_db(dir_name)
                # pdb.print_persons()

                # Crop the face region out of the original image
                face = img[y1:y2, x1:x2]
                # print(type(face))
                # print('face.shape :: ', face.shape)
                self.face.append(face)

    for i in self.face:
        print(i.shape)
        face_encoding = face_recognition.face_encodings(i)
        if len(face_encoding) > 0:
            print('ok')
            self.known_face_encodings.append(face_encoding[0])
        else:
            print("No face found in this image!")

    # face_encoding = face_recognition.face_encodings(img)[0]
    # self.known_face_encodings.append(face_encoding)

    # Initialize some variables
    self.face_locations = []
    self.face_encodings = []
    self.face_names = []
    self.process_this_frame = True
def __init__(self):
    self.camera = camera.VideoCamera()
    self.registered_names = os.listdir('knowns/')
    self.location = {'top': 0, 'bottom': 0, 'left': 0, 'right': 0}
def __init__(self, cnum):
    # Using OpenCV to capture from device 0. If you have trouble capturing
    # from a webcam, comment the line below out and use a video file
    # instead.
    self.camera = camera.VideoCamera(cnum)

    self.known_face_encodings = []
    self.known_face_names = []

    # Create the encoding file on the first run; if it already exists, just read it back
    if os.path.isfile(encoding_filename):
        # The file exists: open it and read the cached data
        f = open(encoding_filename, "r")
        while True:
            name = f.readline().strip()  # strip the trailing newline so names match
            if not name:
                print("Finished reading the txt file")
                break
            self.known_face_names.append(name)
            # print(name)

            # Read the face encoding data (128 values, one per line)
            datas = []
            for i in range(0, 128):
                data = f.readline().split("\n")
                datas.append(float(data[0]))
            face_encoding = np.array(datas)
            datas.clear()
            self.known_face_encodings.append(face_encoding)
            # print(face_encoding)
    else:
        # The file does not exist: create it and store the encoding data in it
        f = open(encoding_filename, "w")

        # Load sample pictures and learn how to recognize it.
        dirname = 'knowns'
        files = os.listdir(dirname)
        for filename in files:
            name, ext = os.path.splitext(filename)
            if ext == '.jpg':
                self.known_face_names.append(name)
                pathname = os.path.join(dirname, filename)
                img = face_recognition.load_image_file(pathname)
                face_encoding = face_recognition.face_encodings(img)[0]
                # print(face_encoding)
                self.known_face_encodings.append(face_encoding)

                # Save the name to the file
                f.write(name + "\n")
                # Save the face encoding data
                np.savetxt(f, face_encoding, delimiter=", ")

    # Initialize some variables
    self.face_locations = []
    self.face_encodings = []
    self.face_names = []
    self.process_this_frame = True

    # Close the file
    f.close()
    return
        helmet_result = helmet.get_detection(frame=small_frame, copy_frame=face_recog_result)

        # show the frame
        cv.imshow(winName, helmet_result)
        key = cv.waitKey(2000) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

# --image option process
elif args.video:
    # Get known image from train_path
    helmet = helmet.helmet_detection()
    cam = camera.VideoCamera(test_path)
    face_recog = face_recog.FaceRecog(train_path)

    while True:
        frame = cam.get_frame()
        frame = cv.resize(frame, dsize=(640, 480), interpolation=cv.INTER_AREA)

        face_recog_result = face_recog.get_frame(frame)
        helmet_result = helmet.get_detection(frame, face_recog_result)

        # show the frame
        cv.imshow(winName, helmet_result)
        key = cv.waitKey(1) & 0xFF