Example #1
import cv2
face_cascade = cv2.CascadeClassifier('data/haarcascade_frontalface_alt.xml')
cap = cv2.VideoCapture(0)
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)
    for (x, y, w, h) in faces:
        print(x, y, w, h)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]
        color = (255, 0, 0)  # BGR
        stroke = 2
        cv2.rectangle(frame, (x, y), (x+w, y+h), color, stroke)

    # label the frame only when at least one face was found
    if len(faces) > 0:
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(frame, "Face Detected", (15, 25), font, 1, color=(255, 0, 0), thickness=1)
    cv2.imshow('frame',frame)
    
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()

Example #2
def adjust(img, alpha, bias):  # signature reconstructed; the snippet begins mid-function
    w = img.shape[1]
    h = img.shape[0]
    #image = []
    for i in range(0,w):
        for j in range(0,h):
            for c in range(3):
                tmp = int(img[j,i,c]*alpha + bias)
                if tmp > 255:
                    tmp = 255
                elif tmp < 0:
                    tmp = 0
                img[j,i,c] = tmp
    return img
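
# The triple loop above is very slow in Python; NumPy can apply the same
# clamp-to-[0, 255] transform in one vectorized step. A minimal sketch,
# assuming a uint8 image (adjust_fast is not part of the original code):
import numpy as np

def adjust_fast(img, alpha, bias):
    # compute img*alpha + bias over the whole array, then clamp to [0, 255]
    out = img.astype(np.float32) * alpha + bias
    return np.clip(out, 0, 255).astype(np.uint8)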


# Load the classifier
haar = cv2.CascadeClassifier(r'E:\ProgramData\Anaconda3\envs\tenserflow02\Lib\site-packages\cv2\data\haarcascade_frontalface_default.xml')

# Open the camera; the argument is the input stream (a camera index or a video file)
camera = cv2.VideoCapture(0)

n = 1
while True:
    if n <= 5000:
        print("It's processing %s image." % n)
        # read a frame
        success, img = camera.read()
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = haar.detectMultiScale(gray_img, 1.3, 5)
        for f_x, f_y, f_w, f_h in faces:
            face = img[f_y:f_y+f_h, f_x:f_x+f_w]
            face = cv2.resize(face, (64,64))
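            # The snippet is cut off here; presumably each cropped face is
            # saved and the counter advanced, e.g. (output path assumed):
            # cv2.imwrite('faces/%d.jpg' % n, face)
            # n += 1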
Example #3
import numpy as np
import cv2
import os
import face_recognition

# Face detection classifier
faceCascade = cv2.CascadeClassifier(
    r'/Users/haodongsheng/.local/share/virtualenvs/hellopy-X998b6LV/lib/python3.7/site-packages/cv2/data/haarcascade_frontalface_default.xml'
)

save_dir = r'/Users/haodongsheng/Documents/Prog/mongoMaster/face/'


def get_video_path(dir_path, target_path):
    for name in os.listdir(dir_path):
        path = os.path.join(dir_path, name)
        if os.path.isdir(path):
            get_video_path(path, target_path)
        if os.path.isfile(path):
            temp_path = str.lower(path)
            if temp_path.endswith(".mp4"):
                recognize(path, target_path)
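

# A flatter alternative to the manual recursion above, using os.walk
# (a sketch; it assumes the same recognize(path, target_path) helper):
def get_video_path_walk(dir_path, target_path):
    for root, _dirs, files in os.walk(dir_path):
        for name in files:
            if name.lower().endswith('.mp4'):
                recognize(os.path.join(root, name), target_path)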


def recognize(file_path, target_path, tolerance=0.5):
    global faceCascade
    global save_dir
    # Open the video file
    cap = cv2.VideoCapture(file_path)
Example #4
import cv2
import numpy as np

video = cv2.VideoCapture("video.mp4")
car_classifier = cv2.CascadeClassifier("cars.xml")
people_classifier = cv2.CascadeClassifier("haarcascade_fullbody.xml")

while True:
    ret, frame = video.read()
    if not ret:  # stop when the video ends
        break

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    cars = car_classifier.detectMultiScale(gray, 1.3, 3)
    people = people_classifier.detectMultiScale(gray, 1.1, 2)

    for (x, y, w, h) in cars:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)

    for (x, y, w, h) in people:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 3)

    cv2.imshow("video", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video.release()
cv2.destroyAllWindows()
Example #5
import glob

import cv2

if __name__ == "__main__":
    path_to_xml_classifier_file = "../classifier/cascade.xml"
    path_to_test_images = "../../Hauptprojekt/debugImages/found_vehicles/"

    files = glob.glob('{0}/*.png'.format(path_to_test_images))
    # create the classifier once, outside the per-image loop
    classifier = cv2.CascadeClassifier(path_to_xml_classifier_file)
    for file in files:
        image = cv2.imread(file)
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        lps = classifier.detectMultiScale(gray_image)
        for (x, y, w, h) in lps:
            cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
        cv2.imshow('img', image)
        cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #6
def facialRecognition(drone):
	faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
	rectangleColor = (0,165,255)
	try:
		drone.connect()
		drone.wait_for_connection(60.0)

		container = av.open(drone.get_video_stream())

		tracker = dlib.correlation_tracker()
		
		trackingFace = False

		frameSkip = 300 #to eliminate initial lag
		while True:
			for frame in container.decode(video=0):
				
				if 0 < frameSkip:
					frameSkip = frameSkip - 1
					continue

				# convert the PyAV frame to a BGR array for OpenCV
				frame = cv2.cvtColor(numpy.array(frame.to_image()), cv2.COLOR_RGB2BGR)
				facialFrame = frame.copy()  # draw on a copy of the frame
				
				if not trackingFace:
					grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
					faces = faceCascade.detectMultiScale(grey, 1.3, 5)

					maxArea = 0
					x = 0
					y = 0
					w = 0
					h = 0

					for (_x, _y, _w, _h) in faces:
						if _w * _h > maxArea:
							x = int(_x)
							y = int(_y)
							w = int(_w)
							h = int(_h)
							maxArea = w * h

					if maxArea > 0:
						tracker.start_track(frame, dlib.rectangle(x-10, y-20, x+w+10, y+h+20))
						trackingFace = True

				if trackingFace:
					trackingQuality = tracker.update(frame)

					if trackingQuality >= 8.75:
						tracked_position =  tracker.get_position()

						t_x = int(tracked_position.left())
						t_y = int(tracked_position.top())
						t_w = int(tracked_position.width())
						t_h = int(tracked_position.height())
						cv2.rectangle(facialFrame,(t_x, t_y),(t_x + t_w , t_y + t_h),rectangleColor ,2)
					else:
						trackingFace = False

				cv2.imshow('Facial Recognition',facialFrame)
				cv2.waitKey(1)
	except Exception as ex:
		exc_type, exc_value, exc_traceback = sys.exc_info()
		traceback.print_exception(exc_type, exc_value,exc_traceback)
		print(ex)
	except KeyboardInterrupt as ex:
		print('KEYBOARD INTERRUPT')
	finally:
		cv2.destroyAllWindows()
Example #7
import cv2

face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

img = cv2.imread("photo.jpg")
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

faces = face_cascade.detectMultiScale(gray_img,
                                      scaleFactor=1.05,
                                      minNeighbors=5)

for x, y, w, h in faces:
    img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3)

resized = cv2.resize(img, (int(img.shape[1] / 3), int(img.shape[0] / 3)))

cv2.imshow("Gray", resized)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #8
from keras.preprocessing.image import img_to_array
import imutils
import cv2
from keras.models import load_model
import numpy as np
import json
# parameters for loading data and images
detection_model_path = 'models/haarcascade_frontalface_default.xml'
emotion_model_path = 'models/_mini_XCEPTION.102-0.66.hdf5'

# hyper-parameters for bounding boxes shape
# loading models
face_detection = cv2.CascadeClassifier(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)
EMOTIONS = [
    "angry", "disgust", "scared", "happy", "sad", "surprised", "neutral"
]
path = '../Assets/test.png'

while True:
    frame = cv2.imread(path)
    if frame is None:
        print('Reading while writing')
        continue
    # resize the frame
    frame = imutils.resize(frame, width=300)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_detection.detectMultiScale(gray,
                                            scaleFactor=1.1,
                                            minNeighbors=5,
                                            minSize=(30, 30))
Example #9
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import os
import numpy as np
import cv2
# Get the current directory
dir_path = os.path.abspath(os.path.dirname(__file__))
# Load the face detection classifier
faceCascade = cv2.CascadeClassifier(
    os.path.join(dir_path, 'model', 'haarcascade_frontalface_default.xml'))

# Load the eye detection classifier
eyeCascade = cv2.CascadeClassifier(
    os.path.join(dir_path, 'model', 'haarcascade_eye.xml'))

# Open the camera
cap = cv2.VideoCapture(0)
ok = cap.isOpened()
print("camera open is:", cap.isOpened())
while ok:
    # Read an image from the camera; ok reports whether the read succeeded
    ok, img = cap.read()
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Run face detection
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(32, 32))  # minimum size of the detection window
Example #10
import cv2
import numpy as np

img = cv2.imread('with_mask203.jpg')
face_cascade = cv2.CascadeClassifier(
    'C:/Users/truongnn/work_env/Lib/site-packages/cv2/data/haarcascade_frontalface_default.xml'
)
mouth = cv2.CascadeClassifier(
    'C:/Users/truongnn/work_env/Lib/site-packages/cv2/data/haarcascade_smile.xml'
)

# cv2.imshow('image',img)
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

faces = face_cascade.detectMultiScale(img_gray, 1.1, 5)
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 1)
    #cv2.putText(img, 'No Mask', (x, y-10), cv2.FONT_ITALIC, 0.8, (0,0,255), 1)
    roi_gray = img_gray[y:y + h, x:x + w]
    roi_color = img[y:y + h, x:x + w]
    is_mouth = mouth.detectMultiScale(roi_gray)
    for (a, b, c, d) in is_mouth:
        cv2.rectangle(roi_color, (a, b), (a + c, b + d), (0, 0, 255), 1)
    # if re:
    #     cv2.putText(img, 'No Mask', (x, y-10), cv2.FONT_ITALIC, 0.8, (0,0,255), 1)
    # else:
    #     cv2.putText(img, 'Masked', (x, y-10), cv2.FONT_ITALIC, 0.8,  (0,0,255), 1)
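
    # A sketch of the decision the commented-out code above hints at
    # (assumption: a detected mouth implies the face is unmasked):
    label = 'No Mask' if len(is_mouth) > 0 else 'Masked'
    cv2.putText(img, label, (x, y - 10), cv2.FONT_ITALIC, 0.8, (0, 0, 255), 1)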

cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #11
import os
import cv2 as cv

path = os.getcwd()
print(path)

cascade = cv.CascadeClassifier(path + "/datas/haar_cascade_files/haarcascade_frontalface_default.xml")
cascade_eye = cv.CascadeClassifier(path + "/datas/haar_cascade_files/haarcascade_eye.xml")
# cascade_nose = cv.CascadeClassifier(path + "/datas/haar_cascade_files/haarcascade_mcs_nose.xml")

img = cv.imread(path + "/datas/images/faces.jpg")

imgGray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

faces = cascade.detectMultiScale(imgGray, 1.1, 4)
eyes = cascade_eye.detectMultiScale(imgGray, 1.1, 4)

for (x, y, w, h) in faces:
    cv.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)

# the eyes were detected on the whole image, so draw them in their own
# loop (nesting this inside the face loop redrew the same boxes per face)
for (a, b, c, d) in eyes:
    cv.rectangle(img, (a, b), (a + c, b + d), (0, 255, 0), 2)

img_resize = cv.resize(img, (1280,960))

cv.imshow("Result", img_resize)
cv.waitKey(0)
Example #12
import sys
import numpy as np
import cv2 as cv

haar_face = cv.CascadeClassifier("haarcascade_frontalface_default.xml")
haar_eyes = cv.CascadeClassifier("haarcascade_eye.xml")

video_capture = cv.VideoCapture(0)

if not video_capture.isOpened():
    print("Cannot open camera!!!")
    exit()
while True:
    ret, frame = video_capture.read()

    if not ret:
        print("Can't receive frame (stream end?). Exiting ...")
        break

    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)

    detected_faces = haar_face.detectMultiScale(gray,
                                                scaleFactor=1.1,
                                                minNeighbors=5,
                                                minSize=(30, 30))

    for (x, y, w, h) in detected_faces:
        cv.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 3)
        eye_color = frame[y:y + h, x:x + w]
        eye_gray = gray[y:y + h, x:x + w]
        detected_eyes = haar_eyes.detectMultiScale(eye_gray,
Example #13
def trainer(request):

    # Creating a recognizer to train
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    # train with the default data
    detector = cv2.CascadeClassifier(
        "media/haarcascade_frontalface_default.xml")

    path = 'media/dataset'

    # To get all the images, we need the corresponding id
    def getImagesWithID(path):

        # os.path.join appends each file name f to the directory path
        imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
        # print(imagePaths)
        ids = []
        # Loop over all the images, collecting each user id and its face samples
        faceSamples = []

        for imagePath in imagePaths:
            # Open the image, then convert it into a numpy array
            PIL_img = Image.open(imagePath).convert(
                'L')  #convert it to grayscale
            # converting the PIL image to a numpy array
            # params: the image and the conversion format
            img_numpy = np.array(PIL_img, 'uint8')
            # The user id is encoded in the file name: a path like
            # dataset/user.1.7.jpg is split on '.' and the second field
            # (here 1) is the id, converted from string to int.
            id = int(os.path.split(imagePath)[-1].split(".")[1])

            # extract the face from the training image sample
            faces = detector.detectMultiScale(img_numpy)
            # If a face is found, append it and its id to the lists

            for (x, y, w, h) in faces:
                # images
                faceSamples.append(img_numpy[y:y + h, x:x + w])
                # label
                ids.append(id)

        return faceSamples, ids

    # Fetching ids and faces
    faces, ids = getImagesWithID(path)

    #Training the recognizer
    # For that we need face samples and corresponding labels

    recognizer.train(faces, np.array(ids))

    # Save the recognizer state so that we can access it later
    recognizer.write('media/trainer/trainingData.yml')
    cv2.destroyAllWindows()

    return redirect('/register')
Example #14
def detect(request):
    faceDetect = cv2.CascadeClassifier(
        'media/haarcascade_frontalface_default.xml')

    # creating recognizer
    rec = cv2.face.LBPHFaceRecognizer_create()
    # loading the training data
    rec.read('media/trainer/trainingData.yml')

    font = cv2.FONT_HERSHEY_SIMPLEX
    # assign id = 0
    id = 0
    # creating name array
    #names = ['None', 'Akash', 'Sabin', 'Biplov', 'Dipesh']
    #fetching data from database
    em = Employee.objects.values_list('name', flat=True)

    names = list(em)

    cam = cv2.VideoCapture(0)
    cam.set(3, 640)  # set video width
    cam.set(4, 480)  # set video height
    # Define min window size to be recognized as a face
    minW = 0.1 * cam.get(3)
    minH = 0.1 * cam.get(4)

    while True:
        # cam.read will return the status variable and the captured colored image
        _, img = cam.read()
        img = cv2.flip(img, 1)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = faceDetect.detectMultiScale(
            gray,
            scaleFactor=1.2,
            minNeighbors=5,
            minSize=(int(minW), int(minH)),
        )
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)

            id, conf = rec.predict(
                gray[y:y + h, x:x + w])  #This will predict the id of the face

            if (conf < 100):
                id = names[id]
                conf = "  {0}".format(round(100 - conf))

                # record attendance only when confidence is 35 or higher
                if (int(conf) > 35):
                    date = datetime.datetime.now().strftime("%Y-%m-%d")

                    old = Attendance.objects.filter(name=id,
                                                    date__date=date).first()

                    if old is None:
                        at = Attendance()
                        at.name = id
                        at.save()

                else:
                    messages.info(request, 'oops!! not detected!!')
            else:
                id = "unknown"
                conf = "  {0}".format(round(100 - conf))

            cv2.putText(img, str(id), (x + 5, y - 5), font, 1, (255, 255, 255),
                        2)
            cv2.putText(img, str(conf), (x + 5, y + h - 5), font, 1,
                        (255, 255, 0), 1)

        cv2.imshow("Face", img)
        k = cv2.waitKey(10) & 0xff  # Press 'ESC' for exiting video
        if k == 27:
            break

    cam.release()
    cv2.destroyAllWindows()
    return redirect('/attendance')
Example #15
app.config['MAIL_PASSWORD'] = '******'
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
mail = Mail(app)
mysql = MySQL(app)

#courseid=''

import csv
from datetime import datetime
#from app import courseid
import cv2
import face_recognition
import os
import numpy as np
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt2.xml")
ds_factor = 0.6
filename = ''


class VideoCamera(object):
    #courseid=''
    def __init__(self):

        self.video = cv2.VideoCapture(0)

    def __del__(self):
        self.video.release()

    path = 'E:/flask_demo/static/images'
    images = []
Example #16
import cv2
import dlib
import math
import time

car_detect = cv2.CascadeClassifier('car_detect_harrcascade.xml')
video = cv2.VideoCapture('highway.mp4')
ret, image1 = video.read()

# Define the frame width and height parameters
f_width = 1280
f_height = 720

# Set the pixels-per-meter parameter; here 1 pixel = 1 meter
pixels_per_meter = 1

# Parameters used for tracking
frame_idx = 0
car_number = 0
fps = 0

carTracker = {}
carNumbers = {}
carStartPosition = {}
carCurrentPosition = {}
speed = [None] * 1000


# fourcc=cv2.VideoWriter_fourcc(*'XVID')
# out = cv2.VideoWriter('outpy.mp4',fourcc,20.0, (f_width,f_height))
# Function to remove bad trackers
Example #17
def __init__(self, Id, name):
    self.Id = Id
    self.name = name
    self.detector = cv2.CascadeClassifier("Assets\\haarcascade\\haarcascade_frontalface_default.xml")
    self.RES_W = 640  # 1280 # 640 # 256 # 320 # 480 # pixels
    self.RES_H = 480  # 720 # 480 # 144 # 240 # 360 # pixels
Example #18
def scan():
    # load model
    model = model_from_json(open("fer.json", "r").read())
    # load weights
    model.load_weights('fer.h5')

    face_haar_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

    cap = cv2.VideoCapture(0)

    while True:
        ret, test_img = cap.read()  # captures frame and returns boolean value and captured image
        if not ret:
            continue
        gray_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)

        faces_detected = face_haar_cascade.detectMultiScale(gray_img, 1.32, 5)

        for (x, y, w, h) in faces_detected:
            cv2.rectangle(test_img, (x, y), (x + w, y + h), (255, 0, 0), thickness=7)
            roi_gray = gray_img[y:y + h, x:x + w]  # crop the face region (rows use h, columns use w)
            roi_gray = cv2.resize(roi_gray, (48, 48))
            img_pixels = image.img_to_array(roi_gray)
            img_pixels = np.expand_dims(img_pixels, axis=0)
            img_pixels /= 255

            predictions = model.predict(img_pixels)

            # find max indexed array
            max_index = np.argmax(predictions[0])

            emotions = ('angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral')
            predicted_emotion = emotions[max_index]

            cv2.putText(test_img, predicted_emotion, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

        resized_img = cv2.resize(test_img, (600, 350))
        cv2.imshow('Emotion Melody', resized_img)
        key = cv2.waitKey(30) & 0xff
        if key == 13:  # wait until 'enter' key is pressed
            break

    cap.release()
    cv2.destroyAllWindows()

    mp = '/Applications/VLC.app/Contents/MacOS/VLC'
    if predicted_emotion == "happy":
        file = '/Users/ashmika/Desktop/face-detection/songs/happy/'
        lab = Label(root, text="You looked happy, so an energetic happy playlist was created!")
        lab.pack()
        subprocess.call([mp, file])

    if predicted_emotion == "sad":
        lab=Label(root,text = "You looked sad, a sad songs playlist was created")
        lab.pack()
        file = '/Users/ashmika/Desktop/face-detection/songs/sad/'
        subprocess.call([mp, file])

    if predicted_emotion == "angry":
        lab=Label(root,text = "You looked angry, hope that playlist put you in a good mood!")
        lab.pack()
        file = '/Users/ashmika/Desktop/face-detection/songs/angry/'
        subprocess.call([mp, file])

    if predicted_emotion == "neutral":
        lab=Label(root,text = "You were calm, hope that playlist matched your vibe!")
        lab.pack()
        file = '/Users/ashmika/Desktop/face-detection/songs/neutral/'
        subprocess.call([mp, file])
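
    # The four branches above differ only in the folder and the message; a
    # table-driven sketch (messages and folder layout taken from the code above):
    # MESSAGES = {
    #     "happy": "You looked happy, so an energetic happy playlist was created!",
    #     "sad": "You looked sad, a sad songs playlist was created",
    #     "angry": "You looked angry, hope that playlist put you in a good mood!",
    #     "neutral": "You were calm, hope that playlist matched your vibe!",
    # }
    # if predicted_emotion in MESSAGES:
    #     Label(root, text=MESSAGES[predicted_emotion]).pack()
    #     subprocess.call([mp, '/Users/ashmika/Desktop/face-detection/songs/%s/' % predicted_emotion])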
Example #19
import cv2
import numpy as np

face_cascade = cv2.CascadeClassifier(
    '/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/cv2/data/haarcascade_frontalface_alt2.xml'
)

cap = cv2.VideoCapture(0)

while True:
    # Capture frame by frame
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # scaleFactor=4 is very coarse; values closer to 1.1-1.5 detect more reliably
    faces = face_cascade.detectMultiScale(gray, scaleFactor=4, minNeighbors=5)

    for (x, y, w, h) in faces:
        # print(x, y, w, h)
        roi_gray = gray[y:y + h, x:x + w]
        roi_bgr = frame[y:y + h, x:x + w]

        img_item = 'face_image.png'
        cv2.imwrite(img_item, roi_gray)
        #draw square
        color = (0, 0, 255)
        stroke = 3
        end_coordx = x + w
        end_coordy = y + h
        cv2.rectangle(frame, (x, y), (end_coordx, end_coordy), color, stroke)
    # show the camera with frames
    cv2.imshow('frame', frame)
Example #20
import numpy as np
import cv2

img_file_path = 'src/children.jpg'

# Create a cascade classifier for face detection --- ①
face_cascade = cv2.CascadeClassifier(
    'data/haarcascade_frontalface_default.xml')
# Create a cascade classifier for eye detection ---②
eye_cascade = cv2.CascadeClassifier('data/haarcascade_eye.xml')
# Read the target image and convert it to grayscale ---③
img = cv2.imread(img_file_path)
print(img)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Detect faces ---④
faces = face_cascade.detectMultiScale(gray)
# Iterate over the detected faces ---⑤
for (x, y, w, h) in faces:
    # Draw a rectangle around each detected face ---⑥
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
    # Use the face region as the ROI ---⑦
    roi = gray[y:y + h, x:x + w]
    # Detect eyes inside the ROI ---⑧
    eyes = eye_cascade.detectMultiScale(roi)
    # Draw a rectangle around each detected eye ---⑨
    for (ex, ey, ew, eh) in eyes:
        cv2.rectangle(img[y:y + h, x:x + w], (ex, ey), (ex + ew, ey + eh),
                      (0, 255, 0), 2)
# Show the result
cv2.imshow('img', img)
cv2.waitKey(0)
Example #21
import cv2

face_cascade = cv2.CascadeClassifier(
    'C://Users/sheetal/Desktop/python_demoProjects/face_detector.xml')

img = cv2.imread('C://Users/sheetal/Desktop/python_demoProjects/dog.jpg')

faces = face_cascade.detectMultiScale(img, 1.1, 4)

for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
cv2.imwrite("C://Users/sheetal/Desktop/python_demoProjects/face_detected.png",
            img)
print('Successfully saved')
Example #22
def main():
    class NewlineFormatter(argparse.HelpFormatter):
        def _split_lines(self, text, width):
            return text.splitlines()

    parser = argparse.ArgumentParser(
        description="Detect and crop faces from an image.",
        formatter_class=NewlineFormatter,
    )
    parser.add_argument(
        "image",
        nargs="+",
        help="path to input image",
    )
    parser.add_argument(
        "--cascade",
        default="haarcascade_frontalface_default.xml",
        help="face detection cascade to be used by OpenCV",
    )
    parser.add_argument(
        "-o",
        "--output",
        default="{name}_{i}.png",
        help="Output path template, evaluates placehoders: \n\
{path} -> original file path, \n\
{name} -> original file name, \n\
{ext} -> original file extension, \n\
{i} -> index of detected face",
    )
    parser.add_argument(
        "-p",
        "--padding",
        type=float,
        default="0.3",
        help="relative space around recognized face (> 0), default=0.3",
    )
    parser.add_argument(
        "-s",
        "--size",
        type=int,
        default="200",
        help="export image resolution height / width, default=200",
    )
    parser.set_defaults(grayscale=False)
    parser.add_argument(
        "-g",
        "--grayscale",
        dest="grayscale",
        action="store_true",
        help="grayscale cropped image",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        help="increase verbosity (may be applied multiple times)",
        action="count",
        default=0)
    parser.add_argument("-c",
                        "--color",
                        default=(255, 255, 255, 0),
                        type=eval,  # parses the tuple literal; note eval executes arbitrary input
                        help="background color for circular cutout, " +
                        "BRG(A)-format, default: (255, 255, 255, 0)")
    args = parser.parse_args()

    logging_level = max(10, logging.WARN - (args.verbose) * 10)
    logger.setLevel(logging_level)
    logger.debug(f"set logging level to {logging_level}")

    cascade_module = os.path.join(os.path.dirname(__file__),
                                  f"haarcascades/{args.cascade}")

    if os.path.exists(args.cascade):
        logger.info(f"Loading {args.cascade}")
        cascade = cv2.CascadeClassifier(args.cascade)
    elif os.path.exists(cascade_module):
        logger.info(f"Loading {cascade_module}")
        cascade = cv2.CascadeClassifier(cascade_module)
    else:
        logger.fatal(f"cascade could not be loaded, path: {cascade_module}")
        sys.exit(1)

    for image_path in args.image:
        img = cv2.imread(image_path)
        logger.info(f"Processing {image_path}, " +
                    f"resolution: {len(img)}x{len(img[0])}")

        # extract variables for output filename generation
        output_opts = {}
        output_opts["path"] = os.path.dirname(image_path)
        output_opts["name"], ext = os.path.splitext(
            os.path.basename(image_path))
        output_opts["ext"] = ext[1:]

        # extract faces by their matched bounding boxes
        faces = extract_faces(img, cascade, spacing=args.padding)
        logger.info(f"Found {len(faces)} faces")

        for i, face in enumerate(faces):
            masked = circle_mask(face, args.color, args.size)

            # format output path
            output_opts["i"] = i
            output_path = args.output.format(**output_opts)
            logger.info(
                f"Exporting {output_path}, grayscale: {args.grayscale}")

            export(masked, output_path, args.grayscale)
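
# extract_faces, circle_mask, and export are project helpers not shown in
# this snippet. A minimal sketch of what extract_faces might do (assumed
# signature and padding behavior, not the original implementation):
# def extract_faces(img, cascade, spacing=0.3):
#     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#     crops = []
#     for (x, y, w, h) in cascade.detectMultiScale(gray, 1.1, 5):
#         pad = int(spacing * w)
#         x0, y0 = max(x - pad, 0), max(y - pad, 0)
#         crops.append(img[y0:y + h + pad, x0:x + w + pad])
#     return crops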
Example #23
# We will use OpenCV's cascade classifier to detect the faces. To detect a custom
# object or build something custom, see: https://www.youtube.com/watch?v=dZ4itBvIjVY


import cv2

faceCascade = cv2.CascadeClassifier("Resources/haarcascade_frontalface_default.xml")

img = cv2.imread('Resources/khabib.jpg')
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# find the faces in the image using the face cascade
faces = faceCascade.detectMultiScale(imgGray, 1.1, 4)

# bounding box around each face
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)

cv2.imshow("Result", img)
cv2.waitKey(0)
Example #24
import cv2
import os
from datetime import datetime
from imutils.video import VideoStream
import face_recognition
import imutils
import pickle
import time
from imutils import paths

detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')


def screen(data_obj):

    while (data_obj.get_mode()):

        if (data_obj.get_screen()):
            frame = cv2.imread("resim.jpg")
            s = data_obj.get_data()
            cv2.putText(frame, "Yeni kayit yapmak icin 'Yeni'", (170, 150),
                        cv2.FONT_ITALIC, 1, (255, 255, 255), 2)
            cv2.putText(frame, "Cihazi baslatmak icin 'baslat'", (170, 200),
                        cv2.FONT_ITALIC, 1, (255, 255, 255), 2)
            cv2.putText(frame, "diye seslenebilirsiniz", (220, 250),
                        cv2.FONT_ITALIC, 1, (255, 255, 255), 2)
            cv2.putText(frame, "{}".format(s[0]), (220, 300), cv2.FONT_ITALIC,
                        1, (255, 255, 255), 2)
            cv2.putText(frame, "{}".format(s[1]), (170, 350), cv2.FONT_ITALIC,
                        1, (255, 255, 255), 2)
            cv2.imshow("Frame", frame)
Example #25
import cv2
import numpy as np

# Load the cascade file
cascade = cv2.CascadeClassifier('nose.xml')

# Open the webcam
cap = cv2.VideoCapture(0)

# Capture frames continuously in an infinite loop
while True:
    # read func will capture a frame
    ret, frame = cap.read()
    # turn the image to grayscale
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # use detectMultiScale to detect the nose; it returns nose bounding boxes
    rects = cascade.detectMultiScale(gray, 1.7, 11)
    # draw the rectangle on the frame

    # print(rects)

    for (x, y, w, h) in rects:
        y = int(y - 0.15 * h)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
        cv2.putText(frame, "No Mask", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                    (0, 0, 255), 2)
        break
    else:
        # for/else: this branch runs only when the loop was not broken,
        # i.e. when no nose was detected, so a mask is assumed
        cv2.putText(frame, "Detected Mask", (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
                    0.7, (0, 0, 255), 2)
Example #26
def screen(data_obj):

    while (data_obj.get_mode()):

        if (data_obj.get_screen()):
            frame = cv2.imread("resim.jpg")
            s = data_obj.get_data()
            cv2.putText(frame, "Yeni kayit yapmak icin 'Yeni'", (170, 150),
                        cv2.FONT_ITALIC, 1, (255, 255, 255), 2)
            cv2.putText(frame, "Cihazi baslatmak icin 'baslat'", (170, 200),
                        cv2.FONT_ITALIC, 1, (255, 255, 255), 2)
            cv2.putText(frame, "diye seslenebilirsiniz", (220, 250),
                        cv2.FONT_ITALIC, 1, (255, 255, 255), 2)
            cv2.putText(frame, "{}".format(s[0]), (220, 300), cv2.FONT_ITALIC,
                        1, (255, 255, 255), 2)
            cv2.putText(frame, "{}".format(s[1]), (170, 350), cv2.FONT_ITALIC,
                        1, (255, 255, 255), 2)
            cv2.imshow("Frame", frame)
        else:
            detector = cv2.CascadeClassifier(
                'haarcascade_frontalface_default.xml')
            total = 0
            cap = cv2.VideoCapture(0)
            datadir = 'dataset'
            name = data_obj.get_name()
            if not os.path.isdir(datadir):
                os.mkdir(datadir)
            path = os.path.join(datadir, str(name))
            os.mkdir(path)
            a = datetime.now()
            while True:
                ret, frame = cap.read()
                orig = frame.copy()
                frame = imutils.resize(frame, width=800)
                rects = detector.detectMultiScale(cv2.cvtColor(
                    frame, cv2.COLOR_BGR2GRAY),
                                                  scaleFactor=1.1,
                                                  minNeighbors=5,
                                                  minSize=(30, 30))
                for (x, y, w, h) in rects:
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0),
                                  2)

                cv2.imshow("Frame", frame)

                b = datetime.now()
                if ((b - a).seconds >= 2):
                    a = b
                    p = os.path.sep.join([path, "{}.png".format(str(total))])
                    cv2.imwrite(p, orig)
                    if total == 10:
                        break
                    total += 1

            cap.release()
            data_obj.set_screen(True)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
    if (not data_obj.get_mode()):

        imagePaths = list(paths.list_images("dataset"))

        knownEncodings = []
        knownNames = []

        for (i, imagePath) in enumerate(imagePaths):
            name = imagePath.split(os.path.sep)[-2]

            image = cv2.imread(imagePath)
            rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            boxes = face_recognition.face_locations(rgb, model="hog")
            encodings = face_recognition.face_encodings(rgb, boxes)

            for encoding in encodings:
                knownEncodings.append(encoding)
                knownNames.append(name)

        data = {"encodings": knownEncodings, "names": knownNames}
        f = open("encodings.pickle", "wb")
        f.write(pickle.dumps(data))
        f.close()

        data = pickle.loads(open("encodings.pickle", "rb").read())
        detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

        vs = VideoStream(src=0).start()
        time.sleep(2.0)

        while True:
            frame = vs.read()
            frame = imutils.resize(frame, width=800)

            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            rects = detector.detectMultiScale(gray,
                                              scaleFactor=1.1,
                                              minNeighbors=5,
                                              minSize=(30, 30),
                                              flags=cv2.CASCADE_SCALE_IMAGE)

            boxes = [(int(y), int(x + w), int(y + h), int(x))
                     for (x, y, w, h) in rects]

            encodings = face_recognition.face_encodings(rgb, boxes)
            names = []

            for encoding in encodings:
                matches = face_recognition.compare_faces(
                    data["encodings"], encoding)
                name = "Bilinmeyen"

                if True in matches:
                    matchedIdxs = [i for (i, b) in enumerate(matches) if b]
                    counts = {}

                    for i in matchedIdxs:
                        name = data["names"][i]
                        counts[name] = counts.get(name, 0) + 1

                    name = max(counts, key=counts.get)

                names.append(name)

            for ((top, right, bottom, left), name) in zip(boxes, names):
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0),
                              2)
                y = top - 15 if top - 15 > 15 else top + 15
                cv2.putText(frame, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX,
                            0.75, (0, 255, 0), 2)

            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

            if key == ord("q"):
                break

        cv2.destroyAllWindows()
Example #27
import numpy as np
import cv2
import sqlite3

import openpyxl

face_cascade = cv2.CascadeClassifier('lib/haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
rec = cv2.face.LBPHFaceRecognizer_create()
rec.read("trainingdata.yml")
id = 0

workbook = openpyxl.load_workbook('Diemdanh.xlsx')
ws = workbook.worksheets[0]

# set text style
fontface = cv2.FONT_HERSHEY_COMPLEX_SMALL
fontscale = 1
fontcolor = (255, 23, 252)


def getProfile(id):
    conn = sqlite3.connect("FaceBase.db")
    # use a parameterized query instead of string concatenation
    # to avoid SQL injection
    cursor = conn.execute("SELECT * FROM People WHERE ID=?", (id,))
    profile = cursor.fetchone()
    conn.close()
    return profile
Example #28
import cv2
import numpy as np
cap = cv2.VideoCapture('G:\\Python\\I_Processing\\footage.mp4')
human_cascade = cv2.CascadeClassifier('haarcascade_fullbody.xml')
while True:
    cord = []
    ret, frame = cap.read()
    if not ret:  # stop when the video ends
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    human = human_cascade.detectMultiScale(gray, 1.1, 4)
    for (x, y, w, h) in human:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 220), 3)
        cord.append([x, y, w, h])
    # show every frame, not just frames with detections
    cv2.imshow('video', frame)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break
print(cord)
cap.release()
cv2.destroyAllWindows()
Example #29
import cv2
import numpy as np
from sklearn.neighbors import KNeighborsClassifier

data = np.load("face_data.npy")
print(data.shape, data.dtype)

X = data[:, 1:].astype(int)
y = data[:, 0]

model = KNeighborsClassifier()
model.fit(X, y)

cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)  # camera 0 via the DirectShow backend

detector = cv2.CascadeClassifier(
    "./Datasets/haarcascade_frontalface_default.xml")

while True:

    ret, frame = cap.read()

    if ret:
        faces = detector.detectMultiScale(frame)

        for face in faces:
            x, y, w, h = face

            cut = frame[y:y + h, x:x + w]

            fix = cv2.resize(cut, (100, 100))
            gray = cv2.cvtColor(fix, cv2.COLOR_BGR2GRAY)
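            # The snippet ends here; presumably the gray crop is flattened
            # to match the training vectors and classified, e.g.:
            # pred = model.predict(gray.flatten().reshape(1, -1))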
Example #30
# -*- coding: utf-8 -*-
"""
Created on Thu Jun  4 13:38:33 2020

@author: Vishal
"""

# Importing the libraries
import cv2
import os

# os.environ["OPENCV_VIDEOIO_DEBUG"] = '1'

# Loading the cascades
face_cascade = cv2.CascadeClassifier('Face_Recognition/haarcascade/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('Face_Recognition/haarcascade/haarcascade_eye.xml')
smile_cascade = cv2.CascadeClassifier('Face_Recognition/haarcascade/haarcascade_smile.xml')

# Detect the face and eye
def detect(gray, frame):
    # get the face from gray image
    faces = face_cascade.detectMultiScale(image=gray, scaleFactor=1.3, minNeighbors=5)
    for (x, y, w, h) in faces:
        # draw the rectangle on the colored image
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
        
        # take the face region in both the gray and color images
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]
        
        # get the face from roi_gray image