Ejemplo n.º 1
0
def detect_face():
    """Watch the Pi camera stream until the Haar cascade finds a face.

    Returns:
        bool: True once at least one face has been detected.

    Bug fix: the original ``while (True)`` loop had no break, so the
    cleanup code and both return statements were unreachable; the loop now
    exits as soon as a face is found.
    """
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    cap = PiVideoStream().start()
    time.sleep(2.0)  # give the camera sensor time to warm up
    face_detected = False
    while not face_detected:
        # Read the next frame from the threaded stream.
        frame = cap.read()
        # Convert to grayscale for the cascade detector.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Detect the faces (scaleFactor=1.1, minNeighbors=4).
        faces = face_cascade.detectMultiScale(gray, 1.1, 4)
        # Draw a rectangle around each face and flag the detection.
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 0), 2)
            face_detected = True

    # PiVideoStream exposes stop(), not release(); the original
    # cap.release() would have raised AttributeError had it been reached.
    cap.stop()
    cv2.destroyAllWindows()
    return face_detected
Ejemplo n.º 2
0
class Camera(object):
    """Wrap a threaded Pi camera stream and serve frames as JPEG bytes."""

    def __init__(self):
        # Start the threaded Pi camera stream.  The version-dependent
        # CAP_PROP_* lookups supported a retired cv2.VideoCapture path and
        # were never used, so they have been dropped.
        self.video = PiVideoStream().start()

    def __del__(self):
        # PiVideoStream exposes stop(), not release(); the original
        # release() call raised AttributeError during garbage collection.
        self.video.stop()

    def get_frame(self):
        """Return the latest camera frame encoded as JPEG bytes."""
        # The threaded stream can briefly return None right after start-up,
        # so poll until a real frame arrives.
        while True:
            image = self.video.read()
            if image is not None:
                break
        ret, jpeg = cv2.imencode('.jpg', image)
        # ndarray.tostring() was removed from numpy; tobytes() is the
        # byte-identical replacement.
        return jpeg.tobytes()
Ejemplo n.º 3
0
class Camera(object):
    """Serve Pi camera frames, annotated with detected faces and facial
    landmarks, as JPEG bytes.

    Relies on module-level ``detector`` (OpenCV Haar cascade),
    ``predictor`` (dlib landmark predictor) and imutils' ``face_utils``.
    """

    def __init__(self):
        # The version-dependent CAP_PROP_* lookups supported a retired
        # cv2.VideoCapture path and were never used; they have been dropped.
        self.video = PiVideoStream().start()

    def __del__(self):
        # PiVideoStream exposes stop(), not release(); the original
        # release() call raised AttributeError during garbage collection.
        self.video.stop()

    def get_frame(self):
        """Grab a frame, draw face boxes and landmarks, return JPEG bytes."""

        # The threaded stream can briefly return None right after start-up.
        while True:
            image = self.video.read()
            if image is not None:
                break

        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale frame by opencv's method
        rects = detector.detectMultiScale(gray,
                                          scaleFactor=1.1,
                                          minNeighbors=5,
                                          minSize=(30, 30),
                                          flags=cv2.CASCADE_SCALE_IMAGE)

        # loop over the face detections
        face_counter = 0
        for (x, y, w, h) in rects:
            # Convert the OpenCV rect into the dlib rectangle the
            # landmark predictor expects.
            rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))

            # Predict facial landmarks and convert them to a numpy array.
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            # Draw the bounding box and a running face label.
            (x, y, w, h) = face_utils.rect_to_bb(rect)
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

            cv2.putText(image, "Face #{}".format(face_counter + 1),
                        (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        (0, 255, 0), 2)

            # Draw each landmark as a small filled red dot.
            for (x, y) in shape:
                cv2.circle(image, (x, y), 1, (0, 0, 255), -1)

            face_counter = face_counter + 1

        ret, jpeg = cv2.imencode('.jpg', image)
        # ndarray.tostring() was removed from numpy; tobytes() is the
        # byte-identical replacement.
        return jpeg.tobytes()
            msg.attach(part)
            text = msg.as_string()
            server = smtplib.SMTP('smtp.gmail.com', 587)
            server.starttls()
            server.login(email_user, email_password)
            server.sendmail(email_user, email_send, text)
        server.quit()
        cv2.cv.PutText(cv2.cv.fromarray(img), str(name), (x, y + h), font, 255)
    cv2.imshow('Face', img)
    if cv2.waitKey(10) == ord('q'):
        break
# NOTE(review): this chunk is the tail of a larger script -- lcd_byte,
# lcd_string, LCD_CMD, LCD_LINE_1, GPIO, cam and main are defined
# elsewhere in the original file.
lcd_byte(0x01, LCD_CMD)  # presumably 0x01 clears the LCD -- confirm
lcd_string(" No Intruder", LCD_LINE_1)
time.sleep(1)
GPIO.cleanup()

# Release the camera handle and close any OpenCV windows.
cam.release()
cv2.destroyAllWindows()

if __name__ == '__main__':

    try:
        main()
    except KeyboardInterrupt:
        pass
    finally:
        # Always reset the LCD message and free the GPIO pins on exit,
        # even after Ctrl-C.
        lcd_byte(0x01, LCD_CMD)
        lcd_string(" No Intruder", LCD_LINE_1)
        time.sleep(1)
        GPIO.cleanup()
Ejemplo n.º 5
0
class EntryLog:
    """Camera-based entry logger.

    Recognizes a person with a pre-trained LBPH face recognizer, then asks
    them to confirm with a head nod (or cancel with a head shake) tracked
    via Lucas-Kanade optical flow, and finally appends a timestamped record
    to the day's entry log in the database.
    """

    def __init__(self):

        # Pre-trained LBPH recognizer and Haar-cascade face detector.
        self.recognizer = cv2.face.LBPHFaceRecognizer_create()
        self.recognizer.read('trainer/trainer.yml')
        self.cascadePath = "haarcascade/haarcascade_frontalface_default.xml"
        self.faceCascade = cv2.CascadeClassifier(self.cascadePath)
        self.ops = sys.platform
        self.font = cv2.FONT_HERSHEY_DUPLEX
        self.d = database.Database().getAll()
        # updated is True while the "Welcome" banner should be shown.
        self.updated = False
        # Camera backend: plain VideoCapture on Windows, threaded
        # PiVideoStream otherwise (presumably a Raspberry Pi).
        if self.ops == 'win32':
            # from imutils.video import WebcamVideoStream
            # self.cam = WebcamVideoStream(src=0).start()
            self.cam = cv2.VideoCapture(0)
        else:
            from imutils.video.pivideostream import PiVideoStream
            self.cam = PiVideoStream().start()
        # id -> display-name lookup built from the database dump.
        self.ls = {}
        for doc in self.d:
            self.ls[doc['id']] = doc['name']
        self.name = 'unknown'
        # idarr accumulates recognized ids over recent frames.
        self.idarr = []
        self.i = 0
        # q is the global quit flag, set when the user presses 'q'.
        self.q = False
        self.counts = 0
        # params for ShiTomasi corner detection
        self.feature_params = dict(maxCorners=100,
                                   qualityLevel=0.3,
                                   minDistance=7,
                                   blockSize=7)
        # Parameters for lucas kanade optical flow
        self.lk_params = dict(winSize=(15, 15),
                              maxLevel=5,
                              criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

        # Movement thresholds (pixels of accumulated optical-flow motion);
        # the non-Windows (Pi) build uses a lower gesture threshold.
        self.max_head_movement = 20
        self.movement_threshold = 50
        if self.ops == 'win32':
            self.gesture_threshold = 150
        else:
            self.gesture_threshold = 75
        self.x = 0
        self.y = 0
        # find the face in the image
        self.face_found = False
        self.frame_num = 0

    def get_coords(self, p1):
        """Return integer (x, y) from an optical-flow point array, coping
        with both the nested [[(x, y)]] and the flat [(x, y)] layouts."""
        try:
            return int(p1[0][0][0]), int(p1[0][0][1])
        except:
            return int(p1[0][0]), int(p1[0][1])

    def attn(self):
        """Main loop: recognize a face over up to 30 predictions, then ask
        for a gesture confirmation and, if confirmed, write today's log
        entry to the database.  Runs until the user presses 'q'."""

        # if self.ops == 'win32':
        #     # from imutils.video import WebcamVideoStream
        #     # self.cam = WebcamVideoStream(src=0).start()
        #     self.cam = cv2.VideoCapture(0)
        # else:
        #     from imutils.video.pivideostream import PiVideoStream
        #     self.cam = PiVideoStream().start()
        # self.cam = WebcamVideoStream(src=0).start()
        while True:
            if self.q:
                break
            # --- recognition phase: collect ids until 30 predictions ---
            while True:
                if self.ops == 'win32':
                    ret, im = self.cam.read()
                else:
                    im = self.cam.read()
                self.gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
                faces = self.faceCascade.detectMultiScale(self.gray, 1.2, 5)
                # Show the welcome banner for 2 seconds after a log entry.
                if self.updated:
                    if (datetime.datetime.now() - self.t).total_seconds() < 2:
                        cv2.putText(im, 'Welcome ' + self.name, (50, 50), self.font, 0.8, (0, 255, 0), 2)
                for (x, y, w, h) in faces:
                    cv2.rectangle(im, (x, y), (x + w, y + h), (225, 0, 0), 2)
                    Id, conf = self.recognizer.predict(self.gray[y:y + h, x:x + w])
                    self.face_found = True
                    if cv2.waitKey(10) == ord('q'):
                        self.q = True
                        break
                    # NOTE(review): this treats `conf` as a similarity score
                    # (higher = better match); OpenCV's LBPH confidence is
                    # normally a distance (lower = better) -- confirm the
                    # intended threshold direction.
                    if (conf >= 50):
                        self.name = self.ls.get(Id)
                        self.id = Id
                        # print(self.name, Id, conf)
                        self.idarr.append(Id)

                    cv2.putText(im, str(self.name), (x, y + h), self.font, 1, (255, 255, 255))
                cv2.namedWindow("Entry")
                cv2.imshow("Entry", im)
                if cv2.waitKey(10) == ord('q'):
                    self.q = True
                    break
                # After 30 recognized frames, tally the ids and move on.
                if len(self.idarr) == 30:
                    self.counts = np.bincount(np.array(self.idarr))
                    break

            if self.q:
                break

            # Seed the optical-flow tracker at a point inside the last
            # detected face.  NOTE(review): x/y/w/h deliberately leak out
            # of the detection loop above.
            if self.face_found:
                face_center = x + w / 2, y + h / 3
                self.p0 = np.array([[face_center]], np.float32)

            # self.ls.get(np.argmax())
            if self.confirm():
                # Append the recognized person to today's log document.
                now = datetime.datetime.now()
                day = str(now.year) + '-' + str(now.month) + '-' + str(now.day)
                log = []
                d = database.Database().getlogbydate(day)
                # if not d:
                #     m = {'_id': day, 'logs': []}
                #     database.Database().pushEntryLog(m)
                #     log = []
                if d:
                    log = d['logs']
                # self.now = datetime.datetime.now()
                self.updated = True
                l = {'id': self.id, 'name': self.name, 'timestamp': now.strftime("%H:%M:%S.%f")}
                log.append(l)
                database.Database().updatelog(day, log, self.name)
                self.t = datetime.datetime.now()

        # Release the camera with the backend-appropriate call.
        if self.ops == 'win32':
            self.cam.release()
        else:
            self.cam.stop()
        cv2.waitKey(1)
        cv2.destroyAllWindows()

    def confirm(self):
        """Reset the recognition tally, then return True if the user
        confirms with a nod (False on a shake)."""
        self.idarr = []
        self.counts = 0
        if self.gesture():
            return True
        else:
            return False

    def gesture(self):
        """Track the seeded face point with Lucas-Kanade optical flow and
        classify the head motion: returns True on a vertical nod, False on
        a horizontal shake.  Requires self.gray and self.p0 to have been
        set by attn()."""
        gesture = False
        x_movement = 0
        y_movement = 0

        while True:

            if self.ops == 'win32':
                ret, frame = self.cam.read()

            else:
                frame = self.cam.read()
            # Track the point from the previous frame into the new one.
            old_gray = self.gray.copy()
            self.gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, self.gray, self.p0, None, **self.lk_params)
            cv2.circle(frame, self.get_coords(p1), 4, (0, 0, 255), -1)
            cv2.circle(frame, self.get_coords(self.p0), 4, (255, 0, 0))

            # get the xy coordinates for points p0 and p1
            a, b = self.get_coords(self.p0), self.get_coords(p1)
            x_movement += abs(a[0] - b[0])
            y_movement += abs(a[1] - b[1])

            # On-screen instructions while no gesture has been decided.
            if not gesture: cv2.putText(frame, 'detected:', (50, 50), self.font, 0.8, (0, 0, 0), 2)
            if not gesture: cv2.putText(frame, self.name, (180, 50), self.font, 0.8, (255, 0, 0), 2)
            # text = 'x_movement: ' + str(x_movement)
            text = 'nod to confirm'
            if not gesture: cv2.putText(frame, text, (50, 100), self.font, 0.8, (0, 255, 0), 2)
            # text = 'y_movement: ' + str(y_movement)
            text = 'shake to cancel'
            if not gesture: cv2.putText(frame, text, (50, 150), self.font, 0.8, (0, 0, 255), 2)

            if cv2.waitKey(10) == ord('q'):
                self.q = True
                break

            # Horizontal motion beyond the threshold = shake = cancel;
            # vertical motion beyond the threshold = nod = confirm.
            if x_movement > self.gesture_threshold:
                self.updated = False
                return False
            if y_movement > self.gesture_threshold:
                return True

            # The tracked point becomes the seed for the next frame.
            self.p0 = p1

            cv2.imshow("Entry", frame)
            cv2.waitKey(1)
Ejemplo n.º 6
0
from imutils.video.pivideostream import PiVideoStream
import detectLines
import imutils
import time
import cv2
import numpy as np

# Start the threaded Pi camera stream and give the sensor time to warm up.
vs = PiVideoStream().start()
time.sleep(2)

frame_count = 0
while frame_count <= 1:
    frame = vs.read()

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    centerLine = detectLines.centerLine(gray)
    print(centerLine)
    # BUG FIX: the original reused `i` both as the frame counter and as the
    # per-segment loop variable, clobbering the counter with a line segment
    # and breaking `i += 1` / the `while` condition.
    for seg in centerLine:
        cv2.line(frame, (seg[0], seg[1]), (seg[2], seg[3]), (255, 0, 255), 2)
    cv2.imshow("frame", frame)
    key = cv2.waitKey(50000) & 0xFF
    frame_count += 1
    if key == ord("q"):
        break

# PiVideoStream exposes stop(), not release(); the original release()
# call would have raised AttributeError.
vs.stop()
cv2.destroyAllWindows()
Ejemplo n.º 7
0
class Camera():
    """Motion-triggered video recorder.

    Captures from either a USB webcam (cv2.VideoCapture) or a threaded Pi
    camera stream (PiVideoStream), selected by the module-level
    ``isLoadPiCam`` flag.  When the Haar ``classifier`` detects a subject,
    frames are written to an MP4 file until ``max_record_time`` elapses;
    recording re-arms after a ``time_in_between`` cool-down.
    """

    def __init__(self, config=None):
        # BUG FIX: the original used the mutable default `config={}`;
        # use None and create a fresh dict per instance instead.
        self.config = config if config is not None else {}
        self.flip = self.config['flip_cam']
        self.frame_width = self.config['res_x']
        self.frame_height = self.config['res_y']
        if not isLoadPiCam:
            self.cap = cv2.VideoCapture(0)
            # Ask the capture device for the configured resolution.
            self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.frame_width)
            self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.frame_height)
        else:
            self.cap = PiVideoStream(resolution=(
                self.frame_width, self.frame_height)).start()
        self.canRecord = True
        self.timeWithoutBody = 0
        self.startRecordingTime = 0   # ms timestamp when recording began
        self.pauseRecording = 0       # ms timestamp when recording stopped
        self.fourcc = cv2.VideoWriter_fourcc(*'avc1')

        self.out = cv2.VideoWriter()
        self.maxRecordingTime = self.config['max_record_time']
        self.classifier = cv2.CascadeClassifier(self.config['classifier'])
        self.isRecording = False
        self.currentFrame = None

    def recording(self):
        """Grab one frame, run the cascade on a half-size grayscale copy,
        and start/stop the video writer based on detections and timing."""
        if isLoadPiCam is False:
            _, img = self.cap.read()
        else:
            img = self.cap.read()
        if self.flip:
            img = cv2.flip(img, 0)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Detect on a half-resolution image for speed; detection coords are
        # scaled back up by 2 when drawn below.
        gray = resize(gray, int(self.frame_width / 2),
                      int(self.frame_height / 2))

        self.isRecording = self.out.isOpened()

        objects = self.classifier.detectMultiScale(gray,
                                                   scaleFactor=1.3,
                                                   minNeighbors=5,
                                                   minSize=(30, 30))

        # Merge overlapping detections before drawing.
        rects = np.array([[x, y, x + w, y + h]
                          for (x, y, w, h) in objects])
        filtered = non_max_suppression(rects, overlapThresh=0.65)
        for (x1, y1, x2, y2) in filtered:
            cv2.rectangle(img, (x1 * 2, y1 * 2), (x2 * 2, y2 * 2),
                          (0, 255, 0), 1)

            if not self.out.isOpened() and self.canRecord:
                print('starting recording')
                self.startRecordingTime = GetMilliSecs(time.time())
                self.pauseRecording = 0
                filename = self.config['video_folder'] + \
                    GetTimeStamp(time) + '.mp4'
                # (the return value of open() was stored in an unused
                # local in the original; dropped)
                self.out.open(filename,
                              self.fourcc,
                              20,
                              (self.frame_width, self.frame_height))
        if self.out.isOpened():
            self.out.write(img)
            # Stop once the clip reaches the configured maximum length.
            if (self.startRecordingTime +
                    self.maxRecordingTime) < GetMilliSecs(time.time()):
                self.out.release()
                self.canRecord = False
                self.pauseRecording = GetMilliSecs(time.time())
                print('Stopped recording')

        # Re-arm recording after the configured cool-down period.
        if not self.canRecord and (self.pauseRecording +
                                   self.config['time_in_between'] <
                                   GetMilliSecs(time.time())):
            self.canRecord = True
        self.currentFrame = img

    def get_frame(self):
        """Return the most recent frame as JPEG bytes, grabbing one
        directly from the camera if recording() has not run yet."""
        if self.currentFrame is None:
            if isLoadPiCam is False:
                _, self.currentFrame = self.cap.read()
            else:
                self.currentFrame = self.cap.read()
        _, jpeg = cv2.imencode('.jpg', self.currentFrame)
        return jpeg.tobytes()

    def __del__(self):
        # Best-effort cleanup; __del__ may run during interpreter shutdown
        # when modules are already torn down, so errors are suppressed.
        # BUG FIX: the PiVideoStream branch previously called release()
        # (which it lacks), so its capture thread was never stopped.
        try:
            if isLoadPiCam:
                self.cap.stop()
            else:
                self.cap.release()
            cv2.destroyAllWindows()
        except Exception:
            pass