Example #1
def detect_motion(frameCount):
    global vs, outputFrame, lock
    # motion detection class using an accumulated weight
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    while True:
        frame = vs.read()
        frame = imutils.resize(frame, width=400)

        # convert to grayscale, which is easier to process
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # apply a Gaussian (linear) filter for fast, edge-based motion detection
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # draw the timestamp in the bottom-left corner of the frame
        timestamp = datetime.datetime.now()
        cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                    (0, 0, 255), 1)

        if total > frameCount:
            motion = md.detect(gray)

            if motion is not None:
                (thresh, (minX, minY, maxX, maxY)) = motion
                cv2.rectangle(frame, (minX, minY), (maxX, maxY), (0, 0, 255),
                              2)

        md.update(gray)
        total += 1

        with lock:
            outputFrame = frame.copy()
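All of these examples rely on a SingleMotionDetector helper that keeps a weighted running average of the background and, on detect(), returns the thresholded difference image plus a bounding box around any changed region. The class itself is not shown in the listings; below is a minimal sketch of what such a helper typically looks like. The tVal default and the erode/dilate cleanup are assumptions for illustration, not taken from these examples.

# hypothetical sketch of the SingleMotionDetector helper assumed by the examples
import cv2
import imutils
import numpy as np


class SingleMotionDetector:
    def __init__(self, accumWeight=0.5):
        # weight given to new frames when updating the background model
        self.accumWeight = accumWeight
        self.bg = None

    def update(self, image):
        # seed the background model with the first frame,
        # then blend subsequent frames into it
        if self.bg is None:
            self.bg = image.copy().astype("float")
            return
        cv2.accumulateWeighted(image, self.bg, self.accumWeight)

    def detect(self, image, tVal=25):
        # difference the current frame against the background and threshold it
        delta = cv2.absdiff(self.bg.astype("uint8"), image)
        thresh = cv2.threshold(delta, tVal, 255, cv2.THRESH_BINARY)[1]

        # clean up small blobs left by sensor noise
        thresh = cv2.erode(thresh, None, iterations=2)
        thresh = cv2.dilate(thresh, None, iterations=2)

        # find contours of the remaining motion regions
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        if len(cnts) == 0:
            return None

        # merge all contours into a single bounding box
        (minX, minY) = (np.inf, np.inf)
        (maxX, maxY) = (-np.inf, -np.inf)
        for c in cnts:
            (x, y, w, h) = cv2.boundingRect(c)
            (minX, minY) = (min(minX, x), min(minY, y))
            (maxX, maxY) = (max(maxX, x), max(maxY, y))
        return (thresh, (minX, minY, maxX, maxY))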
Example #2
def detect_motion(frameCount):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock

    # initialize the motion detector and the total number of frames
    # read thus far
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0
    try:
        # loop over frames from the video stream
        while True:
            # read the next frame from the video stream,
            # convert it to grayscale, and blur it
            start = time.time()
            frame = vs.read()

            # frame = imutils.resize(frame, width=400)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.GaussianBlur(gray, (7, 7), 0)

            # grab the current timestamp and draw it on the frame
            timestamp = datetime.datetime.now()
            cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"),
                        (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
                        0.35, (0, 0, 255), 1)

            # if the total number of frames has reached a sufficient
            # number to construct a reasonable background model, then
            # continue to process the frame
            if total > frameCount:
                # detect motion in the image
                motion = md.detect(gray)

                # check to see if motion was found in the frame
                if motion is not None:
                    # unpack the tuple and draw the box surrounding the
                    # "motion area" on the output frame
                    (thresh, (minX, minY, maxX, maxY)) = motion
                    cv2.rectangle(frame, (minX, minY), (maxX, maxY),
                                  (0, 0, 255), 2)

            # update the background model and increment the total number
            # of frames read thus far
            md.update(gray)
            total += 1

            fps = (1 / (time.time() - start))
            info = {'FPS': fps}
            add_text_overlays(frame, **info)
            writer.write(frame)
            # acquire the lock, set the output frame, and release the
            # lock
            with lock:
                outputFrame = frame.copy()

    except KeyboardInterrupt:
        # quit
        print('this is a leave message')
        sys.exit()
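The global vs, outputFrame, and lock variables imply that detect_motion runs in a background thread while a separate web handler reads outputFrame and streams it to clients. A minimal sketch of that wiring, assuming a Flask app and the detect_motion definition from one of the examples above; the route name, port, and frameCount value of 32 are placeholders, not taken from these listings.

# hypothetical wiring: background detector thread + MJPEG streaming route
import threading
import cv2
from flask import Flask, Response
from imutils.video import VideoStream

app = Flask(__name__)
outputFrame = None
lock = threading.Lock()
vs = VideoStream(src=0).start()


def generate():
    # encode the most recent frame as JPEG and yield it as an MJPEG part
    global outputFrame, lock
    while True:
        with lock:
            if outputFrame is None:
                continue
            (flag, encoded) = cv2.imencode(".jpg", outputFrame)
            if not flag:
                continue
        yield (b"--frame\r\n"
               b"Content-Type: image/jpeg\r\n\r\n" + bytearray(encoded) + b"\r\n")


@app.route("/video_feed")
def video_feed():
    return Response(generate(),
                    mimetype="multipart/x-mixed-replace; boundary=frame")


if __name__ == "__main__":
    # detect_motion as defined in one of the examples above
    t = threading.Thread(target=detect_motion, args=(32,), daemon=True)
    t.start()
    app.run(host="0.0.0.0", port=8000, threaded=True, use_reloader=False)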
Example #3
def detect_motion(frameCount):
    global vs, outputFrame, lock

    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    while True:
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        timestamp = datetime.datetime.now()
        cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                    (0, 0, 255), 1)

        if total > frameCount:
            motion = md.detect(gray)

            if motion is not None:
                (thresh, (minX, minY, maxX, maxY)) = motion
                cv2.rectangle(frame, (minX, minY), (maxX, maxY), (0, 0, 255),
                              2)

        md.update(gray)
        total += 1

        with lock:
            outputFrame = frame.copy()
Example #4
def detect_motion(frameCount):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock, reportMode

    # initialize the motion detector and the total number of frames
    # read thus far
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    # loop over frames from the video stream
    while True:
        # read the next frame from the video stream, resize it,
        # convert the frame to grayscale, and blur it
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # grab the current timestamp and draw it on the frame
        timestamp = datetime.datetime.now()
        cv2.putText(frame, timestamp.strftime(
            "%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10),
            cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

        # if the total number of frames has reached a sufficient
        # number to construct a reasonable background model, then
        # continue to process the frame
        if total > frameCount:
            # detect motion in the image
            motion = md.detect(gray)

            # check to see if motion was found in the frame
            if motion is not None:
                # unpack the tuple and draw the box surrounding the
                # "motion area" on the output frame
                if reportMode:
                    data = {
                        'deviceId': 'Camera_CM1',
                        'title': 'Cảnh báo bất thường',  # "Unusual activity warning"
                        'body': 'Có người đột nhập',  # "Intruder detected"
                    }
                    res = requests.post(
                        'http://192.168.2.101:4000/api/v1/messaging/send-message', json=data)
                    reportMode = False

                (thresh, (minX, minY, maxX, maxY)) = motion
                cv2.rectangle(frame, (minX, minY), (maxX, maxY),
                              (0, 0, 255), 2)

        # update the background model and increment the total number
        # of frames read thus far
        md.update(gray)
        total += 1

        # acquire the lock, set the output frame, and release the
        # lock
        with lock:
            outputFrame = frame.copy()
Example #5
def detect_motion(frameCount):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock, cameraAction

    # initialize the motion detector and the total number of frames
    # read thus far
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    # loop over frames from the video stream
    while True:
        # read the next frame from the video stream, resize it,
        # convert the frame to grayscale, and blur it
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # if the total number of frames has reached a sufficient
        # number to construct a reasonable background model, then
        # continue to process the frame
        if total > frameCount:
            # detect motion in the image
            motion = md.detect(gray)
            # check to see if motion was found in the frame
            if motion is not None:
                # unpack the tuple and draw the box surrounding the
                # "motion area" on the output frame
                (thresh, (minX, minY, maxX, maxY)) = motion
                if cameraAction == 5:
                    cv2.rectangle(frame, (minX, minY), (maxX, maxY),
                                  (0, 0, 255), 2)

        # update the background model and increment the total number
        # of frames read thus far
        md.update(gray)
        total += 1

        #if cameraAction == 0: do nothing

        if cameraAction == 1:  #rotate left
            frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)

        elif cameraAction == 2:  #rotate right
            frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)

        elif cameraAction == 3:  #flip
            frame = cv2.rotate(frame, cv2.ROTATE_180)

        elif cameraAction == 4:  #convert to greyscale
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # acquire the lock, set the output frame, and release the
        # lock

        with lock:
            outputFrame = frame.copy()
Example #6
def detect_motion(frameCount, prototxt, model):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock

    # initialize the motion detector and the total number of frames
    # read thus far
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    # loop over frames from the video stream
    while True:
        # read the next frame from the video stream, resize it,
        # convert the frame to grayscale, and blur it
        #		frame = vs.read()
        #		frame = imutils.resize(frame, width=720)
        #		gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        #		gray = cv2.GaussianBlur(gray, (7, 7), 0)
        #
        #		# grab the current timestamp and draw it on the frame
        #		timestamp = datetime.datetime.now()
        #		cv2.putText(frame, timestamp.strftime(
        #			"%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10),
        #			cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

        # if the total number of frames has reached a sufficient
        # number to construct a reasonable background model, then
        # continue to process the frame
        frame = server.main(prototxt, model)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # grab the current timestamp and draw it on the frame
        timestamp = datetime.datetime.now()
        cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                    (0, 0, 255), 1)
        if total > frameCount:
            # detect motion in the image
            motion = md.detect(gray)

            # check to see if motion was found in the frame
            if motion is not None:
                # unpack the tuple and draw the box surrounding the
                # "motion area" on the output frame
                (thresh, (minX, minY, maxX, maxY)) = motion
                cv2.rectangle(frame, (minX, minY), (maxX, maxY), (0, 0, 255),
                              2)

        # update the background model and increment the total number
        # of frames read thus far
        md.update(gray)
        total += 1

        # acquire the lock, set the output frame, and release the
        # lock
        with lock:
            outputFrame = frame.copy()
Example #7
def detect_motion(frameCount):
    # grab global references to the video stream, output frame, and lock variables
    global vs, outputFrame, lock, out

    # initialize the motion detector and the total number of frames, read thus far
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    # loop over frames from the video stream
    while True:
        # read the next frame from the video stream, resize it, convert the frame to grayscale, and blur it
        frame = vs.read()

        # write stream to video clip and save to local drive:
        out.write(frame)

        # open a window to show what the client will see:
        #cv2.imshow('(with menu)', frame)
        #cv2.waitKey(1)
        #out.write(frame)

        frame = imutils.resize(frame, width=400)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # grab the current timestamp and draw it on the frame
        timestamp = datetime.datetime.now()
        cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                    (0, 0, 255), 1)

        # if the total number of frames has reached a sufficient number to construct a reasonable background model,
        # then continue to process the frame
        if total > frameCount:
            # detect motion in the image
            motion = md.detect(gray)

            # check to see if motion was found in the frame
            if motion is not None:
                # unpack the tuple and draw the box surrounding the "motion area" on the output frame
                (thresh, (minX, minY, maxX, maxY)) = motion
                cv2.rectangle(frame, (minX, minY), (maxX, maxY), (0, 0, 255),
                              2)

        # update the background model and increment the total number of frames read thus far
        md.update(gray)
        total += 1

        # acquire the lock, set the output frame, and release the lock
        with lock:
            outputFrame = frame.copy()
Example #8
def detect_motion(frameCount):

    global outputFrame, lock

    # initialize the motion detector and the total number of frames
    # read thus far
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    # loop over frames from the video stream
    while True:
        rpi_name, frame = image_hub.recv_image()
        frame = imutils.resize(frame, width=400)
        # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # # grab the current timestamp and draw it on the frame
        # timestamp = datetime.datetime.now()
        # cv2.putText(frame, timestamp.strftime(
        # 	"%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10),
        # 	cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

        # # if the total number of frames has reached a sufficient
        # # number to construct a reasonable background model, then
        # # continue to process the frame
        # if total > frameCount:
        # 	# detect motion in the image
        # 	motion = md.detect(gray)

        # 	# cehck to see if motion was found in the frame
        # 	if motion is not None:
        # 		# unpack the tuple and draw the box surrounding the
        # 		# "motion area" on the output frame
        # 		(thresh, (minX, minY, maxX, maxY)) = motion
        # 		cv2.rectangle(frame, (minX, minY), (maxX, maxY),
        # 			(0, 0, 255), 2)

        # # update the background model and increment the total number
        # # of frames read thus far
        # md.update(gray)
        # total += 1

        # acquire the lock, set the output frame, and release the
        # lock
        with lock:
            outputFrame = frame.copy()
            image_hub.send_reply(b'OK')
Example #9
def detect_motion(frameCount):
	# grab global references to the video stream, output frame, and
	# lock variables
	global vs, outputFrame, lock

	# initialize the motion detector and the total number of frames
	# read thus far
	md = SingleMotionDetector(accumWeight=0.1)
	total = 0

	# loop over frames from the video stream
	while True:
		# read the next frame from the video stream, resize it,
		# and apply the filter effect
		frame = vs.read()
		frame = imutils.resize(frame, width=400)
		
		effect1.applyFilter(frame)
		total += 1

		# acquire the lock, set the output frame, and release the
		# lock
		with lock:
			outputFrame = frame.copy()
Example #10
        def detect_motion(frameCount):
            conn = sqlite3.connect('data/database.db')
            conn.row_factory = dict_factory
            curr = conn.cursor()
            statusReset = "No"

            curr.execute("UPDATE names SET status = 'No';")
            conn.commit()
            frame_number = 0
            global cap, outputFrame, lock, data

            md = SingleMotionDetector(accumWeight=0.1)
            total = 0
            print("[INFO] loading encodings + face detector...")
            detector = cv2.CascadeClassifier(args["cascade"])

            fps = FPS().start()

            while True:

                frame = vs.read()
                frame = imutils.resize(frame, width=500)

                gray = cv2.cvtColor(frame,
                                    cv2.COLOR_BGR2GRAY)  # face detection
                rgb = cv2.cvtColor(frame,
                                   cv2.COLOR_BGR2RGB)  # face recognition

                # detect faces in the grayscale frame
                rects = detector.detectMultiScale(
                    gray,
                    scaleFactor=1.1,
                    minNeighbors=5,
                    minSize=(30, 30),
                    flags=cv2.CASCADE_SCALE_IMAGE)

                # reorder the boxes from (x, y, w, h) to (top, right, bottom, left)
                boxes = [(y, x + w, y + h, x) for (x, y, w, h) in rects]

                # compute the facial embedding for each detected face
                encodings = face_recognition.face_encodings(rgb, boxes)
                names = []

                for (top, right, bottom,
                     left), face_encoding in zip(boxes, encodings):
                    data = pickle.loads(open(args["encodings"], "rb").read())

                    matches = face_recognition.compare_faces(data["encodings"],
                                                             face_encoding,
                                                             tolerance=0.4)

                    face_distances = face_recognition.face_distance(
                        data["encodings"], face_encoding)
                    best_match_index = np.argmin(face_distances)

                    if matches[best_match_index]:
                        name = data["names"][best_match_index]
                        status = True

                        namaa = str(name)

                        check_if_data_exist = "SELECT * from names WHERE nameA='" + str(
                            name) + "';"
                        result = curr.execute(check_if_data_exist).fetchall()

                        if result[0]['status'] == "No":

                            name = result[0]['nameA']
                            position = result[0]['positionA']
                            dateTime = datetime.now()
                            dateSt = str(datetime.now())

                            status = "Found"

                            data = {
                                "id": result[0]['id'],
                                "nameA": name,
                                "positionA": position,
                                "status": status,
                                "dateTime": dateSt
                            }

                            update_sql = '''UPDATE names SET status = ?, dateTime = ? WHERE nameA = ? '''
                            curr.execute(update_sql, (status, dateSt, name))
                            conn.commit()
                            dateSt = str(datetime.now())

                        check_if_data_exist = "SELECT * from names WHERE nameA='" + str(
                            name) + "';"
                        result = curr.execute(check_if_data_exist).fetchall()

                        if result[0]['status'] == "Found":

                            name = result[0]['nameA']
                            position = result[0]['positionA']
                            dateTime = datetime.now()
                            dateSt = str(datetime.now())

                            status = "Found"

                            update_sql = '''UPDATE names SET status = ?, dateTime = ? WHERE nameA = ? '''
                            curr.execute(update_sql, (status, dateSt, name))
                            conn.commit()
                            dateSt = str(datetime.now())

                            check_if_data_exist = "SELECT id from names WHERE nameA='" + name + "';"
                            result = curr.execute(
                                check_if_data_exist).fetchall()

                            data = {
                                "id": result[0]['id'],
                                "nameA": name,
                                "positionA": position,
                                "status": status,
                                "dateTime": dateSt
                            }

                        cv2.rectangle(frame, (left, top), (right, bottom),
                                      (255, 0, 0), 2)
                        font = cv2.FONT_HERSHEY_DUPLEX
                        cv2.putText(frame, name, (left + 6, bottom - 6), font,
                                    0.5, (255, 255, 255), 1)
                        face_image = frame[top:bottom, left:right]
                        cv2.imwrite("person_found/" + name + '_Face.jpg',
                                    face_image)

                    else:

                        cv2.rectangle(frame, (left, top), (right, bottom),
                                      (0, 0, 255), 2)
                        font = cv2.FONT_HERSHEY_DUPLEX
                        cv2.putText(frame, "Unknown", (left + 6, bottom - 6),
                                    font, 0.5, (255, 255, 255), 1)

                timestamp = Hari.datetime.now()
                cv2.putText(frame,
                            timestamp.strftime("%A %d %B %Y %I:%M:%S%p"),
                            (10, frame.shape[0] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

                if total > frameCount:
                    motion = md.detect(gray)
                    if motion is not None:
                        (thresh, (minX, minY, maxX, maxY)) = motion

                md.update(gray)
                total += 1

                with lock:
                    outputFrame = frame.copy()
Example #11
def detect_motion(frameCount):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock
    # initialize the motion detector and the total number of frames
    # read thus far
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0
    lineSpace = 30
    # loop over frames from the video stream
    while True:
        start_time = time.time()
        # read the next frame from the video stream, resize it,
        # convert the frame to grayscale, and blur it
        frame = vs.read()
        frame = imutils.resize(frame, height=videoWidth, width=videoHeight)
        #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        #gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # grab the current timestamp and draw it on the frame
        timestamp = datetime.datetime.now()
        #cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10),cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
        #Framerate
        cv2.putText(frame, str(int(1.0 / (time.time() - start_time))),
                    (screenMargin, frame.shape[0] - lineSpace),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 255, 0), 1)
        #Resolution
        cv2.putText(frame, str(frame.shape[0]),
                    (frame.shape[1] - 80, frame.shape[0] - lineSpace),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 255, 0), 1)
        cv2.putText(frame, "x",
                    (frame.shape[1] - 65, frame.shape[0] - lineSpace),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 255, 0), 1)
        cv2.putText(frame, str(frame.shape[1]),
                    (frame.shape[1] - 50, frame.shape[0] - lineSpace),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 255, 0), 1)
        drawYPRH(frame, screenMargin, 2 * lineSpace)
        #cv2.putText(frame, str(yprh[3]), (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (onScreenColorR, onScreenColorG, onScreenColorB), 1)

        drawCrosshair(frame, frame.shape[1], frame.shape[0])
        #drawCompass(frame, frame.shape[1], frame.shape[0], int(round(float(yprh[3]))))
        drawCompass(frame, frame.shape[1], frame.shape[0],
                    int(round(float(yprh[3]))))
        # if the total number of frames has reached a sufficient
        # number to construct a reasonable background model, then
        # continue to process the frame
        #if total > frameCount:
        # detect motion in the image
        #	motion = md.detect(gray)

        # check to see if motion was found in the frame
        #	if motion is not None:
        # unpack the tuple and draw the box surrounding the
        # "motion area" on the output frame
        #		(thresh, (minX, minY, maxX, maxY)) = motion
        #		cv2.rectangle(frame, (minX, minY), (maxX, maxY),
        #			(0, 0, 255), 2)

        # update the background model and increment the total number
        # of frames read thus far
        #md.update(gray)
        total += 1

        # acquire the lock, set the output frame, and release the
        # lock
        with lock:
            outputFrame = frame.copy()
Example #12
def detect_motion(frameCount):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock

    # initialize the motion detector and the total number of frames
    # read thus far
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    # loop over frames from the video stream
    while True:
        # read the next frame from the video stream, resize it,
        # convert the frame to grayscale, and blur it
        frame = vs.read()
        time.sleep(0.05)
        if frame is None:
            source = application.config['SOURCE']
            application.logger.info(f"Reconnecting to: {source}")
            vs2 = VideoStream(src=source).start()
            time.sleep(0.25)
            application.logger.info(f"Trying to read a frame from {source}...")
            frame = vs2.read()
            # still nothing, so fall back to the file source
            if frame is None:
                file = application.config['FALLBACK_SOURCE']
                application.logger.warn(
                    f"Unable to connect to video source {source}! Falling back to streaming: {file}"
                )
                vs = FileVideoStream(file).start()
                time.sleep(0.25)
                application.logger.info(
                    f"Trying to read a frame from {file}...")
                frame = vs.read()
            else:
                vs = vs2

            continue
        frame = imutils.resize(frame, width=400)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # grab the current timestamp and draw it on the frame
        timestamp = datetime.datetime.now()
        cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                    (0, 0, 255), 1)

        # if the total number of frames has reached a sufficient
        # number to construct a reasonable background model, then
        # continue to process the frame
        if total > frameCount:
            # detect motion in the image
            motion = md.detect(gray)

            # check to see if motion was found in the frame
            if motion is not None:
                # unpack the tuple and draw the box surrounding the
                # "motion area" on the output frame
                (thresh, (minX, minY, maxX, maxY)) = motion
                cv2.rectangle(frame, (minX, minY), (maxX, maxY), (0, 0, 255),
                              2)

        # update the background model and increment the total number
        # of frames read thus far
        md.update(gray)
        total += 1

        # acquire the lock, set the output frame, and release the
        # lock
        with lock:
            outputFrame = frame.copy()
Example #13
def detect_motion(frameCount):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock

    # initialize the motion detector and the total number of frames
    # read this far
    md = SingleMotionDetector(accumWeight=0.7)
    total = 0
    img_count = 0

    # loop over frames from the video stream
    while True:
        # read the next frame from the video stream, resize it,
        # convert the frame to grayscale, and blur it
        # time str in one row
        T = datetime.datetime.now()
        print(' * ', T, end='\r', flush=True)
        frame = vs.read()
        frame = imutils.resize(frame, width=720)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 1)

        # grab the current timestamp and draw it on the frame
        timestamp = datetime.datetime.now()
        cv2.putText(frame, timestamp.strftime(
            "%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, (255, 255, 0), 1)

        # if the total number of frames has reached a sufficient
        # number to construct a reasonable background model, then
        # continue to process the frame
        if total > frameCount:
            # detect motion in the image
            motion = md.detect(gray)

            # check to see if motion was found
            if motion is not None:
                # unpack the tuple and draw the box surrounding the
                # "motion area" on the output frame
                (thresh, (minX, minY, maxX, maxY)) = motion
                cv2.rectangle(frame, (minX, minY), (maxX, maxY),
                              (255, 255, 0), 1)
                cv2.putText(frame, str('ALARM'), (30, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 0), 2)


            elif (keyboard.is_pressed('CTRL+s')):
                # save a snapshot when Ctrl+S is pressed
                img_name = "static/opencv_alarm_{}.jpg".format(img_count)
                cv2.imwrite(filename=img_name, img=frame)
                print("Motion Detected -  Saved ")
                img_new = cv2.imread('static/opencv_alarm_{}.jpg'.format(img_count))
                cv2.imshow('Alarm Image', img_new)
                cv2.waitKey(500)
                img_count += 1
                if img_count == 5:
                    img_count = 0
                cv2.destroyAllWindows()

        # update the background model and increment the total number
        # of frames read this far
        md.update(gray)
        total += 1

        # acquire the lock, set the output frame, and release the
        # lock
        with lock:
            outputFrame = frame.copy()
Example #14
def compute_frames():
    global vs, outputFrame, output_frame_lock
    global motion_active, motion_lock
    global face_detection_active, face_detection_lock
    global firebase_conf, firebase_utils, ts

    motionCounter = 0
    # initialize the motion detector and the total number of frames
    # read thus far
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0
    lastUploaded = datetime.datetime.now()

    # loop over frames from the video file stream
    while True:

        # grab the frame from the threaded video stream and resize it
        # to 500px (to speedup processing)
        frame = vs.read()
        frame = imutils.resize(frame, width=500)

        # FACE DETECTION FRAMES
        # convert the input frame from (1) BGR to grayscale (for face
        # detection) and (2) from BGR to RGB (for face recognition)
        gray_face = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rgb_face = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # MOTION DETECTION FRAME
        gray_motion = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray_motion = cv2.GaussianBlur(gray_motion, (7, 7), 0)

        # grab the current timestamp and draw it on the frame
        timestamp = datetime.datetime.now()
        ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
        cv2.putText(frame, ts, (10, frame.shape[0] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

        ## MOTION DETECTION STARTS
        with motion_lock:
            motion_status = motion_active
        if motion_status == True:
            # grab the current timestamp and draw it on the frame
            timestamp = datetime.datetime.now()
            text = "Unoccupied"
            ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
            cv2.putText(frame, ts, (10, frame.shape[0] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

            # if the total number of frames has reached a sufficient
            # number to construct a reasonable background model, then
            # continue to process the frame
            if total > conf["frame_count"]:
                # detect motion in the image
                motion = md.detect(gray_motion, conf)

                # check to see if motion was found in the frame
                if motion is None:
                    text = "Unoccupied"
                    cv2.putText(frame, "Room Status: {}".format(text),
                                (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                (0, 0, 255), 2)
                    motionCounter = 0
                else:
                    text = "Occupied"
                    # unpack the tuple and draw the box surrounding the
                    # "motion area" on the output frame
                    (thresh, (minX, minY, maxX, maxY)) = motion
                    cv2.rectangle(frame, (minX, minY), (maxX, maxY),
                                  (0, 0, 255), 2)
                    cv2.putText(frame, "Room Status: {}".format(text),
                                (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                (0, 0, 255), 2)
                    # Upload photo
                    # check to see if enough time has passed between uploads
                    if (timestamp - lastUploaded
                        ).seconds >= conf["min_upload_seconds"]:
                        # increment the motion counter
                        motionCounter += 1
                    # check to see if the number of frames with consistent motion is
                    # high enough
                    if motionCounter >= conf["min_motion_frames"]:
                        # check to see if firebase sohuld be used
                        if firebase_conf["use_firebase"] == False:
                            firebase_conf = json.load(
                                "pyconf/firebase_conf.json")
                        if firebase_conf["use_firebase"]:
                            firebase_utils.upload(frame, ts)
                        # update the last uploaded timestamp and reset the motion
                        # counter
                        lastUploaded = timestamp
                        motionCounter = 0

        # update the background model and increment the total number
        # of frames read thus far
        md.update(gray_motion)
        total += 1
        ## MOTION DETECTION ENDS

        ## FACE DETECTION STARTS
        with face_detection_lock:
            face_detection_status = face_detection_active
        if face_detection_status == True:
            # put room status text
            cv2.putText(frame, "Face detection active", (10, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
            # detect faces in the grayscale frame
            rects = detector.detectMultiScale(gray_face,
                                              scaleFactor=1.1,
                                              minNeighbors=5,
                                              minSize=(30, 30),
                                              flags=cv2.CASCADE_SCALE_IMAGE)

            # OpenCV returns bounding box coordinates in (x, y, w, h) order
            # but we need them in (top, right, bottom, left) order, so we
            # need to do a bit of reordering
            boxes = [(y, x + w, y + h, x) for (x, y, w, h) in rects]

            # compute the facial embeddings for each face bounding box
            encodings = face_recognition.face_encodings(rgb_face, boxes)
            names = []
            known_face = False

            # loop over the facial embeddings
            for encoding in encodings:
                # attempt to match each face in the input image to our known
                # encodings
                matches = face_recognition.compare_faces(
                    data["encodings"], encoding)
                name = "Unknown"

                # check to see if we have found a match
                if True in matches:
                    known_face = True
                    # find the indexes of all matched faces then initialize a
                    # dictionary to count the total number of times each face
                    # was matched
                    matchedIdxs = [i for (i, b) in enumerate(matches) if b]
                    counts = {}

                    # loop over the matched indexes and maintain a count for
                    # each recognized face
                    for i in matchedIdxs:
                        name = data["names"][i]
                        counts[name] = counts.get(name, 0) + 1

                    # determine the recognized face with the largest number
                    # of votes (note: in the event of an unlikely tie Python
                    # will select first entry in the dictionary)
                    name = max(counts, key=counts.get)
                # check if there is a known face in the frame
                # update the list of names
                names.append(name)

            # loop over the recognized faces
            for ((top, right, bottom, left), name) in zip(boxes, names):
                # draw the predicted face name on the image
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0),
                              2)
                y = top - 15 if top - 15 > 15 else top + 15
                cv2.putText(frame, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX,
                            0.75, (0, 255, 0), 2)
            # upload photo
            if (timestamp -
                    lastUploaded).seconds >= conf["min_upload_seconds"]:
                # increment the motion counter
                motionCounter += 1
            if firebase_conf["use_firebase"] == False:
                # check whether the use_firebase setting has changed
                with open("pyconf/firebase_conf.json") as f:
                    firebase_conf = json.load(f)
            if motionCounter >= conf["min_motion_frames"] and firebase_conf[
                    "use_firebase"] and boxes and not known_face:
                # Upload image
                firebase_utils.upload(frame, ts)
                # Reset motion counter
                motionCounter = 0
            # Reset motion counter if no face detected
            if not boxes:
                motionCounter = 0
        ## FACE DETECTION ENDS

        # STREAM OUTPUT
        with output_frame_lock:
            outputFrame = frame.copy()
Example #15
def detect_motion(frameCount):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock

    # initialize the motion detector and the total number of frames
    # read thus far
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    # initialize KeyClipWriter, set counter for frames with no motion detected
    kcw = KeyClipWriter()
    consecFramesNoMotion = 0

    # loop over frames from the video stream
    while True:
        timestamp = datetime.datetime.now()
        text = "Unoccupied"
        # read the next frame from the video stream, resize it,
        # convert the frame to grayscale, and blur it
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # if the total number of frames has reached a sufficient
        # number to construct a reasonable background model, then
        # continue to process the frame
        if total > frameCount:
            # detect motion in the image
            motion = md.detect(gray)

            # check to see if motion was found in the frame
            if motion is not None:
                # unpack the tuple and draw the box surrounding the
                # "motion area" on the output frame
                (thresh, (minX, minY, maxX, maxY)) = motion
                cv2.rectangle(frame, (minX, minY), (maxX, maxY), (0, 0, 255),
                              2)
                text = "Occupied"

                # send email to notify user of motion
                # send_email(timestamp)

                # motion has occurred, so reset the no-motion frame counter
                consecFramesNoMotion = 0
            else:
                consecFramesNoMotion += 1

            record_video(kcw, frame, motion, consecFramesNoMotion, timestamp)

        # grab the current timestamp and draw it on the frame
        cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                    (0, 0, 255), 1)
        cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

        # update the background model and increment the total number
        # of frames read thus far
        md.update(gray)
        total += 1

        # acquire the lock, set the output frame, and release the
        # lock
        with lock:
            outputFrame = frame.copy()
Example #16
def detect_hand(frameCount):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock

    # initialize the motion detector and the total number of frames
    # read thus far
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    convert = {0: 'la', 1: 'dam', 2: 'keo'}
    checkpoint = torch.load('MBN_epoch_1_loss_0.10.pth',
                            map_location=torch.device('cpu'))
    # print(checkpoint)
    model = MobileNetV2(num_classes=3)
    model.load_state_dict(checkpoint)
    # print(model)
    model.eval()

    # set robot and man score
    robot = 0
    man = 0

    mapping = [22, 27, 17]
    naming = ['paper', 'rock', 'scissors']

    isEnd = 1

    # 2 = scissors = GPIO17
    # 1 = rock = GPIO27
    # 0 = paper = GPIO22
    # GPIO17 = keo (scissors), GPIO27 = bua (rock), GPIO22 = la (paper)

    # loop over frames from the video stream
    while True:
        # read the next frame from the video stream, resize it,
        # convert the frame to grayscale, and blur it
        # the robot picks a random move
        x = randint(0, 2)
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # grab the current timestamp and draw it on the frame
        timestamp = datetime.datetime.now()
        cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                    (0, 0, 255), 1)

        # if the total number of frames has reached a sufficient
        # number to construct a reasonable background model, then
        # continue to process the frame
        if total > frameCount:

            image1 = preprocess_image(frame)

            output = model(image1)

            _, predicted = torch.max(output.data, 1)
            print(convert[int(predicted)])

            human = int(predicted)

            cv2.putText(frame, convert[int(predicted)],
                        (10, frame.shape[0] - 30), cv2.FONT_HERSHEY_SIMPLEX,
                        0.35, (0, 0, 255), 1)

            if human == 0 and isEnd == 1:  # la (paper)
                isEnd = 0
                runUpServo(mapping[x])
                if x == 0:
                    print('Robot ' + naming[x] + ' x human ' + naming[human] +
                          ' : Draw')
                elif x == 1:
                    print('Robot ' + naming[x] + ' x human ' + naming[human] +
                          ' : Human win')
                    man += 1
                else:
                    print('Robot ' + naming[x] + ' x human ' + naming[human] +
                          ' : Robot win')
                    robot += 1

            elif human == 2 and isEnd == 1:  # keo (scissors)
                isEnd = 0
                runUpServo(mapping[x])
                if x == 0:
                    print('Robot ' + naming[x] + ' x human ' + naming[human] +
                          ' : Human win')
                    man += 1
                elif x == 1:
                    print('Robot ' + naming[x] + ' x human ' + naming[human] +
                          ' : Robot win')
                    robot += 1
                else:
                    print('Robot ' + naming[x] + ' x human ' + naming[human] +
                          ' : Draw')

            elif human == 1:  # reset game
                runDownServo()
                isEnd = 1
                print('Scoreboard Robot vs Human: %d : %d' % (robot, man))

        # update the background model and increment the total number
        # of frames read thus far
        md.update(gray)
        total += 1

        # acquire the lock, set the output frame, and release the
        # lock
        with lock:
            outputFrame = frame.copy()
Example #17
def detect_motion(frameCount):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock

    # initialize the motion detector and the total number of frames
    # read thus far
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    # loop over frames from the video stream
    while True:
        # read the next frame from the video stream, resize it,
        # convert the frame to grayscale, and blur it
        frame = vs.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)
        face_locations = face_recognition.face_locations(frame)

        face_landmarks_list = face_recognition.face_landmarks(
            frame, face_locations, 'large')
        # print(face_landmarks_list[0]['left_eye'])
        red = [0, 0, 255]
        if face_landmarks_list:
            # paint a small red square on each landmark point of the first face
            for feature in ('left_eye', 'right_eye', 'left_eyebrow',
                            'right_eyebrow', 'nose_tip', 'nose_bridge',
                            'top_lip', 'bottom_lip', 'chin'):
                for (x, y) in face_landmarks_list[0][feature]:
                    frame[y - 2:y + 2, x - 2:x + 2] = red
        # grab the current timestamp and draw it on the frame
        timestamp = datetime.datetime.now()
        cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                    (0, 0, 255), 1)

        # if the total number of frames has reached a sufficient
        # number to construct a reasonable background model, then
        # continue to process the frame
        if total > frameCount:
            # detect motion in the image
            motion = md.detect(gray)

            # check to see if motion was found in the frame
            if motion is not None:
                # unpack the tuple and draw the box surrounding the
                # "motion area" on the output frame
                (thresh, (minX, minY, maxX, maxY)) = motion
                cv2.rectangle(frame, (minX, minY), (maxX, maxY), (0, 0, 255),
                              2)

        # update the background model and increment the total number
        # of frames read thus far
        md.update(gray)
        total += 1

        # acquire the lock, set the output frame, and release the
        # lock
        with lock:
            outputFrame = frame.copy()
Example #18
def detect_motion(frameCount):
    frame_number = 0
    global cap, outputFrame, lock

    storePlate = 0  # count frames that have already been captured

    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    while True:
        ret, gmbr = cap.read()
        # cv2.imshow('VIDEO', gmbr)
        gray = cv2.cvtColor(gmbr, cv2.COLOR_BGR2GRAY)

        # noise removal with iterative bilateral filter(remove noise while preserving edges)
        gray = cv2.bilateralFilter(gray, 11, 17, 17)
        # cv2.imshow("2 - Bilateral Filter", gray)

        plates = plate_cascade.detectMultiScale(gray, 1.3, 5)

        # used for resizing the video
        # img = cv2.resize(img, (640,480))

        for (x,y,w,h) in plates:
            cv2.rectangle(gmbr,(x,y),(x+w,y+h),(255,0,0),2)
            plate_gray = gray[y:y+h, x:x+w] # gray plate
            plate_color = gmbr[y:y+h, x:x+w] # colour plate
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(gmbr,'Plate',(x,y), font, 0.5, (11,255,255), 2, cv2.LINE_AA)

            # cv2.imshow('Colour Plate',plate_color)

            # time captured 
            dt = str(datetime.now())
            print(dt)

            # to store the grayscale image as a TEMPORARY file to apply the OCR
            filename = "./captured/temp" + ".png"
            cv2.imwrite(filename, plate_gray)

            # load image, apply OCR
            txt = pytesseract.image_to_string(Image.open(filename), lang = 'eng')
            print(txt)

            cv2.putText(gmbr, txt ,(x,y), font, 1, (11,255,255), 3, cv2.LINE_AA)

            # here -> read the image from the file and send it to Pusher
            for filename in glob.glob('captured/*.png'):
                if ".png" in filename:
                    with open(filename,"rb") as f:
                        data = f.read()
                        # data['img'] = base64.encodebytes(img).decode("utf-8")
                        # image = json.dumps(data)
                        image = base64.b64encode(data)
                        create_database(image=data, txt=txt, masa=dt )

            image = image.decode("utf-8")            
            print(image)

            data = {"image": image, "txt": txt, "masa": dt}
            pusher_client.trigger('Plate4', 'new-record', {'data': data})

        # to display day, date and time on the video
        timestamp = Day.datetime.now()
        cv2.putText(gmbr, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, gmbr.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                    (0, 0, 255), 1)

        md.update(gray)
        total += 1

        with lock:
            outputFrame = gmbr.copy()
Example #19
def detect_motion(frameCount):
    frame_number = 0
    # grab global references to the video stream, output frame, and
    # lock variables
    global cap, outputFrame, lock

    # initialize the motion detector and the total number of frames
    # read thus far
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    # loop over frames from the video stream
    while True:
        # read the next frame from the video stream, resize it,
        # convert the frame to grayscale, and blur it
        # frame = vs.read()
        # frame = imutils.resize(frame, width=400)
        # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # ---------------------------------------------------

        # ret, img = cap.read()
        # small_frame = cv2.resize(img, (0, 0), fx=0.2, fy=0.2) # 1/5

        # rgb_frame = small_frame[:, :, ::-1]

        # face_locations = face_recognition.face_locations(rgb_frame)
        # face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
        # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # for face_encodings in face_encodings:
        # 	match = face_recognition.compare_faces(known_faces, face_encodings, tolerance=0.90)
        # 	name = None

        # 	if match[0]:
        # 		name = "Shafiq"
        # 		cv2.imwrite( str(name) + '_muka.jpg', img)

        # 	elif match[1]:
        # 		name = "Hafizal"
        # 		cv2.imwrite( str(name) + '_muka.jpg', img)

        # 	elif match[2]:
        # 		name = "Dalila"
        # 		cv2.imwrite( str(name) + '_muka.jpg', img)

        # 	elif match[3]:
        # 		name = "Ayu"
        # 		cv2.imwrite( str(name) + '_muka.jpg', img)

        # 	elif match[4]:
        # 		name = "Izzat"
        # 		cv2.imwrite( str(name) + '_muka.jpg', img)

        # 	elif match[5]:
        # 		name = "Tinggy"
        # 		cv2.imwrite( str(name) + '_muka.jpg', img)

        # 	else:
        # 		name = "Unknown"
        # 		cv2.imwrite( str(name) + '_muka.jpg', img)

        # 	face_names.append(name)

        # for (top, right, bottom, left), name in zip(face_locations, face_names):

        # 	top *= 5
        # 	right *= 5
        # 	bottom *= 5
        # 	left *= 5

        # 	# cv2.rectangle(img, (left, top), (right, bottom), (0, 0, 255), 2)
        # 	# cv2.rectangle(img, (left, bottom - 25), (right, bottom), (0, 0, 255), cv2.FILLED)
        # 	font = cv2.FONT_HERSHEY_DUPLEX
        # 	cv2.putText(img, name, (left + 6, bottom - 6), font, 0.5, (255, 255, 255), 1)

        #------------------------------------------------------------------------

        ret, img = cap.read()
        frame_number += 1
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        # rgb_frame = img[:, :, ::-1]

        # face_locations = face_recognition.face_locations(rgb_frame)
        # face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
        cv2.circle(img, (960, 720), 10, (0, 0, 255), 2)

        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)

            face_gray = gray[y:y + h, x:x + w]
            face_color = img[y:y + h, x:x + w]
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(img, 'Face', (x, y), font, 0.5, (11, 255, 255), 2,
                        cv2.LINE_AA)

            eyes = eye_cascade.detectMultiScale(face_gray)
            for (ex, ey, ew, eh) in eyes:
                eyesC = cv2.rectangle(face_color, (ex, ey), (ex + ew, ey + eh),
                                      (0, 255, 0), 2)
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(eyesC, 'Eye', (ex, ey), font, 0.5, (11, 255, 255),
                            2, cv2.LINE_AA)

            smiles = smileCascade.detectMultiScale(face_gray,
                                                   scaleFactor=1.7,
                                                   minNeighbors=3,
                                                   minSize=(15, 15))
            for (ex, ey, ew, eh) in smiles:
                smileC = cv2.rectangle(face_color, (ex, ey),
                                       (ex + ew, ey + eh), (0, 255, 0), 1)
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(smileC, 'Mouth', (ex, ey), font, 0.5,
                            (11, 255, 255), 2, cv2.LINE_AA)

            nose = noseCascade.detectMultiScale(face_gray)
            for (ex, ey, ew, eh) in nose:
                noseC = cv2.rectangle(face_color, (ex, ey), (ex + ew, ey + eh),
                                      (0, 255, 0), 2)
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(noseC, 'Nose', (ex, ey), font, 0.5, (11, 255, 255),
                            2, cv2.LINE_AA)

    #------------------------------------------------------------------------

        timestamp = datetime.datetime.now()
        cv2.putText(img, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, img.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                    (0, 0, 255), 1)

        if total > frameCount:
            motion = md.detect(gray)
            if motion is not None:
                (thresh, (minX, minY, maxX, maxY)) = motion
                cv2.rectangle(img, (minX, minY), (maxX, maxY), (0, 0, 255), 2)

        md.update(gray)
        total += 1

        with lock:
            outputFrame = img.copy()
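The example above assumes that face_cascade, eye_cascade, smileCascade and noseCascade have already been created elsewhere. A minimal sketch of that setup follows; the XML file names are assumptions (OpenCV ships frontal-face, eye and smile cascades under cv2.data.haarcascades, but a nose cascade has to come from a third-party file).

# a minimal sketch of the cascade setup the example assumes; the exact XML
# file names and paths are assumptions, not taken from the original project
import cv2

face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
eye_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_eye.xml")
smileCascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_smile.xml")
# OpenCV's bundled data has no nose cascade; a third-party
# "haarcascade_mcs_nose.xml" is assumed to be available locally
noseCascade = cv2.CascadeClassifier("haarcascade_mcs_nose.xml")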
Example #20
0
def detect_motion(frameCount):
    # grab global references to the video stream, output frame, lock, and
    # encodings data (the loop below reads frames from "vs")
    global vs, outputFrame, lock, data
    frame_number = 0

    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    # load the known face encodings once, rather than re-reading the pickle
    # on every frame
    print("[INFO] loading encodings")
    data = pickle.loads(open(args["encodings"], "rb").read())
    detector = cv2.CascadeClassifier(args["cascade"])

    fps = FPS().start()

    while True:
        frame = vs.read()
        frame = imutils.resize(frame, width=500)

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # face detection
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # face recognition

        rects = detector.detectMultiScale(gray,
                                          scaleFactor=1.1,
                                          minNeighbors=5,
                                          minSize=(30, 30),
                                          flags=cv2.CASCADE_SCALE_IMAGE)

        boxes = [(y, x + w, y + h, x) for (x, y, w, h) in rects]

        encodings = face_recognition.face_encodings(rgb, boxes)
        names = []

        for (top, right, bottom, left), face_encoding in zip(boxes, encodings):

            matches = face_recognition.compare_faces(data["encodings"],
                                                     face_encoding,
                                                     tolerance=0.4)

            face_distances = face_recognition.face_distance(
                data["encodings"], face_encoding)
            best_match_index = np.argmin(face_distances)

            if matches[best_match_index]:
                name = data["names"][best_match_index]
                status = True

                cv2.rectangle(frame, (left, top), (right, bottom), (255, 0, 0),
                              2)
                # cv2.rectangle(img, (left, bottom - 25), (right, bottom), (0, 0, 255), cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(frame, name, (left + 6, bottom - 6), font, 0.5,
                            (255, 255, 255), 1)
                face_image = frame[top:bottom, left:right]
                cv2.imwrite("person_found/" + name + '_Face.jpg', face_image)

            else:

                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255),
                              2)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(frame, "Unknown", (left + 6, bottom - 6), font,
                            0.5, (255, 255, 255), 1)

        timestamp = Hari.datetime.now()
        cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                    (0, 0, 255), 1)

        if total > frameCount:
            motion = md.detect(gray)
            if motion is not None:
                (thresh, (minX, minY, maxX, maxY)) = motion
                # cv2.rectangle(img, (minX, minY), (maxX, maxY),(0, 0, 255), 2)

        md.update(gray)
        total += 1

        with lock:
            outputFrame = frame.copy()
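Example #20 reads a pickle containing "encodings" and "names". A hedged sketch of how such a file is commonly produced with the face_recognition library follows; the dataset layout (dataset/<person>/*.jpg) and the output file name are assumptions, not the project's actual enrolment script.

# a sketch of how an encodings pickle with "encodings"/"names" keys could be
# built; the dataset layout and output path are assumptions
import os
import pickle
import cv2
import face_recognition

known_encodings, known_names = [], []
for name in os.listdir("dataset"):
    for file_name in os.listdir(os.path.join("dataset", name)):
        image = cv2.imread(os.path.join("dataset", name, file_name))
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        boxes = face_recognition.face_locations(rgb)
        for encoding in face_recognition.face_encodings(rgb, boxes):
            known_encodings.append(encoding)
            known_names.append(name)

with open("encodings.pickle", "wb") as f:
    pickle.dump({"encodings": known_encodings, "names": known_names}, f)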
Example #21
0
def detect_motion(frameCount):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock, socketio

    # initialize the motion detector and the total number of frames
    # read thus far
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0
    frameCnt = 0
    word = ""
    sentence = ""
    pred_text = ' '
    old_text = ' '
    dup_time = 0
    none_time = 0
    # loop over frames from the video stream
    while True:
        # read the next frame from the video stream, resize it,
        # convert the frame to grayscale, and blur it
        # grab the next frame; if the capture has failed, re-open the
        # camera and try again
        grabbed, frame = vs.read()
        if not grabbed:
            vs = cv2.VideoCapture(0)
            vs.set(cv2.CAP_PROP_FPS, 30)
            grabbed, frame = vs.read()

        if frame is not None:
            frameCnt = frameCnt + 1
            frame = cv2.flip(frame, 1)
            if frameCnt % 15 == 0:
                pred_text = get_pred_from_contour(frame)
                if pred_text is not None and pred_text != 'NOTHING':
                    if old_text != pred_text:  #and pred_text != 'Thank you!' and pred_text != 'Awesome!':
                        word = word + pred_text
                        old_text = pred_text
                        dup_time = 0
                        none_time = 0
                        socketio.emit('message', 'Symbol :' + pred_text)
                    elif old_text == pred_text:
                        dup_time = dup_time + 1
                        print("dup_time=" + str(dup_time))
                elif pred_text == 'NOTHING':
                    none_time = none_time + 1
                if pred_text == 'NOTHING' and none_time == 2:
                    correctWord = wordMatcher.correction(word)
                    old_text = ' '
                    word = word if correctWord is None else correctWord
                    if len(word.strip()) > 0:
                        socketio.emit('message', 'Word :' + word)
                        sentence = sentence + " " + word
                    word = ""
                if pred_text == 'NOTHING' and none_time > 4:
                    sentence = sentence + " " + word
                    none_time = 0
                    word = ""
                    if len(sentence.strip()) > 0:
                        socketio.emit('pred', sentence)
                    sentence = ""
                frameCnt = 0
            #cv2.rectangle(frame, (x,y), (x+w, y+h), (0,0,0), 1)
            #res = np.hstack((frame, blackboard))
            res = frame
            with lock:
                outputFrame = res.copy()
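Example #21 emits 'message' and 'pred' events on a global socketio object. The sketch below shows one way the surrounding Flask-SocketIO application could be wired up so those events reach a browser client; the app structure, port, and frameCount value are assumptions beyond what the snippet shows.

# a minimal Flask-SocketIO wiring sketch for the socketio.emit(...) calls above;
# the route/port details are assumptions, not the original project's code
from threading import Thread
from flask import Flask
from flask_socketio import SocketIO

app = Flask(__name__)
socketio = SocketIO(app, async_mode="threading")

if __name__ == "__main__":
    # run the frame-processing loop in a background thread so that the
    # server can push 'message'/'pred' events while frames are processed
    t = Thread(target=detect_motion, args=(32,))
    t.daemon = True
    t.start()
    socketio.run(app, host="0.0.0.0", port=8000)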
Example #22
0
ap.add_argument("-o", "--output", required=True, help="path to the output directory")
ap.add_argument("-m", "--min-frames", type=int, default=120,
	help="minimum # of frames containing motion before writing to file")
ap.add_argument("-p", "--picamera", type=int, default=-1,
	help="whether or not the Raspberry Pi camera should be used")
args = vars(ap.parse_args())

# initialize the video stream and allow the camera sensor to warm up
print("[INFO] warming up camera...")
vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
time.sleep(2.0)

# initialize the motion detector, the total number of frames read thus far, the
# number of consecutive frames that have contained motion, and the spatial
# dimensions of the frame
md = SingleMotionDetector(accumWeight=0.1)
total = 0
consec = None
frameShape = None

# loop over frames
while True:
	# read the next frame from the video stream, resize it, convert the frame to
	# grayscale, and blur it
	frame = vs.read()
	frame = imutils.resize(frame, width=400)
	gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
	gray = cv2.GaussianBlur(gray, (7, 7), 0)

	# grab the current timestamp and draw it on the frame
	timestamp = datetime.datetime.now()
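Every example here instantiates SingleMotionDetector(accumWeight=0.1) without showing the class. The sketch below follows the widely used running-weighted-average background approach and is only an approximation of the class these snippets import.

# a sketch of a SingleMotionDetector along the lines these examples assume:
# a weighted running-average background plus frame differencing; details may
# differ from the actual class used here
import cv2
import imutils
import numpy as np

class SingleMotionDetector:
    def __init__(self, accumWeight=0.5):
        self.accumWeight = accumWeight
        self.bg = None

    def update(self, image):
        # initialize the background model on the first frame, otherwise
        # accumulate the new frame into the running average
        if self.bg is None:
            self.bg = image.copy().astype("float")
            return
        cv2.accumulateWeighted(image, self.bg, self.accumWeight)

    def detect(self, image, tVal=25):
        # compute the absolute difference from the background and threshold it
        delta = cv2.absdiff(self.bg.astype("uint8"), image)
        thresh = cv2.threshold(delta, tVal, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.erode(thresh, None, iterations=2)
        thresh = cv2.dilate(thresh, None, iterations=2)

        # find contours and merge them into a single motion bounding box
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        if len(cnts) == 0:
            return None
        (minX, minY) = (np.inf, np.inf)
        (maxX, maxY) = (-np.inf, -np.inf)
        for c in cnts:
            (x, y, w, h) = cv2.boundingRect(c)
            (minX, minY) = (min(minX, x), min(minY, y))
            (maxX, maxY) = (max(maxX, x + w), max(maxY, y + h))
        return (thresh, (minX, minY, maxX, maxY))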
Example #23
0
def detect_motion(frameCount):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock

    # initialize the motion detector and the total number of frames
    # read thus far
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    # loop over frames from the video stream
    while True:
        # read the next frame from the video stream, resize it,
        # convert the frame to grayscale, and blur it
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        height, width, channels = frame.shape

        blob = cv2.dnn.blobFromImage(frame,
                                     0.00392, (416, 416), (0, 0, 0),
                                     True,
                                     crop=False)
        net.setInput(blob)
        outs = net.forward(output_layers)
        # Showing informations on the screen
        class_ids = []
        confidences = []
        boxes = []
        for out in outs:
            for detection in out:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                if confidence > 0.2:
                    # Object detected
                    center_x = int(detection[0] * width)
                    center_y = int(detection[1] * height)
                    w = int(detection[2] * width)
                    h = int(detection[3] * height)

                    # Rectangle coordinates
                    x = int(center_x - w / 2)
                    y = int(center_y - h / 2)

                    boxes.append([x, y, w, h])
                    confidences.append(float(confidence))
                    class_ids.append(class_id)

        indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.4, 0.3)

        # loop over the surviving detections and draw each box and label
        for i in range(len(boxes)):
            if i in indexes:
                x, y, w, h = boxes[i]
                label = str(classes[class_ids[i]])
                confidence = confidences[i]
                color = colors[class_ids[i]]
                cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                cv2.rectangle(frame, (x, y), (x + w, y + 30), color, -1)
                cv2.putText(frame, label + " " + str(round(confidence, 2)),
                            (x, y + 30), font, 3, (255, 255, 255), 3)

        # draw the FPS once per frame (not once per box) and show the frame
        elapsed_time = time.time() - starting_time
        fps = frame_id / elapsed_time
        cv2.putText(frame, "FPS: " + str(round(fps, 2)), (10, 50),
                    font, 3, (0, 0, 0), 3)
        cv2.imshow("Image", frame)
        key = cv2.waitKey(1)
        if key == 27:
            break

        # acquire the lock, set the output frame, and release the
        # lock
        with lock:
            outputFrame = frame.copy()
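Example #23 relies on module-level globals (net, output_layers, classes, colors, font, starting_time, frame_id) that are not shown. A hedged sketch of a typical setup for OpenCV's DNN YOLO loader follows; the weight, config, and class-name file paths are assumptions.

# a sketch of the module-level YOLO setup Example #23 assumes; the file paths
# are assumptions
import time
import cv2
import numpy as np

net = cv2.dnn.readNet("yolov3.weights", "yolov3.cfg")
with open("coco.names") as f:
    classes = [line.strip() for line in f]

layer_names = net.getLayerNames()
output_layers = [layer_names[i - 1]
                 for i in net.getUnconnectedOutLayers().flatten()]
colors = np.random.uniform(0, 255, size=(len(classes), 3))
font = cv2.FONT_HERSHEY_PLAIN

starting_time = time.time()
frame_id = 0  # meant to be incremented once per processed frame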
Example #24
0
def detect_motion(frameCount):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock

    # initialize the motion detector and the total number of frames
    # read thus far
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    # render_template('1.html')
    EYE_AR_THRESH = 0.25
    EYE_AR_CONSEC_FRAMES = 30

    # initialize the frame counter as well as a boolean used to
    # indicate if the alarm is going off
    COUNTER = 0
    ALARM_ON = False

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream thread
    print("[INFO] starting video stream thread...")

    # loop over frames from the video stream
    while True:
        # read the next frame from the video stream, resize it,
        # convert the frame to grayscale, and blur it
        frame = vs.read()
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale frame
        rects = detector(gray, 0)

        # loop over the face detections
        for rect in rects:
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            # extract the left and right eye coordinates, then use the
            # coordinates to compute the eye aspect ratio for both eyes
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)

            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0

            # compute the convex hull for the left and right eye, then
            # visualize each of the eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            # check to see if the eye aspect ratio is below the blink
            # threshold, and if so, increment the blink frame counter
            if ear < EYE_AR_THRESH:
                COUNTER += 1

                # if the eyes were closed for a sufficient number of
                # then sound the alarm
                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    # if the alarm is not on, turn it on
                    if not ALARM_ON:
                        ALARM_ON = True

                        # check to see if an alarm file was supplied,
                        # and if so, start a thread to have the alarm
                        # sound played in the background
                        if 'Alarm.mp3' != "":
                            # pass the callable and its argument to the
                            # thread instead of calling it immediately
                            t = Thread(target=sound_alarm, args=(True,))
                            t.daemon = True
                            t.start()

                    # draw an alarm on the frame
                    cv2.putText(frame, "DROWSINESS ALERT!", (10, 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                else:
                    sound_alarm(False)

            # otherwise, the eye aspect ratio is not below the blink
            # threshold, so reset the counter and alarm
            else:
                COUNTER = 0
                ALARM_ON = False

            # draw the computed eye aspect ratio on the frame to help
            # with debugging and setting the correct eye aspect ratio
            # thresholds and frame counters
            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        # update the background model and increment the total number
        # of frames read thus far
        md.update(gray)
        total += 1

        # acquire the lock, set the output frame, and release the
        # lock
        with lock:
            outputFrame = frame.copy()
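Example #24 calls an eye_aspect_ratio helper that is not shown. The sketch below implements the standard eye-aspect-ratio computation over the six dlib eye landmarks; the sound_alarm helper is omitted because its on/off behaviour is not visible in the snippet.

# a sketch of the eye_aspect_ratio helper Example #24 assumes, using the
# standard EAR formula over dlib's six eye landmarks
from scipy.spatial import distance as dist

def eye_aspect_ratio(eye):
    # vertical distances between the upper and lower eyelid landmarks
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    # horizontal distance between the eye corners
    C = dist.euclidean(eye[0], eye[3])
    # a small ratio indicates a closed eye
    return (A + B) / (2.0 * C)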