Code Example #1
File: picam.py  Project: MrGrayCode/Ant-Bot
    def getColor(self):
        """Sample a few frames and return the dominant color key ('r', 'b',
        'g' or 'y'), or 'x' if no color mask exceeds the pixel threshold."""
        vs = VideoStream(usePiCamera=True).start()
        time.sleep(0.5)
        count = 3
        colors = {'r': 0, 'b': 0, 'g': 0, 'y': 0}
        # sample a few frames; the counts below are overwritten each pass,
        # so effectively only the last frame decides the color
        while count:
            count -= 1
            frame = vs.read()
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            mask_red = cv2.inRange(hsv, self.lower_red, self.upper_red)
            mask_blue = cv2.inRange(hsv, self.lower_blue, self.upper_blue)
            mask_green = cv2.inRange(hsv, self.lower_green, self.upper_green)
            mask_yellow = cv2.inRange(hsv, self.lower_yellow,
                                      self.upper_yellow)

            colors['r'] = cv2.countNonZero(mask_red)
            colors['b'] = cv2.countNonZero(mask_blue)
            colors['g'] = cv2.countNonZero(mask_green)
            colors['y'] = cv2.countNonZero(mask_yellow)

            #res = cv2.bitwise_and(frame,frame,mask = mask_yellow)
            #cv2.imshow("Res",res)
            #cv2.waitKey(10)
        vs.stop()
        print(colors)
        for color in colors:
            if colors[color] > 5000:  # minimum pixel count to call it a match
                return color
        return 'x'
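The HSV threshold attributes (self.lower_red, self.upper_red, and so on) are defined elsewhere in the class and not shown. A minimal sketch of plausible bounds, assuming OpenCV's hue range of 0-179; the exact values here are hypothetical and would need tuning for the robot's lighting:

import numpy as np

# inside __init__: hypothetical HSV bounds (hue 0-179, sat/value 0-255).
# Red wraps around hue 0, so a single low-hue band is only an approximation.
self.lower_red = np.array([0, 120, 70])
self.upper_red = np.array([10, 255, 255])
self.lower_blue = np.array([100, 120, 70])
self.upper_blue = np.array([130, 255, 255])
self.lower_green = np.array([40, 80, 70])
self.upper_green = np.array([80, 255, 255])
self.lower_yellow = np.array([20, 120, 70])
self.upper_yellow = np.array([35, 255, 255])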
Code Example #2
File: picam.py  Project: MrGrayCode/Ant-Bot
    def getArucoID(self):
        """Collect four unique ArUco marker IDs from the camera stream."""
        vs = VideoStream(usePiCamera=True).start()
        time.sleep(0.5)
        ids = []
        while len(ids) < 4:
            ID = 0  # stores the detected ID
            frame = vs.read()
            aruco_list = self.detectAruco(frame)
            if len(aruco_list):
                # take the first detected marker ID
                ID = list(aruco_list.keys())[0]
            # record the ID only if it is valid and not already seen
            if ID > 0 and ID not in ids:
                ids.append(ID)
                #self.IDs.append(bin(ID)[2:]) #store ID in binary format
                print("ID Detected: {}".format(ID))
        vs.stop()
        return ids
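self.detectAruco is not shown in this snippet. A minimal sketch of how it might be implemented with OpenCV's aruco contrib module, returning the {marker_id: corners} dict the caller expects; the dictionary choice (DICT_5X5_50) and the pre-4.7 DetectorParameters_create API are assumptions:

    def detectAruco(self, frame):
        # detect markers on a grayscale copy (dictionary is an assumption)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        aruco_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_5X5_50)
        parameters = cv2.aruco.DetectorParameters_create()
        corners, ids, _ = cv2.aruco.detectMarkers(gray, aruco_dict,
                                                  parameters=parameters)
        # map each detected marker ID to its corner coordinates
        aruco_list = {}
        if ids is not None:
            for marker_id, corner in zip(ids.flatten(), corners):
                aruco_list[int(marker_id)] = corner
        return aruco_list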
Code Example #3
import imutils
from imutils.video.videostream import VideoStream
import cv2
from imutils.video import FPS
import datetime
import time
import numpy as np

# if a video path was not supplied, grab the reference to the webcam
camera = VideoStream(src=0).start()

fps = FPS().start()
current_fps = 0

# keep looping
while True:
    frame = camera.read()

    if fps._numFrames % 2 == 0:
        # frame-rate timing: start of the even/odd frame pair
        start_time = datetime.datetime.now()

    frame = imutils.resize(frame, width=400)

    # generate the frame to insert
    if fps._numFrames % 2 == 0:
        insert_frame = frame

    if fps._numFrames % 2 == 1:
        # frame-rate timing: end of the even/odd frame pair
        end_time = datetime.datetime.now()
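The example is cut off here. A minimal sketch of how the loop could conclude, assuming the pair timing is used to estimate the displayed frame rate and the stream is released on exit; the display and cleanup steps are assumptions, not part of the original:

        # estimate FPS from the time the even/odd frame pair took
        elapsed = (end_time - start_time).total_seconds()
        if elapsed > 0:
            current_fps = 2.0 / elapsed

    # count the frame and show it; quit on 'q'
    fps.update()
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

fps.stop()
cv2.destroyAllWindows()
camera.stop()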
Code Example #4
def gen_frames_detection():  # generate frame by frame from camera
    # camera = cv2.VideoCapture(0)
    # Capture frame-by-frame
    EYE_AR_THRESH = 0.3
    EYE_AR_CONSEC_FRAMES = 3

    # initialize the frame counters and the total number of blinks
    COUNTER = 0
    TOTAL = 0

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("\n[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(
        "shape_predictor_68_face_landmarks.dat")

    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream thread
    print("\n[INFO] starting video stream thread...")
    # vs = FileVideoStream(args["video"]).start()
    # fileStream = True
    # vs = VideoStream(src=0).start()
    # vs = VideoStream(usePiCamera=True).start() #Use for Raspberry Pi
    vs = VideoStream().start()
    fileStream = False
    time.sleep(1.0)

    j = 0
    result = False
    #  for cctv camera use rtsp://username:password@ip_address:554/user=username_password='******'_channel=channel_number_stream=0.sdp' instead of camera
    # for local webcam use cv2.VideoCapture(0)

    user_name = ""
    key = cv2.waitKey(1)
    while not result:
        try:
            # loop over frames from the video stream
            while not result:
                try:
                    j = j+1
                    # if this is a file video stream, then we need to check if
                    # there any more frames left in the buffer to process
                    if fileStream and not vs.more():
                        break

                    # grab the frame from the threaded video file stream, resize
                    # it, and convert it to grayscale
                    # channels)
                    frame = vs.read()
                    frame = imutils.resize(frame, width=800)
                    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

                    # detect faces in the grayscale frame
                    rects = detector(gray, 0)
                    # loop over the face detections
                    for rect in rects:
                        # determine the facial landmarks for the face region, then
                        # convert the facial landmark (x, y)-coordinates to a NumPy
                        # array
                        shape = predictor(gray, rect)
                        shape = face_utils.shape_to_np(shape)

                        # extract the left and right eye coordinates, then use the
                        # coordinates to compute the eye aspect ratio for both eyes
                        leftEye = shape[lStart:lEnd]
                        rightEye = shape[rStart:rEnd]
                        leftEAR = eye_aspect_ratio(leftEye)
                        rightEAR = eye_aspect_ratio(rightEye)

                        # average the eye aspect ratio together for both eyes
                        ear = (leftEAR + rightEAR) / 2.0

                        # compute the convex hull for the left and right eye, then
                        # visualize each of the eyes
                        leftEyeHull = cv2.convexHull(leftEye)
                        rightEyeHull = cv2.convexHull(rightEye)
                        cv2.drawContours(
                            frame, [leftEyeHull], -1, (0, 255, 0), 1)
                        cv2.drawContours(
                            frame, [rightEyeHull], -1, (0, 255, 0), 1)

                        # check to see if the eye aspect ratio is below the blink
                        # threshold, and if so, increment the blink frame counter
                        if ear < EYE_AR_THRESH:
                            COUNTER += 1

                        # otherwise, the eye aspect ratio is not below the blink
                        # threshold
                        else:
                            # if the eyes were closed for a sufficient number
                            # of frames, then increment the total number of blinks
                            if COUNTER >= EYE_AR_CONSEC_FRAMES:
                                TOTAL += 1

                            # reset the eye frame counter
                            COUNTER = 0

                        # draw the total number of blinks on the frame along with
                        # the computed eye aspect ratio for the frame
                        cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                        cv2.putText(frame, "EAR: {:.2f}".format(ear), (200, 30),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

                    if TOTAL >= 3 or j > 101:  # require 3 blinks, or give up after ~100 frames
                        result = True
                        if TOTAL >= 3:
                            cv2.putText(frame, "Real face, comparing now ...", (500, 30),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 2)
                            check_extract_faces(frame)
                            decision, user_name = final_compare_blink(result)
                            cv2.putText(frame, "Welcome {}".format(user_name), (500, 55),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
                        else:  # not real face, maybe photo
                            cv2.putText(frame, "Eyes blinks not detectec!", (500, 30),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 255), 2)
                    else:
                        if j % 10 == 0:  # alternate the label every 10 frames as a visible tick
                            cv2.putText(frame, "Unrecognized face", (10, 55),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                        else:
                            cv2.putText(frame, "# Unrecognized face", (10, 55),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                    print(j)

                    # return the frame
                    ret, buffer = cv2.imencode('.jpg', frame)
                    frame = buffer.tobytes()
                    yield (b'--frame\r\n'
                           b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

                except KeyboardInterrupt:
                    vs.stop()
                    break

        except KeyboardInterrupt:
            vs.stop()
            break

    vs.stop()
    stop_image = np.zeros((500, 500, 3), np.uint8)
    if user_name == "":
        cv2.putText(stop_image, "Unrecognized face", (150, 250),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    else:
        cv2.putText(stop_image, "Welcome {}".format(user_name), (150, 250),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
    ret, buffer = cv2.imencode('.jpg', stop_image)
    frame = buffer.tobytes()
    yield (b'--frame\r\n'
           b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
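The eye_aspect_ratio helper used above is not shown. A typical definition, following the Soukupová-Čech EAR formula used in the common dlib blink-detection tutorials; the scipy import is an assumption:

from scipy.spatial import distance as dist

def eye_aspect_ratio(eye):
    # vertical distances between the two pairs of vertical eye landmarks
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    # horizontal distance between the eye corners
    C = dist.euclidean(eye[0], eye[3])
    # EAR: vertical openings relative to the eye's width
    return (A + B) / (2.0 * C)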
Code Example #5
                default=0.5,
                help="minimum probability to filter weak detections")
args = vars(ap.parse_args())

# load the face detector model
print("[INFO] loading face detector model...")
faceNet = cv2.dnn.readNet('models/deploy.prototxt',
                          'models/res10_300x300_ssd_iter_140000.caffemodel')

# load the face mask detector model from disk
print("[INFO] loading face mask detector model...")
maskNet = load_model(args["model"])

# initialize the video stream and allow the camera sensor to warm up
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)

while True:

    # resize to 400 pixels
    frame = vs.read()

    if frame is not None:  # guard against empty reads from the stream

        # grab the frame dimensions and convert it to a blob
        (h, w) = frame.shape[:2]

    #frame = imutils.resize(frame, width=400)

    # determine whether a mask is being worn in the frame
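The example is cut off at this step. A minimal sketch of what it typically looks like with the OpenCV DNN face detector loaded above, continuing inside the frame-validity check; the args["confidence"] key, the 224x224 input size, and the [0, 1] scaling are assumptions based on common MobileNetV2 mask-detector setups:

        # detect faces with the SSD face detector
        blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
                                     (104.0, 177.0, 123.0))
        faceNet.setInput(blob)
        detections = faceNet.forward()

        for i in range(detections.shape[2]):
            confidence = detections[0, 0, i, 2]
            if confidence > args["confidence"]:
                # clip the face box to the frame, crop, and classify it
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")
                face = frame[max(0, startY):min(h, endY),
                             max(0, startX):min(w, endX)]
                face = cv2.resize(face, (224, 224)).astype("float32") / 255.0
                (mask, withoutMask) = maskNet.predict(
                    np.expand_dims(face, axis=0))[0]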