Code Example #1
File: stream.py Project: robinlh/web-elements
def detect_motion(frameCount):
    global vs, outputFrame, lock, motion_detected

    # initialize the motion detector and the total number of frames read so far
    md = MotionDetector(accumWeight=0.1)
    total = 0

    while True:
        # read the next frame from the stream, resize it, convert to grayscale, and blur it
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # draw the current timestamp on the frame
        timestamp = datetime.datetime.now()
        cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
                    0.35, (0, 0, 255), 1)

        # once enough frames have been read to construct the background model,
        # start detecting motion
        if total > frameCount:
            # detect motion in the current frame
            motion = md.detect(gray)

            # check if motion was found
            if motion is not None:
                # draw bounding box
                (thresh, (minX, minY, maxX, maxY)) = motion
                cv2.rectangle(frame, (minX, minY), (maxX, maxY),
                              (0, 0, 255), 2)

                # send a Telegram notification only on the first frame of motion
                if not motion_detected:
                    res = telegram_bot.telegram_bot_sendtext(
                        "motion detected: " +
                        timestamp.strftime("%A %d %B %Y %I:%M:%S%p"))
                    print(res)
                    motion_detected = True

        # update background model and increment total frames
        md.update(gray)
        total += 1

        # acquire the lock, set output frame, release
        with lock:
            outputFrame = frame.copy()
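
For readers wondering where `vs`, `outputFrame`, `lock`, and `motion_detected` come from: they are module-level globals that the project sets up before launching `detect_motion` in a background thread. Below is a minimal wiring sketch, assuming `imutils.video.VideoStream` for the camera and the standard `threading` module; the camera index and frame-count argument are illustrative assumptions, not values taken from stream.py.

# Hypothetical wiring sketch -- not part of the original stream.py listing.
import threading

from imutils.video import VideoStream

# globals shared with detect_motion()
vs = VideoStream(src=0).start()   # assumed: default webcam at index 0
outputFrame = None
lock = threading.Lock()
motion_detected = False

# run motion detection in a daemon thread so it does not block the caller
t = threading.Thread(target=detect_motion, args=(32,), daemon=True)
t.start()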

Code Example #2
    frame = cv2.flip(frame, 2)
    # copy the frame into a clone variable
    clone = frame.copy()
    # grab the height and width of the frame
    (frameH, frameW) = frame.shape[:2]

    # crop the frame to the ROI that will be examined
    roi = frame[top:bot, right:left]
    # convert the ROI to grayscale
    gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    # smooth the image with a Gaussian blur
    gray = cv2.GaussianBlur(gray, (7, 7), 0)

    # the first frame is used as the background
    if numFrames < 1:
        md.update(gray)
    else:
        # detect motion
        skin = md.detect(gray)
        # if motion (a skin region) was found
        if skin is not None:
            (thresh, c) = skin
            # draw the skin contour, shifting it back into full-frame
            # coordinates by adding the ROI offset (right, top)
            cv2.drawContours(clone, [c + (right, top)], -1, (0, 255, 0), 2)
            # recognize the gesture and count the number of raised fingers
            fingers = gd.detect(thresh, c)

            if gesture is None:
                gesture = [1, fingers]
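
This second fragment likewise leans on names defined elsewhere in its project: the ROI bounds `top`, `right`, `bot`, `left`, the `numFrames` counter, the `md` motion detector, the `gd` gesture detector, and the `gesture` state. A rough sketch of the kind of setup it assumes follows; every value and the `GestureDetector` class name below are guesses for illustration, not taken from the source project.

# Hypothetical setup for the gesture-detection fragment above.
# ROI bounds within the frame -- assumed values, not the project's.
top, right, bot, left = 10, 350, 225, 590

numFrames = 0      # frames processed so far; frame 0 seeds the background
gesture = None     # becomes [count, fingers] once a gesture is recognized

# md / gd are the project's own helpers; the class names and constructor
# arguments here are assumptions.
md = MotionDetector(accumWeight=0.1)
gd = GestureDetector()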
Code Example #3
# Initialize face detector
face_detector = FaceDetector()

# FPS calculation
fps = FPS().start()

print("[INFO] Start collecting face images.")

while True:
    # grab frame
    frame = frame_grabber.read()
    frame_show = frame.copy()
    frame_roi = frame[up_offsetY:down_offsetY, left_offsetX:right_offsetX]
    frame_gray = cv2.cvtColor(frame_roi, cv2.COLOR_BGR2GRAY)
    frame_gray = cv2.GaussianBlur(frame_gray, (21, 21), 0)
    motion_locs = motion_detector.update(frame_gray)

    # let the background model accumulate a stable average before detecting motion
    if num_frame_read < 15:
        num_frame_read += 1
        continue

    if len(motion_locs) > 0:
        # @ZC: could consider processing only every other frame to speed this up
        face_locs = face_detector.detect(frame_roi, motion_locs)
        if len(face_locs) > 0:
            # Save image with faces detected
            timestamp = datetime.datetime.now()
            ts = timestamp.strftime("%Y-%m-%d_%H:%M:%S_%f")
            print("[INFO] " + str(len(face_locs)) + " face found." + ts)
            image_save_path = "images/" + ts + ".jpg"
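
The third listing is cut off right after `image_save_path` is built. Assuming the intent is to write the grabbed frame to that path and keep the FPS counter ticking, the continuation would plausibly look something like this sketch, not the project's actual code.

# Hypothetical continuation -- the original listing ends before the write.
cv2.imwrite(image_save_path, frame_show)   # persist the frame with faces found
fps.update()                               # keep the FPS measurement current

Once the loop exits, calling `fps.stop()` and reading `fps.fps()` would report the measured throughput.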