Ejemplo n.º 1
0
def detect_motion(frameCount):
    # Pull in the shared video stream, the frame handed to clients, and
    # the lock guarding it.
    global vs, outputFrame, lock

    # Background-subtraction motion detector plus a running frame counter.
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    # Consume frames from the stream forever.
    while True:
        # Grab the next frame, shrink it, then build a blurred grayscale
        # copy for the background model.
        frame = vs.read()
        frame = imutils.resize(frame, width=1280)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # Feed the background model and bump the frame count.
        md.update(gray)
        total += 1

        # Publish the latest frame for other threads under the lock.
        with lock:
            outputFrame = frame.copy()
Ejemplo n.º 2
0
def detect_motion(frameCount):
    # Shared state: camera stream, frame served to clients, and its lock.
    global vs, outputFrame, lock

    # Motion detector and count of frames processed so far.
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    # Main capture loop.
    while True:
        # Next frame: resize, then grayscale + Gaussian blur for the
        # background model.
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # Locate a face region, then detect and annotate the pupil blob
        # in each eye crop.
        face_frame = detect_faces(frame, face_cascade)
        if face_frame is not None:
            for eye in detect_eyes(face_frame, eye_cascade):
                if eye is None:
                    continue
                eye = cut_eyebrows(eye)
                keypoints = blob_process(eye, 50, detector)
                eye = cv2.drawKeypoints(
                    eye, keypoints, eye, (0, 0, 255),
                    cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

        # Update the background model; count this frame.
        md.update(gray)
        total += 1

        # Swap in the annotated frame under the lock.
        with lock:
            outputFrame = frame.copy()
Ejemplo n.º 3
0
def detect_motion(frameCount=32):
    # Shared references: stream, published frame, its lock, and the
    # module-level frame count.
    global vs, outputFrame, lock, count

    # Motion detector and number of frames seen by this loop.
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    # Pull frames until the process exits.
    while True:
        # Read the raw frame (no resize); prepare a blurred grayscale
        # copy for detection.
        frame = vs.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # Once enough frames have built the background model, run
        # detection. NOTE(review): the result is currently unused — the
        # bounding-box drawing was disabled upstream.
        if total > frameCount:
            motion = md.detect(gray)

        # Feed the background model and advance both counters.
        md.update(gray)
        total += 1
        count += 1

        # Hand the frame off to the streaming side under the lock.
        with lock:
            outputFrame = frame.copy()
Ejemplo n.º 4
0
def detect_motion(frameCount):
    # Shared video stream, output frame, and synchronization lock.
    global vs, outputFrame, lock

    # Motion detector and running count of frames read.
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    while True:
        # Fetch and preprocess: resize to 400px wide, grayscale, blur.
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # Stamp the current date/time at the bottom-left of the frame.
        timestamp = datetime.datetime.now()
        cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                    (0, 0, 255), 1)

        # Only attempt detection once the background model has seen
        # enough frames to be meaningful.
        if total > frameCount:
            motion = md.detect(gray)
            if motion is not None:
                # Draw a red box around the detected motion region.
                thresh, (minX, minY, maxX, maxY) = motion
                cv2.rectangle(frame, (minX, minY), (maxX, maxY),
                              (0, 0, 255), 2)

        # Update the background model and increment the frame count.
        md.update(gray)
        total += 1

        # Publish the annotated frame under the lock.
        with lock:
            outputFrame = frame.copy()
Ejemplo n.º 5
0
def detect_motion(frameCount):
    # Bring the shared stream, output frame, and lock into scope.
    global vs, outputFrame, lock

    # Motion detector and the total number of frames read so far.
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    # Loop over frames arriving from the stream.
    while True:
        # Next frame: resize, convert to grayscale, and blur to reduce
        # noise before detection.
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # Overlay the current date/time onto the frame.
        timestamp = datetime.datetime.now()
        cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                    (0, 0, 255), 1)

        # Skip detection until the minimum number of frames needed for
        # the background model has been read.
        if total > frameCount:
            motion = md.detect(gray)
            if motion is not None:
                # Motion confirmed: draw a box around the motion area.
                thresh, (minX, minY, maxX, maxY) = motion
                cv2.rectangle(frame, (minX, minY), (maxX, maxY),
                              (0, 0, 255), 2)

        # Update the background model and the frames-read counter.
        md.update(gray)
        total += 1

        # Under the lock, set the frame that will be sent to the client.
        with lock:
            outputFrame = frame.copy()
Ejemplo n.º 6
0
def detect_motion(frameCount):
    # Shared video stream, published frame, and its protecting lock.
    global vs, outputFrame, lock

    # Motion detector and number of frames read so far.
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    while True:
        # Read, resize, and build a blurred grayscale copy of the frame.
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # Draw the current timestamp in the bottom-left corner.
        timestamp = datetime.datetime.now()
        cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
                    0.35, (0, 0, 255), 1)

        # After the warm-up period, detect motion and box the region.
        if total > frameCount:
            motion = md.detect(gray)
            if motion is not None:
                thresh, (minX, minY, maxX, maxY) = motion
                cv2.rectangle(frame, (minX, minY), (maxX, maxY),
                              (0, 0, 255), 2)

        # Feed the background model and count the frame.
        md.update(gray)
        total += 1

        # Publish the frame under the lock.
        with lock:
            outputFrame = frame.copy()
def detect_motion(frameCount):
    """Read frames, detect faces with a Haar cascade, annotate, publish.

    Runs forever: each frame is resized, converted to a blurred grayscale
    image, scanned for faces, labelled with a timestamp and face count,
    then copied into the shared `outputFrame` under `lock` for the
    streaming thread. `frameCount` is accepted for interface parity with
    the other examples but is not used here.
    """
    # Shared video stream, published frame, and its lock.
    global vs, outputFrame, lock

    # Background model (updated every frame) and frames-read counter.
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    while True:
        # Acquire and preprocess the next frame.
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # Haar-cascade face detection (runs on the blurred grayscale).
        faces = faceCascade.detectMultiScale(gray,
                                             scaleFactor=1.5,
                                             minNeighbors=5,
                                             minSize=(30, 30),
                                             flags=cv2.CASCADE_SCALE_IMAGE)

        # Timestamp overlay at the bottom-left of the frame.
        timestamp = datetime.datetime.now()
        cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                    (0, 0, 255), 1)

        # BUGFIX: the original assigned `nbr = 0` (int) in the no-face
        # branch but later concatenated `"..." + nbr` (str + int →
        # TypeError had that path ever been reached). Keep `nbr` a string
        # unconditionally; len(faces) equals faces.shape[0] when faces
        # were found.
        nbr = str(len(faces))

        # Box every detected face.
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        # Draw the face-count label once (the original redrew the same
        # label for every face; `frame.shape[0] - frame.shape[0] + 10`
        # simplifies to 10).
        if len(faces) > 0:
            cv2.putText(frame, "Number of faces detected: " + nbr,
                        (0, 10),
                        cv2.FONT_HERSHEY_TRIPLEX, 0.35, (0, 255, 0), 1)

        # Update the background model and the frame count.
        md.update(gray)
        total += 1

        # Publish the annotated frame under the lock.
        with lock:
            outputFrame = frame.copy()