Example #1
def process_video(file_name, n=1):
    """
    Iterate over the frames of a video file.

    :param file_name: path to the video file
    :param n: process one out of every n frames (default 1: take all frames)
    :return: tuple (list of processed frames, original fps of the video)
    """
    fvs = FileVideoStream(file_name).start()  # load video

    cam = cv2.VideoCapture(file_name)
    fps = cam.get(cv2.CAP_PROP_FPS)  # get original fps of the video

    counter = 0
    frame_list = []
    while fvs.running():
        if fvs.more():
            frame = fvs.read()

            if frame is not None and counter % n == 0:
                # append the frame to the list, resized to half width and half height
                frame_list.append(
                    cv2.resize(
                        frame,
                        (int(frame.shape[1] / 2), int(frame.shape[0] / 2))))

            counter += 1
        else:
            time.sleep(2)

    return (frame_list, fps)
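
# Hedged usage sketch (the file name below is an assumption): keep every 5th frame
# at half resolution, together with the source frame rate.
frames, fps = process_video("input.mp4", n=5)
print(len(frames), "frames kept at", fps, "fps")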
Example #2
def run_detection(fast_mtcnn, filenames):
    frames = []
    frames_processed = 0
    faces_detected = 0
    batch_size = 60
    start = time.time()

    for filename in tqdm(filenames):

        v_cap = FileVideoStream(filename).start()
        v_len = int(v_cap.stream.get(cv2.CAP_PROP_FRAME_COUNT))

        for j in range(v_len):

            frame = v_cap.read()
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frames.append(frame)

            if len(frames) >= batch_size or j == v_len - 1:

                faces = fast_mtcnn(frames)
                for i in range(len(faces)):
                    file_name = str(j) + str(i) + '.jpg'
                # cv2.imwrite(file_name,faces[i])
                #print(faces)
                frames_processed += len(frames)
                faces_detected += len(faces)
                frames = []

                print(
                    f'Frames per second: {frames_processed / (time.time() - start):.3f},',
                    f'faces detected: {faces_detected}\r',
                    end='')

        v_cap.stop()
def main():
    # Get ROI from frames
    file_name = "/home/smart/WBC286 InvL-Pillars -35mbar 15fps 29-11-2019 v3.4.avi"
    #file_name = "C:/Users/Me/Desktop/capstone/WBC286 InvL-Pillars -350mbar 150fps 29-11-2019 v3.4.avi"
    cap = FileVideoStream(file_name).start()
    image = cap.read()
    print("***** PROCESSING ROI for RUN 1 ***** File: %s" % file_name)
    cap.stop()
    print("***** PROCESSING RUN 1 ***** File: %s" % file_name)
    print("Frame size: ", image.shape)
    Channels = 34
    #original size: [502,1402,3]
    # r = [84, 357, 1238, 130]
    r = [
        int(0.167 * image.shape[0]),
        int(0.25 * image.shape[1]),
        int(0.883 * image.shape[1]), 130
    ]
    x1, x2, y1, y2, sub_ch, channel_len = to_crop(image, r, Channels)

    # run count
    run_standard = standard(file_name, Channels)
    fps = run_standard.standard_run(x1, x2, y1, y2, sub_ch, channel_len)
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    rbc_counts = run_standard.rbc_detection()
    run_standard.process_results(rbc_counts)

    print(
        "----------------------------------------------------------------------"
    )
Example #4
    def UAMS(self):
        video_file = FileVideoStream(self.args["video"]).start()
        time.sleep(2.0)
        fps = FPS().start()

        while video_file.more():
            # Converting each frame to matrix of numbers
            try:
                self.frame = video_file.read()
                gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
                gray = np.dstack([gray, gray, gray])

            except Exception as e:
                print(e)
                continue  # skip this frame if it could not be read or converted

            self.frame, self.faces, conf, detection, startX, y = self.cnn_op.CNNOperation(
                self.frame, self.net, self.args)
            try:
                msg = self.calculation(startX, y)
            except Exception as e:
                print(e)
            try:
                cv2.imshow("frame-this is main frame", self.frame)
            except Exception as e:
                print(e)
            fps.update()
            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                break
        fps.stop()
        print("elapsed time : {:.2f}".format(fps.elapsed()))
        print("FPS  : {:.2f}".format(fps.fps()))
    def face_verification(self, path):
        """
            Face verification from input file video Stream

            :param path: Location of the video
            :return  name: Recognized person's name
        """

        video_path = path
        result = True
        while (result):
            vs = FileVideoStream(video_path).start()
            frame = vs.read()

            frame = ndimage.rotate(frame, 180)
            frame = ndimage.rotate(frame, 90)
            cap_image = frame
            check = SPOOF_DETECTION().spoof_detection(video_path)
            result = False
            vs.stop()
        cv2.destroyAllWindows()

        if check == True:
            name = obj.face_recognize(cap_image)
            print('Recognized image {}'.format(name))
        else:
            print('SPOOFING ')
class VideoStreamer(Camera):
    def __init__(self, src=0, use_pi=-1, resolution=480, framerate=30):

        super(VideoStreamer, self).__init__(use_pi=use_pi,
                                            resolution=resolution,
                                            framerate=framerate)

        # VideoStream class is used for live stream.
        # FileVideoStream class is used for streaming from a saved video.
        if isinstance(src, int):
            self.vs = VideoStream(src=src,
                                  usePiCamera=self.use_pi > 0,
                                  resolution=self.resolution,
                                  framerate=self.framerate).start()
        else:
            self.vs = FileVideoStream(path=src)

        # Let camera warm up
        time.sleep(1.0)

    # Returns the frame as a np array
    def read(self):
        if isinstance(self.vs, FileVideoStream):
            return self.vs.stream.read()[1]
        else:
            return self.vs.read()

    # Terminate the capture thread.
    def stop(self):
        self.vs.stop()
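
# Hedged usage sketch for VideoStreamer (the argument values are only illustrative):
# an integer src opens a live camera, while a file path streams a saved video.
streamer = VideoStreamer(src=0, use_pi=-1, resolution=480, framerate=30)
frame = streamer.read()   # next frame as a NumPy array (None if nothing was grabbed)
streamer.stop()           # terminate the capture thread
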
def OfflineVideo():
    # a function to initialize a pre-recorded video for offline analysis
    print("[INFO]          - All *.avi files in .." + chr(92) + "videos" + chr(92) + ":")
    for file in os.listdir("videos"):  # show the content of the videos folder
        if "avi" in file:  # only print avi files
            print(file)
    # ask the user to type in the file to play
    Selection = input("select file to play (without extension): ")
    if Selection == "":  # if nothing is entered, take the default video
        #vs = cv2.VideoCapture("videos" + chr(92) + "LongVideo1_quer" + ".avi")
        camera = FileVideoStream("videos" + chr(92) + "LongVideo1_quer" +
                                 ".avi").start()
    else:
        if os.path.isfile("videos" + chr(92) + Selection +
                          ".avi"):  # check if the file exists
            # take the file defined by the user if it exists in the videos folder
            camera = FileVideoStream("videos" + chr(92) + Selection +
                                     ".avi").start()
            print("[INFO]          - Selected file: " + str(camera))
        else:
            print("[INFO]          - File not found!")  # if the file was not found,
            camera = OfflineVideo()  # restart the function and ask for the name again

    return camera
Example #8
class ImageRouter(QtCore.QObject):
    source_is_file = False
    on_frame = QtCore.pyqtSignal(np.ndarray)

    def __init__(self, filename=''):
        super().__init__()
        if filename == '':
            self.vs = VideoStream(src=0).start()
            self.source_is_file = False
        else:
            self.vs = FileVideoStream(filename).start()
            self.source_is_file = True

    def __del__(self):
        self.vs.stop()

    def tick(self):
        if self.source_is_file and not self.vs.more():
            return False
        frame = self.vs.read()
        if frame is None:
            return False
        self.on_frame.emit(frame)
        return True

    def run(self):
        print("[INFO] Started ImageRouter")
        while self.tick():
            pass
        print("[INFO] Stopped ImageRouter")
Example #9
    def start (self):
        if self.filename is not None:
            self.log.debug('Video file: %s', self.filename)
            self.fvs = FileVideoStream(self.filename).start()
            self.stream = self.fvs.stream
            time.sleep(1.0)
        else: 
            if self.use_pi_camera:
                self.log.debug('Pi Camera (%d %d)', self.resolution[0], self.resolution[1])
                self.stream = VideoStream(src=0,
                    usePiCamera=True,                    
                    resolution=self.resolution,
                    framerate=self.framerate,
                    sensor_mode=5
                    ).start()

            else:
                self.log.debug('Web Camera')
                self.stream = cv2.VideoCapture(0)
                self.stream.set(cv2.CAP_PROP_BUFFERSIZE, 2)
                self.resolution = (
                    self.stream.get(cv2.CAP_PROP_FRAME_WIDTH),
                    self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)
                )
                self.framerate = self.stream.get(cv2.CAP_PROP_FPS)

        return self.resolution, self.framerate
    def scan_file(self, fname):
        vs = FileVideoStream(fname).start()

        while True:

            frame = vs.read()
            if frame is None:  # end of the file
                break
            # frame = imutils.resize(frame)

            barcodes = pyzbar.decode(frame)

            for code in barcodes:
                (x, y, w, h) = code.rect
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)

                bar_data = code.data.decode('utf-8')
                bar_type = code.type

                text = f'{bar_data} ({bar_type})'
                cv2.putText(frame, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (0, 0, 255), 2)

            cv2.imshow("Barcode Scanner", frame)
            key = cv2.waitKey(1) & 0xFF

            if key == ord('q'):
                break

        cv2.destroyAllWindows()
        vs.stop()
class VideoCamera(object):
    def __init__(self):
        # Using OpenCV to capture from device 0. If you have trouble capturing
        # from a webcam, comment the line below out and use a video file
        # instead.
        # self.video = cv2.VideoCapture(0)
        # If you decide to use video.mp4, you must have this file in the same
        # folder as main.py.
        self.video = FileVideoStream("cropvideo.mp4").start()

    def __del__(self):
        self.video.stop()  # FileVideoStream exposes stop(), not release()

    def get_frame(self):

        image = self.video.read()
        faceCascade = cv2.CascadeClassifier('haarcascade_profileface.xml')
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(gray, 1.1, 4)

        # We are using Motion JPEG, but OpenCV defaults to capture raw images,
        # so we must encode it into JPEG in order to correctly display the
        # video stream.

        for (x, y, w, h) in faces:
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

        ret, jpeg = cv2.imencode('.jpg', image)
        return jpeg.tobytes()
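
# Hedged sketch of how the JPEG bytes from get_frame() are typically served as
# Motion JPEG (the Flask app, route name, and multipart framing are assumptions,
# not part of the example above).
from flask import Flask, Response

app = Flask(__name__)

def gen(camera):
    # wrap every JPEG frame in a multipart boundary so the browser renders it as video
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(gen(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')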
Example #12
    def __init__(self, kwargs):
        self.args = kwargs

        # initialize dlib's face detector (HOG-based) and then create
        # the facial landmark predictor
        print("[INFO] loading facial landmark predictor...")
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(SHAPE_PREDICTOR)

        # grab the indexes of the facial landmarks for the left and
        # right eye, respectively
        (self.l_start,
         self.l_end) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
        (self.r_start,
         self.r_end) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

        # start the video stream thread
        print("[INFO] starting video stream thread...")

        try:
            video_src = int(self.args["webcam"])
            print('[INFO] Using webcam...')
            self.vs = VideoStream(src=video_src).start()
        except ValueError:
            # the argument is not a camera index, so treat it as a video file path
            print('[INFO] Using video file...')
            self.vs = FileVideoStream(path=self.args["webcam"]).start()

        time.sleep(1.0)
Example #13
 def __init__(self, src=0, use_cv2=False):
     if use_cv2:
         self.obj = cv2.VideoCapture(src)
     elif src == 0:
         self.obj = WebcamVideoStream(src)
     elif src != 0:
         self.obj = FileVideoStream(src)
Example #14
def eye_close(image):
    path = "shape_predictor_68_face_landmarks.dat"
    predictor = dlib.shape_predictor(path)
    detector = dlib.get_frontal_face_detector()

    EYE_AR_THRESH = 0.4

    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
    (mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]

    vs = FileVideoStream(image).start()
    time.sleep(2.0)
    frame = vs.read()
    frame = imutils.resize(frame, width=400)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rects = detector(gray, 0)
    leftEAR = rightEAR = 0.0  # defaults in case no face is detected
    for rect in rects:
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)
        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)
    return leftEAR, rightEAR
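
# A minimal sketch of the eye_aspect_ratio() helper this example assumes
# (the standard EAR formula; the actual implementation is not shown in the snippet above).
from scipy.spatial import distance as dist

def eye_aspect_ratio(eye):
    # vertical distances between the two pairs of vertical eye landmarks
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    # horizontal distance between the eye corners
    C = dist.euclidean(eye[0], eye[3])
    # the ratio drops towards zero as the eye closes
    return (A + B) / (2.0 * C)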
Example #15
    def Start(self, filename):
        '''
        Parameters:
        `filename` path of the video file
        '''

        self.shape_predictor = ".\\shape_predictor_68_face_landmarks.dat"
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(self.shape_predictor)

        self.vs = FileVideoStream(filename).start()
        fileStream = True
        time.sleep(1)

        print("Arranging video frames ...")
        while self.vs.more():
            frame = self.vs.read()
            # Transpose and flip statements are applied whenever the orientation
            # of the input video is not initially correct.
            #frame = cv2.transpose(frame)
            frame = cv2.flip(
                frame, flipCode=1
            )  # HACK: this tweak depends on the original video orientation.

            frame = imutils.resize(frame, width=350)
            self.frames.append(frame)

        print('Done!')

        self.InitPlot()
        self.ReadFrames()
Example #16
	def init_stream(self, camera=0):
		if isinstance(camera, int):
			self.camera = camera
			self.web_cam = True
			self.file = ''
			try:
				self.vs = VideoStream1(self.camera)
				self.err_open_stream = False
			except Exception:
				self.err_open_stream = True
				logger.info('can not open stream')
		elif isinstance(camera, str):
			self.camera = -1
			self.web_cam = False
			self.file = camera
			try:
				self.vs = FileVideoStream(self.file)
				self.err_open_stream = False
			except Exception:
				self.err_open_stream = True
				logger.info('can not open file')

		#sleep(0.1)						# time to warm up the camera

		self.processed_frames = 0  # number of processed frames in the current time window
 def __init__(self, src, ind):
     super(CustomMainThread, self).__init__()
     self.src = src  # the input camera/video source link
     self.fvs = FileVideoStream(self.src, queue_size=64).start()
     self.ind = ind
     self.inputQ = self.fvs.Q
     self.obj = AllBehaviours()
Example #18
class VideoSource (object):
    def __init__ (self, video_file, log, use_pi_camera = True, resolution=(320, 200), framerate = 30, night = False):
        self.filename = video_file
        self.log = log
        self.use_pi_camera  = use_pi_camera
        self.resolution = resolution
        self.framerate = framerate
        self.night = night
        self.fvs = None
        self.stream = None
        self._done = False

    def start (self):
        if self.filename is not None:
            self.log.debug('Video file: %s', self.filename)
            self.fvs = FileVideoStream(self.filename).start()
            self.stream = self.fvs.stream
            time.sleep(1.0)
        else: 
            if self.use_pi_camera:
                self.log.debug('Pi Camera (%d %d)', self.resolution[0], self.resolution[1])
                self.stream = VideoStream(src=0,
                    usePiCamera=True,                    
                    resolution=self.resolution,
                    framerate=self.framerate,
                    sensor_mode=5
                    ).start()

            else:
                self.log.debug('Web Camera')
                self.stream = cv2.VideoCapture(0)
                self.stream.set(cv2.CAP_PROP_BUFFERSIZE, 2)
                self.resolution = (
                    self.stream.get(cv2.CAP_PROP_FRAME_WIDTH),
                    self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)
                )
                self.framerate = self.stream.get(cv2.CAP_PROP_FPS)

        return self.resolution, self.framerate

    def read(self):
        if self.filename:
            frame = self.fvs.read()
        elif self.use_pi_camera:
            frame = self.stream.read()
        else:
            # cv2.VideoCapture returns a (grabbed, frame) tuple
            _, frame = self.stream.read()
        return frame

    def stop (self):
        if self.filename:
            self.fvs.stop()
        elif self.use_pi_camera:
            self.stream.stop()
        else:
            # cv2.VideoCapture is closed with release(), not stop()
            self.stream.release()
        self._done = True

    def done(self):
        # check to see if the video is still running
        running = self.fvs.running() if self.filename else True
        self._done = not running
        return self._done
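
# Hedged usage sketch for VideoSource (the logger setup and file name are assumptions).
import logging

log = logging.getLogger("video")
source = VideoSource("input.mp4", log, use_pi_camera=False)
resolution, framerate = source.start()
while not source.done():
    frame = source.read()
    if frame is None:
        break
    # ... process the frame here ...
source.stop()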
def main(_argv):
    input_layer = tf.keras.layers.Input([FLAGS.size, FLAGS.size, 3])
    feature_maps = YOLOv4(input_layer, NUM_CLASS)
    bbox_tensors = []
    for i, fm in enumerate(feature_maps):
        bbox_tensor = decode(fm, NUM_CLASS, i)
        bbox_tensors.append(bbox_tensor)

    model = tf.keras.Model(input_layer, bbox_tensors)
    utils.load_weights(model, 'data/YOLOv4-obj_1000.weights')

    vid = FileVideoStream(FLAGS.input).start()  # Reading input
    frame = vid.read()

    fourcc = cv2.VideoWriter_fourcc('F', 'M', 'P', '4')
    out = cv2.VideoWriter(FLAGS.output, fourcc, 10.0,
                          (frame.shape[1], frame.shape[0]), True)

    plates = []

    n = 0
    Sum = 0
    while True:
        start = time.time()
        n += 1
        frame = vid.read()
        if frame is None:
            break

        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        bboxes = plateDetect(frame, FLAGS.size,
                             model)  # License plate detection
        for i in range(len(bboxes)):
            img = frame[int(bboxes[i][1]):int(bboxes[i][3]),
                        int(bboxes[i][0]):int(bboxes[i][2])]
            prediction_groups = pipeline.recognize(
                [img])  # Text detection and recognition on license plate
            string = ''
            for j in range(len(prediction_groups[0])):
                string = string + prediction_groups[0][j][0].upper()

            if platePattern(string) == True and string not in plates:
                plates.append(string)

        if len(plates) > 0:
            drawText(frame, plates)

        frame = utils.draw_bbox(
            frame, bboxes)  # Draws bounding box around license plate
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

        Sum += time.time() - start
        print('Avg fps:- ', n / Sum)  # frames processed so far / total processing time

        out.write(frame)
        cv2.imshow("result", frame)
        if cv2.waitKey(1) == 27: break
    out.release()
    cv2.destroyAllWindows()
    def standard_run(
        self,
        x1: int,
        x2: int,
        y1: int,
        y2: int,
        sub_ch: list,
        channel_len,
    ):

        fps = FPS().start()
        cap = FileVideoStream(self.filename).start()
        count = 0

        start = time.time()
        cycle_start = time.time()
        frame = cap.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        while cap.more():
            #            print(count)
            frame = cap.read()
            if frame is None:
                break

            if count < 200:
                self.frames_buffer.append(frame)
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
            frame = frame[y1:y2, x1:x2]
            crop = self.mask.apply(frame)
            crop = cv2.GaussianBlur(crop, (7, 7), 3.0)

            _, crop = cv2.threshold(crop, 150, 255,
                                    cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            contours, hierarchy = cv2.findContours(crop, cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_SIMPLE)

            for i in range(len(contours)):
                avg = np.mean(contours[i], axis=0)
                coord = (int(avg[0][0]), int(avg[0][1]))  # Coord is (x,y)
                ch_pos = int(math.floor((coord[0]) / channel_len))

                try:
                    self.sum_ch1[ch_pos] += 1.0
                except IndexError:
                    pass  # contour centre fell outside the tracked channels

            count += 1
            fps.update()
        fps.stop()
        cycle_end = time.time()
        self.cycle_count += 1
        end = time.time()
        detect_benchmark = end - start
        print("Number of frames processed: ", count)
        print("Time taken for WBC counting:", detect_benchmark)
        print("[INFO] Each cycle time taken = %0.5fs" %
              ((cycle_end - cycle_start) / count))

        return fps
Example #21
def detect_object(frameCount):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock

    # initialize the total number of frames read thus far
    total = 0
    # loop over the frames from the video stream
    while True:
        # grab the frame from the threaded video stream and resize it
        # to have a maximum width of 400 pixels
        frame = vs.read()
        #       time.sleep(0.05)

        if frame is None:
            print(
                "INFO: unable to connect to a webcam, using file stream instead"
            )
            vs = FileVideoStream("static/test_video.mp4").start()
            frame = vs.read()
            continue
        frame = imutils.resize(frame, width=400)

        # grab the frame dimensions and convert it to a blob
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843,
                                     (300, 300), 127.5)

        # pass the blob through the network and obtain the detections and predictions
        net.setInput(blob)
        detections = net.forward()

        # loop over the detections
        for i in np.arange(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated with the prediction
            confidence = detections[0, 0, i, 2]

            # filter out weak detections by ensuring the `confidence` is
            # greater than the minimum confidence
            if confidence > args["confidence"]:
                # extract the index of the class label from the
                # `detections`, then compute the (x, y)-coordinates of
                # the bounding box for the object
                idx = int(detections[0, 0, i, 1])
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                # draw the prediction on the frame
                label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
                cv2.rectangle(frame, (startX, startY), (endX, endY),
                              COLORS[idx], 2)
                y = startY - 15 if startY - 15 > 15 else startY + 15
                cv2.putText(frame, label, (startX, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)

        # acquire the lock, set the output frame, and release the lock
        with lock:
            outputFrame = frame.copy()
Example #22
 def __init__(self, filename=''):
     super().__init__()
     if filename == '':
         self.vs = VideoStream(src=0).start()
         self.source_is_file = False
     else:
         self.vs = FileVideoStream(filename).start()
         self.source_is_file = True
 def __init__(self):
     # Using OpenCV to capture from device 0. If you have trouble capturing
     # from a webcam, comment the line below out and use a video file
     # instead.
     # self.video = cv2.VideoCapture(0)
     # If you decide to use video.mp4, you must have this file in the same
     # folder as main.py.
     self.video = FileVideoStream("cropvideo.mp4").start()
Example #24
    def frames():
        camera = FileVideoStream()
        time.sleep(1.0)

        # loop over frames from the video file stream
        while camera.more():
            frame = camera.read()
            yield frame
Example #25
def showVideo(app):
    fvs = FileVideoStream(app.vidDir).start()
    time.sleep(1.0)
    while fvs.more():
        frame = fvs.read()
        frame = cv2.resize(frame, (720, 480))
        cv2.imshow("Frame", frame)
        cv2.waitKey(1)
Example #26
    def __init__(self):
        #self.cap = cv2.VideoCapture(1)

        self.fvs = FileVideoStream(1)

        # self.fvs.stream.set(3,FULL_WIDTH)
        # self.fvs.stream.set(4,FULL_HEIGHT)

        self.fvs.start()
Example #27
def detect_motion(frameCount):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock

    # initialize the motion detector and the total number of frames
    # read thus far
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    # loop over frames from the video stream
    while True:
        # read the next frame from the video stream, resize it,
        # convert the frame to grayscale, and blur it
        frame = vs.read()
        time.sleep(0.05)
        if frame is None:
            print(
                "INFO: unable to connect to a webcam, using file stream instead"
            )
            vs = FileVideoStream("video/test_video.mp4").start()
            frame = vs.read()
            continue
        frame = imutils.resize(frame, width=400)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # grab the current timestamp and draw it on the frame
        timestamp = datetime.datetime.now()
        cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                    (0, 0, 255), 1)

        # if the total number of frames has reached a sufficient
        # number to construct a reasonable background model, then
        # continue to process the frame
        if total > frameCount:
            # detect motion in the image
            motion = md.detect(gray)

            # check to see if motion was found in the frame
            if motion is not None:
                # unpack the tuple and draw the box surrounding the
                # "motion area" on the output frame
                (thresh, (minX, minY, maxX, maxY)) = motion
                cv2.rectangle(frame, (minX, minY), (maxX, maxY), (0, 0, 255),
                              2)

        # update the background model and increment the total number
        # of frames read thus far
        md.update(gray)
        total += 1

        # acquire the lock, set the output frame, and release the
        # lock
        with lock:
            outputFrame = frame.copy()
def write_svg_haar(stream_url):
    '''
    Reads an alternative face detection model, and connects to the in-memory 
    Redis database. Detects faces (no identification) in the specified stream 
    and calculates the corresponding bounding boxes. Writes the bounding boxes 
    for all detected faces to an svg overlay which is then saved to
    Redis to be accessed by other processes. 
    '''
    print("[INFO] opening redis connection")
    redis_db = redis.StrictRedis(host="localhost", port=6379, db=0)
    print("[INFO] starting stream")
    #    capture = cv2.VideoCapture(stream_url)
    #    capture.set(cv2.CAP_PROP_BUFFERSIZE, 0)
    fvs = FileVideoStream(stream_url, queue_size=1).start()
    process_this_frame = True
    while True:
        # if process_this_frame:
        if True:
            # read_flag, img = capture.read()
            if not fvs.more():
                continue
            img = fvs.read()

            face_cascade = cv2.CascadeClassifier(
                "./haarcascade_frontalface_default.xml")
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, minSize=(20, 20))
            svg_document = svgwrite.Drawing(size=("1280px", "720px"))
            svg_document.add(
                svg_document.rect(insert=(0, 0),
                                  size=("1280px", "720px"),
                                  stroke_width="10",
                                  stroke="green",
                                  fill="rgb(0,0,0)",
                                  fill_opacity=0))

            for (x, y, w, h) in faces:
                x = int(x)
                y = int(y)
                svg_document.add(
                    svg_document.rect(insert=(x, y),
                                      size=("{}px".format(w),
                                            "{}px".format(h)),
                                      stroke_width="10",
                                      stroke="yellow",
                                      fill="rgb(0,0,0)",
                                      fill_opacity=0))
            redis_db.set('overlay', svg_document.tostring())

        process_this_frame = not process_this_frame

        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
Example #29
def piscadas():
    vs = FileVideoStream(0).start()
    fileStream = True

    EYE_AR_THRESH = 0.3
    EYE_AR_CONSEC_FRAMES = 3

    COUNTER = 0
    TOTAL = 0

    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    while True:

        if fileStream and not vs.more():
            break

        frame = vs.read()
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rects = detector(gray, 0)
        for rect in rects:

            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = get_ear(leftEye)
            rightEAR = get_ear(rightEye)

            ear = (leftEAR + rightEAR) / 2.0

            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            if ear < EYE_AR_THRESH:
                COUNTER += 1

            else:
                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    TOTAL += 1

                COUNTER = 0

            cv2.putText(frame, "Piscadas: {}".format(TOTAL), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
Example #30
    def load_video(self):
        """
        Accelerated video stream with multi-threading support

        :return:
        """
        if self.verbose:
            logging.info(msg=f"{self.video_path} is loading ..")
        if self.if_exist():
            self.video = FileVideoStream(self.video_path)
import sys
import cv2
import time
from imutils.video import FileVideoStream

videoFile = sys.argv[1]
outputDir = sys.argv[2]

fvs = FileVideoStream(videoFile).start()
time.sleep(1.0)

frameIndex = 0

while (fvs.more()):
	frame = fvs.read()

	cv2.imshow("Frame", frame)
	fileName = outputDir + "\\frame_" + str(frameIndex) + ".tiff"
	cv2.imwrite(fileName, frame)

	frameIndex += 1

	cv2.waitKey(1)

cv2.destroyAllWindows()
fvs.stop()

# imports needed by this example
import argparse
import time

import cv2
import imutils
import numpy as np
from imutils.video import FPS, FileVideoStream

def filterFrame(frame):
	# resize and convert to grayscale in the producer thread, keeping 3 channels
	frame = imutils.resize(frame, width=450)
	frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
	frame = np.dstack([frame, frame, frame])
	return frame

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", required=True,
	help="path to input video file")
args = vars(ap.parse_args())

# start the file video stream thread and allow the buffer to
# start to fill
print("[INFO] starting video file thread...")
fvs = FileVideoStream(args["video"], transform=filterFrame).start()
time.sleep(1.0)

# start the FPS timer
fps = FPS().start()

# loop over frames from the video file stream
while fvs.running():
	# grab the frame from the threaded video file stream, resize
	# it, and convert it to grayscale (while still retaining 3
	# channels)
	frame = fvs.read()

	# Relocated filtering into producer thread with transform=filterFrame
	#  Python 2.7: FPS 92.11 -> 131.36
	#  Python 3.7: FPS 41.44 -> 50.11
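	# --- Hedged sketch (not in the original snippet): how this consumer loop is
	# --- typically finished in the imutils demo pattern. The display call, FPS
	# --- bookkeeping, and cleanup below are assumptions.
	if frame is None:
		break
	cv2.imshow("Frame", frame)
	cv2.waitKey(1)
	fps.update()

# stop the timer and report the measured throughput
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
cv2.destroyAllWindows()
fvs.stop()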