Example #1
def run_video():
    PATH_VIDEO = 'D:/Binus/data/videos/VID_20190915_123023.mp4'
    PATH_OUT_VIDEO = 'D:/Binus/data/videos/VID_20190915_123023_2.avi'
    fo = FaceOrchestrator()
    fs = FileVideoStream(PATH_VIDEO).start()
    time.sleep(1.0)
    frame = fs.read()
    h, w, __ = frame.shape
    out = cv2.VideoWriter(PATH_OUT_VIDEO,
                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 24,
                          (w, h))
    i = 0
    while fs.more():
        i = i + 1
        print('Processing frame ' + str(i))
        try:
            frame = fs.read()
            fo.set_image_to_be_processed(frame)
            # detect faces
            fo.fd_method = 'AI'
            fo.fd_ai_min_score = 0.6
            fo.detect_faces()
            fo.align_faces(target_face_percent=0.28)
            # recognize faces
            # fo.fr_max_delta = 0.6
            # fo.fr_method = 'DELTA'
            fo.fr_min_probability = 0.5
            fo.fr_method = 'ML'
            frame_with_label, names = fo.recognize_faces()
            out.write(frame_with_label)
        except:
            pass
    fs.stop()
    out.release()
    print('Finish.')
Example #2
def run_detection(fast_mtcnn, filenames):
    frames = []
    frames_processed = 0
    faces_detected = 0
    batch_size = 60
    start = time.time()

    for filename in tqdm(filenames):

        v_cap = FileVideoStream(filename).start()
        v_len = int(v_cap.stream.get(cv2.CAP_PROP_FRAME_COUNT))

        for j in range(v_len):

            frame = v_cap.read()
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frames.append(frame)

            if len(frames) >= batch_size or j == v_len - 1:

                faces = fast_mtcnn(frames)
                for i in range(len(faces)):
                    file_name = str(j) + str(i) + '.jpg'
                # cv2.imwrite(file_name,faces[i])
                #print(faces)
                frames_processed += len(frames)
                faces_detected += len(faces)
                frames = []

                print(
                    f'Frames per second: {frames_processed / (time.time() - start):.3f},',
                    f'faces detected: {faces_detected}\r',
                    end='')

        v_cap.stop()
class VideoStreamer(Camera):
    def __init__(self, src=0, use_pi=-1, resolution=480, framerate=30):

        super(VideoStreamer, self).__init__(use_pi=use_pi,
                                            resolution=resolution,
                                            framerate=framerate)

        # VideoStream class is used for live stream.
        # FileVideoStream class is used for streaming from a saved video.
        if isinstance(src, int):
            self.vs = VideoStream(src=src,
                                  usePiCamera=self.use_pi > 0,
                                  resolution=self.resolution,
                                  framerate=self.framerate).start()
        else:
            self.vs = FileVideoStream(path=src)

        # Let camera warm up
        time.sleep(1.0)

    # Returns the frame as a np array
    def read(self):
        if isinstance(self.vs, FileVideoStream):
            return self.vs.stream.read()[1]
        else:
            return self.vs.read()

    # Terminate the capture thread.
    def stop(self):
        self.vs.stop()
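
As the constructor comments note, an integer src selects a live VideoStream while a string path falls back to FileVideoStream. A hypothetical usage sketch follows; the Camera base class behaviour and the example source values are assumptions, not part of the original snippet.

# Hypothetical usage of the VideoStreamer class above (illustrative values only).
streamer = VideoStreamer(src=0)              # live webcam via VideoStream
# streamer = VideoStreamer(src='clip.mp4')   # saved video via FileVideoStream
frame = streamer.read()                      # numpy array frame (or None at end of file)
streamer.stop()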
Example #4
def livevideocorrection(filepath, modelname):
    fvs = FileVideoStream(filepath, queue_size=128).start()
    desired_frames = 250
    frame_number = 0
    last_time = time.monotonic()
    while fvs.more() and frame_number <= desired_frames:
        frame = fvs.read()
        if frame_number % 10 == 0:
            current_time = time.monotonic()
            timedif = current_time - last_time
            FPS = 10 / timedif
            print('FPS:' + str(FPS))
            last_time = time.monotonic()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        prediction = (predict_one(frame,
                                  modelname).squeeze().detach().float().cpu())
        prediction = prediction.permute(1, 2, 0).numpy()
        prediction = cv2.cvtColor(prediction, cv2.COLOR_BGR2RGB)

        cv2.putText(prediction, "Queue Size: {}".format(fvs.Q.qsize()),
                    (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
        cv2.putText(prediction, ('FPS: ' + str(FPS)), (750, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        cv2.imshow('preview', prediction)
        frame_number += 1
        cv2.waitKey(1)

    cv2.destroyAllWindows()
    fvs.stop()
def main():
    # Construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-m", "--model", required=True,
                    help="path to model file")
    ap.add_argument("-c", "--confidence", type=float, default=0.95,
                    help="minimum probability to filter weak detections")
    args = vars(ap.parse_args())

    # Initialize our centroid tracker and frame dimensions
    ct = CentroidTracker()

    # Load our serialized model from disk
    print("[INFO] Loading model...")
    model = get_model(args["model"])

    vs = FileVideoStream("/media/ave/ae722e82-1476-4374-acd8-a8a18ef8ae7a/Rana_vids/Nova_1/121/03-2018.05.25_06.24.23-17.mpg").start()
    time.sleep(2.0)

    # Loop over the frames from the video stream
    while vs.more():
        # Read the next frame from the video stream
        frame = vs.read()

        analyze_frame(frame, ct, model, args["confidence"])

    # Cleanup
    cv2.destroyAllWindows()
    vs.stop()
Example #6
    def read_frames(self):
        # start the file video stream thread and allow the buffer to
        # start to fill
        print("[INFO] starting video file thread...")
        args = self.argument_parser()
        output_path = self.cwd / Path(args.output_dir)
        output_path.mkdir(parents=True, exist_ok=True)
        fvs = FileVideoStream(args.video).start()
        time.sleep(1.0)

        # start the FPS timer
        fps = FPS().start()
        detector = mtcnn.MTCNN()
        fno = 0
        frames = VideoMeta(args.video).fps()
        #print(meta)

        # loop over frames from the video file stream
        while fvs.more():
            fno += 1
            print(fno)
            # grab the frame from the threaded video file stream, resize
            # it, and convert it to grayscale (while still retaining 3
            # channels)
            frame = fvs.read()
            if (fno % frames != 0):
                continue
            try:
                frame = imutils.resize(frame, width=450)
            except:
                break
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # frame = np.dstack([frame, frame, frame])

            if args.detect == "haarcascade":
                results = self.face_cascade.detectMultiScale(frame, 1.3, 5)
                if len(results) != 0:

                    self.crop(frame, results[0],
                              str(output_path / Path(str(fno) + ".jpg")))
            else:
                results = detector.detect_faces(frame)

            # display the size of the queue on the frame
            # cv2.putText(frame, "Queue Size: {}".format(fvs.Q.qsize()),
            #            (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

            # show the frame and update the FPS counter
            cv2.imshow("Frame", frame)
            cv2.waitKey(1)
            fps.update()

        # stop the timer and display FPS information
        fps.stop()
        print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

        # do a bit of cleanup
        cv2.destroyAllWindows()
        fvs.stop()
    def scan_file(self, fname):
        vs = FileVideoStream(fname).start()

        while True:

            frame = vs.read()
            # frame = imutils.resize(frame)

            barcodes = pyzbar.decode(frame)

            for code in barcodes:
                (x, y, w, h) = code.rect
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)

                bar_data = code.data.decode('utf-8')
                bar_type = code.type

                text = f'{bar_data} ({bar_type})'
                cv2.putText(frame, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (0, 0, 255), 2)

            cv2.imshow("Barcode Scanner", frame)
            key = cv2.waitKey(1) & 0xFF

            if key == ord('q'):
                break

        cv2.destroyAllWindows()
        vs.stop()
Example #8
def main(video_path):
    video_path = 'sample_video/car_stopping_shorter_version.mp4'
    fvs = FileVideoStream(video_path).start()
    time.sleep(1.0)
    fps = FPS().start()

    # loop over frames from the video file stream
    input_frames_list = []
    number = 0
    while fvs.more():
        number += 1
        frame = fvs.read()
        if (frame is None):
            continue

        cv2.putText(frame, "Queue Size: {}".format(fvs.Q.qsize()), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

        ann_frame = stream_frames_to_model(frame, number)
        cv2.imshow('frame', ann_frame)
        cv2.waitKey(1)
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    # do a bit of cleanup
    cv2.destroyAllWindows()
    fvs.stop()
def livevideocorrection(filepath, modelname):
    fvs = FileVideoStream(filepath, queue_size=128).start()
    # ivs = InferenceDataStream(fvs, queue_size=25).start() # --> inference video stream reads from file video stream and forms queue of prepped images for inference
    desired_frames = 250
    frame_number = 0
    last_time = time.monotonic()
    while fvs.more() and frame_number <= desired_frames:
        frame = fvs.read()  # --> frame = ivs.read()
        if frame_number % 10 == 0:
            current_time = time.monotonic()
            timedif = current_time - last_time
            FPS = 10 / timedif
            print('FPS:' + str(FPS))
            last_time = time.monotonic()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        prediction = (
            predict_one(frame, frame_number).squeeze().detach().cpu()
        )  # --> prediction = predict_one(frame, frame_number) #why? because logic for preparing images is now done asynchronously in ivs.
        prediction = prediction.permute(1, 2, 0).numpy()
        prediction = cv2.cvtColor(prediction, cv2.COLOR_BGR2RGB)

        cv2.putText(prediction, "Queue Size: {}".format(fvs.Q.qsize()),
                    (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
        cv2.putText(prediction, ('FPS: ' + str(FPS)), (750, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        cv2.imshow('preview', prediction)
        frame_number += 1
        cv2.waitKey(1)

    cv2.destroyAllWindows()
    fvs.stop()
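
The commented-out InferenceDataStream above is not defined in this snippet. A minimal sketch of such a producer, under the assumption that it only drains the started FileVideoStream and queues frames prepared by a user-supplied prep(frame) callable, might look like this (class name and behaviour are inferred from the comments, not the original implementation):

import queue
import threading

class InferenceDataStream:
    # Queues preprocessed frames read from an already-started FileVideoStream.
    def __init__(self, fvs, prep, queue_size=25):
        self.fvs = fvs          # started imutils FileVideoStream
        self.prep = prep        # per-frame preparation, e.g. colour conversion
        self.Q = queue.Queue(maxsize=queue_size)
        self.stopped = False

    def start(self):
        threading.Thread(target=self._update, daemon=True).start()
        return self

    def _update(self):
        # producer: read raw frames and queue prepped copies for inference
        while not self.stopped and self.fvs.more():
            self.Q.put(self.prep(self.fvs.read()))
        self.stopped = True

    def read(self):
        return self.Q.get()

    def more(self):
        return not self.Q.empty() or not self.stopped

    def stop(self):
        self.stopped = True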
Example #10
def extract(input_path, output_folder, rate):
    STR_FRAME_PATH = "{}\\frame{}.jpg"
    video_args = {}
    video_args["video"] = input_path

    fvs = FileVideoStream(video_args["video"], queue_size=300).start()
    time.sleep(1.0)

    framecount = 0
    fps = FPS().start()

    while fvs.running():
        frame = fvs.read()
        img_raw = None

        framecount = framecount + 1

        if framecount % int(rate) == 0:
            print("Extrating frame {}".format(framecount), end="\r")
            cv2.imwrite(STR_FRAME_PATH.format(output_folder, str(framecount)),
                        frame)
        else:
            continue

        fps.update()

    fps.stop()
    fvs.stop()

    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
def read_feed():
    #cap = VideoStream(usePiCamera=True, resolution=(VIDEO_WIDTH, VIDEO_HEIGHT))
    cap = FileVideoStream('sample/fish3.mp4').start()

    time.sleep(2.0)

    frame = cap.read()
    global VIDEO_HEIGHT, VIDEO_WIDTH
    VIDEO_HEIGHT, VIDEO_WIDTH = frame.shape[:2]

    try:
        while True:
            frame = cap.read()
            jpeg_frame = cv2.imencode('.jpg', frame,
                                      [int(cv2.IMWRITE_JPEG_QUALITY), 80])[1]

            if VIDEO_IS_RECORDING:
                VIDEO_WRITER_QUEUE.put(jpeg_frame)

            #time.sleep(0.1)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' +
                   jpeg_frame.tostring() + b'\r\n')
    finally:
        cap.stop()
Example #12
    def compute_attributes(self):
        input_stream = cv.VideoCapture(self.file_name)
        self.sample_rate = input_stream.get(cv.CAP_PROP_FPS)
        input_stream.release()
        # Use multi-threading by Adrian of https://pyimagesearch.com
        input_stream = FileVideoStream(self.file_name).start()
        directograms = []
        directogram_times = []
        frame = input_stream.read()
        last_frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        self.shape = frame.shape
        while input_stream.more():
            if len(directograms) % 250 == 0:
                print('Video Processing: {:.2f}s'.format(
                    len(directograms) / self.sample_rate))
            frame = input_stream.read()
            if not input_stream.more():
                break
            next_frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
            # These parameters were taken from a demo in the documentation
            flow = cv.calcOpticalFlowFarneback(last_frame, next_frame, None,
                                               0.5, 3, 15, 3, 5, 1.2, 0)
            directograms.append(get_directogram(flow))
            directogram_times.append(len(directograms) / self.sample_rate)
            last_frame = next_frame
        self.envelope_times = np.array(directogram_times)
        self.envelopes = get_impact_envelopes(directograms,
                                              self.envelope_times)
        self.envelope_sample_rate = self.sample_rate
        input_stream.stop()
Example #13
def main():
    vid = (Path(__file__).parent / 'test.mp4').absolute()
    stream = FileVideoStream(str(vid), queue_size=100).start()
    time.sleep(1)

    while stream.more():
        img_data = stream.read()
        if img_data is None:
            break
        img = Image(img_data)
        transformed = Transformer(img)
        image = transformed.cannify()
        cropped_img = region(image)
        lines = cv2.HoughLinesP(cropped_img.raw,
                                2,
                                np.pi / 180,
                                100,
                                np.array([]),
                                minLineLength=40,
                                maxLineGap=5)
        avg_lines = utils.average_slope_intercept(img, lines)
        lines_image = img.display_lines(avg_lines)
        combined = img.combine(lines_image)
        cv2.imshow('', combined.raw)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cv2.destroyAllWindows()
    stream.stop()
    def face_verification(self, path):
        """
            Face verification from input file video Stream

            :param path: Location of the video
            :return  name: Recognized person's name
        """

        video_path = path
        result = True
        while (result):
            vs = FileVideoStream(video_path).start()
            frame = vs.read()

            frame = ndimage.rotate(frame, 180)
            frame = ndimage.rotate(frame, 90)
            cap_image = frame
            check = SPOOF_DETECTION().spoof_detection(video_path)
            result = False
            vs.stop()
        cv2.destroyAllWindows()

        if check == True:
            name = obj.face_recognize(cap_image)
            print('Recognized image {}'.format(name))
        else:
            print('SPOOFING ')
Example #15
class ImageRouter(QtCore.QObject):
    source_is_file = False
    on_frame = QtCore.pyqtSignal(np.ndarray)

    def __init__(self, filename=''):
        super().__init__()
        if filename == '':
            self.vs = VideoStream(src=0).start()
            self.source_is_file = False
        else:
            self.vs = FileVideoStream(filename).start()
            self.source_is_file = True

    def __del__(self):
        self.vs.stop()

    def tick(self):
        if self.source_is_file and not self.vs.more():
            return False
        frame = self.vs.read()
        if frame is None:
            return False
        self.on_frame.emit(frame)
        return True

    def run(self):
        print("[INFO] Started ImageRouter")
        while self.tick():
            pass
        print("[INFO] Stopped ImageRouter")
Example #16
def main(input_video_path, output_video_path, batch_size, async_workers_count):
    frame_id = 0
    video_path = 'sample_video/car_stopping_shorter_version.mp4'
    fvs = FileVideoStream(input_video_path).start()
    time.sleep(1.0)
    fps = FPS().start()
    in_holder_path, op_holder_path = return_io_frames_holder()
    print(in_holder_path, op_holder_path)
    # loop over frames from the video file stream
    input_frames_list = []
    while fvs.more():
        frame_id += 1
        frame = fvs.read()
        if (frame is None):
            continue

        cv2.putText(frame, "Queue Size: {}".format(fvs.Q.qsize()), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
        cv2.imwrite(f"{in_holder_path}/{frame_id}.jpeg", frame)

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    # do a bit of cleanup
    cv2.destroyAllWindows()
    fvs.stop()
    colonel_batch(in_holder_path, op_holder_path, batch_size,
                  async_workers_count)
    #op_holder_path = "/home/gr8-adakron/Adakron_bay/work/FastAPI/fastapi_exp/video_processing/outputs/bCe7mEE/"
    write_detected_video(output_video_path, op_holder_path)
def main():
    # Get ROI from frames
    file_name = "/home/smart/WBC286 InvL-Pillars -35mbar 15fps 29-11-2019 v3.4.avi"
    #file_name = "C:/Users/Me/Desktop/capstone/WBC286 InvL-Pillars -350mbar 150fps 29-11-2019 v3.4.avi"
    cap = FileVideoStream(file_name).start()
    image = cap.read()
    print("***** PROCESSING ROI for RUN 1 ***** File: %s" % file_name)
    cap.stop()
    print("***** PROCESSING RUN 1 ***** File: %s" % file_name)
    print("Frame size: ", image.shape)
    Channels = 34
    #original size: [502,1402,3]
    # r = [84, 357, 1238, 130]
    r = [
        int(0.167 * image.shape[0]),
        int(0.25 * image.shape[1]),
        int(0.883 * image.shape[1]), 130
    ]
    x1, x2, y1, y2, sub_ch, channel_len = to_crop(image, r, Channels)

    # run count
    run_standard = standard(file_name, Channels)
    (fps) = run_standard.standard_run(x1, x2, y1, y2, sub_ch, channel_len)
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    rbc_counts = run_standard.rbc_detection()
    run_standard.process_results(rbc_counts)

    print(
        "----------------------------------------------------------------------"
    )
Example #18
class VideoSource (object):
    def __init__ (self, video_file, log, use_pi_camera = True, resolution=(320, 200), framerate = 30, night = False):
        self.filename = video_file
        self.log = log
        self.use_pi_camera  = use_pi_camera
        self.resolution = resolution
        self.framerate = framerate
        self.night = night
        self.fvs = None
        self.stream = None
        self._done = False

    def start (self):
        if self.filename is not None:
            self.log.debug('Video file: %s', self.filename)
            self.fvs = FileVideoStream(self.filename).start()
            self.stream = self.fvs.stream
            time.sleep(1.0)
        else: 
            if self.use_pi_camera:
                self.log.debug('Pi Camera (%d %d)', self.resolution[0], self.resolution[1])
                self.stream = VideoStream(src=0,
                    usePiCamera=True,                    
                    resolution=self.resolution,
                    framerate=self.framerate,
                    sensor_mode=5
                    ).start()

            else:
                self.log.debug('Web Camera')
                self.stream = cv2.VideoCapture(0)
                self.stream.set(cv2.CAP_PROP_BUFFERSIZE, 2)
                self.resolution = (
                    self.stream.get(cv2.CAP_PROP_FRAME_WIDTH),
                    self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)
                )
                self.framerate = self.stream.get(cv2.CAP_PROP_FPS)

        return self.resolution, self.framerate

    def read(self):
        if self.filename:
            frame = self.fvs.read()
        else:
            frame = self.stream.read()
        return frame

    def stop (self):
        if self.filename:
            self.fvs.stop()
        else:
            self.stream.stop()
        self._done = True

    def done(self):
        # check to see if video is still running
        running = self.fvs.running() if self.filename else True
        self._done = not running
        return self._done
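
A hypothetical driver loop for the VideoSource class above; the logger and the file path are illustrative, and the frame-is-None check stands in for whatever end-of-stream handling the surrounding project uses.

import logging

log = logging.getLogger(__name__)
source = VideoSource('clip.mp4', log, use_pi_camera=False)   # file-playback branch
resolution, framerate = source.start()
while not source.done():
    frame = source.read()
    if frame is None:          # end of the file stream
        break
    # ... process frame ...
source.stop()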
Example #19
    def read_frames(self):
        # start the file video stream thread and allow the buffer to
        # start to fill
        print("[INFO] starting video file thread...")
        args = self.argument_parser()
        output_path = self.cwd / Path(args.output_dir)
        output_path.mkdir(parents=True, exist_ok=True)
        fvs = FileVideoStream(args.video).start()
        time.sleep(1.0)

        # start the FPS timer
        fps = FPS().start()
        detector = mtcnn.MTCNN()
        fno = 0
        #frames = VideoMeta(args.video).fps()
        #print(meta)
        e = TfPoseEstimator(get_graph_path("mobilenet_thin"),
                            target_size=(432, 368))
        # loop over frames from the video file stream
        while fvs.more():
            fno += 1
            print(fno)
            # grab the frame from the threaded video file stream, resize
            # it, and convert it to grayscale (while still retaining 3
            # channels)
            frame = fvs.read()
            #if (fno % frames != 0):
            #   continue
            try:
                frame = imutils.resize(frame, width=432, height=368)
            except:
                break
            #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            #frame = np.dstack([frame, frame, frame])

            humans = e.inference(frame)
            image = TfPoseEstimator.draw_humans(frame, humans, imgcopy=False)
            print("humans:", humans)

            # display the size of the queue on the frame
            # cv2.putText(frame, "Queue Size: {}".format(fvs.Q.qsize()),
            #            (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

            # show the frame and update the FPS counter
            cv2.imshow("Frame", image)
            cv2.waitKey(1)
            fps.update()

        # stop the timer and display FPS information
        fps.stop()
        print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

        # do a bit of cleanup
        cv2.destroyAllWindows()
        fvs.stop()
Example #20
    def run(self, filepath):
        detector = dlib.get_frontal_face_detector()
        # predictor = dlib.shape_predictor(os.path.join(sys.path[0], "data/")) #ignore for now

        # Start the frame queue
        vs = FileVideoStream(filepath)

        if (not vs.stream.isOpened()):
            return 0, 0

        self.maxframes = int(vs.stream.get(cv2.CAP_PROP_FRAME_COUNT))

        vs.start()
        best_frame = 0
        scoremax = 0

        self.framecounter = 0
        # loop over the frames from the video stream
        while (self.framecounter < self.maxframes):

            # grab the frame from the threaded video stream, resize it to
            # have a maximum width of 400 pixels, and convert it to
            # grayscale
            frame = vs.read()
            frame = imutils.resize(frame, width=400)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # detect faces in the grayscale frame
            rects, scores, idx = detector.run(gray, 0, 0)
            # loop over the face detections
            self.framecounter = self.framecounter + 1

            for i, rect in enumerate(rects):
                # compute the bounding box of the face
                (bX, bY, bW, bH) = face_utils.rect_to_bb(rect)
                # keep track of the highest scored face
                if (scores[i] > scoremax):
                    scoremax = scores[i]
                    best_face = frame[bY:bY + bH, bX:bX + bW]
                    best_frame = self.framecounter
                    #cv2.imshow("BEST_FACE", best_face)

            # if the q key was pressed, break from the loop
            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                break

        # Stop the frame queue
        vs.stop()

        if (best_frame == 0):
            #If no face was found
            best_face = 0
        else:
            # Swap the R,B values
            best_face = cv2.cvtColor(best_face, cv2.COLOR_BGR2RGB)
        return best_face, best_frame
Example #21
def detect_blinks(videoFilePath):
    frameCount = 0
    blinkCount = 0

    faceDetector = dlib.get_frontal_face_detector()
    landmarksPredictor = dlib.shape_predictor(shapePredictorPath)

    leftEyeIdx = face_utils.FACIAL_LANDMARKS_IDXS['left_eye']
    rightEyeIdx = face_utils.FACIAL_LANDMARKS_IDXS['right_eye']

    if FILE_VIDEO_STREAM:
        vs = FileVideoStream(videoFilePath).start()
    else:
        vs = VideoStream().start()
    time.sleep(0.5)

    target_face_save_path = os.path.join(CURRENT_FOLDER_PATH,
                                         str(time.time()) + '.jpg')

    try:
        while True:
            if FILE_VIDEO_STREAM and not vs.more():
                break
            frame = vs.read()
            frame = rotate(frame, 90)
            frame = imutils.resize(frame, width=500)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            target_face = gray
            faces = faceDetector(gray, 0)

            for (i, face) in enumerate(faces):
                (x, y, w, h) = face_utils.rect_to_bb(face)

                landmarks = landmarksPredictor(gray, face)
                landmarks = face_utils.shape_to_np(landmarks)

                leftEye = landmarks[leftEyeIdx[0]:leftEyeIdx[1]]
                rightEye = landmarks[rightEyeIdx[0]:rightEyeIdx[1]]

                leftEAR = calculateEAR(leftEye)
                rightEAR = calculateEAR(rightEye)
                ear = (leftEAR + rightEAR) / 2.0
                if ear < EAR_THRESHOLD:
                    frameCount += 1
                else:
                    target_face = gray
                    if frameCount >= EAR_CONSEC_FRAMES:
                        blinkCount += 1
                    frameCount = 0
        cv2.imwrite(target_face_save_path, target_face)
    except:
        blinkCount = 0
    vs.stop()

    return blinkCount, target_face_save_path
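
calculateEAR is referenced above but not shown. A minimal sketch, assuming the project uses the standard eye aspect ratio formula from the usual dlib blink-detection recipe:

from scipy.spatial import distance as dist

def calculateEAR(eye):
    # eye: the six (x, y) landmarks of one eye from the 68-point predictor
    A = dist.euclidean(eye[1], eye[5])   # first vertical distance
    B = dist.euclidean(eye[2], eye[4])   # second vertical distance
    C = dist.euclidean(eye[0], eye[3])   # horizontal distance
    return (A + B) / (2.0 * C)           # ratio drops sharply when the eye closes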
Example #22
File: fryer.py  Project: paramkpr/DankBot
def fry_gif(update, url, number_of_cycles, args):
    number_of_emojis = 1.5 if args['high-fat'] else 1 if args['low-fat'] else 0
    bulge_probability = 0.3 if args['heavy'] else 0.15 if args['light'] else 0
    magnitude = 4 if args['deep'] else 1 if args['shallow'] else 2

    name = update.message.from_user.first_name
    filename = '%s_%s_%s' % (update.message.chat_id, name,
                             update.message.message_id)
    filepath = bin_path + '/temp/' + filename
    caption = __get_caption(name, number_of_cycles, args)
    output = bin_path + '/temp/out_' + filename + '.mp4'

    gifbio = BytesIO()
    gifbio.name = filename + '.gif'
    fs = [__posterize, __sharpen, __increase_contrast, __colorize]
    shuffle(fs)

    if not __download_gif(url, filepath):
        return

    fvs = FileVideoStream(filepath + '.mp4').start()
    frame = fvs.read()
    height, width, _ = frame.shape

    try:
        fps = fvs.get(CAP_PROP_FPS)
    except:
        fps = 30
    out = VideoWriter(output, VideoWriter_fourcc(*'mp4v'), fps,
                      (width, height))
    out.write(
        fry_frame(frame, number_of_cycles, fs, number_of_emojis,
                  bulge_probability, magnitude, args))

    while fvs.more():
        try:
            temp = fry_frame(fvs.read(), number_of_cycles, fs,
                             number_of_emojis, bulge_probability, magnitude,
                             args)
        except Exception as e:
            break
        out.write(temp)

    fvs.stop()
    fvs.stream.release()
    out.release()
    update.message.reply_animation(open(output, 'rb'),
                                   caption=caption,
                                   quote=True)
    try:
        __upload_to_imgur(output, caption)
    except (Exception, BaseException) as e:
        print(e)
    remove(filepath + '.mp4')
Example #23
def process_video(analyzed_videos, reference_digits, time_parsable, ts_box, vdir, video):
    print("[*] Processing video {} from {}".format(video, vdir.directory))
    if video in analyzed_videos:
        print("[*] Video has been processed. Checking if processing is complete...")
        last_processed_frame = get_last_processed_frame(video)
        print("[*] Last processed frame for this video is: ", last_processed_frame)
    else:
        last_processed_frame = None

    vs = FileVideoStream(os.path.join(vdir.directory, video)).start()

    # Reset frame number
    f_num = 0

    # Allow the buffer some time to fill
    time.sleep(2.0)

    while vs.more():
        frame = vs.read()

        f_num += 1
        if (last_processed_frame is not None) and (f_num <= last_processed_frame):
            print("[*] Current frame is {}. Waiting for {}...".format(f_num, last_processed_frame + 1))
            # Give video buffer time to fill so we don't overtake it
            time.sleep(0.01)
            continue
        else:
            try:
                # Process the timestamp area in the video
                frame_time, ts_box = compute_frame_time(frame, reference_digits, time_parsable, ts_box)
            except AttributeError:
                if frame is None:
                    print("[!] Frame was none.")
                    break

                print("Something went wrong. Trying again...")
                continue

            if frame_time is not None:
                time_parsable = True
            else:
                print("[!] Failed to process time, probably because the frame is distorted.")

            # Add the frame information to the logging database
            add_frame(directory=vdir,
                      video=video,
                      time=frame_time,
                      frame_number=f_num)

    print("{} is done being processed. Adding to database of processed videos...".format(video))
    add_processed_video(video=video, total_frames=f_num)
    vs.stop()
Example #24
def video_extract_features(vid_path, frame_number, end_frame):
    # FASTER video reading (decode in separate thread)
    fvs = FileVideoStream(vid_path, start_frame=frame_number).start()
    time.sleep(1.0)

    current_frame = frame_number - 1
    frameCount = end_frame - frame_number + 1

    roi = [0.25, 0.55, 0.35, 0.65]
    frame_width = int(fvs.get_width())
    frame_height = int(fvs.get_height())
    result_width = round(roi[3] * frame_width) - round(roi[2] * frame_width)
    result_height = round(roi[1] * frame_height) - round(roi[0] * frame_height)
    buf = np.empty((frameCount, result_height, result_width), np.dtype('uint8'))
    buf_canny = np.empty((frameCount, result_height, result_width), np.dtype('uint8'))
    hist_buf = np.empty((frameCount, 16))
    print_iter = 0

    while fvs.more():

        print_iter += 1
        # Capture frame-by-frame
        frame = fvs.read()
        current_frame += 1

        frame_roi = get_ROI(frame, roi)

        harris_result = get_harris_feature(frame_roi)

        hist_result = extract_frame_histogram(frame_roi)

        # canny_result = get_canny_feature(frame_roi)

        buf[current_frame - frame_number] = harris_result
        hist_buf[current_frame - frame_number] = hist_result
        # buf_canny[current_frame - frame_number] = canny_result

        # cv2.imshow('Frame', harris_result)
        if divmod(print_iter, 60)[1] == 0:
            print(f'Progress: {100*(current_frame-frame_number)/(end_frame-frame_number)}%')


        # Press Q on keyboard to  exit
        if cv2.waitKey(10) & 0xFF == ord('q'):
            print(f'current_frame: {current_frame}')
            break
        if current_frame == end_frame:
            fvs.stop()
            break
    #save numpy matrix of feature frames
    np.save('library_match_1_segment_harris.npy', buf)
    np.save('library_match_1_segment_histogram.npy', hist_buf)
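
get_ROI is not shown above. A sketch consistent with how result_width and result_height are computed (roi is assumed to hold fractional [top, bottom, left, right] bounds of the frame):

def get_ROI(frame, roi):
    # crop the frame to the fractional region of interest (assumed semantics)
    h, w = frame.shape[:2]
    return frame[round(roi[0] * h):round(roi[1] * h),
                 round(roi[2] * w):round(roi[3] * w)]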
def run_video(path, net):
    '''Runs the uploaded video through the juggle counter algorithm'''

    confidence = 0.45
    (W, H) = (200, 200)
    direction = 1
    bounces = 0
    prev_cY = 0
    frame_num = 0
    prev_frame_num = -7
    first_detection = True
    print("Processing video...")
    vs = FileVideoStream(path).start()

    while vs.more():
        frames_per_read = 2
        for i in range(frames_per_read):
            frame = vs.read()
            frame_num += 1
        try:
            frame = cv2.resize(frame, (W, H))
        except cv2.error as e:
            print("...end of video")
            break

        detection = process_frame(frame, net, W, H)
        if detection[0, 0, 0, 2] > confidence:
            (cX, cY, bounces, direction, prev_cY, prev_frame_num,
             diameter_temp) = check_detection(frame_num, prev_frame_num, frame,
                                              detection, W, H, bounces,
                                              direction, prev_cY)
            if first_detection:
                diameter = diameter_temp
                first_detection = False
            cv2.putText(frame, str(bounces), (int(W * 0.86), int(W * 0.2)),
                        cv2.FONT_HERSHEY_SIMPLEX, int(W * 0.007), (0, 255, 0),
                        2)
            if 0.9 * diameter < diameter_temp < 1.1 * diameter:
                cv2.circle(frame, (cX, cY), int(W * 0.03), (0, 255, 0), -1)

        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # Only for debugging:
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
    cv2.destroyAllWindows()
    vs.stop()

    return bounces
Example #26
def main():
    filepath = "Raw Video Output 10x Inv-L.avi"
    #Load frames into memory
    get_frames = cuda_video_stream.video_queue(filepath, 0, 0, 934,
                                               239).start()
    stream = FileVideoStream(filepath, queue_size=500).start()

    # Benchmark tests
    with Timer("OpenCV"):
        output1 = cv2_bgsub(stream)
    stream.stop()
    with Timer("OpenCV with CUDA"):
        output2 = cuda_bgsub(get_frames)
    stream = FileVideoStream(filepath).start()
    with Timer("Numba test"):
        output = numba_test(stream)
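
Timer here is project-specific and not shown. A minimal stand-in, assuming it simply labels and prints the elapsed wall-clock time of the block it wraps:

import time
from contextlib import contextmanager

@contextmanager
def Timer(label):
    # time the body of the `with` block and report it under `label`
    start = time.perf_counter()
    try:
        yield
    finally:
        print(f"{label}: {time.perf_counter() - start:.3f}s")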
def realtime(src, modelweightspath):

    if (type(src) == str):
        cap = FileVideoStream(src).start()
    elif (type(src) == int):
        cap = WebcamVideoStream(src).start()  # for fast fps read for real-time

    fps = FPS().start()
    #cap = cv2.VideoCapture(0)                               # for cv2 read
    h = 512
    w = 384
    seg_model = load_bonnet(3, h, w)
    seg_model.load_weights(modelweightspath)
    check = True

    while check:
        start = time.time()
        #ret,frame = cap.read()                             # for cv2 read
        frame = cap.read()  # for fast fps read webcamvideostream
        #print(frame.shape)                                 # frame is of: BGR type
        frame = cv2.resize(frame, (w, h), interpolation=cv2.INTER_AREA)
        cv2.imshow('Input', frame)
        print(frame.shape)
        IP = []
        ip = load_input(frame, h, w)
        IP.append(ip)
        IP = np.array(IP)
        pred = seg_model.predict(IP)
        prediction = pred.argmax(axis=-1)
        prediction = to_categorical(prediction, 3)
        prediction = np.reshape(prediction, (h, w, 3))
        prediction = prediction[:, :, [2, 1, 0]]
        cv2.imshow('Output', prediction)  # 2=> R 0=> B  frame => BGR
        end = time.time()
        fps.update()
        print("FrameRate:" + str(1 / (end - start)))
        c = cv2.waitKey(1)
        if c == 27:
            break
        if type(src) == str:  # Check if next frame exists in case of video file
            check = cap.more()
    fps.stop()
    print("AVg FrameRate:" + str(fps.fps()))
    cap.stop()  # for fast fps read
    #cap.release()                                          # for cv2 read
    cv2.destroyAllWindows()
def run_detection(fast_mtcnn, filename):
    frames = []
    frames_processed = 0
    faces_detected = 0
    batch_size = 60
    start = time.time()
    v_cap = FileVideoStream(filename).start()
    v_len = int(v_cap.stream.get(cv2.CAP_PROP_FRAME_COUNT))
    for j in range(v_len):
        frame = v_cap.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frames.append(frame)
        if len(frames) >= batch_size or j == v_len - 1:
            faces, boxes = fast_mtcnn(frames)
            frames_processed += len(frames)
            faces_detected += len(faces)
            frames = []
            fps = frames_processed / (time.time() - start)
            print(
                f'                          Frames per second: {fps:.3f},',
                f'faces detected: {faces_detected}\r',
                end=''
            )
            print("========================")
            print(boxes[0]) 
            print(faces_detected)
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR )
            for boxNo in range(faces_detected):
                xStart = int(boxes[0][boxNo, 0])
                yStart = int(boxes[0][boxNo, 1])
                xEnd = int(boxes[0][boxNo, 2])
                yEnd = int(boxes[0][boxNo, 3])            
                # [[148.05846  93.21905 217.33783 175.8501 ]]       
                print( xStart, yStart, xEnd, yEnd )
                cv2.rectangle(frame,
                    (xStart, yStart),
                    (xEnd, yEnd),
                    (255, 255, 255),
                    thickness=1)
    cv2.rectangle(frame,(0,0),(550,30),(0, 0, 0),-1)
    label = "Number of faces detected : {:} Frames per second : {:.3f}".format(faces_detected, fps)
    cv2.putText(frame, label, (20,20),cv2.FONT_HERSHEY_SIMPLEX,0.5,(255, 255, 255),1)
    cv2.imshow("Result",frame)
    cv2.waitKey(0)
    v_cap.stop()
    cv2.destroyAllWindows()
Example #29
def vplay():
    stream =  askopenfilename()
    ld = AdvancedLaneDetectorWithMemory(opts, ipts, src_pts, dst_pts, 10, 50, 25)
    # import the necessary packages

    print("[INFO] starting video file thread...")
    fvs = FileVideoStream(stream).start()
    time.sleep(1.0)

    # start the FPS timer
    fps = FPS().start()

    # loop over frames from the video file stream
    while fvs.more():
        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale (while still retaining 3
        # channels)
        frame = fvs.read()

        frame = imutils.resize(frame, width=450)
        frame = ld.process_image(frame)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        #frame = np.dstack([frame])
        frame = np.dstack([frame, frame, frame])
        # display the size of the queue on the frame

        # show the frame and update the FPS counter
        frame = imutils.resize(frame, width=900)

        cv2.imshow("Frame", frame)
        cv2.waitKey(1)
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # do a bit of cleanup
    cv2.destroyAllWindows()
    fvs.stop()
Example #30
def num_blinks(video):

    vs = FileVideoStream(video).start()
    fileStream = True
    COUNTER = 0
    TOTAL = 0
    time.sleep(1.0)
    best_frame = None
    max_ear = -1
    while True:

        if fileStream and not vs.more():
            break
        frame = vs.read()
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        rects = detector(gray, 0)

        for rect in rects:

            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)

            ear = (leftEAR + rightEAR) / 2.0
            print(ear)
            if ear > max_ear:
                best_frame = gray
                max_ear = ear

            if ear < EYE_AR_THRESH:
                COUNTER += 1
            else:

                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    TOTAL += 1

                COUNTER = 0
    vs.stop()
    return TOTAL, best_frame
# Fragment of a threaded video-read loop (assumes fvs = FileVideoStream(...).start()
# and fps = FPS().start() were created earlier, as in the examples above).
while fvs.more():
	# grab the frame from the threaded video file stream, resize
	# it, and convert it to grayscale (while still retaining 3
	# channels)
	frame = fvs.read()

	# Relocated filtering into producer thread with transform=filterFrame
	#  Python 2.7: FPS 92.11 -> 131.36
	#  Python 3.7: FPS 41.44 -> 50.11
	#frame = filterFrame(frame)

	# display the size of the queue on the frame
	cv2.putText(frame, "Queue Size: {}".format(fvs.Q.qsize()),
		(10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)	

	# show the frame and update the FPS counter
	cv2.imshow("Frame", frame)

	cv2.waitKey(1)
	if fvs.Q.qsize() < 2:  # If we are low on frames, give time to producer
		time.sleep(0.001)  # Ensures producer runs now, so 2 is sufficient
	fps.update()

# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

# do a bit of cleanup
cv2.destroyAllWindows()
fvs.stop()
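
The comment in the loop above refers to moving per-frame filtering into the producer thread by passing transform= to FileVideoStream. A minimal sketch of that pattern, assuming a filterFrame helper like the one the comment mentions (the video path is a placeholder):

from imutils.video import FileVideoStream
import imutils
import numpy as np
import cv2

def filterFrame(frame):
    # resize, convert to grayscale, and restack to 3 channels inside the reader thread
    frame = imutils.resize(frame, width=450)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    return np.dstack([frame, frame, frame])

fvs = FileVideoStream("video.mp4", transform=filterFrame).start()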