Example no. 1
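All of the examples on this page omit their imports. The sketch below lists the standard-library and third-party imports the code clearly relies on; the vsp module paths are an assumption inferred from the class names and may differ in your installation, so they are left commented out.

# Assumed imports for the examples below.
import time

import cv2
import numpy as np
import scipy.spatial.distance as ssd

# The vsp import paths below are a guess based on the class names used in the
# examples; adjust them to match your installation of the library.
# from vsp.video_stream import (CvVideoCamera, CvVideoDisplay, CvVideoInputFile,
#                               CvVideoOutputFile, CvImageInputFileSeq,
#                               CvImageOutputFileSeq)
# from vsp.detector import (CvBlobDetector, CvContourBlobDetector,
#                           optimize_blob_detector_params)
# from vsp.tracker import NearestNeighbourTracker
# from vsp.encoder import KeypointEncoder
# from vsp.view import KeypointView
# from vsp.processor import (CameraStreamProcessor, FileStreamProcessor,
#                            CameraStreamProcessorMT, FileStreamProcessorMT,
#                            AsyncProcessor)
# from vsp.dataflow import DataflowQueueMT, DataflowFunctionMT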
def main():
    with CvVideoCamera(source=1, api_name='V4L2', is_color=False) as inp, \
        CvVideoDisplay() as out:

        inp.set_property('PROP_BUFFERSIZE', 1)
        print(f"camera api: {inp.camera_api}")
        print(f"camera bufsize: {inp.get_property('PROP_BUFFERSIZE')}")
        out.open()

        # give autoexposure a chance to adjust
        for i in range(10):
            frame = inp.read()
            out.write(frame)

        # capture individual frames
        for i in range(5):
            input("Press ENTER to capture frame: ")
            # dump first frame (hardware double-buffering)
            inp.read()
            # use second frame
            frame = inp.read()
            out.write(frame)

    with CvVideoCamera(source=1, api_name='V4L2', is_color=False) as inp, \
        CvVideoDisplay() as out1, \
        CvVideoOutputFile("demo.mp4", is_color=False) as out2:

        # note: a buffer size of 1 will limit the frame rate for sequences
        inp.set_property('PROP_BUFFERSIZE', 1)
        print(f"camera api: {inp.camera_api}")
        print(f"camera bufsize: {inp.get_property('PROP_BUFFERSIZE')}")
        out1.open()
        out2.open()
        start = time.time()
        for i in range(300):
            frame = inp.read()
            out1.write(frame)
            out2.write(frame)
        print("time elapsed = {}".format(time.time() - start))

        time.sleep(5)

    with CvVideoInputFile("demo.mp4", is_color=False) as inp, \
        CvVideoDisplay() as out:

        inp.open()
        out.open()
        for frame in inp:
            print(frame.shape)
            out.write(frame)

        time.sleep(5)
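The double-read trick above (discard the frame sitting in the capture buffer, then use the next one) can be wrapped in a small helper. A minimal sketch follows; the function name is illustrative and not part of the library.

def read_fresh_frame(camera):
    # Discard the frame held in the hardware/driver double buffer, then return
    # the next one, so the result reflects the scene at the moment of the call.
    camera.read()
    return camera.read()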
Example no. 2

def main():
    # detect, track and encode keypoints in live camera frames,
    # writing the captured frames to an image file sequence
    with CameraStreamProcessor(
            camera=CvVideoCamera(is_color=False),
            pipeline=[
                CvBlobDetector(
                    min_threshold=31.23,
                    max_threshold=207.05,
                    filter_by_color=True,
                    blob_color=255,
                    filter_by_area=True,
                    min_area=17.05,
                    max_area=135.46,
                    filter_by_circularity=True,
                    min_circularity=0.62,
                    filter_by_inertia=True,
                    min_inertia_ratio=0.27,
                    filter_by_convexity=True,
                    min_convexity=0.60,
                ),
                # CvContourBlobDetector(),
                NearestNeighbourTracker(threshold=20),
                KeypointEncoder(),
            ],
            view=KeypointView(color=(0, 255, 0)),
            display=CvVideoDisplay(name='preview'),
            writer=CvImageOutputFileSeq(),
    ) as p:
        keypoints = p.process(num_frames=5, outfile="demo1.jpg")
        # keypoints = p.process(num_frames=300)
        print(f"keypoints.shape = {keypoints.shape}")
        print(f"keypoints[0] = {keypoints[0]}")

    # replay the saved image sequence through the same detection pipeline
    with FileStreamProcessor(
            reader=CvImageInputFileSeq(),
            pipeline=[
                CvBlobDetector(
                    min_threshold=31.23,
                    max_threshold=207.05,
                    filter_by_color=True,
                    blob_color=255,
                    filter_by_area=True,
                    min_area=17.05,
                    max_area=135.46,
                    filter_by_circularity=True,
                    min_circularity=0.62,
                    filter_by_inertia=True,
                    min_inertia_ratio=0.27,
                    filter_by_convexity=True,
                    min_convexity=0.60,
                ),
                # CvContourBlobDetector(),
                NearestNeighbourTracker(threshold=20),
                KeypointEncoder(),
            ],
            view=KeypointView(color=(0, 255, 0)),
            display=CvVideoDisplay(name='preview'),
    ) as p:
        keypoints = p.process(num_frames=5, infile="demo1.jpg")
        print(f"keypoints.shape = {keypoints.shape}")
Example no. 3
def main():
    with CvVideoCamera(is_color=False) as inp, CvVideoDisplay() as out:
        out.open()
        frames = []
        for i in range(300):
            frame = inp.read()
            out.write(frame)
            frames.append(frame)
        frames = np.array(frames)

    # randomly subsample the captured frames for detector parameter optimization
    idx = np.random.choice(frames.shape[0], 10, replace=False)
    frames = frames[idx]

    params = optimize_blob_detector_params(
        frames,
        target_blobs=127,
        min_threshold_range=(0, 300),
        max_threshold_range=(0, 300),
        min_area_range=(0, 200),
        max_area_range=(0, 200),
        min_circularity_range=(0.1, 0.9),
        min_inertia_ratio_range=(0.1, 0.9),
        min_convexity_range=(0.1, 0.9),
    )
    print(params)
    det = CvBlobDetector(**params)

    with CvVideoCamera(is_color=False) as inp, CvVideoDisplay() as out:
        out.open()  # open the display before writing, as in the other examples
        for i in range(300):
            frame = inp.read()
            keypoints = det.detect(frame)
            pts = np.array([f.point for f in keypoints])
            sizes = np.array([f.size for f in keypoints])
            print("pts.shape = {}".format(pts.shape))
            print("sizes.shape = {}".format(sizes.shape))

            kpts = [
                cv2.KeyPoint(kp.point[0], kp.point[1], kp.size)
                for kp in keypoints
            ]
            frame_with_kpts = cv2.drawKeypoints(
                frame, kpts, np.array([]), (0, 0, 255),
                cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
            out.write(frame_with_kpts)
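The keypoint conversion and overlay step above can be factored into a small helper. A minimal sketch, assuming detected keypoints expose .point and .size attributes as used in the loop; the helper name is illustrative.

def draw_keypoints(frame, keypoints, color=(0, 0, 255)):
    # Convert the detector's keypoints to cv2.KeyPoint objects and draw them as
    # rich keypoints (circle radius proportional to keypoint size).
    kpts = [cv2.KeyPoint(kp.point[0], kp.point[1], kp.size) for kp in keypoints]
    return cv2.drawKeypoints(frame, kpts, np.array([]), color,
                             cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)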
Example no. 4
def main():
    with CameraStreamProcessor(
            camera=CvVideoCamera(is_color=False),
            pipeline=[
                CvBlobDetector(
                    min_threshold=31.23,
                    max_threshold=207.05,
                    filter_by_color=True,
                    blob_color=255,
                    filter_by_area=True,
                    min_area=17.05,
                    max_area=135.46,
                    filter_by_circularity=True,
                    min_circularity=0.62,
                    filter_by_inertia=True,
                    min_inertia_ratio=0.27,
                    filter_by_convexity=True,
                    min_convexity=0.60,
                ),
                # CvContourBlobDetector(),
                NearestNeighbourTracker(threshold=20),
                KeypointEncoder(),
            ],
            view=KeypointView(color=(0, 255, 0)),
            display=CvVideoDisplay(name='preview'),
            writer=CvVideoOutputFile(is_color=False),
    ) as p:
        # capture sequence of keypoints - ensure sensor has sufficient time to return to
        # its rest position before end of sequence
        keypoints = p.process(num_frames=300, outfile="demo1.mp4")
        # keypoints = p.process(num_frames=300)
        print(f"keypoints.shape = {keypoints.shape}")

        # check that final keypoint ordering is the same as initial ordering
        init_keypoints, final_keypoints = keypoints[0], keypoints[-1]
        dists = ssd.cdist(final_keypoints, init_keypoints, 'euclidean')
        min_dist_idxs = np.argmin(dists, axis=1)
        print("Test passed: {}".format(np.all(min_dist_idxs == range(len(min_dist_idxs)))))
Example no. 5
def main():
    # capture a few frames to the display and to an image file sequence
    with CvVideoCamera(is_color=False) as inp, \
        CvVideoDisplay() as out1, \
        CvImageOutputFileSeq("demo.jpg", start_frame=1) as out2:

        out1.open()
        out2.open()
        for i in range(5):
            frame = inp.read()
            out1.write(frame)
            out2.write(frame)

        time.sleep(5)

    # replay the saved image sequence from file
    with CvImageInputFileSeq("demo.jpg") as inp, \
        CvVideoDisplay() as out:

        inp.open()
        out.open()
        for frame in inp:
            print(frame.shape)
            out.write(frame)

        time.sleep(5)
Example no. 6
def main():
    # record a sequence of frames to the display and to a video file
    with CvVideoCamera(is_color=False) as inp, \
        CvVideoDisplay() as out1, \
        CvVideoOutputFile("demo.mp4", is_color=False) as out2:

        out1.open()
        out2.open()
        for i in range(300):
            frame = inp.read()
            out1.write(frame)
            out2.write(frame)

        time.sleep(5)

    with CvVideoInputFile("demo.mp4", is_color=False) as inp, \
        CvVideoDisplay() as out:

        inp.open()
        out.open()
        for frame in inp:
            print(frame.shape)
            out.write(frame)

        time.sleep(5)
Example no. 7
def main():
    with AsyncProcessor(CameraStreamProcessorMT(
        camera=CvVideoCamera(is_color=False),
        pipeline=[
            CvBlobDetector(
                min_threshold=31.23,
                max_threshold=207.05,
                filter_by_color=True,
                blob_color=255,
                filter_by_area=True,
                min_area=17.05,
                max_area=135.46,
                filter_by_circularity=True,
                min_circularity=0.62,
                filter_by_inertia=True,
                min_inertia_ratio=0.27,
                filter_by_convexity=True,
                min_convexity=0.60,
            ),
            # CvContourBlobDetector(),
            NearestNeighbourTracker(threshold=20),
            KeypointEncoder(),
        ],
        view=KeypointView(color=(0,255,0)),
        display=CvVideoDisplay(name='preview'),
        writer=CvVideoOutputFile(is_color=False),
    )) as p:
        p.async_process(outfile='demo1.mp4')
        print("Getting on with something else ...")
        time.sleep(5)
        p.async_cancel()
        frames = p.async_result()
        print("frames.shape = {}".format(frames.shape))

    with AsyncProcessor(FileStreamProcessorMT(
            reader=CvVideoInputFile(is_color=False),
            pipeline=[
                CvBlobDetector(
                    min_threshold=31.23,
                    max_threshold=207.05,
                    filter_by_color=True,
                    blob_color=255,
                    filter_by_area=True,
                    min_area=17.05,
                    max_area=135.46,
                    filter_by_circularity=True,
                    min_circularity=0.62,
                    filter_by_inertia=True,
                    min_inertia_ratio=0.27,
                    filter_by_convexity=True,
                    min_convexity=0.60,
                ),
                # CvContourBlobDetector(),
                NearestNeighbourTracker(threshold=20),
                KeypointEncoder(),
            ],
            view=KeypointView(color=(0, 255, 0)),
            display=CvVideoDisplay(name='replay'),
    )) as p:
        p.async_process(num_frames=100, infile="demo1.mp4")
        print("Getting on with something else ...")
        time.sleep(5)
        # p.async_cancel()
        keypoints = p.async_result()
        print(f"keypoints.shape = {keypoints.shape}")
Example no. 8
def main():

    with CvVideoCamera(is_color=False) as camera,              \
        CvVideoDisplay("display1") as display_1,            \
        CvVideoDisplay("display2") as display_2,            \
        CvVideoOutputFile("demo1.mp4", is_color=False) as video_file_1,     \
        CvVideoOutputFile("demo2.mp4", is_color=False) as video_file_2:

        # Build pipeline
        camera_read_ops = DataflowQueueMT(maxsize=1)
        display_ops = DataflowQueueMT(maxsize=1)
        camera_frames_1 = DataflowQueueMT(maxsize=10)
        camera_frames_2 = DataflowQueueMT(maxsize=10)
        camera_frames_3 = DataflowQueueMT(maxsize=10)
        camera_frames_4 = DataflowQueueMT(maxsize=10)
        camera_frames_5 = DataflowQueueMT()

        queues = [
            camera_read_ops, display_ops, camera_frames_1, camera_frames_2,
            camera_frames_3, camera_frames_4, camera_frames_5
        ]

        video_camera = DataflowFunctionMT(func=camera.read,
                                          in_queues=[camera_read_ops],
                                          out_queues=[
                                              camera_frames_1, camera_frames_2,
                                              camera_frames_3, camera_frames_4,
                                              camera_frames_5
                                          ])

        video_display_1 = DataflowFunctionMT(func=display_1.write,
                                             pre_func=display_1.open,
                                             post_func=display_1.close,
                                             in_queues=[camera_frames_1],
                                             out_queues=[])

        video_display_2 = DataflowFunctionMT(
            func=display_2.write,
            pre_func=display_2.open,
            post_func=display_2.close,
            in_queues=[display_ops, camera_frames_2],
            out_queues=[])

        video_writer_1 = DataflowFunctionMT(func=video_file_1.write,
                                            pre_func=video_file_1.open,
                                            post_func=video_file_1.close,
                                            in_queues=[camera_frames_3],
                                            out_queues=[])

        video_writer_2 = DataflowFunctionMT(func=video_file_2.write,
                                            pre_func=video_file_2.open,
                                            post_func=video_file_2.close,
                                            in_queues=[camera_frames_4],
                                            out_queues=[])

        # Pipeline sorted in topological order
        threads = [
            video_camera, video_display_1, video_display_2, video_writer_1,
            video_writer_2
        ]

        # Start pipeline
        for t in threads:
            t.start()

        # Issue commands
        for i in range(300):
            camera_read_ops.put(None)
            display_ops.put(None)

        # Flush and close pipeline
        for t in threads:
            for q in t.in_queues:
                q.close()
            t.join()

        # Pick up results
        print("Frames captured = {}".format(camera_frames_5.qsize()))
        frame = camera_frames_5.get()
        if isinstance(frame, np.ndarray):
            print("Frame shape = {}".format(frame.shape))
        else:
            print("Frame is not a Numpy array")