def run_visualization(
    cap: cv2.VideoCapture,
    out: cv2.VideoWriter,
    detection_pipeline: pipeline.DetectionPipeline,
) -> List[Dict[ID, detect.ObjectDetection]]:
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    pbar = tqdm(total=frame_count)
    objects = {}
    while cap.grab():
        pbar.update(1)
        ret, frame = cap.retrieve()
        # run the detection pipeline on the current frame
        detections = detection_pipeline(frame)

        for det in detections:
            objects[det.id] = det.obj_class

        visualization.draw_all_detections(img=frame,
                                          detections=detections,
                                          color=[255, 0, 0],
                                          font_face=cv2.FONT_HERSHEY_PLAIN,
                                          font_scale=5.0,
                                          thickness=3)

        #print('objects = ' + str(objects))
        out.write(frame)

    return list(objects.values())
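A hedged usage sketch follows; the file names and the detection_pipeline object are illustrative assumptions, since the snippet does not show how its pipeline is constructed.

# Hypothetical wiring for run_visualization; the paths and the pipeline
# object are placeholders, not part of the original project.
cap = cv2.VideoCapture("input.mp4")
fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
out = cv2.VideoWriter("annotated.mp4",
                      cv2.VideoWriter_fourcc(*"mp4v"),
                      fps, (width, height))

classes = run_visualization(cap, out, detection_pipeline)  # pipeline assumed built elsewhere

cap.release()
out.release()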
Example #2
File: video.py  Project: wozimer/applypy
class Writer:
    def __init__(self, path, codec, bitrate, dimension):
        self._path = path
        self._codec = codec
        self._bitrate = bitrate
        self._dimension = dimension

    def __enter__(self):
        # Note: the third positional argument of cv2.VideoWriter is the frame
        # rate, so the value stored as `_bitrate` is used as fps here.
        self._writer = VideoWriter(self._path,
                                   VideoWriter_fourcc(*self._codec),
                                   self._bitrate, self._dimension)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._writer.release()

    def write(self, frames):
        for frame in frames:
            # VideoWriter expects 8-bit frames, so cast before writing
            frame = uint8(frame)
            self._writer.write(frame)
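A usage sketch for the context manager above, assuming frames are NumPy arrays; the path, codec, and frame data are illustrative, and the third constructor argument is forwarded to VideoWriter as the frame rate.

# Hypothetical use of Writer; path, codec and frames are placeholders.
import numpy as np

frames = [np.zeros((480, 640, 3)) for _ in range(30)]   # 30 black 640x480 frames
with Writer("out.avi", "XVID", 25, (640, 480)) as w:
    w.write(frames)                                      # frames are cast to uint8 inside write()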
Example #3
def make_video(images,
               outvid=None,
               fps=5,
               size=None,
               is_color=True,
               format="XVID"):
    """
	Create a video from a list of images.

	@param      outvid      output video
	@param      images      list of images to use in the video
	@param      fps         frame per second
	@param      size        size of each frame
	@param      is_color    color
	@param      format      see http://www.fourcc.org/codecs.php
	@return                 see http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html

	The function relies on http://opencv-python-tutroals.readthedocs.org/en/latest/.
	By default, the video will have the size of the first image.
	It will resize every image to this size before adding them to the video.
	"""
    import os
    from cv2 import VideoWriter, VideoWriter_fourcc, imread, resize
    fourcc = VideoWriter_fourcc(*format)
    vid = None
    for image in images:
        if not os.path.exists(image):
            raise FileNotFoundError(image)
        img = imread(image)
        if vid is None:
            if size is None:
                size = img.shape[1], img.shape[0]
            vid = VideoWriter(outvid, fourcc, float(fps), size, is_color)
        if size[0] != img.shape[1] or size[1] != img.shape[0]:
            img = resize(img, size)
        vid.write(img)
    vid.release()
    return vid
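A possible call to make_video, assuming a folder of numbered PNG frames; the glob pattern and output name are illustrative.

# Hypothetical call; the frame files and output path are placeholders.
import glob

frame_files = sorted(glob.glob("frames/*.png"))
make_video(frame_files, outvid="timelapse.avi", fps=24, format="XVID")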
Example #4
    # loop over frames from the video file stream
    while True:
        # grab the next frame from the threaded video stream and run the
        # detector on it
        frame = vs.read()
        frame = process_frame(frame, detector, data)

        if record:
            if out is None:
                (h, w) = frame.shape[:2]
                out = VideoWriter("outpy.avi",
                                  cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                                  frame_rate, (w, h))

            out.write(frame)

        # display the image to our screen
        cv2.imshow("Frame", frame)

        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        # update the FPS counter
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
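Since the fragment above depends on surrounding helpers (vs, process_frame, fps, record), here is a self-contained sketch of the same capture-and-record pattern using plain OpenCV; the camera index, codec, and output name are assumptions, and detection is omitted.

# Stand-alone sketch of the capture/record loop above; camera index,
# codec and output name are assumptions, not taken from the original.
import cv2

cap = cv2.VideoCapture(0)
out = None
while True:
    ok, frame = cap.read()
    if not ok:
        break
    if out is None:
        h, w = frame.shape[:2]
        out = cv2.VideoWriter("outpy.avi",
                              cv2.VideoWriter_fourcc(*"MJPG"),
                              30.0, (w, h))
    out.write(frame)
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cap.release()
if out is not None:
    out.release()
cv2.destroyAllWindows()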
Example #5
class Player:
    writer: VideoWriter
    tracker: Tracker

    def __init__(self, input_filename: str, output_filename: str, codec: str,
                 window_name: str) -> None:

        # OPTIONS
        self.input_filename = input_filename
        self.output_filename = output_filename
        self.window_name = window_name

        self.state = PlayerState()
        self.hud = HUD(self.state)

        self.writer = None
        if self.output_filename:
            codec = VideoWriter_fourcc(*(codec or 'XVID'))
            self.writer = VideoWriter(output_filename, codec, 30, (1280, 720))

        self.capture = VideoCapture(input_filename)
        if not self.capture.isOpened():
            raise Exception("The capture could not be opened")

        ok, self.frame = self.capture.read()
        if not ok:
            raise Exception("The capture could not be read")

        self.state.target_fps = self.state.original_fps = self.capture.get(
            CAP_PROP_FPS)

        imshow(self.window_name, self.frame)

        self.selector = Selector(self.window_name)
        self.selector.on_selecting = self.on_selecting
        self.selector.on_selected = self.on_selected

        self.label_uppercase = False

        self.meter = TickMeter()

    def update(self):
        if not self.state.paused:
            ok, self.frame = self.capture.read()
            if not ok:
                return self._close()
            for tracking_object in self.state.tracking_objects:
                tracking_object.update(self.frame)
            if self.writer:
                self.writer.write(self.hud.render_output(self.frame))

        imshow(self.window_name, self.hud.render(self.frame))

        self.meter.stop()
        self.state.fps = 1 / (self.meter.getTimeSec() or 1)
        wait_ms = max(
            1, 1000.0 / self.state.target_fps - self.meter.getTimeMilli())
        self.meter.reset()
        self.meter.start()

        key = waitKey(int(wait_ms)) & 0xff
        if key == 255:  # no key pressed
            pass
        elif len(
                self.state.tracking_objects
        ) > 0 and self.state.tracking_objects[-1].label_typing_in_progress:
            print(key)
            if key == 27:  # ESC
                self.state.tracking_objects[-1].label = ""
                self.state.tracking_objects[
                    -1].label_typing_in_progress = False
            elif key == 8:  # BACKSPACE
                if len(self.state.tracking_objects[-1].label) > 0:
                    self.state.tracking_objects[
                        -1].label = self.state.tracking_objects[-1].label[:-1]
            elif key == 13:  # ENTER
                self.state.tracking_objects[
                    -1].label_typing_in_progress = False
            elif key == 229:  # CAPS LOCK
                self.label_uppercase = not self.label_uppercase
            elif key in range(32, 127):
                self.state.tracking_objects[-1].label += chr(
                    key) if not self.label_uppercase else chr(key).upper()
        elif key == 27:  # ESC
            return self._close()
        elif key == 32:  # SPACE
            self.state.paused = not self.state.paused
        elif key == 81:  # LARROW
            self.state.target_fps = self.state.target_fps / 2
        elif key == 83:  # RARROW
            self.state.target_fps = self.state.target_fps * 2

    def on_selecting(self, rect):
        self.state.selection = rect

    def on_selected(self, rect):
        if rect[2] != 0 or rect[3] != 0:
            self.state.tracking_objects += [TrackingObject(self.frame, rect)]
        self.state.selection = None

    def _close(self):
        self.capture.release()
        if self.writer:
            self.writer.release()
        return -1
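A minimal driver loop for the class above; the paths, codec, and window name are illustrative, and it assumes destroyAllWindows is imported from cv2 alongside imshow and waitKey.

# Hypothetical driver; paths, codec and window name are placeholders.
player = Player("input.mp4", "tracked.avi", "XVID", "Tracking")
while player.update() != -1:      # update() returns -1 once the video ends or ESC is pressed
    pass
destroyAllWindows()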