Example #1
    def write_video_frame(self, input_frame):
        if not self.configured:
            self.video_stream.height = input_frame.height
            self.video_stream.width = input_frame.width
            self.configured = True
            self.start_time = input_frame.timestamp
            if input_frame.yuv_buffer:
                self.frame = av.VideoFrame(input_frame.width, input_frame.height, 'yuv422p')
            else:
                self.frame = av.VideoFrame(input_frame.width, input_frame.height, 'bgr24')
            if self.use_timestamps:
                self.frame.time_base = self.time_base
            else:
                self.frame.time_base = Fraction(1, self.fps)

        if input_frame.yuv_buffer:
            y, u, v = input_frame.yuv422
            self.frame.planes[0].update(y)
            self.frame.planes[1].update(u)
            self.frame.planes[2].update(v)
        else:
            self.frame.planes[0].update(input_frame.img)

        if self.use_timestamps:
            self.frame.pts = int((input_frame.timestamp - self.start_time) / self.time_base)
        else:
            # our time base is 1/fps, so the frame index is the correct pts for a video recorded at that fps.
            self.frame.pts = self.current_frame_idx
        # send frame off to encoder
        packet = self.video_stream.encode(self.frame)
        if packet:
            self.container.mux(packet)
        self.current_frame_idx += 1
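Note: this writer pushes each frame into the encoder but never drains it, so the last few frames can remain buffered inside the codec. A minimal companion sketch (a hypothetical close() method on the same writer, mirroring the flush loops in examples #25 and #28):

    def close(self):
        # drain frames still buffered in the encoder, then close the container
        while True:
            packet = self.video_stream.encode()  # encode() with no frame flushes the codec
            if not packet:
                break
            self.container.mux(packet)
        self.container.close()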
Example #2
 def _configure_video(self, template_frame):
     self.video_stream.height = template_frame.height
     self.video_stream.width = template_frame.width
     self.configured = True
     self.start_time = template_frame.timestamp
     if template_frame.yuv_buffer is not None:
         self.frame = av.VideoFrame(template_frame.width,
                                    template_frame.height, "yuv422p")
     else:
         self.frame = av.VideoFrame(template_frame.width,
                                    template_frame.height, "bgr24")
     if self.use_timestamps:
         self.frame.time_base = self.time_base
     else:
         self.frame.time_base = Fraction(1, self.fps)
Example #3
    def __init__(self, source_path, chunk_size, force):
        self._source_path = source_path
        self._frames_number = None
        self._force = force
        self._upper_bound = 3 * chunk_size + 1

        with closing(av.open(self.source_path, mode='r')) as container:
            video_stream = VideoStreamReader._get_video_stream(container)
            stop = False
            for packet in container.demux(video_stream):
                if stop:
                    break
                for frame in packet.decode():
                    # check the type of the first frame
                    if frame.pict_type.name != 'I':
                        raise Exception('First frame is not a key frame')

                    # get video resolution
                    if video_stream.metadata.get('rotate'):
                        frame = av.VideoFrame.from_ndarray(
                            rotate_image(
                                frame.to_ndarray(format='bgr24'),
                                360 - int(container.streams.video[0].metadata.get('rotate')),
                            ),
                            format='bgr24',
                        )
                    self.height, self.width = (frame.height, frame.width)
                    # not all videos contain information about the number of frames
                    if video_stream.frames:
                        self._frames_number = video_stream.frames
                    stop = True
                    break
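Examples #3, #4, #6, #12, #14, #15, #17 and #18 all call a rotate_image() helper that is never shown. A hypothetical reconstruction (an assumption, not the projects' actual code), valid when the container's 'rotate' metadata is a multiple of 90 degrees:

    import cv2

    def rotate_image(image, angle):
        # hypothetical helper: rotate a BGR ndarray by a multiple of 90 degrees;
        # the direction convention may differ from the original helper
        angle = angle % 360
        if angle == 0:
            return image
        rotations = {
            90: cv2.ROTATE_90_COUNTERCLOCKWISE,
            180: cv2.ROTATE_180,
            270: cv2.ROTATE_90_CLOCKWISE,
        }
        return cv2.rotate(image, rotations[angle])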
Example #4
    def decode_needed_frames(self, chunk_number, db_data):
        step = db_data.get_frame_step()
        start_chunk_frame_number = db_data.start_frame + chunk_number * db_data.chunk_size * step
        end_chunk_frame_number = min(start_chunk_frame_number + (db_data.chunk_size - 1) * step + 1, db_data.stop_frame + 1)
        start_decode_frame_number, start_decode_timestamp = self.get_nearest_left_key_frame(start_chunk_frame_number)
        container = self._open_video_container(self.source_path, mode='r')
        video_stream = self._get_video_stream(container)
        container.seek(offset=start_decode_timestamp, stream=video_stream)

        frame_number = start_decode_frame_number - 1
        for packet in container.demux(video_stream):
            for frame in packet.decode():
                frame_number += 1
                if frame_number < start_chunk_frame_number:
                    continue
                elif frame_number < end_chunk_frame_number and not ((frame_number - start_chunk_frame_number) % step):
                    if video_stream.metadata.get('rotate'):
                        frame = av.VideoFrame.from_ndarray(
                            rotate_image(
                                frame.to_ndarray(format='bgr24'),
                                360 - int(container.streams.video[0].metadata.get('rotate'))
                            ),
                            format='bgr24'
                        )
                    yield frame
                elif (frame_number - start_chunk_frame_number) % step:
                    continue
                else:
                    self._close_video_container(container)
                    return

        self._close_video_container(container)
Example #5
File: av_writer.py  Project: elmorg/pupil
    def write_video_frame_compressed(self, input_frame):
        if not self.configured:
            self.video_stream.height = input_frame.height
            self.video_stream.width = input_frame.width
            self.configured = True
            self.start_time = input_frame.timestamp
            self.frame = av.VideoFrame(input_frame.width, input_frame.height,
                                       'yuv422p')
            self.frame.time_base = self.time_base

        y, u, v = input_frame.yuv422
        self.frame.planes[0].update(y)
        self.frame.planes[1].update(u)
        self.frame.planes[2].update(v)

        # here we create a timestamp in ms resolution to be used for the frame pts.
        # later libav will scale this to stream timebase
        frame_ts_ms = int(
            (input_frame.timestamp - self.start_time) * self.time_resolution)
        self.frame.pts = frame_ts_ms
        # we keep a version of the timestamp counting from the first frame in the codec resolution (lowest time resolution in the toolchain)
        frame_ts_s = float(frame_ts_ms) / self.time_resolution
        # we append it to our list to correlate hi-res absolute timestamps with media timestamps
        self.timestamps_list.append((input_frame.timestamp, frame_ts_s))

        # send frame off to encoder
        packet = self.video_stream.encode(self.frame)
        if packet:
            self.container.mux(packet)
Example #6
    def __iter__(self):
        start_decode_frame_number, start_decode_timestamp = self._get_nearest_left_key_frame()
        with closing(av.open(self.source_path, mode='r')) as container:
            video_stream = next(stream for stream in container.streams if stream.type == 'video')
            video_stream.thread_type = 'AUTO'

            container.seek(offset=start_decode_timestamp, stream=video_stream)

            frame_number = start_decode_frame_number - 1
            for packet in container.demux(video_stream):
                for frame in packet.decode():
                    frame_number += 1
                    if frame_number in self._frame_range:
                        if video_stream.metadata.get('rotate'):
                            frame = av.VideoFrame.from_ndarray(
                                rotate_image(
                                    frame.to_ndarray(format='bgr24'),
                                    360 - int(container.streams.video[0].metadata.get('rotate'))
                                ),
                                format='bgr24'
                            )
                        yield frame
                    elif frame_number < self._frame_range[-1]:
                        continue
                    else:
                        return
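Examples #4, #6 and #17 rely on a precomputed key-frame index (get_nearest_left_key_frame, self.key_frames) so they can seek near the target frame and decode forward from there. A sketch of how such an index can be built (an assumed helper, not shown in the original code), assuming one video frame per demuxed packet:

    from contextlib import closing
    import av

    def build_key_frame_index(source_path):
        # map frame number -> packet pts, recorded without decoding
        index = {}
        with closing(av.open(source_path, mode='r')) as container:
            video_stream = container.streams.video[0]
            for frame_number, packet in enumerate(container.demux(video_stream)):
                if packet.pts is None:  # the final flush packet carries no data
                    continue
                if packet.is_keyframe:
                    index[frame_number] = packet.pts
        return index

The recorded pts values are in the stream's time base, which is what container.seek(offset=..., stream=video_stream) expects.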
Example #7
 def on_first_frame(self, input_frame) -> None:
     # setup av frame once to use as buffer throughout the process
     if input_frame.yuv_buffer is not None:
         pix_format = "yuv422p"
     else:
         pix_format = "bgr24"
     self.frame = av.VideoFrame(input_frame.width, input_frame.height, pix_format)
     self.frame.time_base = self.time_base
Example #8
 def test_video_frame_from_avframe_yuv420p(self):
     avframe = av.VideoFrame(width=640, height=480, format='yuv420p')
     avframe.pts = 123
     frame = video_frame_from_avframe(avframe)
     self.assertEqual(len(frame.data), 460800)
     self.assertEqual(frame.width, 640)
     self.assertEqual(frame.height, 480)
     self.assertEqual(frame.pts, 123)
Example #9
def frame_to_avframe(frame):
    u_start = frame.width * frame.height
    v_start = 5 * u_start // 4
    av_frame = av.VideoFrame(frame.width, frame.height, 'yuv420p')
    av_frame.planes[0].update(frame.data[0:u_start])
    av_frame.planes[1].update(frame.data[u_start:v_start])
    av_frame.planes[2].update(frame.data[v_start:])
    return av_frame
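The offsets follow from the yuv420p layout: the Y plane holds width * height bytes, and U and V are each subsampled to (width/2) * (height/2), a quarter of that. A worked check for a 640x480 frame, which also explains the 460800-byte length asserted in example #8:

    width, height = 640, 480
    u_start = width * height     # 307200 bytes of Y
    v_start = 5 * u_start // 4   # 384000 = Y plus the quarter-size U plane
    total = 3 * u_start // 2     # 460800 = Y + U + V, as in example #8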
Example #10
 def test_video_frame_from_avframe_rgb32(self):
     avframe = av.VideoFrame(width=640, height=480, format='rgb32')
     avframe.pts = 123
     frame = video_frame_from_avframe(avframe)
     self.assertEqual(len(frame.data), 460800)
     self.assertEqual(frame.width, 640)
     self.assertEqual(frame.height, 480)
     self.assertEqual(frame.timestamp, 123)
Example #11
    async def recv(self):
        pts, time_base = await self.next_timestamp()

        frame = av.VideoFrame(width=3840, height=2160)
        for p in frame.planes:
            p.update(bytes(p.buffer_size))
        frame.pts = pts
        frame.time_base = time_base
        return frame
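This recv() is the aiortc VideoStreamTrack pattern: next_timestamp() paces the coroutine to the video clock and returns (pts, time_base) for the outgoing frame, and av.VideoFrame(width, height) defaults to yuv420p, so zero-filling every plane yields a valid blank frame. A minimal sketch of driving such a track outside a peer connection (assumes aiortc is installed; BlankVideoTrack is a hypothetical name):

    import asyncio
    import av
    from aiortc import VideoStreamTrack

    class BlankVideoTrack(VideoStreamTrack):
        async def recv(self):
            pts, time_base = await self.next_timestamp()
            frame = av.VideoFrame(width=640, height=480)  # default format: yuv420p
            for p in frame.planes:
                p.update(bytes(p.buffer_size))
            frame.pts = pts
            frame.time_base = time_base
            return frame

    async def grab_one():
        frame = await BlankVideoTrack().recv()
        print(frame.width, frame.height, frame.pts, frame.time_base)

    asyncio.run(grab_one())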
Example #12
 def frame_sizes(self):
     container = self._open_video_container(self.source_path, 'r')
     frame = next(iter(self.key_frames.values()))
     if container.streams.video[0].metadata.get('rotate'):
         frame = av.VideoFrame.from_ndarray(
             rotate_image(
                 frame.to_ndarray(format='bgr24'),
                 360 - int(container.streams.video[0].metadata.get('rotate'))),
             format='bgr24')
     self._close_video_container(container)
     return (frame.width, frame.height)
Example #13
def video_frame_to_avframe(frame):
    """
    Convert an aiortc.VideoFrame to av.VideoFrame.
    """
    u_start = frame.width * frame.height
    v_start = 5 * u_start // 4
    av_frame = av.VideoFrame(frame.width, frame.height, 'yuv420p')
    av_frame.planes[0].update(frame.data[0:u_start])
    av_frame.planes[1].update(frame.data[u_start:v_start])
    av_frame.planes[2].update(frame.data[v_start:])
    av_frame.pts = frame.timestamp
    av_frame.time_base = fractions.Fraction(1, VIDEO_CLOCKRATE)
    return av_frame
Example #14
 def get_preview(self):
     container = self._get_av_container()
     stream = container.streams.video[0]
     preview = next(container.decode(stream))
     if stream.metadata.get('rotate'):
         preview = av.VideoFrame.from_ndarray(
             rotate_image(
                 preview.to_ndarray(format='bgr24'),
                 360 - int(container.streams.video[0].metadata.get('rotate'))
             ),
             format='bgr24',
         )
     return self._get_preview(preview.to_image())
Example #15
 def _get_frame_size(container):
     video_stream = WorkWithVideo._get_video_stream(container)
     for packet in container.demux(video_stream):
         for frame in packet.decode():
             if video_stream.metadata.get('rotate'):
                 frame = av.VideoFrame.from_ndarray(
                     rotate_image(
                         frame.to_ndarray(format='bgr24'),
                         360 - int(container.streams.video[0].metadata.get('rotate')),
                     ),
                     format='bgr24',
                 )
             return frame.width, frame.height
Example #16
def video_frame_to_avframe(frame):
    """
    Convert an aiortc.VideoFrame to av.VideoFrame.
    """
    u_start = frame.width * frame.height
    v_start = 5 * u_start // 4
    av_frame = av.VideoFrame(frame.width, frame.height, 'yuv420p')
    assert av_frame.planes[0].line_size == av_frame.width
    av_frame.planes[0].update(frame.data[0:u_start])
    av_frame.planes[1].update(frame.data[u_start:v_start])
    av_frame.planes[2].update(frame.data[v_start:])
    av_frame.pts = frame.pts
    av_frame.time_base = frame.time_base
    return av_frame
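The assert documents a real pitfall: FFmpeg may pad each row of a plane, so plane.line_size can exceed the pixel width, and a tightly packed buffer would then land misaligned. A stride-safe alternative sketch (an assumption, not the original code) is to let PyAV's from_ndarray do the copying; for yuv420p it expects a uint8 array of shape (height * 3 // 2, width):

    import numpy as np
    import av

    def yuv420p_frame_from_packed(data, width, height):
        # build a frame from a tightly packed Y+U+V buffer regardless of line_size padding
        flat = np.frombuffer(data, dtype=np.uint8)
        return av.VideoFrame.from_ndarray(
            flat.reshape(height * 3 // 2, width), format='yuv420p')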
Example #17
 def frame_sizes(self):
     container = self._open_video_container(self.source_path, 'r')
     video_stream = self._get_video_stream(container)
     container.seek(offset=next(iter(self.key_frames.values())),
                    stream=video_stream)
     for packet in container.demux(video_stream):
         for frame in packet.decode():
             if video_stream.metadata.get('rotate'):
                 frame = av.VideoFrame.from_ndarray(
                     rotate_image(
                         frame.to_ndarray(format='bgr24'),
                         360 - int(container.streams.video[0].metadata.get('rotate'))
                     ),
                     format='bgr24',
                 )
             self._close_video_container(container)
             return (frame.width, frame.height)
Example #18
 def _decode(self, container):
     frame_num = 0
     for packet in container.demux():
         if packet.stream.type == 'video':
             for image in packet.decode():
                 frame_num += 1
                 if self._has_frame(frame_num - 1):
                     if packet.stream.metadata.get('rotate'):
                         old_image = image
                         image = av.VideoFrame.from_ndarray(
                             rotate_image(
                                 image.to_ndarray(format='bgr24'),
                                 360 - int(container.streams.video[0].metadata.get('rotate'))
                             ),
                             format='bgr24')
                         image.pts = old_image.pts
                     yield (image, self._source_path[0], image.pts)
Example #19
 def record_vid(self, frame):
     """
     convert frames to packets and write to file
     """
     new_frame = av.VideoFrame(
         width=frame.width, height=frame.height, format=frame.format.name)
     for i in range(len(frame.planes)):
         new_frame.planes[i].update(frame.planes[i])
     pkt = None
     try:
         pkt = self.out_stream.encode(new_frame)
     except IOError as err:
         print("encoding failed: {0}".format(err))
     if pkt is not None:
         try:
             self.out_file.mux(pkt)
         except IOError:
             print('mux failed: ' + str(pkt))
Example #20
    def write_image_frame(self, jpeg_frame, image):
        curr_time = datetime.datetime.utcnow()
        # h, w = image.shape[:2]
        w, h = image.size
        if self.update_time + TIME_10_MINS < curr_time \
           or self.width != w \
           or self.height != h:
            print("Frames: ", self.frames)
            # Time's up or image size has changed.
            self.force_rotate_file(curr_time)

            self.video_stream.width = self.width = w
            self.video_stream.height = self.height = h
            self.frames += 1
        # frame = av.VideoFrame.from_image(image)
        frame = av.VideoFrame(w, h, 'rgb24')
        frame.planes[0].update(image.tobytes())
        packet = self.video_stream.encode(frame)
        self.video_output.mux(packet)
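The commented-out line above hints at the simpler path: av.VideoFrame.from_image() builds the frame and copies the pixels directly from a PIL image, avoiding the manual rgb24 plane update. A sketch of the equivalent tail of write_image_frame under that assumption:

    frame = av.VideoFrame.from_image(image)  # assumes image is a PIL.Image in RGB mode
    packet = self.video_stream.encode(frame)
    self.video_output.mux(packet)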
Example #21
def main():
    # Set up tello streaming
    drone = tellopy.Tello()
    drone.log.set_level(2)
    drone.connect()
    drone.start_video()

    # container for processing the packets into frames
    container = av.open(drone.get_video_stream())
    video_st = container.streams.video[0]

    # stream and outputfile for video
    output = av.open('archive.mp4', 'w')
    ovstream = output.add_stream('mpeg4', video_st.rate)
    ovstream.pix_fmt = 'yuv420p'
    ovstream.width = video_st.width
    ovstream.height = video_st.height

    counter = 0
    save = True
    for packet in container.demux((video_st, )):
        for frame in packet.decode():
            # convert frame to cv2 image and show
            image = cv2.cvtColor(numpy.array(frame.to_image()),
                                 cv2.COLOR_RGB2BGR)
            cv2.imshow('frame', image)
            key = cv2.waitKey(1) & 0xFF

            # save the initial 300 frames
            if save:
                new_frame = av.VideoFrame(width=frame.width,
                                          height=frame.height,
                                          format=frame.format.name)
                for i in range(len(frame.planes)):
                    new_frame.planes[i].update(frame.planes[i])
                encode(new_frame, ovstream, output)
                counter += 1
                print("Frames encoded:", counter)
                if counter > 300:
                    output.close()
                    save = False
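Examples #21 and #23 call an encode() helper that is never shown (example #24 uses a variant bound to module globals). A hypothetical reconstruction under those assumptions: encode one frame, or flush when frame is None, mux whatever the codec emits, and return whether a packet came out so the flush loop in example #24 knows when to stop:

    def encode(frame, stream, container):
        # hypothetical helper: frame=None drains the encoder
        if frame is not None:
            frame.pts = None  # let the encoder assign timestamps
        packet = stream.encode(frame)
        if packet:
            container.mux(packet)
            return True
        return False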
Example #22
File: write.py  Project: mark-dawn/stytra
    def run(self):
        """ """
        while True:
            timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
            out_container = av.open(self.folder + timestamp + ".mp4", mode="w")
            print("Recorder running, saving to ",
                  self.folder + timestamp + ".mp4")
            out_stream = None
            video_frame = None
            while True:
                if self.reset_signal.is_set() or self.finished_signal.is_set():
                    self.reset_signal.clear()
                    break
                try:
                    if out_stream is None:
                        current_frame = self.input_queue.get(timeout=1)
                        out_stream = out_container.add_stream("mpeg4", rate=50)
                        out_stream.width, out_stream.height = current_frame.shape[::-1]
                        out_stream.pix_fmt = "yuv420p"
                        out_stream.bit_rate = self.kbit_rate * 1000
                        video_frame = av.VideoFrame(current_frame.shape[1],
                                                    current_frame.shape[0],
                                                    "gray")
                        video_frame.planes[0].update(current_frame)
                    else:
                        video_frame.planes[0].update(
                            self.input_queue.get(timeout=1))
                    print("Got and written frame")
                    packet = out_stream.encode(video_frame)
                    out_container.mux(packet)
                    self.update_framerate()

                except Empty:
                    pass
            if self.finished_signal.is_set():
                break

        if out_stream is not None:
            out_container.close()
Example #23
def main():
    # drone = tellopy.Tello()
    # drone.log.set_level(2)
    # drone.connect()
    # drone.start_video()
    # drone.subscribe(drone.EVENT_VIDEO_FRAME, videoFrameHandler)

    container = av.open('ball_tracking_example.mp4')
    # container = av.open(drone.get_video_stream())
    video_st = container.streams.video[0]
    output = av.open('archive.mp4', 'w')
    ovstream = output.add_stream('mpeg4', video_st.rate)
    ovstream.pix_fmt = 'yuv420p'
    ovstream.width = video_st.width
    ovstream.height = video_st.height

    net = caffe.Net('mobilenet-yolov3.prototxt',
                    'mobilenet_yolov3_lite_deploy.caffemodel',
                    caffe.TEST)

    counter = 0
    for packet in container.demux((video_st, )):
        for frame in packet.decode():
            image = cv2.cvtColor(numpy.array(frame.to_image()),
                                 cv2.COLOR_RGB2BGR)
            print(type(image))
            cv2.imshow('frame', image)

            new_frame = av.VideoFrame(width=frame.width,
                                      height=frame.height,
                                      format=frame.format.name)
            for i in range(len(frame.planes)):
                new_frame.planes[i].update(frame.planes[i])
            encode(new_frame, ovstream, output)
            counter += 1
            print("Frames encoded:", counter)
        if counter > 500:
            output.close()
            break
Example #24
            print('mux failed: ' + str(pkt))
    return True


input_file = 'http://192.168.8.105:8081/'
container = av.open(input_file)
video_st = container.streams.video[0]
output = av.open('archive.mp4', 'w')
ovstream = output.add_stream('libx264', video_st.rate)
ovstream.pix_fmt = 'yuv420p'
ovstream.width = video_st.width
ovstream.height = video_st.height

counter = 0
for packet in container.demux((video_st, )):
    for frame in packet.decode():
        new_frame = av.VideoFrame(width=frame.width,
                                  height=frame.height,
                                  format=frame.format.name)
        for i in range(len(frame.planes)):
            new_frame.planes[i].update(frame.planes[i])
        encode(new_frame)
        counter += 1
        print("Frames encoded:", counter)
    if counter > 200:
        break

while True:
    if not encode(None):
        break
output.close()
Example #25
def export_undistorted_h264(distorted_video_loc, target_video_loc,
                            export_range):
    yield "Converting scene video", .1
    capture = File_Source(Empty(), distorted_video_loc)
    if not capture.initialised:
        yield "Converting scene video failed", 0.
        return

    update_rate = 10
    start_time = None
    time_base = Fraction(1, 65535)
    average_fps = int(
        len(capture.timestamps) /
        (capture.timestamps[-1] - capture.timestamps[0]))

    target_container = av.open(target_video_loc, 'w')
    video_stream = target_container.add_stream('mpeg4', 1 / time_base)
    video_stream.bit_rate = 150e6
    video_stream.bit_rate_tolerance = video_stream.bit_rate / 20
    video_stream.thread_count = max(1, mp.cpu_count() - 1)
    video_stream.width, video_stream.height = capture.frame_size

    av_frame = av.VideoFrame(*capture.frame_size, 'bgr24')
    av_frame.time_base = time_base

    capture.seek_to_frame(export_range[0])
    next_update_idx = export_range[0] + update_rate
    while True:
        try:
            frame = capture.get_frame()
        except EndofVideoError:
            break

        if frame.index > export_range[1]:
            break

        if start_time is None:
            start_time = frame.timestamp

        undistorted_img = capture.intrinsics.undistort(frame.img)
        av_frame.planes[0].update(undistorted_img)
        av_frame.pts = int((frame.timestamp - start_time) / time_base)

        packet = video_stream.encode(av_frame)
        if packet:
            target_container.mux(packet)

        if capture.current_frame_idx >= next_update_idx:
            progress = ((capture.current_frame_idx - export_range[0]) /
                        (export_range[1] - export_range[0])) * .9 + .1
            yield "Converting scene video", progress * 100.
            next_update_idx += update_rate

    while True:  # flush encoder
        packet = video_stream.encode()
        if packet:
            target_container.mux(packet)
        else:
            break

    target_container.close()
    capture.cleanup()
    yield "Converting scene video completed", 1. * 100.
Example #26
        frames = (os.path.getsize(sys.argv[1]) - 12) // height // width // 2
        print('Frame counter not set!  Guessed from file size as {}'.format(
            frames))

    outvid = av.open(sys.argv[1] + '.mp4', 'w')
    if len(sys.argv) > 2:
        stream = outvid.add_stream('mpeg4', sys.argv[2])
    else:
        stream = outvid.add_stream('mpeg4', '50')
    stream.bit_rate = 10000000
    stream.pix_fmt = 'yuv420p'
    stream.width = width
    stream.height = height
    stream.thread_count = 3

    outframe = av.VideoFrame(width, height, 'yuv420p')

    for frameno in range(frames):
        I = np.ndarray(buffer=f.read(width * height * 2),
                       dtype=np.uint16,
                       shape=(height, width))  # 16 bpp
        Y, U, V = to_iron_ycbcr(I)
        outframe.planes[0].update(Y)
        outframe.planes[1].update(U)
        outframe.planes[2].update(V)
        outframe.pts = None
        packet = outvid.streams[0].encode(outframe)
        if packet:
            outvid.mux(packet)

    while True:  # flush encoder
        packet = outvid.streams[0].encode()
        if packet:
            outvid.mux(packet)
        else:
            break
    outvid.close()
Example #27
import av
import logging

from PIL import Image, ImageDraw, ImageFont

font = ImageFont.truetype("Helvetica.ttf", 80)
logging.basicConfig(level=logging.DEBUG)

w, h = 1920, 1080
out = av.open("test.mov", mode="w", options={"preset": "fast", "crf": "22"})
stream = out.add_stream('libx264', 24)
stream.width = w
stream.height = h
stream.pix_fmt = "yuv420p"
idx = 0
for frame in range(64):
    image = Image.new('RGB', (w, h))
    outframe = av.VideoFrame(w, h, 'rgb24')
    draw = ImageDraw.Draw(image)
    draw.text((10 + 4 * idx, 10 + 4 * idx),
              "Hello",
              font=font,
              fill=(0, 255, 255, 255))
    outframe.planes[0].update(image.tobytes())
    outframe.pts = None
    packet = stream.encode(outframe)
    if packet:
        out.mux(packet)
    idx += 1

while True:  # flush encoder
    out_packet = stream.encode()
    if out_packet:
        out.mux(out_packet)
    else:
        break
out.close()
Example #28
def export_processed_h264(
    world_timestamps,
    unprocessed_video_loc,
    target_video_loc,
    export_range,
    process_frame,
    export_timestamps,
):
    yield "Converting video", 0.1
    capture = File_Source(Empty(), unprocessed_video_loc)
    if not capture.initialised:
        yield "Converting scene video failed", 0.0
        return

    export_window = pm.exact_window(world_timestamps, export_range)
    (export_from_index,
     export_to_index) = pm.find_closest(capture.timestamps, export_window)

    update_rate = 10
    start_time = None
    time_base = Fraction(1, 65535)

    target_container = av.open(target_video_loc, "w")
    video_stream = target_container.add_stream("mpeg4", 1 / time_base)
    video_stream.bit_rate = 150e6
    video_stream.bit_rate_tolerance = video_stream.bit_rate / 20
    video_stream.thread_count = max(1, mp.cpu_count() - 1)
    video_stream.width, video_stream.height = capture.frame_size

    av_frame = av.VideoFrame(*capture.frame_size, "bgr24")
    av_frame.time_base = time_base

    capture.seek_to_frame(export_from_index)
    next_update_idx = export_from_index + update_rate
    timestamps = []
    while True:
        try:
            frame = capture.get_frame()
        except EndofVideoError:
            break

        if frame.index > export_to_index:
            break

        if start_time is None:
            start_time = frame.timestamp

        undistorted_img = process_frame(capture, frame)
        av_frame.planes[0].update(undistorted_img)
        av_frame.pts = int((frame.timestamp - start_time) / time_base)

        if export_timestamps:
            timestamps.append(frame.timestamp)

        packet = video_stream.encode(av_frame)
        if packet:
            target_container.mux(packet)

        if capture.current_frame_idx >= next_update_idx:
            progress = ((capture.current_frame_idx - export_from_index) /
                        (export_to_index - export_from_index)) * 0.9 + 0.1
            yield "Converting video", progress * 100.0
            next_update_idx += update_rate

    while True:  # flush encoder
        packet = video_stream.encode()
        if packet:
            target_container.mux(packet)
        else:
            break

    if export_timestamps:
        write_timestamps(target_video_loc, timestamps)

    target_container.close()
    capture.cleanup()
    yield "Converting video completed", 1.0 * 100.0
Example #29
File: av_writer.py  Project: sleip87/pupil
    def write_video_frame(self, input_frame):
        if not self.configured:
            self.video_stream.height = input_frame.height
            self.video_stream.width = input_frame.width
            self.configured = True
            self.start_time = input_frame.timestamp
            if input_frame.yuv_buffer is not None:
                self.frame = av.VideoFrame(input_frame.width,
                                           input_frame.height, 'yuv422p')
            else:
                self.frame = av.VideoFrame(input_frame.width,
                                           input_frame.height, 'bgr24')
            if self.use_timestamps:
                self.frame.time_base = self.time_base
            else:
                self.frame.time_base = Fraction(1, self.fps)

        if input_frame.yuv_buffer is not None:
            y, u, v = input_frame.yuv422
            self.frame.planes[0].update(y)
            self.frame.planes[1].update(u)
            self.frame.planes[2].update(v)
        else:
            self.frame.planes[0].update(input_frame.img)

        if self.use_timestamps:
            self.frame.pts = int(
                (input_frame.timestamp - self.start_time) / self.time_base)
        else:
            # our time base is 1/fps, so the frame index is the correct pts for a video recorded at that fps.
            self.frame.pts = self.current_frame_idx
        # send frame off to encoder
        packet = self.video_stream.encode(self.frame)
        if packet:
            self.container.mux(packet)
        self.current_frame_idx += 1
        self.timestamps.append(input_frame.timestamp)
        if self.audio_export:
            for audio_packet in self.audio_rec.demux():
                if self.audio_packets_decoded >= len(self.audio_ts):
                    logger.debug(
                        'More audio frames decoded than there are timestamps: {} > {}'
                        .format(self.audio_packets_decoded,
                                len(self.audio_ts)))
                    break
                audio_pts = int(
                    (self.audio_ts[self.audio_packets_decoded] -
                     self.start_time) / self.audio_export.time_base)
                audio_packet.pts = audio_pts
                audio_packet.dts = audio_pts
                audio_packet.stream = self.audio_export
                self.audio_packets_decoded += 1

                if audio_pts * self.audio_export.time_base < 0:
                    logger.debug('Seeking: {} -> {}'.format(
                        audio_pts * self.audio_export.time_base,
                        self.start_time))
                    continue  # seek to start_time

                self.container.mux(audio_packet)
                if audio_pts * self.audio_export.time_base > self.frame.pts * self.time_base:
                    break  # wait for next image
Example #30
done = False
def finish():
	global done
	if outflir:
		toggle_record_flir()
	if outmp4:
		toggle_record_mp4()
	camera.stop_acquisition()
	done = True
	gui.destroy()

gui.protocol("WM_DELETE_WINDOW", finish)

if PyAV:
	mp4frame = av.VideoFrame(w, h, 'yuv420p')

last_frame = 0
last_render = time.time()
inhibit_focus_poll_until = 0
while not done:
	buf = stream.pop_buffer()

	if not buf:
		continue

	if buf.get_status() == Aravis.BufferStatus.TIMEOUT:
		print('Packet timeout!')
	else:
		if buf.get_frame_id() != last_frame + 1 and buf.get_frame_id() != 1 and last_frame != 65535:
			print('Dropped {} frame(s)!'.format(buf.get_frame_id() - last_frame - 1))