Code example #1
    def decode_needed_frames(self, chunk_number, db_data):
        step = db_data.get_frame_step()
        start_chunk_frame_number = db_data.start_frame + chunk_number * db_data.chunk_size * step
        end_chunk_frame_number = min(start_chunk_frame_number + (db_data.chunk_size - 1) * step + 1, db_data.stop_frame + 1)
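        # Find the key frame at or before the chunk's first frame, then seek there before decoding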
        start_decode_frame_number, start_decode_timestamp = self.get_nearest_left_key_frame(start_chunk_frame_number)
        container = self._open_video_container(self.source_path, mode='r')
        video_stream = self._get_video_stream(container)
        container.seek(offset=start_decode_timestamp, stream=video_stream)

        frame_number = start_decode_frame_number - 1
        for packet in container.demux(video_stream):
            for frame in packet.decode():
                frame_number += 1
                if frame_number < start_chunk_frame_number:
                    continue
                elif frame_number < end_chunk_frame_number and not ((frame_number - start_chunk_frame_number) % step):
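                    # Undo the rotation recorded in the stream metadata before yielding the frame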
                    if video_stream.metadata.get('rotate'):
                        frame = av.VideoFrame.from_ndarray(
                            rotate_image(
                                frame.to_ndarray(format='bgr24'),
                                360 - int(container.streams.video[0].metadata.get('rotate'))
                            ),
                            format='bgr24'
                        )
                    yield frame
                elif (frame_number - start_chunk_frame_number) % step:
                    continue
                else:
                    self._close_video_container(container)
                    return

        self._close_video_container(container)
Code example #2
File: media_extractors.py Project: z80020100/cvat
    def __iter__(self):
        start_decode_frame_number, start_decode_timestamp = self._get_nearest_left_key_frame()
        with closing(av.open(self.source_path, mode='r')) as container:
            video_stream = next(stream for stream in container.streams if stream.type == 'video')
            video_stream.thread_type = 'AUTO'

            container.seek(offset=start_decode_timestamp, stream=video_stream)

            frame_number = start_decode_frame_number - 1
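            # Decode forward from the key frame and yield only the frames inside self._frame_range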
            for packet in container.demux(video_stream):
                for frame in packet.decode():
                    frame_number += 1
                    if frame_number in self._frame_range:
                        if video_stream.metadata.get('rotate'):
                            frame = av.VideoFrame.from_ndarray(
                                rotate_image(
                                    frame.to_ndarray(format='bgr24'),
                                    360 - int(container.streams.video[0].metadata.get('rotate'))
                                ),
                                format='bgr24'
                            )
                        yield frame
                    elif frame_number < self._frame_range[-1]:
                        continue
                    else:
                        return
Code example #3
 def frame_sizes(self):
     container = self._open_video_container(self.source_path, 'r')
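     # Any cached key frame has the same dimensions as the rest of the video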
     frame = next(iter(self.key_frames.values()))
     if container.streams.video[0].metadata.get('rotate'):
         frame = av.VideoFrame.from_ndarray(
             rotate_image(
                 frame.to_ndarray(format='bgr24'),
                 360 - int(container.streams.video[0].metadata.get('rotate'))
             ),
             format='bgr24')
     self._close_video_container(container)
     return (frame.width, frame.height)
Code example #4
 def get_preview(self):
     container = self._get_av_container()
     stream = container.streams.video[0]
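     # Use the first decoded frame of the video stream as the preview image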
     preview = next(container.decode(stream))
     return self._get_preview(preview.to_image() if not stream.metadata.get('rotate') \
         else av.VideoFrame.from_ndarray(
             rotate_image(
                 preview.to_ndarray(format='bgr24'),
                 360 - int(container.streams.video[0].metadata.get('rotate'))
             ),
             format='bgr24'
         ).to_image()
     )
Code example #5
 def _get_frame_size(container):
     video_stream = WorkWithVideo._get_video_stream(container)
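     # Decode a single frame, apply any rotation metadata, and return its dimensions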
     for packet in container.demux(video_stream):
         for frame in packet.decode():
             if video_stream.metadata.get('rotate'):
                 frame = av.VideoFrame.from_ndarray(
                     rotate_image(
                         frame.to_ndarray(format='bgr24'),
                         360 - int(container.streams.video[0].metadata.get('rotate')),
                     ),
                     format='bgr24',
                 )
             return frame.width, frame.height
Code example #6
 def frame_sizes(self):
     container = self._open_video_container(self.source_path, 'r')
     video_stream = self._get_video_stream(container)
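     # Seek to the first cached key frame timestamp, then decode one frame to read its size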
     container.seek(offset=next(iter(self.key_frames.values())),
                    stream=video_stream)
     for packet in container.demux(video_stream):
         for frame in packet.decode():
             if video_stream.metadata.get('rotate'):
                 frame = av.VideoFrame.from_ndarray(
                     rotate_image(
                         frame.to_ndarray(format='bgr24'),
                         360 - int(container.streams.video[0].metadata.get('rotate'))
                     ),
                     format='bgr24')
             self._close_video_container(container)
             return (frame.width, frame.height)
Code example #7
 def _decode(self, container):
     frame_num = 0
     for packet in container.demux():
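         # Demux every stream but decode only packets from the video stream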
         if packet.stream.type == 'video':
             for image in packet.decode():
                 frame_num += 1
                 if self._has_frame(frame_num - 1):
                     if packet.stream.metadata.get('rotate'):
                         old_image = image
                         image = av.VideoFrame.from_ndarray(
                             rotate_image(
                                 image.to_ndarray(format='bgr24'),
                                 360 - int(container.streams.video[0].metadata.get('rotate'))
                             ),
                             format='bgr24')
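                         # Preserve the original presentation timestamp on the rebuilt frame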
                         image.pts = old_image.pts
                     yield (image, self._source_path[0], image.pts)
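All seven examples apply the same rotation-compensation pattern: read the video stream's 'rotate' metadata, rotate the decoded frame's ndarray by 360 minus that angle with rotate_image, and rebuild an av.VideoFrame with from_ndarray. Below is a minimal, self-contained sketch of that pattern; iter_upright_frames and compensate_rotation are hypothetical names, and np.rot90 stands in for CVAT's rotate_image helper (not shown in these examples) under the assumption that the rotation angle is a multiple of 90 degrees.

from contextlib import closing

import av
import numpy as np


def compensate_rotation(frame, rotation):
    # Hypothetical stand-in for rotate_image: rotate the decoded image
    # counter-clockwise by `rotation` degrees (assumed multiple of 90).
    image = frame.to_ndarray(format='bgr24')
    image = np.ascontiguousarray(np.rot90(image, k=(int(rotation) // 90) % 4))
    return av.VideoFrame.from_ndarray(image, format='bgr24')


def iter_upright_frames(path):
    # Decode every frame, undoing the container-level rotation the same
    # way the examples above do: rotate by 360 - metadata['rotate'].
    with closing(av.open(path, mode='r')) as container:
        stream = container.streams.video[0]
        rotate = stream.metadata.get('rotate')
        for frame in container.decode(stream):
            if rotate:
                frame = compensate_rotation(frame, 360 - int(rotate))
            yield frame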