Example #1
    def test_play(self):
        import ffpyplayer.tests.common
        from ffpyplayer.writer import MediaWriter
        from ffpyplayer.tools import get_supported_pixfmts, get_supported_framerates
        from ffpyplayer.pic import Image

        fname = 'output.mp4'  # output path; in the original test this is provided elsewhere
        w, h = 640, 480
        out_opts = {
            'pix_fmt_in': 'rgb24', 'width_in': w, 'height_in': h,
            'codec': 'libx264', 'frame_rate': (5, 1)}

        lib_opts = {'preset':'slow', 'crf':'22'}
        metadata = {'title':'Singing in the sun', 'author':'Rat', 'genre':'Animal sounds'}
        writer = MediaWriter(fname, [out_opts] * 2, fmt='mp4',
                             width_out=w // 2, height_out=h // 2, pix_fmt_out='yuv420p',
                             lib_opts=lib_opts, metadata=metadata)

        # Construct images
        size = w * h * 3
        buf = bytearray([int(x * 255 / size) for x in range(size)])
        img = Image(plane_buffers=[buf], pix_fmt='rgb24', size=(w, h))

        buf = bytearray([int((size - x) * 255 / size) for x in range(size)])
        img2 = Image(plane_buffers=[buf], pix_fmt='rgb24', size=(w, h))

        for i in range(20):
            writer.write_frame(img=img, pts=i / 5., stream=0)  # stream 1
            writer.write_frame(img=img2, pts=i / 5., stream=1)  # stream 2
Example #2
def create_image(size):
    from ffpyplayer.pic import Image

    w, h = size
    size = w * h * 3
    buf = bytearray([int(x * 255 / size) for x in range(size)])
    return Image(plane_buffers=[buf], pix_fmt='rgb24', size=(w, h))
Example #3
def video_file(tmp_path_factory):
    from ffpyplayer.writer import MediaWriter
    from ffpyplayer.pic import Image
    fname = str(tmp_path_factory.mktemp('data') / 'test_video.avi')

    w, h = 64, 64
    size = w * h
    out_opts = {
        'pix_fmt_in': 'gray',
        'width_in': w,
        'height_in': h,
        'codec': 'rawvideo',
        'frame_rate': (2997, 100)
    }

    buf = bytearray([int(x * 255 / size) for x in range(size)])
    buf2 = bytearray([0] * size)
    img = Image(plane_buffers=[buf, buf2], pix_fmt='gray', size=(w, h))

    writer = MediaWriter(fname, [out_opts])
    for i in range(20):
        writer.write_frame(img=img, pts=i / 29.97, stream=0)
    writer.close()

    return fname
Example #4
def test_write_streams(tmp_path):
    from ffpyplayer.writer import MediaWriter
    from ffpyplayer.tools import get_supported_pixfmts, get_supported_framerates
    from ffpyplayer.pic import Image
    from ffpyplayer.tools import get_codecs
    fname = str(tmp_path / 'test_video.avi')

    lib_opts = {}
    codec = 'rawvideo'
    if 'libx264' in get_codecs(encode=True, video=True):
        codec = 'libx264'
        lib_opts = {'preset': 'slow', 'crf': '22'}

    w, h = 640, 480
    out_opts = {
        'pix_fmt_in': 'rgb24',
        'width_in': w,
        'height_in': h,
        'codec': codec,
        'frame_rate': (5, 1)
    }

    metadata = {
        'title': 'Singing in the sun',
        'author': 'Rat',
        'genre': 'Animal sounds'
    }
    writer = MediaWriter(fname, [out_opts] * 2,
                         fmt='mp4',
                         width_out=w // 2,
                         height_out=h // 2,
                         pix_fmt_out='yuv420p',
                         lib_opts=lib_opts,
                         metadata=metadata)

    # Construct images
    size = w * h * 3
    buf = bytearray([int(x * 255 / size) for x in range(size)])
    img = Image(plane_buffers=[buf], pix_fmt='rgb24', size=(w, h))

    buf = bytearray([int((size - x) * 255 / size) for x in range(size)])
    img2 = Image(plane_buffers=[buf], pix_fmt='rgb24', size=(w, h))

    for i in range(20):
        writer.write_frame(img=img, pts=i / 5., stream=0)  # stream 1
        writer.write_frame(img=img2, pts=i / 5., stream=1)  # stream 2
    writer.close()
Example #5
def get_image(w, h):
    from ffpyplayer.pic import Image

    # Construct images
    size = w * h * 3
    buf = bytearray([int(x * 255 / size) for x in range(size)])
    img = Image(plane_buffers=[buf], pix_fmt='rgb24', size=(w, h))
    return img
Example #6
def get_gray_image_with_val(w, h, val):
    from ffpyplayer.pic import Image

    # Construct images
    size = w * h
    buf = bytearray([int(val)] * size)
    buf2 = bytearray([0] * size)
    img = Image(plane_buffers=[buf, buf2], pix_fmt='gray', size=(w, h))
    return img
Example #7
from ffpyplayer.pic import Image


def assert_image_same(image1: Image, image2: Image, exact=True) -> None:
    assert image1.get_pixel_format() == image2.get_pixel_format()
    assert image1.get_size() == image2.get_size()
    assert image1.get_buffer_size() == image2.get_buffer_size()

    # compare per-plane byte buffers; planes after the first must match exactly
    data1 = image1.to_bytearray()
    data2 = image2.to_bytearray()
    assert data1[1:] == data2[1:]
    if exact:
        assert data1[0] == data2[0]
    else:
        # otherwise allow an off-by-one difference per byte in the first plane
        for a, b in zip(data1[0], data2[0]):
            assert a - 1 <= b <= a + 1
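A minimal, self-contained usage sketch for a comparison helper like the one above; the gradient image construction mirrors the other examples on this page, and assert_image_same is assumed to be the function defined just above, in the same module.

from ffpyplayer.pic import Image

def _make_rgb_image(w, h):
    # gradient test image, same construction as in the other examples
    size = w * h * 3
    buf = bytearray([int(x * 255 / size) for x in range(size)])
    return Image(plane_buffers=[buf], pix_fmt='rgb24', size=(w, h))

img_a = _make_rgb_image(16, 16)
img_b = _make_rgb_image(16, 16)
assert_image_same(img_a, img_b)                # identical buffers: exact check passes
assert_image_same(img_a, img_b, exact=False)   # tolerant check passes as well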
Example #8
    def read_image_from_block(block, postfix=''):
        # Rebuild an Image from plane buffers stored as arrays in a data block;
        # np, yaml_loads, and Image are imported in the enclosing module.
        try:
            group = block.groups['fluorescent_image{}'.format(postfix)]
        except KeyError:
            return None

        planes = [np.array(d).tobytes() for d in group.data_arrays]
        img = Image(plane_buffers=planes,
                    pix_fmt=group.metadata['pix_fmt'],
                    size=yaml_loads(group.metadata['size']))
        return img
Example #9
    def process_in_kivy_thread(self, *largs):
        """Processes messages from the client in the kivy thread.
        """
        while self.to_kivy_queue is not None:
            try:
                msg, value = self.to_kivy_queue.get(block=False)

                if msg == 'exception':
                    e, exec_info = value
                    cpl_media.error_callback(e, exc_info=exec_info)
                elif msg == 'exception_exit':
                    e, exec_info = value
                    cpl_media.error_callback(e, exc_info=exec_info)
                    self.stop_all()
                    if self.play_state != 'none':
                        self.complete_stop()
                elif msg == 'started_recording':
                    if self.play_state == 'starting':
                        self.ts_play = self._ivl_start = clock()
                        self._frame_count = 0

                        self.metadata_play_used = VideoMetadata(*value)
                        self.complete_start()
                elif msg == 'stopped_recording':
                    self.stop()
                elif msg == 'stopped_playing':
                    self.complete_stop()
                elif msg == 'image':
                    if self.play_state != 'playing':
                        continue

                    t = clock()
                    if t - self._ivl_start >= 1.:
                        self.real_rate = self._frame_count / (t -
                                                              self._ivl_start)
                        self._frame_count = 0
                        self._ivl_start = t

                    self._frame_count += 1
                    self.frames_played += 1

                    plane_buffers, pix_fmt, size, linesize, metadata = value
                    sws = SWScale(*size, pix_fmt, ofmt=pix_fmt)
                    img = Image(plane_buffers=plane_buffers,
                                pix_fmt=pix_fmt,
                                size=size,
                                linesize=linesize)
                    self.process_frame(sws.scale(img), metadata)
                else:
                    print('Got unknown RemoteVideoPlayer message', msg, value)
            except Empty:
                break
Example #10
    def create_image_from_msg(self, msg_value):
        """Takes the ``value`` from the server that contains image data,
        constructs the image and returns it and its metadata.

        It returns a 4-tuple of ``img, count, queued_count, t)``. Where
        ``img`` is the image. ``count`` is the image number as provided by the
        camera. ``queued_count`` is the number of frames the camera still has
        to process (i.e. need to be send from the hardware). ``t`` is the
        image timestamp.
        """
        data, fmt, (w, h), count, queued_count, t = msg_value
        img = Image(plane_buffers=[data], pix_fmt=fmt, size=(w, h))
        return img, count, queued_count, t
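Based on the docstring above, here is a minimal, self-contained sketch of how the returned 4-tuple might be unpacked. The message contents below are fabricated for illustration and do not come from the real server protocol; the standalone function is simply a copy of the method without its class.

from ffpyplayer.pic import Image

def create_image_from_msg(msg_value):
    # standalone copy of the method above, for illustration only
    data, fmt, (w, h), count, queued_count, t = msg_value
    img = Image(plane_buffers=[data], pix_fmt=fmt, size=(w, h))
    return img, count, queued_count, t

# fabricated message: a blank 4x4 rgb24 frame, frame number 0, empty queue, t=0.0
w, h = 4, 4
msg_value = (bytearray(w * h * 3), 'rgb24', (w, h), 0, 0, 0.0)
img, count, queued_count, t = create_image_from_msg(msg_value)
print(img.get_size(), count, queued_count, t)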
Example #11
def create_test_image(width: int = 100, height: int = 100) -> Image:
    size = width * height * 3
    buf = bytearray([int(x * 255 / size) for x in range(size)])
    img = Image(plane_buffers=[buf], pix_fmt='bgr24', size=(width, height))

    return img
Example #12
    def generate_movie(self,
                       filename,
                       out_fmt='yuv420p',
                       codec='libx264',
                       lib_opts={'crf': '0'},
                       video_fmt='mp4',
                       start=None,
                       end=None,
                       canvas_size=(0, 0),
                       canvas_size_hint=(1, 1),
                       projector_pos=(0, 0),
                       projector_pos_hint=(None, None),
                       paint_funcs=(),
                       stimulation_transparency=1.,
                       lum=1.,
                       speed=1.,
                       hidden_shapes=None):
        from kivy.graphics import (Canvas, Translate, Fbo, ClearColor,
                                   ClearBuffers, Scale)
        from kivy.core.window import Window

        rate = float(self.view_controller.frame_rate)
        rate_int = int(rate)
        if rate != rate_int:
            raise ValueError('Frame rate should be integer')
        orig_w, orig_h = (self.view_controller.screen_width,
                          self.view_controller.screen_height)

        canvas_w, canvas_h = canvas_size
        cv_hint_w, cv_hint_h = canvas_size_hint
        w = int(canvas_w if cv_hint_w is None else orig_w * cv_hint_w)
        h = int(canvas_h if cv_hint_h is None else orig_h * cv_hint_h)

        projector_x, projector_y = projector_pos
        projector_hint_x, projector_hint_y = projector_pos_hint
        x = int(projector_x if projector_hint_x is None else orig_w *
                projector_hint_x)
        y = int(projector_y if projector_hint_y is None else orig_h *
                projector_hint_y)

        Window.size = w, h
        intensities = self.shapes_intensity

        n = len(intensities[next(iter(intensities.keys()))])
        if start is not None:
            start = int(start * rate)
            if start >= n:
                raise Exception('Start time is after the end of the data')
        else:
            start = 0

        if end is not None:
            end = int(math.ceil(end * rate)) + 1
            if end <= start:
                raise Exception('End time is before or at the start time')
        else:
            end = n

        stream = {
            'pix_fmt_in': 'rgba',
            'pix_fmt_out': out_fmt,
            'width_in': w,
            'height_in': h,
            'width_out': w,
            'height_out': h,
            'codec': codec,
            'frame_rate': (int(speed * rate_int), 1)
        }
        writer = MediaWriter(filename, [stream],
                             fmt=video_fmt,
                             lib_opts=lib_opts)

        fbo = Fbo(size=(w, h), with_stencilbuffer=True)
        with fbo:
            ClearColor(0, 0, 0, 1)
            ClearBuffers()
            Scale(1, -1, 1)
            Translate(0, -h, 0)

        config = {
            'canvas': fbo,
            'pos': (x, y),
            'size': (w, h),
            'orig_size': (orig_w, orig_h),
            'rate': rate
        }
        paint_funcs = [func(config) for func in paint_funcs]
        paint_funcs = [func for func in paint_funcs if func is not None]

        fbo.draw()
        img = Image(plane_buffers=[fbo.pixels], pix_fmt='rgba', size=(w, h))
        writer.write_frame(img, 0.)

        fbo.add(Translate(x, y))
        shape_views = self.stage_factory.get_shapes_gl_color_instructions(
            fbo, 'stage_replay')
        fbo.add(Translate(-x, -y))

        pbar = tqdm(total=(end - 1 - start) / rate,
                    file=sys.stdout,
                    unit='second',
                    unit_scale=1)

        # all shapes listed in intensities must be in shape_views. However,
        # we don't want to show shapes not given values in intensities or if
        # they are to be hidden
        unused_shapes = set(shape_views) - set(intensities)
        unused_shapes.update(set(hidden_shapes or []))
        for name in unused_shapes:
            if name in shape_views:
                shape_views[name].rgba = 0, 0, 0, 0

        for i in range(start, end):
            pbar.update(1 / rate)
            for name, intensity in intensities.items():
                r, g, b, a = intensity[i]
                if name in unused_shapes:
                    a = 0
                shape_views[name].rgba = \
                    r * lum, g * lum, b * lum, a * stimulation_transparency

            try:
                for func in paint_funcs:
                    func(i)
            except EndOfDataException:
                break

            fbo.draw()
            img = Image(plane_buffers=[fbo.pixels],
                        pix_fmt='rgba',
                        size=(w, h))
            writer.write_frame(img, (i - start + 1) / (rate * speed))
        pbar.close()
Example #13
    def play_thread_run(self):
        process_frame = self.process_frame
        c = None
        ffmpeg_fmts = self.ffmpeg_pix_map

        try:
            ip = list(map(int, self.ip.split('.'))) if self.ip else None
            c = Camera(serial=self.serial or None, ip=ip)
            c.connect()

            started = False
            # use_rt = self.use_real_time
            count = 0
            ivl_start = 0

            c.start_capture()
            while self.play_state != 'stopping':
                try:
                    c.read_next_image()
                except Exception as e:
                    self.exception(e)
                    continue
                if not started:
                    ivl_start = clock()
                    self.setattr_in_kivy_thread('ts_play', ivl_start)
                    Clock.schedule_once(self.complete_start)
                    started = True
                    self._camera = c

                ivl_end = clock()
                if ivl_end - ivl_start >= 1.:
                    real_rate = count / (ivl_end - ivl_start)
                    self.setattr_in_kivy_thread('real_rate', real_rate)
                    count = 0
                    ivl_start = ivl_end

                count += 1
                self.increment_in_kivy_thread('frames_played')

                image = c.get_current_image()
                pix_fmt = image['pix_fmt']
                if pix_fmt not in ffmpeg_fmts:
                    raise Exception('Pixel format {} cannot be converted'.
                                    format(pix_fmt))
                ff_fmt = ffmpeg_fmts[pix_fmt]
                if ff_fmt == 'yuv444p':
                    buff = image['buffer']
                    img = Image(
                        plane_buffers=[buff[1::3], buff[0::3], buff[2::3]],
                        pix_fmt=ff_fmt, size=(image['cols'], image['rows']))
                elif pix_fmt == 'yuv411':
                    raise ValueError('yuv411 is not currently supported')
                else:
                    img = Image(
                        plane_buffers=[image['buffer']], pix_fmt=ff_fmt,
                        size=(image['cols'], image['rows']))

                process_frame(img, {'t': ivl_end})
        except Exception as e:
            self.exception(e)
        finally:
            self._camera = None

            try:
                c.disconnect()
            finally:
                Clock.schedule_once(self.complete_stop)
Example #14
    def play_thread_run(self):
        chan = None
        try:
            process_frame = self.process_frame
            paths = list(pybarst.dep_bins)
            if hasattr(sys, '_MEIPASS'):
                paths.append(sys._MEIPASS)

            barst_bin = None
            for p, f in itertools.product(paths, ('Barst64.exe', 'Barst.exe')):
                fname = join(abspath(p), f)
                if isfile(fname):
                    barst_bin = fname
                    break

            local = not self.remote_computer_name
            name = self.remote_computer_name if not local else '.'
            pipe_name = self.pipe_name
            full_name = r'\\{}\pipe\{}'.format(name, pipe_name)

            img_fmt = self.pixel_fmt
            ffmpeg_pix_fmt = self.image_fmts[img_fmt]
            w, h = self.video_fmts[self.video_fmt]
            video_fmt = self.video_fmt
            port = self.port

            if self.barst_server is None:
                self.barst_server = BarstServer(barst_path=barst_bin,
                                                pipe_name=full_name)

            server = self.barst_server
            server.open_server()

            chan = RTVChannel(chan=port,
                              server=server,
                              video_fmt=video_fmt,
                              frame_fmt=img_fmt,
                              luma_filt=img_fmt == 'gray',
                              lossless=True)

            chan.open_channel()
            try:
                chan.close_channel_server()
            except Exception:
                pass
            chan.open_channel()
            chan.set_state(True)

            started = False
            # use_rt = self.use_real_time
            count = 0
            ivl_start = 0

            while self.play_state != 'stopping':
                ts, buf = chan.read()
                if not started:
                    ivl_start = clock()
                    self.setattr_in_kivy_thread('ts_play', ivl_start)
                    Clock.schedule_once(self.complete_start)
                    started = True

                ivl_end = clock()
                if ivl_end - ivl_start >= 1.:
                    real_rate = count / (ivl_end - ivl_start)
                    self.setattr_in_kivy_thread('real_rate', real_rate)
                    count = 0
                    ivl_start = ivl_end

                count += 1
                self.increment_in_kivy_thread('frames_played')

                img = Image(plane_buffers=[buf],
                            pix_fmt=ffmpeg_pix_fmt,
                            size=(w, h))
                process_frame(img, {'t': ts})
        except Exception as e:
            self.exception(e)
        finally:
            try:
                if chan is not None:
                    chan.close_channel_server()
            finally:
                Clock.schedule_once(self.complete_stop)
Example #15
from ffpyplayer.player import MediaPlayer
from ffpyplayer.writer import MediaWriter
from ffpyplayer.pic import Image

video = './../video/1x01.mkv'
save_dir = './../frame/1x01/'

# create image
w, h = 1280, 720
fmt = 'rgb24'
codec = 'png'  # encode each saved frame with the png codec
out_opts = {
    'pix_fmt_in': fmt,
    'width_in': w,
    'height_in': h,
    'frame_rate': (30, 1),
    'codec': codec
}

player = MediaPlayer(video)
val = ''
while val != 'eof':
    frame, val = player.get_frame()
    if val != 'eof' and frame is not None:
        img, t = frame
        frame_file = 'frame' + str(int(t)) + '.png'
        file = save_dir + frame_file
        print(str(t))
        buf = img.to_bytearray()
        img = Image(plane_buffers=[buf[0]], pix_fmt=fmt, size=(w, h))
        writer = MediaWriter(file, [out_opts])
        writer.write_frame(img=img, pts=0, stream=0)
        writer.close()
Example #16
def main(_):
    with tf.Session() as sess:
        config = get_config(FLAGS)
        env = MyEnvironment(config)
        agent = Agent(config, env, sess)

        scale = 1
        # 1. first probe file, get metadata
        in_file = config.input_name
        out_file = config.output_name

        convert_num = -1
        ff_opts = {
            'out_fmt': 'yuv444p',
            'framedrop': False,
            'an': True,
            'sn': True,
        }
        player = MediaPlayer(in_file, ff_opts=ff_opts)
        # must wait for probe result, strange
        while player.get_metadata()['src_vid_size'] == (0, 0):
            time.sleep(0.01)
        meta = player.get_metadata()
        width = meta['src_vid_size'][0]
        height = meta['src_vid_size'][1]
        width_out = width * scale
        height_out = height * scale

        out_opts = {
            'pix_fmt_in': 'yuv444p',
            'pix_fmt_out': 'yuv420p',
            'width_in': width_out,
            'height_in': height_out,
            'frame_rate': meta['frame_rate'],
            'codec': 'libx264',
            # 'aspect': '4:3',
        }
        lib_opts = {
            # config for BT.2020 HDR10
            # 'x265-params': 'range=pc:colorprim=bt2020:transfer=smpte2084:colormatrix=bt2020nc:crf=15',

            # config for x264 to encode video
            'x264-params': 'crf=15',
        }
        writer = MediaWriter(out_file, [out_opts],
                             lib_opts=lib_opts,
                             overwrite=True)

        frame_count = 0
        start_timestamp = 0
        while True:

            frame, val = player.get_frame()
            if val == 'eof':
                print('end of video')
                break
            elif frame is None:
                time.sleep(0.01)
            else:
                t1 = time.time() * 1000
                img, t = frame
                if frame_count == 0:
                    start_timestamp = t
                bufs = img.to_bytearray()
                assert len(bufs) >= 3

                Y = np.frombuffer(bufs[0], dtype=np.uint8)
                U = np.frombuffer(bufs[1], dtype=np.uint8)
                V = np.frombuffer(bufs[2], dtype=np.uint8)

                input_YUV = cv2.merge([Y, U, V])
                img = cv2.cvtColor(input_YUV, cv2.COLOR_YUV2RGB)
                img = np.array(img).reshape(height, width, 3)

                outputImg = agent.test_video(img)

                out = np.array(outputImg).reshape(height_out * width_out, 1, 3)
                YUV = cv2.cvtColor(out, cv2.COLOR_RGB2YUV)

                (Y, U, V) = cv2.split(YUV)

                bufs = []
                bufs.append(Y.tobytes())
                bufs.append(U.tobytes())
                bufs.append(V.tobytes())
                outputImg = Image(plane_buffers=bufs,
                                  pix_fmt='yuv444p',
                                  size=(width_out, height_out))
                t = t - start_timestamp
                writer.write_frame(img=outputImg, pts=t, stream=0)

                t2 = time.time() * 1000
                frame_count += 1
                if (frame_count % 30 == 0):
                    print('convert frame # ', frame_count)
                #print('--pts:', t)
                if frame_count >= convert_num > 0:
                    break
                # if frame_count >= 1800:
                #     break
                # print("time: ", time.time()*1000-tt)

        player.close_player()
        writer.close()