Example #1 (score: 0)
File: ffmpeg.py — project: suzg/faceswap-1
    def _get_writer(self, frame_dims: Tuple[int]) -> Generator[None, np.ndarray, None]:
        """ Create and prime the imageio-ffmpeg frame writer with the
        configured encoding options.

        Parameters
        ----------
        frame_dims: tuple
            The (rows, columns) shape of the input image

        Returns
        -------
        generator
            The imageio ffmpeg writer
        """
        # Only mux audio from the source video when an audio codec was requested
        audio_codec = self._audio_codec
        audio_path = self._source_video if audio_codec is not None else None
        logger.debug("writer config: %s, audio_path: '%s'", self.config, audio_path)

        rows, cols = frame_dims[0], frame_dims[1]
        writer = im_ffm.write_frames(self._output_filename,
                                     size=(cols, rows),  # ffmpeg wants (width, height)
                                     fps=self._video_fps,
                                     quality=None,
                                     codec=self.config["codec"],
                                     macro_block_size=8,
                                     ffmpeg_log_level="error",
                                     ffmpeg_timeout=10,
                                     output_params=self._output_params,
                                     audio_path=audio_path,
                                     audio_codec=audio_codec)
        logger.debug("FFMPEG Writer created: %s", writer)
        # Prime the generator so it is ready to receive frames via send()
        writer.send(None)

        return writer
Example #2 (score: 0)
def test_write1():

    for n in (1, 9, 14, 279, 280, 281):

        # Open a fresh writer generator for this frame count
        writer = imageio_ffmpeg.write_frames(test_file2, (64, 64))
        assert isinstance(writer, types.GeneratorType)
        writer.send(None)  # seed

        # Write n frames of increasing brightness
        for index in range(n):
            shade = min(255, 100 + index * 10)
            writer.send(bytes([shade] * 64 * 64 * 3))
        writer.close()

        # ffmpeg's reported frame count must match what we wrote
        nframes, nsecs = imageio_ffmpeg.count_frames_and_secs(test_file2)
        assert nframes == n

        # Cross-check by actually decoding every frame
        reader = imageio_ffmpeg.read_frames(test_file2)
        reader.__next__()  # == meta
        total = sum(1 for _ in reader)
        assert total == n
def processvideo(file):
    """Run object detection over a video file and write/stream annotated frames.

    Relies on module-level ``args`` (CLI options), ``cv2``, ``imageio`` and
    ``detect``.  A writer is only created when ``args.framelimit`` >= 0, so
    detection output is only persisted in that case.
    """
    print('processing ' + file)
    cap = cv2.VideoCapture(file)
    # cap.get(3) is the frame width and cap.get(4) the frame height
    writer = None  # BUGFIX: writer was used even when never created below
    if int(args.framelimit) >= 0:
        if args.outputfile.startswith('udp') or args.outputfile.startswith(
                'rtsp'):
            print("Exporting to stream: " + args.outputfile)
            writer = imageio.write_frames(args.outputfile,
                                          (int(cap.get(3)), int(cap.get(4))),
                                          output_params=['-f', 'mpegts'])
        else:
            print("Exporting to file " + args.outputfile)
            writer = imageio.write_frames(args.outputfile,
                                          (int(cap.get(3)), int(cap.get(4))))
        writer.send(None)  # prime the generator
    frame_counter = 0
    while (cap.isOpened()):
        frame_counter = frame_counter + 1
        # check limits of processed frames
        if int(args.framelimit) > 0 and frame_counter > int(
                args.framestart) + int(args.framelimit):
            print('Processed ' + args.framelimit + 'ending...')
            break
        # read input frame
        ret, frame = cap.read()
        # throttle control: only process every fpsthrottle-th frame
        if frame_counter % int(args.fpsthrottle) != 0:
            continue
        # check frame start: skip (do not stop) until the start frame
        if int(args.framestart) > 0 and frame_counter < int(args.framestart):
            continue  # BUGFIX: was `break`, which aborted before ever starting
        if ret == True:
            if frame is not None:
                print('Detecting objects in frame ' + str(frame_counter),
                      end="\r")
                image = detect(frame)
                if writer is not None:
                    writer.send(image)
            else:
                print('Frame error in frame ' + str(frame_counter))
        else:
            break
    cap.release()
    if writer is not None:
        writer.close()
Example #4 (score: 0)
 def _write_frames(pixfmt, bpp, tout):
     # Push n (free variable from enclosing scope) random frames of the
     # given input pixel format through ffmpeg with the given timeout.
     writer = imageio_ffmpeg.write_frames(test_file2, (2048, 2048),
                                          pix_fmt_in=pixfmt,
                                          ffmpeg_timeout=tout)
     writer.send(None)  # seed
     for _ in range(n):
         raw = (255 * np.random.rand(2048 * 2048 * bpp)).astype(np.uint8)
         writer.send(bytes(raw))
     writer.close()
def encode_video(video: np.ndarray,
                 output_path: str,
                 fps: float,
                 bitrate: str = "0",
                 crf: int = 20,
                 pix_fmt_out: str = "yuv420p") -> str:
    """Encode a video with vp9 codec via imageio-ffmpeg

    Parameters
    ----------
    video : np.ndarray
        Video to be encoded; frames are sent as-is with pix_fmt_in="gray8"
    output_path : str
        Desired output path for encoded video
    fps : float
        Desired frame rate for encoded video
    bitrate : str, optional
        Desired bitrate of output, by default "0". The default *MUST*
        be zero in order to encode in constant quality mode. Other values
        will result in constrained quality mode.
    crf : int, optional
        Desired perceptual quality of output, by default 20. Value can
        be from 0 - 63. Lower values mean better quality (but bigger video
        sizes).
    pix_fmt_out : str, optional
        Desired pixel format for output encoded video, by default "yuv420p".
        (Was documented but not accepted before; now a real, backward
        compatible parameter.)

    Returns
    -------
    str
        Output path of the encoded video
    """

    # ffmpeg expects video shape in terms of: (width, height)
    video_shape = (video[0].shape[1], video[0].shape[0])

    writer = mpg.write_frames(output_path,
                              video_shape,
                              pix_fmt_in="gray8",
                              pix_fmt_out=pix_fmt_out,
                              codec="libvpx-vp9",
                              fps=fps,
                              bitrate=bitrate,
                              output_params=["-crf", str(crf)])

    writer.send(None)  # Seed ffmpeg-imageio writer generator
    for frame in video:
        writer.send(frame)
    writer.close()

    return output_path
示例#6
0
def test_write_wmv():
    # Switch to MS friendly codec when writing .wmv files

    for ext, codec in [("", "h264"), (".wmv", "msmpeg4")]:
        fname = test_file2 + ext
        writer = imageio_ffmpeg.write_frames(fname, (64, 64))
        writer.send(None)  # seed the generator
        for idx in range(9):
            shade = min(255, 100 + idx * 10)
            writer.send(bytes([shade] * 64 * 64 * 3))
        writer.close()
        # The extension must have selected the expected codec
        meta = next(imageio_ffmpeg.read_frames(fname))
        assert meta["codec"].startswith(codec)
示例#7
0
async def write_video_to_stream(path_to_video, path_to_socket):
    """Stream every frame of a video file over a unix socket.

    The ffmpeg writer is created lazily from the first frame, since the
    frame size is only known once a frame has been read.
    """
    video_reader = imageio.get_reader(path_to_video)
    sock = await create_socket(path_to_socket)
    _, socket_writer = await asyncio.open_unix_connection(sock=sock)
    img_writer = None
    for frame in video_reader:
        if img_writer is None:
            # NOTE(review): write_frames normally expects a path/URL; passing
            # a StreamWriter here looks suspect — confirm against the
            # imageio-ffmpeg version in use.
            img_writer = imageio_ffmpeg.write_frames(socket_writer,
                                                     size=(frame.shape[1],
                                                           frame.shape[0]))
            img_writer.send(None)
        img_writer.send(frame)
        await socket_writer.drain()
    if img_writer is not None:  # BUGFIX: video may contain no frames at all
        img_writer.close()
    socket_writer.close()
def test_write_close():
    # Closing the writer must not leave ffmpeg processes behind

    for _ in range(N):
        before = get_ffmpeg_pids()
        writer = imageio_ffmpeg.write_frames(test_file2, (64, 64))
        spawned_early = get_ffmpeg_pids().difference(
            before)  # generator has not started
        writer.send(None)
        writer.send(b"x" * 64 * 64 * 3)
        running = get_ffmpeg_pids().difference(before)  # now ffmpeg is running
        writer.close()
        leftover = get_ffmpeg_pids().difference(before)  # now its not

        assert len(spawned_early) == 0
        assert len(running) == 1
        assert len(leftover) == 0
示例#9
0
def test_write_pix_fmt_in():
    # The writer accepts gray, rgb24 and rgba input frames

    sizes = []
    for pixfmt, bpp in [("gray", 1), ("rgb24", 3), ("rgba", 4)]:
        writer = imageio_ffmpeg.write_frames(test_file2, (64, 64), pix_fmt_in=pixfmt)
        writer.send(None)  # seed the generator
        for idx in range(9):
            writer.send(bytes([min(255, 100 + idx * 10)] * 64 * 64 * bpp))
        writer.close()
        with open(test_file2, "rb") as fh:
            sizes.append(len(fh.read()))
        # All nine frames must be present regardless of input format
        nframes, nsecs = imageio_ffmpeg.count_frames_and_secs(test_file2)
        assert nframes == 9
示例#10
0
def test_write_quality():
    # Higher quality settings must produce larger files

    sizes = []
    for quality in [2, 5, 9]:
        writer = imageio_ffmpeg.write_frames(test_file2, (64, 64), quality=quality)
        writer.send(None)  # seed the generator
        for idx in range(9):
            shade = min(255, 100 + idx * 10)
            writer.send(bytes([shade] * 64 * 64 * 3))
        writer.close()
        with open(test_file2, "rb") as fh:
            sizes.append(len(fh.read()))
        # Every output must still contain all nine frames
        nframes, nsecs = imageio_ffmpeg.count_frames_and_secs(test_file2)
        assert nframes == 9

    assert sizes[0] < sizes[1] < sizes[2]
示例#11
0
def test_write_pix_fmt_out():
    # Gray output should compress smaller than yuv420p

    sizes = []
    for pixfmt in ["gray", "yuv420p"]:
        writer = imageio_ffmpeg.write_frames(test_file2, (64, 64), pix_fmt_out=pixfmt)
        writer.send(None)  # seed the generator
        for idx in range(9):
            writer.send(bytes([min(255, 100 + idx * 10)] * 64 * 64 * 3))
        writer.close()
        with open(test_file2, "rb") as fh:
            sizes.append(len(fh.read()))
        # All nine frames must survive the pixel format conversion
        nframes, nsecs = imageio_ffmpeg.count_frames_and_secs(test_file2)
        assert nframes == 9

    assert sizes[0] < sizes[1]
示例#12
0
def test_write_macro_block_size():
    # macro_block_size pads the output frame size up to a multiple

    frame_sizes = []
    for mbz in [None, 10]:  # None is default == 16
        writer = imageio_ffmpeg.write_frames(test_file2, (40, 50), macro_block_size=mbz)
        writer.send(None)  # seed the generator
        for idx in range(9):
            writer.send(bytes([min(255, 100 + idx * 10)] * 40 * 50 * 3))
        writer.close()
        # All frames must have been written
        nframes, nsecs = imageio_ffmpeg.count_frames_and_secs(test_file2)
        assert nframes == 9
        # Record the (possibly padded) frame size from the metadata
        meta = next(imageio_ffmpeg.read_frames(test_file2))
        frame_sizes.append(meta["size"])

    assert frame_sizes[0] == (48, 64)  # 40x50 rounded up to multiples of 16
    assert frame_sizes[1] == (40, 50)  # already multiples of 10: unchanged
示例#13
0
def test_write_bitrate():

    # Mind that we send uniform images, so the difference is marginal

    sizes = []
    for bitrate in ["1k", "10k", "100k"]:
        writer = imageio_ffmpeg.write_frames(test_file2, (64, 64), bitrate=bitrate)
        writer.send(None)  # seed the generator
        for idx in range(9):
            writer.send(bytes([min(255, 100 + idx * 10)] * 64 * 64 * 3))
        writer.close()
        with open(test_file2, "rb") as fh:
            sizes.append(len(fh.read()))
        # Frame count is independent of bitrate
        nframes, nsecs = imageio_ffmpeg.count_frames_and_secs(test_file2)
        assert nframes == 9

    # Higher bitrate budget -> larger files
    assert sizes[0] < sizes[1] < sizes[2]
示例#14
0
def processvideo(file):
    """Detect objects in every frame of *file* and write annotated frames out.

    Uses module-level ``args`` (CLI options), ``cv2``, ``imageio`` and
    ``detect``.
    """
    cap = cv2.VideoCapture(file)

    # cap.get(3)/cap.get(4) are the source width/height
    writer = imageio.write_frames(args.outputfile, (int(cap.get(3)), int(cap.get(4))))
    writer.send(None)  # prime the generator
    frame_counter = 0
    while(cap.isOpened()):
        frame_counter = frame_counter + 1
        ret, frame = cap.read()
        print('Detecting objects in frame ' + str(frame_counter))
        if ret==True:
            if frame is not None:
                image = detect(frame)
                # BUGFIX: write the annotated frame; the detect() result
                # was previously computed but discarded
                writer.send(image)
            else:
                print('Frame error in frame ' + str(frame_counter))
        else:
            break
    cap.release()
    writer.close()
示例#15
0
def test_write_audio_path():
    # Provide an audio track to mux into the written video

    gen = imageio_ffmpeg.write_frames(test_file2, (64, 64),
                                      audio_path=test_file3,
                                      audio_codec="aac")
    gen.send(None)  # seed
    for i in range(9):
        data = bytes([min(255, 100 + i * 10)] * 64 * 64 * 3)
        gen.send(data)
    gen.close()
    # Check nframes
    nframes, nsecs = imageio_ffmpeg.count_frames_and_secs(test_file2)
    assert nframes == 9
    # Check that the requested audio codec was actually used
    # (the duplicated nframes assertion was removed)
    meta = imageio_ffmpeg.read_frames(test_file2).__next__()
    audio_codec = meta["audio_codec"]

    assert audio_codec == "aac"
示例#16
0
 def write(self, frame: np.ndarray):
     """Append one HxWx3 uint8 frame to the video, opening the writer lazily.

     The first frame fixes the output size; later frames must match it.
     Channels are swapped BGR->RGB when ``self.bgr2rgb`` is set.
     """
     assert frame.ndim == 3
     assert frame.shape[2] == 3
     assert frame.dtype == np.uint8
     frame_size = (frame.shape[1], frame.shape[0])
     if self.out is None:
         # First frame: create output directory and open the ffmpeg writer
         self.path.parent.mkdir(exist_ok=True, parents=True)
         self.frame_size = frame_size
         self.out = imageio_ffmpeg.write_frames(
             str(self.path),
             self.frame_size,
             fps=self.fps,
             codec=self.codec,
             macro_block_size=1,
             ffmpeg_log_level='error',
         )
         self.out.send(None)  # prime the generator
     else:
         assert self.frame_size == frame_size, f"Wrong frame size: should be {self.frame_size}, got {frame_size}"
     if self.bgr2rgb:
         frame = frame[:, :, (2, 1, 0)]
     self.out.send(np.ascontiguousarray(frame))
def write_video_to_stream(path_to_video, rtp_port):
    """Stream a video file as an RTP/MPEG-TS stream on localhost."""
    video_reader = imageio.get_reader(path_to_video)
    print(video_reader.get_meta_data())
    rtp_url = f"rtp://localhost:{rtp_port}"
    img_writer = imageio_ffmpeg.write_frames(
        rtp_url,
        size=video_reader.get_meta_data()['size'],
        output_params=['-f', 'rtp_mpegts'],
        input_params=['-re'],  # read input at its native frame rate
        fps=24,
        codec='mpeg4',
        bitrate='1024K')
    img_writer.send(None)  # prime the generator
    for frame in video_reader:
        try:
            img_writer.send(frame)
        except KeyboardInterrupt:
            # Stop streaming cleanly on Ctrl+C
            img_writer.close()
            break
    img_writer.close()
示例#18
0
def write_video_to_stream(path_to_video, rtp_port):
    """Serve a video file as a WebM stream over HTTP on localhost.

    NOTE(review): despite the parameter/variable names, this serves over
    HTTP (ffmpeg '-listen 1'), not RTP; the CORS header is hard-coded for
    http://localhost:3000.
    """
    video_reader = imageio.get_reader(path_to_video)
    print(video_reader.get_meta_data())
    rtp_url = f"http://localhost:{rtp_port}"
    output_params = [ '-f', 'webm', '-listen', "1", '-seekable', '0',
        '-headers', 'Access-Control-Allow-Origin: http://localhost:3000\r\n']
    input_params = ['-re']
    img_writer = imageio_ffmpeg.write_frames(
            rtp_url,
            size=video_reader.get_meta_data()['size'],
            output_params=output_params,
            input_params=input_params,
            codec='vp9',
    )
    img_writer.send(None)
    # Dead commented-out KeyboardInterrupt handling removed; the loop body
    # is now indented normally.
    for frame in video_reader:
        img_writer.send(frame)
    img_writer.close()
Example #19 (score: 0) — fragment; begins and ends mid-definition
            break
    cap.release()
    writer.close()

# Doing some Object Detection on a video
classes = None

with open(args.classes, 'r') as f:
    classes = [line.strip() for line in f.readlines()]
COLORS = np.random.uniform(0, 255, size=(len(classes), 3))

if args.input.startswith('rtsp'):

    cap = cv2.VideoCapture(args.input)
    if int(args.framelimit) > 0:
        writer = imageio.write_frames(args.outputfile, (int(cap.get(3)), int(cap.get(4))))
        writer.send(None)
    frame_counter = 0
    while(True):
        if int(args.framelimit) > 0 and frame_counter > int(args.framestart) + int(args.framelimit):
            writer.close()
            break

        if frame_counter % int(args.fpsthrottle) ==0:
            ret, frame = cap.read()
            if ret and frame_counter >= int(args.framestart):
                print('Detecting objects in frame ' + str(frame_counter))
                frame = detect(frame)
                if int(args.framelimit) > 0:
                    writer.send(frame)
            else:
示例#20
0
def style_video(video_path,
                style,
                output_name=None,
                alpha=1.0,
                preserve_color=False,
                custom_resolution=None):
    """Apply AdaIN style transfer to every frame of a video.

    Parameters
    ----------
    video_path : str
        Path of the input video.
    style : np.ndarray
        Style image; resized below to the working resolution.
    output_name : str or None
        When None the stylized frames are returned as a list; otherwise
        a video named f'{output_name}.mp4' is written.
    alpha : float
        Blend factor between content features (0.0) and stylized
        features (1.0).
    preserve_color : bool
        When True the style tensor is rebuilt per frame via
        ``preserve_color_stable`` so content colors are kept.
    custom_resolution : tuple or None
        Resolution to process at; defaults to the source video size.
        # NOTE(review): assumed (width, height) like imageio's
        # "source_size" — confirm before relying on it.

    Relies on module-level ``net``, ``device``, ``adaIN``, ``test``,
    ``preserve_color_stable``, ``transform``, ``imageio`` and ``tqdm``.
    """
    toTensor = ToTensor()

    reader = imageio.get_reader(video_path)
    resolution = None

    # Use the source resolution unless the caller overrides it
    if custom_resolution is None:
        resolution = reader.get_meta_data()["source_size"]
    else:
        resolution = custom_resolution
    fps = reader.get_meta_data()["fps"]

    style = transform.resize(style, resolution)

    # Without color preservation the style tensor is fixed, so encode it once
    if not preserve_color:
        style_tensor = torch.tensor(np.expand_dims(toTensor(style),
                                                   0)).float().to(device)
        style_tensor = net.encode(style_tensor)

    frame_number = 0
    driving_video = []
    for frame_number in tqdm(range(reader.get_meta_data()['nframes'])):
        try:
            content = reader.get_next_data()
        except imageio.core.CannotReadFrameError:
            # Stop at the first unreadable/missing frame
            break
        except IndexError:
            break
        else:
            # Resize when a custom resolution was requested; otherwise
            # only normalize the frame to floats
            if custom_resolution is not None:
                content = transform.resize(content, list(resolution)[::-1])
            else:
                content = content.astype(float) / 255
            if preserve_color:
                # Rebuild the style tensor from this frame's content
                style_tensor = preserve_color_stable(content, style)
                style_tensor = torch.tensor(
                    np.expand_dims(toTensor(style_tensor),
                                   0)).float().to(device)
                style_tensor = net.encode(style_tensor)

            # Encode content, mix content/stylized features, then decode
            x = torch.tensor(toTensor(content)).unsqueeze(0).float().to(device)
            x = net.encode(x)
            result = adaIN(x, style_tensor)
            result = (1 - alpha) * x + alpha * result
            result = test(content, result, alpha, False, False)
            result = cv2.convertScaleAbs(result * 255)
            driving_video.append(result)
    reader.close()

    if output_name == None:
        return driving_video

    # macro_block_size=1 keeps the exact resolution (no 16-px rounding)
    writer = imageio_ffmpeg.write_frames(f'{output_name}.mp4', (resolution),
                                         fps=fps,
                                         macro_block_size=1)
    writer.send(None)  # seed the generator
    for frame in driving_video:
        writer.send(frame)
    writer.close()
示例#21
0
            prediction = model(in_data)
            torch.cuda.synchronize()
            end = time.time()
            times.append(end - start)
            # store video frame(s)
            x = in_data[:, data.N_FRAMES, 0:3, :, :] if 'reproj' in args.type else in_data[:, 0:3, :, :]
            p = prediction[:, 0:3, :, :]
            y = target[:, 0:3, :, :]
            # postprocess
            frame = (torch.sqrt(torch.clamp(torch.cat((x, p, y), dim=-1), 0, 1)) * 255).to(torch.uint8)
            frame = frame.transpose_(-3, -1).transpose_(-3, -2)
            video[args.batch_size*idx:args.batch_size*idx+p.size(0)] = frame.cpu().numpy()
            # write images to disk?
            if args.images:
                img = torch.cat((x, p, y), dim=-1) if args.cmp else p
                data.write([f'{name}/{name}_{args.batch_size*idx+j:06}.{args.format}' for j in range(frame.size(0))], img.cpu().numpy())
            tq.update(args.batch_size)
        tq.close()

    print(f'avg inference time (in s):', np.array(times).mean(), 'std:', np.array(times).std())

    # write video
    ffmpeg = imageio_ffmpeg.write_frames(f'{name}/{name}.mp4', (3*sample.shape[-1], sample.shape[-2]), fps=args.fps, quality=5)
    ffmpeg.send(None) # seed
    ffmpeg.send(video)
    ffmpeg.close()
    print(f'{name}/{name}.mp4 written.')
    # make sure images were written
    data.pool.close()
    data.pool.join()
Example #22 (score: 0)
# %%  Write a series of large frames

# In earlier versions of imageio-ffmpeg, the ffmpeg process was given a timeout
# to complete, but this timeout must be longer for longer movies. The default
# is now to wait for ffmpeg.

import os
import numpy as np
import imageio_ffmpeg

# Ten random 1000x1000 RGB uint8 frames, cycled through below.
ims = [
    np.random.uniform(0, 255, size=(1000, 1000, 3)).astype(np.uint8) for i in range(10)
]

filename = os.path.expanduser("~/Desktop/foo.mp4")
# ffmpeg_timeout=0 disables the timeout: wait for ffmpeg however long it takes.
w = imageio_ffmpeg.write_frames(filename, (1000, 1000), ffmpeg_timeout=0)
w.send(None)
# Write 200 frames by reusing the 10 sample images.
for i in range(200):
    w.send(ims[i % 10])
    print(i)
w.close()


# %% Behavior of KeyboardInterrupt / Ctrl+C

import os
import imageio_ffmpeg

# Open a reader on a sample video; interrupt behavior is explored interactively.
filename = os.path.expanduser("~/.imageio/images/cockatoo.mp4")
reader = imageio_ffmpeg.read_frames(filename)
示例#23
0
def OpenWriter(cam_params):
    """Create and prime an imageio-ffmpeg writer for one camera stream.

    Chooses a codec and encoder parameters from ``cam_params``: CPU encoding
    when ``gpuID`` == -1, otherwise the nvidia/amd/intel hardware encoder.
    Creation is retried until it succeeds or the user interrupts.

    Returns
    -------
    generator or None
        The primed ffmpeg writer generator, or None when interrupted
        before a writer could be created.
    """
    folder_name = os.path.join(cam_params["videoFolder"],
                               cam_params["cameraName"])
    if cam_params["cameraMake"] == "emu":
        cam_params["videoFilename"] = "emu" + cam_params["videoFilename"]
    full_file_name = os.path.join(folder_name, cam_params["videoFilename"])

    if not os.path.isdir(folder_name):
        os.makedirs(folder_name)
        print('Made directory {}.'.format(folder_name))

    # Load defaults (unused n_cam local removed)
    pix_fmt_out = cam_params["pixelFormatOutput"]
    codec = cam_params["codec"]
    gpu_params = []

    # CPU compression
    if cam_params["gpuID"] == -1:
        print('Opened: {} using CPU to compress the stream.'.format(
            full_file_name))
        if pix_fmt_out == 'rgb0':
            pix_fmt_out = 'yuv420p'
        if cam_params["codec"] == 'h264':
            codec = 'libx264'
        elif cam_params["codec"] == 'h265':
            codec = 'libx265'
        gpu_params = [
            '-r:v',
            str(cam_params["frameRate"]),
            '-preset',
            'fast',
            '-tune',
            'fastdecode',
            '-crf',
            cam_params["quality"],
            '-bufsize',
            '20M',
            '-maxrate',
            '10M',
            '-bf:v',
            '4',
            '-vsync',
            '0',
        ]

    # GPU compression
    else:
        print('Opened: {} using GPU {} to compress the stream.'.format(
            full_file_name, cam_params["gpuID"]))
        if cam_params["gpuMake"] == 'nvidia':
            if cam_params["codec"] == 'h264':
                codec = 'h264_nvenc'
            elif cam_params["codec"] == 'h265':
                codec = 'hevc_nvenc'
            gpu_params = [
                '-r:v',
                str(cam_params["frameRate"]
                    ),  # important to play nice with vsync '0'
                '-preset',
                'fast',  # set to 'fast', 'llhp', or 'llhq' for h264 or hevc
                '-qp',
                cam_params["quality"],
                '-bf:v',
                '0',
                '-vsync',
                '0',
                '-2pass',
                '0',
                '-gpu',
                str(cam_params["gpuID"]),
            ]
        elif cam_params["gpuMake"] == 'amd':
            if pix_fmt_out == 'rgb0':
                pix_fmt_out = 'yuv420p'
            if cam_params["codec"] == 'h264':
                codec = 'h264_amf'
            elif cam_params["codec"] == 'h265':
                codec = 'hevc_amf'
            gpu_params = [
                '-r:v',
                str(cam_params["frameRate"]),
                '-usage',
                'lowlatency',
                '-rc',
                'cqp',  # constant quantization parameter
                '-qp_i',
                cam_params["quality"],
                '-qp_p',
                cam_params["quality"],
                '-qp_b',
                cam_params["quality"],
                '-bf:v',
                '0',
                '-hwaccel',
                'auto',
                '-hwaccel_device',
                str(cam_params["gpuID"]),
            ]
        elif cam_params["gpuMake"] == 'intel':
            if pix_fmt_out == 'rgb0':
                pix_fmt_out = 'nv12'
            if cam_params["codec"] == 'h264':
                codec = 'h264_qsv'
            elif cam_params["codec"] == 'h265':
                codec = 'hevc_qsv'
            gpu_params = [
                '-r:v',
                str(cam_params["frameRate"]),
                '-bf:v',
                '0',
            ]

    # Initialize writer object (imageio-ffmpeg)
    writer = None  # BUGFIX: a Ctrl+C before creation succeeded caused a
    # NameError at `return writer`; now None is returned instead.
    while (True):
        try:
            try:
                writer = write_frames(
                    full_file_name,
                    [cam_params["frameWidth"], cam_params["frameHeight"]
                     ],  # size [W,H]
                    fps=cam_params["frameRate"],
                    quality=None,
                    codec=codec,
                    pix_fmt_in=cam_params[
                        "pixelFormatInput"],  # 'bayer_bggr8', 'gray', 'rgb24', 'bgr0', 'yuv420p'
                    pix_fmt_out=pix_fmt_out,
                    bitrate=None,
                    ffmpeg_log_level=cam_params[
                        "ffmpegLogLevel"],  # 'warning', 'quiet', 'info'
                    input_params=['-an'],  # '-an' no audio
                    output_params=gpu_params,
                )
                writer.send(None)  # Initialize the generator
                break
            except Exception as e:
                # Retry after a short pause; creation can fail transiently
                logging.error('Caught exception: {}'.format(e))
                time.sleep(0.1)

        except KeyboardInterrupt:
            break

    return writer
示例#24
0
def make_movie(simulation,
               dynstate,
               name: str,
               pfilm=5,
               fps=24,
               callback=None):
    """
    Makes a .mp4 movie of simulation from the history saved in dynstate.

    Parameters
    ----------
    simulation : Simulation
        The simulation object
    dynstate : DynState
        The dynState object containing the position history file.
    name : str
        The path (and name) of the file to be written.
    pfilm : int
        To make the movie, takes every pfilm position.
    fps : int
        The number of frame per seconds of the film.
    callback : function
        An optional callback function that is called every time a
        frame is made and is passed the current iteration number.

    Returns
    -------
    None

    """
    # Open the position history for reading
    npas = simulation.current_iter
    YlimB = simulation.model.y_lim_inf
    YlimH = simulation.model.y_lim_sup
    XlimG = simulation.model.x_lim_inf
    XlimD = simulation.model.x_lim_sup
    if not name.endswith(".mp4"):
        name += ".mp4"
    with dynstate.open(dynstate.POS_H, 'r') as fix:
        # Set of iteration indices at which a frame is drawn
        klist = set(range(0, npas, pfilm))
        # Main loop that creates the movie
        figure_size = (1920, 1088)
        try:
            os.remove(name)
        except FileNotFoundError:
            pass
        gen = write_frames(name, figure_size, fps=fps, quality=9)
        gen.send(None)
        fig = plt.figure(figsize=(figure_size[0] / (72 * 2),
                                  figure_size[1] / (72 * 2)))
        plt.clf()
        # Set up the drawing area
        plt.ioff()  # so that the figures are not displayed on screen
        plt.axis('scaled')
        plt.ylim(YlimB, YlimH)
        plt.xlim(XlimG, XlimD)
        pos = fix.load()
        line1, = plt.plot(*pos[:simulation.model.n_a, :].T,
                          'ro',
                          markersize=0.5)
        line2, = plt.plot(*pos[simulation.model.n_a:, :].T,
                          'bo',
                          markersize=0.5)
        plt.xlabel("0")
        temp = io.BytesIO()
        for k in range(1, npas):
            # Draw at each selected step (never shown: saved incrementally)
            if k in klist:
                temp.seek(0)
                plt.xlabel("Iteration : {}".format(k))
                plt.title(f"T = {simulation.state_fct['T'][k]:.2f} K")
                line1.set_data(*pos[:simulation.model.n_a, :].T)
                line2.set_data(*pos[simulation.model.n_a:, :].T)
                fig.savefig(temp, format='raw',
                            dpi=72 * 2)  # incremental save
                temp.seek(0)
                if callback: callback(k)
                gen.send(
                    Image.frombytes('RGBA', figure_size,
                                    temp.read()).convert('RGB').tobytes())
            pos = fix.load()  # load the positions at every time step
        gen.close()
        plt.close(fig)
示例#25
0
def main():
    """Run a small video-effects chain (loop -> sharpen -> bit-reduce) over a
    clip and write the processed frames to a new file, timing the run.

    Relies on project classes ``Looper``, ``BitReducer``, ``Sharpener``,
    ``VideoDelay``, ``ModuleNet`` and ``Instrument``; commented-out lines are
    alternative wirings kept for experimentation.
    """
    home = '/Users/Max/'
    filename = home + '/Documents/videos/daily_dose1.mp4'
    out_filename = home + '/Documents/videos/daily_dose1_edited.mp4'

    reader = get_reader(filename)
    meta = reader.get_meta_data()

    # Output video matches the input's size and frame rate
    writer = write_frames(out_filename, meta['size'], fps=meta['fps'])
    writer.send(None)

    start_offset = 4170

    # Two loopers reading the same source at different offsets
    looper = Looper(meta)
    looper.set_video(reader)
    looper.set_offset(start_offset)
    looper.set_loop_length(500)
    looper.set_frame_interval(5)

    looper1 = Looper(meta)
    looper1.set_video(reader)
    looper1.set_offset(start_offset - 1000)
    looper1.set_loop_length(500)
    looper1.set_frame_interval(5)

    reducer = BitReducer(meta)
    sharpen = Sharpener(meta)
    delay = VideoDelay(meta)

    # Wire the chain: Looper0 -> Sharpener0 -> BitReducer0 -> OUT
    net = ModuleNet()
    net.add_module(looper)
    # net.add_module(looper1)
    net.add_module(sharpen, input_names=(['Looper0'], [], []))
    net.add_module(reducer,
                   input_names=(['Sharpener0'], [], []),
                   output_names=(['OUT'], [], []))
    # net.add_module(reducer, input_names=(['Looper0'], [], []), output_names=(['OUT'], [], []))
    # net.add_module(delay, input_names=(['Looper0'], [], []), output_names=(['OUT'], [], []))
    # net.add_connection('Looper1', 'VideoDelay0', 'video')

    net.nodes['BitReducer0'].module.set_num_bits(4)
    # net.nodes['Sharpener0'].module.set_strength(7)
    # net.nodes['VideoDelay0'].module.disable()
    # Sharpening strength oscillates sinusoidally between min and max
    sharpen_max_strength = 20
    sharpen_min_strength = 10
    sharpen_delta = (sharpen_max_strength - sharpen_min_strength) / 2
    angular_period = 2 * np.pi / 30

    instrument = Instrument()
    instrument.net = net

    total_frames = 300

    start_time = timer()

    for i in range(total_frames):
        net.nodes['Sharpener0'].module.set_strength(sharpen_min_strength +
                                                    sharpen_delta +
                                                    sharpen_delta *
                                                    np.sin(angular_period * i))
        #
        # net.nodes['Looper0'].tick()
        # net.nodes['Sharpener0'].tick()
        # net.nodes['BitReducer0'].tick()
        # instrument.reset_flags()
        # print(net.nodes['Looper0'].module.current_frame)
        # if i == 100:
        #     net.nodes['VideoDelay0'].module.disable()
        # print(i)
        video_frame = instrument.tick()
        # frame = net.nodes['BitReducer0'].module.video_output_buffer
        writer.send(video_frame)
    #     # print(looper.current_frame)

    end_time = timer()

    reader.close()
    writer.close()

    print(
        f"Time taken to run {total_frames} frames: {end_time - start_time}s.")
示例#26
0
def style_video(video_path,
                style,
                output_name=None,
                alpha=1.0,
                preserve_color=False,
                custom_resolution=None,
                frame_skip=1):
    """Apply AdaIN style transfer to a video, then remux the original audio.

    Parameters
    ----------
    video_path : str
        Path of the input video.
    style : np.ndarray
        Style image; resized to (height, width) of the working resolution.
    output_name : str or None
        When None the stylized frames are returned as a list; otherwise
        f'{output_name}.mp4' is written (with the source audio if ffmpeg
        manages to extract it).
    alpha : float
        Blend factor between content features (0.0) and stylized (1.0).
    preserve_color : bool
        When True the style tensor is rebuilt per frame so content colors
        are kept.
    custom_resolution : tuple or None
        (width, height) to process at; defaults to the source video size.
    frame_skip : int
        Process every frame_skip-th frame; the output fps is reduced by
        the same factor so playback speed is preserved.

    Relies on module-level ``net``, ``device``, ``adaIN``, ``test``,
    ``preserve_color_stable``, ``transform``, ``imageio`` and ``tqdm``.
    """
    toTensor = ToTensor()

    reader = imageio.get_reader(video_path)
    resolution = None

    # Use the source resolution unless the caller overrides it
    if custom_resolution is None:
        resolution = reader.get_meta_data()["source_size"]
    else:
        resolution = custom_resolution
    fps = reader.get_meta_data()["fps"] / frame_skip

    style = transform.resize(style, (resolution[1], resolution[0]))

    # Without color preservation the style tensor is fixed: encode it once
    if not preserve_color:
        style_tensor = torch.tensor(np.expand_dims(toTensor(style),
                                                   0)).float().to(device)
        style_tensor = net.encode(style_tensor)

    driving_video = []
    for frame_number in tqdm(range(reader.count_frames())):
        try:
            content = reader.get_next_data()
        except IndexError:
            break
        else:
            # Only stylize every frame_skip-th frame
            if frame_number % frame_skip == 0:
                if custom_resolution is not None:
                    content = transform.resize(content,
                                               (resolution[1], resolution[0]))
                else:
                    content = content.astype(float) / 255
                if preserve_color:
                    # Rebuild the style tensor from this frame's content
                    style_tensor = preserve_color_stable(content, style)
                    style_tensor = torch.tensor(
                        np.expand_dims(toTensor(style_tensor),
                                       0)).float().to(device)
                    style_tensor = net.encode(style_tensor)

                # Encode content, mix content/stylized features, decode
                x = torch.tensor(
                    toTensor(content)).unsqueeze(0).float().to(device)
                x = net.encode(x)
                result = adaIN(x, style_tensor)
                result = (1 - alpha) * x + alpha * result
                result = test(content, result, alpha, False, False)
                result = cv2.convertScaleAbs(result * 255)
                driving_video.append(result)
    reader.close()

    if output_name == None:
        return driving_video

    # macro_block_size=1 keeps the exact resolution (no 16-px rounding)
    writer = imageio_ffmpeg.write_frames(f'{output_name}_plain.mp4',
                                         (resolution),
                                         fps=fps,
                                         macro_block_size=1)
    writer.send(None)
    for frame in driving_video:
        writer.send(frame)
    writer.close()

    # Extract the source audio and mux it into the stylized video.
    # NOTE(review): paths are interpolated into shell commands; use
    # subprocess.run with an argument list (shell=False) for untrusted paths.
    os.system(
        f'ffmpeg -y -i "{video_path}" -vn -acodec copy "output_audio.aac"')
    os.system(
        f'ffmpeg -y -i "{output_name}_plain.mp4" -i "output_audio.aac" -c:v copy -c:a aac "{output_name}.mp4"'
    )
    if os.path.isfile("output_audio.aac"):
        os.remove("output_audio.aac")
    # Fall back to the silent video when audio muxing produced no output
    if not os.path.isfile(f"{output_name}.mp4"):
        os.rename(f"{output_name}_plain.mp4", f"{output_name}.mp4")
    else:
        os.remove(f'{output_name}_plain.mp4')