Example #1
    def OpenMovie(self,
                  filename,
                  framerate=24,
                  codec='libx264',
                  preset='medium'):
        """ Establishes a connection to the ffmpeg writer """

        # Attempt to load moviepy
        try:
            import moviepy.video.io.ffmpeg_writer as mwrite
        except ImportError:
            raise ImportError('To use this feature install moviepy and ffmpeg')

        # Create movie object and check if render window is active
        self.window_size = self.renWin.GetSize()
        if not self.window_size[0]:
            raise Exception('Run Plot first')

        self.mwriter = mwrite.FFMPEG_VideoWriter(filename,
                                                 self.window_size,
                                                 framerate,
                                                 codec=codec,
                                                 preset=preset)

        self.movietype = 'mp4'
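    # A minimal sketch of the matching write/close step, assuming the writer
    # created above; GetImage() is a hypothetical helper that returns the
    # render window as a (height, width, 3) uint8 RGB array.
    def WriteFrame(self):
        """ Appends the current render window image to the open movie """
        frame = self.GetImage()          # hypothetical frame grab
        self.mwriter.write_frame(frame)  # FFMPEG_VideoWriter expects an RGB uint8 frame

    def CloseMovie(self):
        """ Flushes and closes the ffmpeg stream """
        self.mwriter.close()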
Example #2
    def ffwd_video(path_in,
                   path_out,
                   checkpoint_dir,
                   device_t='/gpu:0',
                   batch_size=4):
        video_clip = VideoFileClip(path_in, audio=False)
        video_writer = ffmpeg_writer.FFMPEG_VideoWriter(path_out,
                                                        video_clip.size,
                                                        video_clip.fps,
                                                        codec="libx264",
                                                        preset="medium",
                                                        bitrate="2000k",
                                                        audiofile=path_in,
                                                        threads=None,
                                                        ffmpeg_params=None)

        g = tf.Graph()
        soft_config = tf.ConfigProto(allow_soft_placement=True)
        soft_config.gpu_options.allow_growth = True
        with g.as_default(), g.device(device_t), \
                tf.Session(config=soft_config) as sess:
            batch_shape = (batch_size, video_clip.size[1], video_clip.size[0],
                           3)
            img_placeholder = tf.placeholder(tf.float32,
                                             shape=batch_shape,
                                             name='img_placeholder')

            preds = transform.net(img_placeholder)
            saver = tf.train.Saver()
            if os.path.isdir(checkpoint_dir):
                ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                else:
                    raise Exception("No checkpoint found...")
            else:
                saver.restore(sess, checkpoint_dir)

            X = np.zeros(batch_shape, dtype=np.float32)

            def style_and_write(count):
                for i in range(count, batch_size):
                    X[i] = X[count - 1]  # Use last frame to fill X
                _preds = sess.run(preds, feed_dict={img_placeholder: X})
                for i in range(0, count):
                    video_writer.write_frame(
                        np.clip(_preds[i], 0, 255).astype(np.uint8))

            frame_count = 0  # The number of frames written to X
            for frame in video_clip.iter_frames():
                X[frame_count] = frame
                frame_count += 1
                if frame_count == batch_size:
                    style_and_write(frame_count)
                    frame_count = 0

            if frame_count != 0:
                style_and_write(frame_count)

            video_writer.close()
Example #3
def feed_forward_video(path_in, path_out, checkpoint_dir):
    # initialize video cap
    video_cap = VideoFileClip(path_in, audio=False)
    # initialize writer
    video_writer = ffmpeg_writer.FFMPEG_VideoWriter(path_out,
                                                    video_cap.size,
                                                    video_cap.fps,
                                                    codec='libx264',
                                                    preset='medium',
                                                    bitrate='2000k',
                                                    audiofile=path_in,
                                                    threads=None,
                                                    ffmpeg_params=None)

    g = tf.Graph()
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True

    with g.as_default(), tf.Session(config=soft_config) as sess:
        batch_shape = (None, video_cap.size[1], video_cap.size[0], 3)
        img_placeholder = tf.placeholder(tf.float32,
                                         shape=batch_shape,
                                         name='img_placeholder')

        model = Transfer()
        pred = model(img_placeholder)
        saver = tf.train.Saver()

        if os.path.isdir(checkpoint_dir):
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                raise Exception('No checkpoint found...')
        else:
            saver.restore(sess, checkpoint_dir)

        frame_id = 0
        for frame in video_cap.iter_frames():
            print('frame id: {}'.format(frame_id))
            _pred = sess.run(pred,
                             feed_dict={
                                 img_placeholder:
                                 np.asarray([frame]).astype(np.float32)
                             })
            video_writer.write_frame(np.clip(_pred[0], 0, 255).astype(np.uint8))
            frame_id += 1

        video_writer.close()
Example #4
def video_style_transfer_gatys(video_path, style_path, output_path, batch_s=4):

    video = VideoFileClip(video_path, audio=False)
    video_w = ffmpeg_writer.FFMPEG_VideoWriter(output_path, video.size, video.fps, codec="libx264",
                                               preset="medium", bitrate="2000k",
                                               audiofile=video_path, threads=None,
                                               ffmpeg_params=None)

    style = Image.load_image(style_path)
    content = [c for c in video.iter_frames()]
    batch_l = [content[i:i + batch_s] for i in range(0, len(content), batch_s)]
    for b in batch_l:
        frames = run_style_transfer(b, style)
        for f in frames:
            video_w.write_frame(f)
    video_w.close()
Example #5
def video_style_transfer(input_path, model_path, output_path, batch_s=4):

    video = VideoFileClip(input_path, audio=False)
    video_w = ffmpeg_writer.FFMPEG_VideoWriter(output_path, video.size, video.fps, codec="libx264",
                                               preset="medium", bitrate="2000k",
                                               audiofile=input_path, threads=None,
                                               ffmpeg_params=None)

    with tf.Graph().as_default(), tf.Session() as session:

        video_iter = list(video.iter_frames())
        batch_l = [video_iter[i:i + batch_s] for i in range(0, len(video_iter), batch_s)]
        while len(batch_l[-1]) < batch_s:
            batch_l[-1].append(batch_l[-1][-1])

        print("Loading model, it may take some time")
        video_wip = np.array(batch_l, dtype=np.float32)
        place_holder = tf.placeholder(tf.float32, shape=video_wip.shape[1:], name='place_holder')
        wip = Transform.net(place_holder)

        p_loader = tf.train.Saver()

        if os.path.isdir(model_path):

            model = tf.train.get_checkpoint_state(model_path)

            if model is not None and model.model_checkpoint_path:
                p_loader.restore(session, model.model_checkpoint_path)
            else:
                raise Exception("No checkpoint found...")
        else:
            p_loader.restore(session, model_path)

        # The size information in the video file is 'width, height';
        # in *** the dimensions are 'height, width'
        #shape = (batch_s, video.size[1], video.size[0], 3)
        # TODO check if it's ok without shape
        for i in range(len(video_wip)):
            r_res = session.run(wip, feed_dict={place_holder: video_wip[i]})
            for r in r_res:
                video_w.write_frame(np.clip(r, 0, 255).astype(np.uint8))
            print("processed " + str(i+1) + " out of " + str(len(video_wip)) + " batches", end = '\r')

        video_w.close()
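# A minimal illustration of the width/height swap described in the comment
# above (hypothetical file name; batch_s as in the function signature):
#
#     video = VideoFileClip('input.mp4', audio=False)
#     w, h = video.size                  # moviepy reports size as (width, height)
#     batch_shape = (batch_s, h, w, 3)   # tensors use (height, width)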
Example #6
    def _init(self):
        if self._writer is None:
            # make parent dirs if they do not exist
            abs_path = os.path.abspath(self._filename)
            dir_path = os.path.dirname(abs_path)
            os.makedirs(dir_path, exist_ok=True)

            self._writer = ffmpeg_writer.FFMPEG_VideoWriter(
                filename=abs_path,
                size=self._size,
                fps=self._fps,
                codec=self._codec,
                audiofile=self._audiofile,
                preset=self._preset,
                bitrate=self._bitrate,
                withmask=self._withmask,
                logfile=self._logfile,
                threads=self._threads,
                ffmpeg_params=self._ffmpeg_params)
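Example #7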
def NST_Video(path_in, path_out, style_img_path):
    clip = VideoFileClip(path_in, audio=False)
    video_writer = ffmpeg_writer.FFMPEG_VideoWriter(path_out,
                                                    clip.size,
                                                    clip.fps,
                                                    codec="libx264",
                                                    preset="medium",
                                                    bitrate="2000k",
                                                    audiofile=path_in,
                                                    threads=None,
                                                    ffmpeg_params=None)
    style_img = Image_Utils.constrain_img(Image_Utils.load_img(style_img_path))
    array = []
    for frame in clip.iter_frames():
        array.append(frame)
        video_writer.write_frame(frame)
        nst = NST(Image_Utils.constrain_img(frame), style_img)
        image = nst.predict(epochs=100)
        plt.imshow(image)
        plt.show()
    video_writer.close()
Example #8
    def transform_video(self,
                        input_path,
                        output_path,
                        batch_size=4,
                        start=0,
                        end=0):
        '''
        Transform a video to animation version
        https://github.com/lengstrom/fast-style-transfer/blob/master/evaluate.py#L21
        '''
        # Force to None
        end = end or None

        if not os.path.isfile(input_path):
            raise FileNotFoundError(f'{input_path} does not exist')

        output_dir = "/".join(output_path.split("/")[:-1])
        os.makedirs(output_dir, exist_ok=True)
        is_gg_drive = '/drive/' in output_path
        temp_file = ''

        if is_gg_drive:
            # Writing directly into google drive can be inefficient
            temp_file = f'tmp_anime.{output_path.split(".")[-1]}'

        def transform_and_write(frames, count, writer):
            anime_images = denormalize_input(self.transform(frames),
                                             dtype=np.uint8)
            for i in range(0, count):
                img = np.clip(anime_images[i], 0, 255)
                writer.write_frame(img)

        video_clip = VideoFileClip(input_path, audio=False)
        if start or end:
            video_clip = video_clip.subclip(start, end)

        video_writer = ffmpeg_writer.FFMPEG_VideoWriter(temp_file
                                                        or output_path,
                                                        video_clip.size,
                                                        video_clip.fps,
                                                        codec="libx264",
                                                        preset="medium",
                                                        bitrate="2000k",
                                                        audiofile=input_path,
                                                        threads=None,
                                                        ffmpeg_params=None)

        total_frames = round(video_clip.fps * video_clip.duration)
        print(
            f'Transforming video {input_path}, {total_frames} frames, size: {video_clip.size}'
        )

        batch_shape = (batch_size, video_clip.size[1], video_clip.size[0], 3)
        frame_count = 0
        frames = np.zeros(batch_shape, dtype=np.float32)
        for frame in tqdm(video_clip.iter_frames()):
            try:
                frames[frame_count] = frame
                frame_count += 1
                if frame_count == batch_size:
                    transform_and_write(frames, frame_count, video_writer)
                    frame_count = 0
            except Exception as e:
                print(e)
                break

        # The last, incomplete batch
        if frame_count != 0:
            transform_and_write(frames, frame_count, video_writer)

        video_writer.close()

        if temp_file:
            # Move the finished file to the requested output path
            shutil.move(temp_file, output_path)

        print(f'Animation video saved to {output_path}')
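    # Hedged usage sketch for the method above; the owning class name and all
    # paths are hypothetical placeholders, not taken from the original repo.
    #
    #     converter = AnimeConverter()                  # hypothetical class
    #     converter.transform_video('input.mp4',        # must already exist
    #                               'out/anime.mp4',    # parent dir is created if missing
    #                               batch_size=4,
    #                               start=10, end=30)   # optional subclip, in seconds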
Example #9
def ffwd_video(path_in, path_out, checkpoint_dir, device_t='/gpu:0', batch_size=4):
    """Creates a stylized video. Code from lengstrom's repo found here:
    https://github.com/lengstrom/fast-style-transfer
    and the specific file is found here:
    https://github.com/lengstrom/fast-style-transfer/blob/master/evaluate.py

    Parameters
    ----------
    path_in : str
        The path to the video to read in to stylize.
    path_out : str
        The path to save the stylized video.
    checkpoint_dir : str
        The checkpoint dir holding the neural style transfer model. This should
        be a .ckpt file.
    device_t : str, optional
        The device you want to run the model on.
    batch_size : int, optional
        The batch size you want to use for the model.
    """

    video_clip = VideoFileClip(path_in, audio=False)
    video_writer = ffmpeg_writer.FFMPEG_VideoWriter(path_out,
                                                    video_clip.size,
                                                    video_clip.fps,
                                                    codec="libx264",
                                                    preset="medium",
                                                    bitrate="2000k",
                                                    audiofile=path_in,
                                                    threads=None,
                                                    ffmpeg_params=None)

    g = tf.Graph()
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    with g.as_default(), g.device(device_t), \
            tf.Session(config=soft_config) as sess:
        batch_shape = (batch_size, video_clip.size[1], video_clip.size[0], 3)
        img_placeholder = tf.placeholder(tf.float32, shape=batch_shape,
                                         name='img_placeholder')

        preds = transform.net(img_placeholder)
        saver = tf.train.Saver()
        if os.path.isdir(checkpoint_dir):
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                raise Exception("No checkpoint found...")
        else:
            saver.restore(sess, checkpoint_dir)

        X = np.zeros(batch_shape, dtype=np.float32)

        def style_and_write(count):
            for i in range(count, batch_size):
                X[i] = X[count - 1]  # Use last frame to fill X
            _preds = sess.run(preds, feed_dict={img_placeholder: X})
            for i in range(0, count):
                video_writer.write_frame(np.clip(_preds[i], 0, 255).astype(np.uint8))

        frame_count = 0  # The number of frames written to X
        pbar = tqdm(total=int(video_clip.fps * video_clip.duration))
        for frame in video_clip.iter_frames():
            X[frame_count] = frame
            frame_count += 1
            if frame_count == batch_size:
                style_and_write(frame_count)
                pbar.update(frame_count)
                frame_count = 0


        if frame_count != 0:
            style_and_write(frame_count)
            pbar.update(frame_count)

        pbar.close()
        video_writer.close()
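# Hedged usage sketch for ffwd_video above; the file names and the checkpoint
# path are hypothetical placeholders, not taken from the original repo.
if __name__ == '__main__':
    ffwd_video(path_in='input.mp4',
               path_out='stylized.mp4',
               checkpoint_dir='models/wave.ckpt',  # a .ckpt file, per the docstring
               device_t='/gpu:0',
               batch_size=4)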
Example #10
def stylize_objects(seg_model_path, orig_path_in, style_path_in, path_out,
                    device_t="/gpu:0", target_class=1):
    """Generates a video where objects are segmented out and stylized. An
    outline is also drawn around the person and noise is added in proportion to
    the amount of base.

    Parameters
    ----------
    seg_model_path : str
        The path to the segmentation model. Should be a .pb file.
    orig_path_in : str
        The path to the original un-stylized video file.
    style_path_in : str
        The path to the stylized video file.
    path_out : str
        The path to save the new video with only the objects stylized.
    device_t : str, optional
        The device to run the network on.
    target_class : int, optional
        The target class you want to generate masks for and stylize.

    Example
    -------
    stylize_objects("models/model.pb", "video.mp4", "inter_styled_video.mp4",
                    "styled_video.mp4")
    """
    video_clip = VideoFileClip(orig_path_in, audio=True)
    style_video_clip = VideoFileClip(style_path_in, audio=False)
    video_writer = ffmpeg_writer.FFMPEG_VideoWriter(path_out, video_clip.size,
                                                    video_clip.fps,
                                                    codec="libx264",
                                                    preset="medium",
                                                    bitrate="2000k",
                                                    audiofile=orig_path_in,
                                                    threads=None,
                                                    ffmpeg_params=None)
    ch1, ch2 = get_base_bumps(video_clip)

    # load model
    g = tf.Graph()
    with g.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(seg_model_path, "rb") as f:
            serialized_graph = f.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name="")

    # code adapted from https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb
    with g.as_default(), g.device(device_t), tf.Session() as sess:
        ops = tf.get_default_graph().get_operations()
        all_tensor_names = {output.name for op in ops for output in op.outputs}
        tensor_dict = {}
        for key in ["num_detections", "detection_boxes", "detection_scores",
                    "detection_classes", "detection_masks"]:
            tensor_name = key + ":0"
            if tensor_name in all_tensor_names:
                tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                    tensor_name)

        # The following processing is only for single image
        detection_boxes = tf.squeeze(tensor_dict["detection_boxes"], [0])
        detection_masks = tf.squeeze(tensor_dict["detection_masks"], [0])
        # Reframe is required to translate mask from box coordinates to image
        # coordinates and fit the image size.
        real_num_detection = tf.cast(tensor_dict["num_detections"][0], tf.int32)
        detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
        detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
        detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
            detection_masks, detection_boxes, video_clip.size[1],
            video_clip.size[0])
        detection_masks_reframed = tf.cast(
            tf.greater(detection_masks_reframed, 0.5), tf.uint8)
        # Follow the convention by adding back the batch dimension
        tensor_dict["detection_masks"] = tf.expand_dims(
            detection_masks_reframed, 0)

        image_tensor = tf.get_default_graph().get_tensor_by_name("image_tensor:0")

        pbar = tqdm(total=int(video_clip.fps * video_clip.duration))
        for i, (frame, style_frame) in enumerate(zip(video_clip.iter_frames(),
                                                      style_video_clip.iter_frames())):
            output_dict = sess.run(tensor_dict,
                                   feed_dict={image_tensor:
                                       np.expand_dims(frame, 0)})
            # assume batch size = 1
            classes = output_dict["detection_classes"][0][:int(output_dict["num_detections"][0])]
            # if no target class then have to use a 0 mask
            if target_class not in classes:
                mask = np.zeros((video_clip.size[1], video_clip.size[0]))
                to_style_frame = False
            else:
                mask = merge_classes(output_dict["detection_masks"][0, :, :, :], 1,
                                     classes)
                to_style_frame = True
            mask = draw_random_triangles(mask, size=(ch1[i]*30 + 1e-8))

            outline = Image.fromarray(get_outline(mask))
            mask = Image.fromarray(255*mask)
            nframe = Image.fromarray(frame)
            # can't paste with 0 mask
            if to_style_frame:
                nframe.paste(Image.fromarray(style_frame), mask=mask)
                nframe.paste(outline, mask=outline)

            video_writer.write_frame(nframe)
            pbar.update(1)

        pbar.close()
        video_writer.close()
Example #11
def ffwd_video(path_in,
               path_out,
               checkpoint_dir,
               device_t='/gpu:0',
               batch_size=4):
    ''' feed forward video '''

    # defining video rendering variables
    video_clip = VideoFileClip(path_in, audio=False)
    video_writer = ffmpeg_writer.FFMPEG_VideoWriter(path_out,
                                                    video_clip.size,
                                                    video_clip.fps,
                                                    codec='libx264',
                                                    preset='medium',
                                                    bitrate='2000k',
                                                    audiofile=path_in,
                                                    threads=None,
                                                    ffmpeg_params=None)

    # defining tensorflow variables
    g = tf.Graph()
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True

    # starting the tensorflow session
    with g.as_default(), g.device(device_t), tf.Session(
            config=soft_config) as sess:

        batch_shape = (batch_size, video_clip.size[1], video_clip.size[0], 3)

        # defining placeholder
        vid_ph = tf.placeholder(tf.float32, shape=batch_shape, name='vid_ph')

        # forward propagation (building the graph)
        preds = transform_net.net(vid_ph)

        # defining saver
        saver = tf.train.Saver()

        # restoring the saved model

        if os.path.isdir(checkpoint_dir):
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                raise Exception("No checkpoint found...")
        else:
            saver.restore(sess, checkpoint_dir)

        x = np.zeros(batch_shape, dtype=np.float32)

        # function to generate styled video (batch images) and writing
        def style_and_write(count):

            # if the batch is not full, fill the remaining slots of x with the
            # last frame received from .iter_frames
            for i in range(count, batch_size):
                x[i] = x[count - 1]

            # running the graph to style video
            _preds = sess.run(preds, feed_dict={vid_ph: x})

            for i in range(0, count):
                video_writer.write_frame(
                    np.clip(_preds[i], 0, 255).astype(np.uint8))

        frame_count = 0  # the number of frames written to x

        for frame in video_clip.iter_frames():

            x[frame_count] = frame
            frame_count += 1

            if frame_count == batch_size:

                style_and_write(frame_count)
                frame_count = 0

        # for last batch where no of images is less than the batch_size
        if frame_count != 0:
            style_and_write(frame_count)

        video_writer.close()
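Example #12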
def ffwd_video(
        path_in,
        path_out,
        checkpoint_dir,
        device_t='/gpu:0',
        batch_size=4,
        data_format='NHWC',
        num_base_channels=32,  # more cli params
        evaluate=False):
    video_clip = VideoFileClip(path_in, audio=False)
    video_writer = ffmpeg_writer.FFMPEG_VideoWriter(path_out,
                                                    video_clip.size,
                                                    video_clip.fps,
                                                    codec="libx264",
                                                    preset="medium",
                                                    bitrate="2000k",
                                                    audiofile=path_in,
                                                    threads=None,
                                                    ffmpeg_params=None)

    g = tf.Graph()
    soft_config = tf.compat.v1.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    with g.as_default(), g.device(device_t), \
            tf.compat.v1.Session(config=soft_config) as sess:
        batch_shape = (batch_size, video_clip.size[1], video_clip.size[0], 3)
        img_placeholder = tf.compat.v1.placeholder(tf.float32,
                                                   shape=batch_shape,
                                                   name='img_placeholder')

        #preds = transform.net(img_placeholder)

        if data_format == 'NHWC':
            #NHWC path
            preds = transform.net(img_placeholder,
                                  data_format=data_format,
                                  num_base_channels=num_base_channels,
                                  evaluate=evaluate)
        else:
            #NCHW path
            img_placeholder_nchw = tf.transpose(a=img_placeholder,
                                                perm=[0, 3, 1, 2])
            preds_nchw = transform.net(img_placeholder_nchw,
                                       data_format=data_format,
                                       num_base_channels=num_base_channels)
            preds = tf.transpose(a=preds_nchw, perm=[0, 2, 3, 1])

        # add output node
        preds = tf.identity(preds, "output")
        #print("tf.identity: {}".format(preds))

        saver = tf.compat.v1.train.Saver()
        if os.path.isdir(checkpoint_dir):
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                raise Exception("No checkpoint found...")
        else:
            saver.restore(sess, checkpoint_dir)

        X = np.zeros(batch_shape, dtype=np.float32)

        def style_and_write(count):
            for i in range(count, batch_size):
                X[i] = X[count - 1]  # Use last frame to fill X
            _preds = sess.run(preds, feed_dict={img_placeholder: X})
            for i in range(0, count):
                video_writer.write_frame(
                    np.clip(_preds[i], 0, 255).astype(np.uint8))

        frame_count = 0  # The number of frames written to X
        for frame in video_clip.iter_frames():
            X[frame_count] = frame
            frame_count += 1
            if frame_count == batch_size:
                style_and_write(frame_count)
                frame_count = 0

        if frame_count != 0:
            style_and_write(frame_count)

        video_writer.close()
Example #13
    stylizer = Stylizer(args.checkpoint_model)
    video_clip = VideoFileClip(args.video_path, audio=False)
    now = datetime.now()
    video_name = args.video_path.split("/")[-1].split(".")[0]
    out_dir = f"{now.year}{now.month}{now.day}-{now.hour}{now.minute}{now.second}-{args.checkpoint_model.split('/')[-1].split('.')[0]}-styled-{video_name}"
    os.makedirs(f"images/outputs/{out_dir}", exist_ok=True)

    # Create video from frames
    video_writer = None
    if args.export_type != 0:
        video_writer = ffmpeg_writer.FFMPEG_VideoWriter(
            f'images/outputs/{out_dir}/{video_name}.mp4',
            video_clip.size,
            video_clip.fps,
            codec="libx264",
            preset="medium",
            bitrate="2000k",
            audiofile=None,
            threads=None,
            ffmpeg_params=None)

    try:
        fnum = 0
        stylized_frames = []
        for frame in tqdm.tqdm(video_clip.iter_frames(),
                               desc="Processing frames"):
            if args.octave_num:
                outframe = stylizer.stylize_with_octaves(
                    frame, args.max_size, args.overlap,
                    args.octave_num, args.octave_scale)
            else:
                outframe = stylizer.stylize_image(frame, args.max_size,
                                                  args.overlap)
            if args.export_type != 0:
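Example #14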
def ffwd_video(path_in,
               path_out,
               checkpoint_dir,
               device_t='/gpu:0',
               batch_size=4):
    video_clip = VideoFileClip(path_in, audio=False)

    # Create a temporary file to store the audio.
    fp = tempfile.NamedTemporaryFile(suffix='.aac')
    temp_audio_file_name = fp.name
    fp.close()

    # Create a temporary file to store the video.
    fp = tempfile.NamedTemporaryFile(suffix='.mp4')
    temp_video_file_name = fp.name
    fp.close()

    # Extract the audio.
    ffmpeg_tools.ffmpeg_extract_audio(path_in, temp_audio_file_name)

    video_writer = ffmpeg_writer.FFMPEG_VideoWriter(
        temp_video_file_name,
        video_clip.size,
        video_clip.fps,
        codec="libx264",
        preset="medium",
        audiofile=None,
        threads=None,
        ffmpeg_params=["-b:v", "2000k"])

    g = tf.Graph()
    soft_config = tf.compat.v1.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    with g.as_default(), g.device(device_t), \
         tf.compat.v1.Session(config=soft_config) as sess:
        batch_shape = (batch_size, video_clip.size[1], video_clip.size[0], 3)
        img_placeholder = tf.compat.v1.placeholder(tf.float32,
                                                   shape=batch_shape,
                                                   name='img_placeholder')

        preds = src.transform.net(img_placeholder)
        saver = tf.compat.v1.train.Saver()
        if os.path.isdir(checkpoint_dir):
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                raise Exception("No checkpoint found...")
        else:
            saver.restore(sess, checkpoint_dir)

        X = np.zeros(batch_shape, dtype=np.float32)

        def style_and_write(count):
            for i in range(count, batch_size):
                X[i] = X[count - 1]  # Use last frame to fill X
            _preds = sess.run(preds, feed_dict={img_placeholder: X})
            for i in range(0, count):
                video_writer.write_frame(
                    np.clip(_preds[i], 0, 255).astype(np.uint8))

        frame_count = 0  # The number of frames written to X
        for frame in video_clip.iter_frames():
            X[frame_count] = frame
            frame_count += 1
            if frame_count == batch_size:
                style_and_write(frame_count)
                frame_count = 0

        if frame_count != 0:
            style_and_write(frame_count)

        video_writer.close()

        # Merge audio and video
        ffmpeg_tools.ffmpeg_merge_video_audio(temp_video_file_name,
                                              temp_audio_file_name, path_out)

        # Delete temporary files
        os.remove(temp_video_file_name)
        os.remove(temp_audio_file_name)