Example #1
def trim_video(context: Context, output_file: str):
    """
    Create a trimmed video using FFmpeg's -ss and -to options. The trimmed video is written to 'output_file'.
    """
    # load context

    input_file = context.input_file

    trim_video_command = [
        context.ffmpeg_dir, "-hwaccel", context.hwaccel, "-i", input_file
    ]

    trim_video_time = get_options_from_section(
        context.config_yaml["ffmpeg"]["trim_video"]["time"])

    for element in trim_video_time:
        trim_video_command.append(element)

    trim_video_options = \
        get_options_from_section(context.config_yaml["ffmpeg"]["trim_video"]["output_options"], ffmpeg_command=True)

    for element in trim_video_options:
        trim_video_command.append(element)

    trim_video_command.append(output_file)

    console_output = open(
        context.console_output_dir + "ffmpeg_trim_video_command.txt", "w")
    console_output.write(str(trim_video_command))
    subprocess.call(trim_video_command,
                    shell=False,
                    stderr=console_output,
                    stdout=console_output)
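The trim times themselves come from the YAML config. As a rough illustration, the sketch below shows how a section such as ffmpeg -> trim_video -> time could be flattened into the token list appended to the command; the helper and the sample values are assumptions, not dandere2x's actual get_options_from_section implementation.

# Hypothetical sketch: flatten a config section into CLI tokens.
def flatten_section_sketch(section: dict) -> list:
    tokens = []
    for flag, value in section.items():
        tokens.append(str(flag))
        if value is not None:
            tokens.append(str(value))
    return tokens

# flatten_section_sketch({"-ss": "00:00:05", "-to": "00:00:15"})
# -> ["-ss", "00:00:05", "-to", "00:00:15"]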
Example #2
    def __init__(self, context: Context):
        # load context
        self.frame_count = context.frame_count
        self.waifu2x_converter_cpp_file_path = context.waifu2x_converter_cpp_file_path
        self.waifu2x_converter_cpp_path = context.waifu2x_converter_cpp_path
        self.residual_images_dir = context.residual_images_dir
        self.residual_upscaled_dir = context.residual_upscaled_dir
        self.noise_level = context.noise_level
        self.scale_factor = context.scale_factor
        self.workspace = context.workspace
        self.context = context

        self.waifu2x_conv_upscale_frame = [
            self.waifu2x_converter_cpp_file_path, "-i", "[input_file]",
            "--noise-level",
            str(self.noise_level), "--scale-ratio",
            str(self.scale_factor)
        ]

        waifu2x_conv_options = get_options_from_section(
            self.context.config_yaml["waifu2x_converter"]["output_options"])

        # add custom options to the waifu2x-converter-cpp command
        for element in waifu2x_conv_options:
            self.waifu2x_conv_upscale_frame.append(element)

        self.waifu2x_conv_upscale_frame.extend(["-o", "[output_file]"])

        threading.Thread.__init__(self)
        logging.basicConfig(filename=self.workspace + 'waifu2x.log',
                            level=logging.INFO)
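The upscale command above keeps "[input_file]" and "[output_file]" as placeholder tokens. Below is a minimal sketch of how such a template could be instantiated for one residual image before being handed to subprocess; the helper name is hypothetical and the real dandere2x upscale loop may differ.

# Hypothetical helper: substitute the placeholder tokens of a template
# command with concrete file paths.
def fill_template_sketch(template_command: list, input_file: str, output_file: str) -> list:
    substitutions = {"[input_file]": input_file, "[output_file]": output_file}
    return [substitutions.get(token, token) for token in template_command]

# fill_template_sketch(waifu2x_conv_upscale_frame,
#                      "residual_images/output_1.jpg",
#                      "residual_upscaled/output_1.png")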
Example #3
    def start_task(self):

        extract_frames_command = [self.context.ffmpeg_dir,
                                  "-hwaccel", self.context.hwaccel,
                                  "-i", self.input_file]

        extract_frames_options = \
            get_options_from_section(self.context.config_yaml["ffmpeg"]["video_to_frames"]['output_options'],
                                     ffmpeg_command=True)

        for element in extract_frames_options:
            extract_frames_command.append(element)

        extract_frames_command.append("-r")
        extract_frames_command.append(str(self.frame_rate))

        extract_frames_command.extend([self.output_file])

        self.logger.info("extracting frames")

        console_output = open(self.context.console_output_dir + "ffmpeg_extract_frames_console.txt", "w")
        console_output.write(str(extract_frames_command))

        self.P = subprocess.Popen(extract_frames_command, shell=False,
                                  stderr=console_output, stdout=console_output)
        self.pause_resume = psutil.Process(self.P.pid)
        self.pause_resume.suspend()
Example #4
def create_video_from_extract_frames(context: Context, output_file: str):
    """
    Create a new video by applying the filters that d2x needs in order to work, writing the result to its own separate video.
    """
    input_file = context.input_file
    logger = logging.getLogger(__name__)

    command = [
        context.ffmpeg_dir, "-hwaccel", context.hwaccel, "-i", input_file
    ]

    extract_frames_options = \
        get_options_from_section(context.config_yaml["ffmpeg"]["video_to_frames"]['output_options'],
                                 ffmpeg_command=True)

    for element in extract_frames_options:
        command.append(element)

    command.extend([output_file])

    logger.info("Applying filter to video...")

    console_output = open(
        context.console_output_dir +
        "ffmpeg_create_video_from_extract_frame_filters.txt", "w")
    console_output.write(str(command))
    subprocess.call(command,
                    shell=False,
                    stderr=console_output,
                    stdout=console_output)
Example #5
def concat_encoded_vids(context: Context, output_file: str):
    """
    Concatenate videos using method 2) from this Stack Overflow post:
    https://stackoverflow.com/questions/7333232/how-to-concatenate-two-mp4-files-using-ffmpeg

    The 'list.txt' file should already exist, as it is produced by realtime_encoding.py.
    """

    encoded_dir = context.encoded_dir

    text_file = encoded_dir + "list.txt"
    concat_videos_command = [
        context.ffmpeg_dir, "-f", "concat", "-safe", "0", "-hwaccel",
        context.hwaccel, "-i", text_file
    ]

    concat_videos_option = \
        get_options_from_section(context.config_yaml["ffmpeg"]["concat_videos"]['output_options'], ffmpeg_command=True)

    for element in concat_videos_option:
        concat_videos_command.append(element)

    concat_videos_command.extend([output_file])

    console_output = open(
        context.console_output_dir + "ffmpeg_concat_videos_command.txt", "w")
    console_output.write((str(concat_videos_command)))
    subprocess.call(concat_videos_command,
                    shell=False,
                    stderr=console_output,
                    stdout=console_output)
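The file read by the concat demuxer lists one segment per line in ffmpeg's "file '<path>'" syntax. A hedged sketch of writing such a list follows; the real list.txt is produced by realtime_encoding.py, and the segment names here are made up.

# Illustrative only: writing a concat list in the format ffmpeg expects.
segments = ["encoded_1.mkv", "encoded_2.mkv", "encoded_3.mkv"]
with open("list.txt", "w") as list_file:
    for segment in segments:
        list_file.write("file '%s'\n" % segment)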
Example #6
    def _setup_pipe(self) -> None:

        # load variables
        output_no_sound = self.output_no_sound
        frame_rate = str(self.context.frame_rate)
        ffmpeg_dir = self.context.ffmpeg_dir
        dar = self.context.dar

        # constructing the pipe command...
        ffmpeg_pipe_command = [ffmpeg_dir, "-r", frame_rate]

        options = get_options_from_section(
            self.context.config_yaml["ffmpeg"]["pipe_video"]['output_options'],
            ffmpeg_command=True)
        for item in options:
            ffmpeg_pipe_command.append(item)

        ffmpeg_pipe_command.append("-r")
        ffmpeg_pipe_command.append(frame_rate)

        if dar:
            ffmpeg_pipe_command.append("-vf")
            ffmpeg_pipe_command.append("setdar=" + dar.replace(":", "/"))

        ffmpeg_pipe_command.append(output_no_sound)

        # Starting the Pipe Command
        console_output = open(
            self.context.console_output_dir + "pipe_output.txt", "w")
        self.ffmpeg_pipe_subprocess = subprocess.Popen(ffmpeg_pipe_command,
                                                       stdin=subprocess.PIPE,
                                                       stdout=console_output)
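Because the subprocess is started with stdin=subprocess.PIPE, frames can later be written straight to its standard input. The sketch below shows one way a frame could be pushed into the pipe and the pipe closed afterwards; the use of Pillow and the JPEG format are assumptions made only for illustration.

# Hypothetical sketch: feeding images to the ffmpeg pipe subprocess.
from PIL import Image

def pipe_frame_sketch(ffmpeg_pipe_subprocess, frame_path: str) -> None:
    image = Image.open(frame_path)
    image.save(ffmpeg_pipe_subprocess.stdin, format="JPEG")

def close_pipe_sketch(ffmpeg_pipe_subprocess) -> None:
    ffmpeg_pipe_subprocess.stdin.close()
    ffmpeg_pipe_subprocess.wait()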
Example #7
    def __init__(self, context: Context):
        self.frame_count = context.frame_count
        self.waifu2x_caffe_cui_dir = context.waifu2x_caffe_cui_dir
        self.residual_images_dir = context.residual_images_dir
        self.residual_upscaled_dir = context.residual_upscaled_dir
        self.noise_level = context.noise_level
        self.scale_factor = context.scale_factor
        self.workspace = context.workspace
        self.context = context

        # Create Caffe Command
        self.waifu2x_caffe_upscale_frame = [
            self.waifu2x_caffe_cui_dir, "-i", "[input_file]", "-n",
            str(self.noise_level), "-s",
            str(self.scale_factor)
        ]

        waifu2x_caffe_options = get_options_from_section(
            context.config_yaml["waifu2x_caffe"]["output_options"])

        for element in waifu2x_caffe_options:
            self.waifu2x_caffe_upscale_frame.append(element)

        self.waifu2x_caffe_upscale_frame.extend(["-o", "[output_file]"])

        threading.Thread.__init__(self)
        logging.basicConfig(filename=self.workspace + 'waifu2x.log',
                            level=logging.INFO)
Example #8
def migrate_tracks(context: Context, no_audio: str, file_dir: str,
                   output_file: str):
    """
    Add the audio tracks from the original video to the output video.
    """
    migrate_tracks_command = [
        context.ffmpeg_dir, "-i", no_audio, "-i", file_dir, "-map", "0:v:0?",
        "-map", "1?", "-c", "copy", "-map", "-1:v?"
    ]

    migrate_tracks_options = \
        get_options_from_section(context.config_yaml["ffmpeg"]["migrating_tracks"]['output_options'],
                                 ffmpeg_command=True)

    for element in migrate_tracks_options:
        migrate_tracks_command.append(element)

    migrate_tracks_command.extend([str(output_file)])

    console_output = open(context.log_dir + "migrate_tracks_command.txt", "w")
    console_output.write(str(migrate_tracks_command))
    subprocess.call(migrate_tracks_command,
                    shell=False,
                    stderr=console_output,
                    stdout=console_output)
Example #9
def extract_frames(context: Context, input_file: str):
    """
    Extract frames from a video using ffmpeg.
    """
    input_frames_dir = context.input_frames_dir
    extension_type = context.extension_type
    output_file = input_frames_dir + "frame%01d" + extension_type
    logger = logging.getLogger(__name__)
    frame_rate = context.frame_rate

    extract_frames_command = [
        context.ffmpeg_dir, "-hwaccel", context.hwaccel, "-i", input_file
    ]

    extract_frames_options = \
        get_options_from_section(context.config_yaml["ffmpeg"]["video_to_frames"]['output_options'],
                                 ffmpeg_command=True)

    for element in extract_frames_options:
        extract_frames_command.append(element)

    extract_frames_command.append("-r")
    extract_frames_command.append(str(frame_rate))

    extract_frames_command.extend([output_file])

    console_output = open(
        context.console_output_dir + "ffmpeg_extract_frames_console.txt", "w")
    console_output.write(str(extract_frames_command))
    subprocess.call(extract_frames_command,
                    shell=False,
                    stderr=console_output,
                    stdout=console_output)
Example #10
    def __init__(self, context, output_no_sound: str):
        self.context = context

        # load variables from context
        self.workspace = self.context.workspace
        self.upscaled_dir = self.context.residual_upscaled_dir
        self.compressed_static_dir = self.context.compressed_static_dir
        self.compressed_moving_dir = self.context.compressed_moving_dir
        self.input_frames_dir = self.context.input_frames_dir
        self.merged_dir = self.context.merged_dir
        self.residual_data_dir = self.context.residual_data_dir
        self.pframe_data_dir = self.context.pframe_data_dir
        self.correction_data_dir = self.context.correction_data_dir
        self.fade_data_dir = self.context.fade_data_dir
        self.frame_count = self.context.frame_count
        self.waifu2x_type = self.context.waifu2x_type

        # How many images to have maximum in a buffer at a given time.
        self.buffer_limit = 20
        self.pipe_running = True
        self.images_to_pipe = []

        self.nosound_file = output_no_sound
        self.frame_rate = str(self.context.frame_rate)
        self.dar = self.context.dar
        self.input_file = self.context.input_file
        self.output_file = self.context.output_file
        self.ffmpeg_dir = self.context.ffmpeg_dir

        # Create the piping command
        self.ffmpeg_pipe_command = [self.ffmpeg_dir, "-r", self.frame_rate]

        options = get_options_from_section(
            context.config_yaml["ffmpeg"]["pipe_video"]['output_options'],
            ffmpeg_command=True)

        for item in options:
            self.ffmpeg_pipe_command.append(item)

        self.ffmpeg_pipe_command.append("-r")
        self.ffmpeg_pipe_command.append(self.frame_rate)

        if self.dar:
            self.ffmpeg_pipe_command.append("-vf")
            self.ffmpeg_pipe_command.append("setdar=" +
                                            self.dar.replace(":", "/"))

        self.ffmpeg_pipe_command.append(self.nosound_file)

        self.ffmpeg_pipe_subprocess = None

        # thread variables

        self.thread_alive = True
Example #11
def migrate_tracks(context: Context,
                   no_audio: str,
                   file_dir: str,
                   output_file: str,
                   copy_if_failed=False):
    """
    Add the audio tracks from the original video to the output video.
    """

    # TODO: remove this helper
    def convert(lst):
        return ' '.join(lst)

    log = logging.getLogger()

    migrate_tracks_command = [
        context.ffmpeg_dir, "-i", no_audio, "-i", file_dir, "-map", "0:v?",
        "-map", "1:a?", "-map", "1:s?", "-map", "1:d?", "-map", "1:t?"
    ]

    migrate_tracks_options = \
        get_options_from_section(context.config_yaml["ffmpeg"]["migrating_tracks"]['output_options'],
                                 ffmpeg_command=True)

    for element in migrate_tracks_options:
        migrate_tracks_command.append(element)

    migrate_tracks_command.extend([str(output_file)])

    console_file_dir = context.console_output_dir + "migrate_tracks_command.txt"
    log.info("Writing files to %s" % console_file_dir)
    log.info("Migrate Command: %s" % convert(migrate_tracks_command))

    console_output = open(console_file_dir, "w", encoding="utf8")
    console_output.write(str(migrate_tracks_command))
    subprocess.call(migrate_tracks_command,
                    shell=False,
                    stderr=console_output,
                    stdout=console_output)

    if copy_if_failed:
        with open(context.console_output_dir + "migrate_tracks_command.txt",
                  encoding="utf8") as f:
            if 'Conversion failed!' in f.read():
                import os
                import shutil

                print(
                    "Migrating Tracks failed... copying video in order to continue with dandere2x."
                )
                os.remove(output_file)
                shutil.copy(no_audio, output_file)
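For reference, the "-map" pattern above takes the video stream from the audio-less upscaled file and the audio, subtitle, data and attachment streams from the original; the trailing "?" makes each mapping optional so a missing stream type does not abort the command. A hedged sketch of the resulting argument list (the paths and the "-c copy" output option are assumptions):

# Illustrative only: a fully assembled migrate_tracks command.
example_migrate_command = [
    "ffmpeg",
    "-i", "upscaled_nosound.mkv",    # input 0: upscaled video without audio
    "-i", "original.mkv",            # input 1: original video with all tracks
    "-map", "0:v?",                  # video from the upscaled file
    "-map", "1:a?", "-map", "1:s?",  # audio and subtitles from the original
    "-map", "1:d?", "-map", "1:t?",  # data and attachment streams, if present
    "-c", "copy",                    # assumed output option from the YAML
    "output.mkv",
]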
Example #12
    def _construct_upscale_command(self) -> list:
        """ A generic, recyclable upscale command that can be used for single-file upscaling or batch upscaling. """
        waifu2x_vulkan_upscale_frame_command = [
            self.realsr_ncnn_vulkan_file_path, "-i", "[input_file]"
        ]

        waifu2x_vulkan_options = get_options_from_section(
            self.context.config_yaml["realsr_ncnn_vulkan"]["output_options"])

        # add custom options to the realsr-ncnn-vulkan command
        for element in waifu2x_vulkan_options:
            waifu2x_vulkan_upscale_frame_command.append(element)

        waifu2x_vulkan_upscale_frame_command.extend(["-o", "[output_file]"])
        return waifu2x_vulkan_upscale_frame_command
Example #13
    def _construct_upscale_command(self) -> list:
        upscale_command = [self.context.waifu2x_caffe_cui_dir,
                           "-i", "[input_file]",
                           "-n", str(self.noise_level),
                           "-s", str(self.scale_factor)]

        optional_parameters = get_options_from_section(
            self.context.config_yaml["waifu2x_caffe"]["output_options"])

        # add optional parameters to the upscaling command.
        for element in optional_parameters:
            upscale_command.append(element)

        upscale_command.extend(["-o", "[output_file]"])
        return upscale_command
Example #14
def re_encode_video(context: Context,
                    input_file: str,
                    output_file: str,
                    throw_exception=False):
    """
    Use the "re_encode_video" options from the YAML to re-encode the input video in an OpenCV-friendly
    manner. Without this step, certain containers might not be compatible with OpenCV and can cause
    a plethora of errors.

    throw_exception: Throw a detailed exception and print a message if the conversion failed.
    """
    logger = logging.getLogger(__name__)
    frame_rate = context.frame_rate

    extract_frames_command = [
        context.ffmpeg_dir, "-hwaccel", context.hwaccel, "-i", input_file
    ]

    extract_frames_options = \
        get_options_from_section(context.config_yaml["ffmpeg"]['re_encode_video']['output_options'],
                                 ffmpeg_command=True)

    for element in extract_frames_options:
        extract_frames_command.append(element)

    extract_frames_command.append("-r")
    extract_frames_command.append(str(frame_rate))
    extract_frames_command.extend([output_file])

    log_file = context.console_output_dir + "ffmpeg_convert_video.txt"
    console_output = open(log_file, "w", encoding="utf8")
    console_output.write(str(extract_frames_command))
    subprocess.call(extract_frames_command,
                    shell=False,
                    stderr=console_output,
                    stdout=console_output)

    if throw_exception:
        with open(context.console_output_dir + "ffmpeg_convert_video.txt",
                  encoding="utf8") as f:
            if 'Conversion failed!' in f.read():
                print("Failed to convert: " + input_file + " -> " +
                      output_file + ".")
                print("Check the output file for more information: " +
                      log_file)

                raise TypeError
Example #15
    def _construct_upscale_command(self) -> list:
        waifu2x_vulkan_upscale_frame_command = [
            self.context.waifu2x_ncnn_vulkan_legacy_file_name, "-i",
            "[input_file]", "-n",
            str(self.noise_level), "-s",
            str(self.scale_factor)
        ]

        waifu2x_vulkan_options = get_options_from_section(
            self.context.config_yaml["waifu2x_ncnn_vulkan"]["output_options"])

        # add custom options to waifu2x_vulkan
        for element in waifu2x_vulkan_options:
            waifu2x_vulkan_upscale_frame_command.append(element)

        waifu2x_vulkan_upscale_frame_command.extend(["-o", "[output_file]"])
        return waifu2x_vulkan_upscale_frame_command
Example #16
    def _construct_upscale_command(self) -> list:
        waifu2x_converter_cpp_upscale_command = [
            self.context.waifu2x_converter_cpp_file_path, "-i", "[input_file]",
            "--noise-level",
            str(self.noise_level), "--scale-ratio",
            str(self.scale_factor)
        ]

        waifu2x_conv_options = get_options_from_section(
            self.context.config_yaml["waifu2x_converter"]["output_options"])

        # add custom options to the waifu2x-converter-cpp command
        for element in waifu2x_conv_options:
            waifu2x_converter_cpp_upscale_command.append(element)

        waifu2x_converter_cpp_upscale_command.extend(["-o", "[output_file]"])

        return waifu2x_converter_cpp_upscale_command
Example #17
def create_video_from_specific_frames(context: Context, file_prefix,
                                      output_file, start_number,
                                      frames_per_video):
    """
    Create a video using ffmpeg's 'start_number' flag and the 'vframes' input flag, building the video
    from a specific range of output frames.
    """

    # load context
    logger = context.logger
    extension_type = context.extension_type
    input_files = file_prefix + "%d" + extension_type

    video_from_frames_command = [
        context.ffmpeg_dir, "-start_number",
        str(start_number), "-hwaccel", context.hwaccel, "-framerate",
        str(context.frame_rate), "-i", input_files, "-vframes",
        str(frames_per_video), "-r",
        str(context.frame_rate)
    ]

    frame_to_video_option = get_options_from_section(
        context.config_yaml["ffmpeg"]["frames_to_video"]['output_options'],
        ffmpeg_command=True)

    for element in frame_to_video_option:
        video_from_frames_command.append(element)

    video_from_frames_command.extend([output_file])

    logger.info("running ffmpeg command: " + str(video_from_frames_command))

    console_output = open(
        context.console_output_dir + "video_from_frames_command.txt", "w")
    console_output.write(str(video_from_frames_command))
    subprocess.call(video_from_frames_command,
                    shell=False,
                    stderr=console_output,
                    stdout=console_output)
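To make the flags concrete, the sketch below shows roughly what the assembled command could look like for one call; the prefix, frame rate, hwaccel mode and output file are all assumed values used only for illustration.

# Illustrative only: create_video_from_specific_frames(context, "merged/merged_",
# "encoded/encoded_1.mkv", start_number=120, frames_per_video=240) might yield:
example_video_from_frames_command = [
    "ffmpeg", "-start_number", "120", "-hwaccel", "none",
    "-framerate", "24", "-i", "merged/merged_%d.jpg",
    "-vframes", "240", "-r", "24",
    # ... output options from config_yaml["ffmpeg"]["frames_to_video"] ...
    "encoded/encoded_1.mkv",
]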
Example #18
    def __init__(self, context: Context):
        self.frame_count = context.frame_count
        self.waifu2x_caffe_cui_dir = context.waifu2x_caffe_cui_dir
        self.residual_images_dir = context.residual_images_dir
        self.residual_upscaled_dir = context.residual_upscaled_dir
        self.noise_level = context.noise_level
        self.scale_factor = context.scale_factor
        self.workspace = context.workspace
        self.context = context
        self.signal_upscale = True
        self.active_waifu2x_subprocess = None
        self.start_frame = 1

        # Create Caffe Command
        self.waifu2x_caffe_upscale_frame = [
            self.waifu2x_caffe_cui_dir, "-i", "[input_file]", "-n",
            str(self.noise_level), "-s",
            str(self.scale_factor)
        ]

        waifu2x_caffe_options = get_options_from_section(
            context.config_yaml["waifu2x_caffe"]["output_options"])

        for element in waifu2x_caffe_options:
            self.waifu2x_caffe_upscale_frame.append(element)

        self.waifu2x_caffe_upscale_frame.extend(["-o", "[output_file]"])

        # Threading Specific

        self.alive = True
        self.cancel_token = CancellationToken()
        self._stopevent = threading.Event()
        threading.Thread.__init__(self, name="Waifu2xCaffeThread")

        logging.basicConfig(filename=self.workspace + 'waifu2x.log',
                            level=logging.INFO)
Example #19
    def __init__(self, context: Context):
        # load context
        self.frame_count = context.frame_count
        self.waifu2x_ncnn_vulkan_file_path = context.waifu2x_ncnn_vulkan_legacy_file_name
        self.waifu2x_ncnn_vulkan_path = context.waifu2x_ncnn_vulkan_path
        self.residual_images_dir = context.residual_images_dir
        self.residual_upscaled_dir = context.residual_upscaled_dir
        self.noise_level = context.noise_level
        self.scale_factor = context.scale_factor
        self.workspace = context.workspace
        self.context = context
        self.signal_upscale = True
        self.active_waifu2x_subprocess = None
        self.start_frame = 1

        self.waifu2x_vulkan_upscale_frame = [
            self.waifu2x_ncnn_vulkan_file_path, "-i", "[input_file]", "-n",
            str(self.noise_level), "-s",
            str(self.scale_factor)
        ]

        waifu2x_vulkan_options = get_options_from_section(
            self.context.config_yaml["waifu2x_ncnn_vulkan"]["output_options"])

        # add custom options to waifu2x_vulkan
        for element in waifu2x_vulkan_options:
            self.waifu2x_vulkan_upscale_frame.append(element)

        self.waifu2x_vulkan_upscale_frame.extend(["-o", "[output_file]"])

        # Threading Specific

        self.alive = True
        self.cancel_token = CancellationToken()
        self._stopevent = threading.Event()
        threading.Thread.__init__(self, name="Waifu2xVulkanThread")
Example #20
    def __init__(self, config_file_unparsed: json):
        """
        Create all the values that will be used in various parts of dandere2x. Many of these values
        are derived from external sources, such as the JSON config, ffmpeg and ffprobe, or are built by joining directory paths.

        Keeping all the variables here gives dandere2x one place for the values it needs to treat as global, without making them truly global.
        """

        self.this_folder = None

        # load 'this folder' in a pyinstaller friendly way
        if getattr(sys, 'frozen', False):
            self.this_folder = os.path.dirname(sys.executable)
        elif __file__:
            self.this_folder = os.path.dirname(__file__)

        self.this_folder = pathlib.Path(self.this_folder)

        # Parse the unparsed config, absolutifying relative paths (e.g. ../externals -> C:/this_folder/externals)
        self.config_yaml = absolutify_yaml(config_file_unparsed,
                                           str(self.this_folder.absolute()),
                                           absolutify_key="..")
        ################################
        #  setup all the directories.. #
        ################################

        # TODO: since this is a fail-safe way of loading the waifu2x clients,
        # we should check that at least one is valid before running dandere2x.

        if self.config_yaml['dandere2x']['usersettings'][
                'waifu2x_type'] == "converter_cpp":
            self.waifu2x_converter_cpp_path = self.config_yaml[
                'waifu2x_converter']['waifu2x_converter_path']
            self.waifu2x_converter_file_name = self.config_yaml[
                'waifu2x_converter']['waifu2x_converter_file_name']
            self.waifu2x_converter_cpp_file_path = os.path.join(
                self.waifu2x_converter_cpp_path,
                self.waifu2x_converter_file_name)

        if self.config_yaml['dandere2x']['usersettings'][
                'waifu2x_type'] == "vulkan":
            self.waifu2x_ncnn_vulkan_path = self.config_yaml[
                'waifu2x_ncnn_vulkan']['waifu2x_ncnn_vulkan_path']
            self.waifu2x_ncnn_vulkan_file_name = self.config_yaml[
                'waifu2x_ncnn_vulkan']['waifu2x_ncnn_vulkan_file_name']
            self.waifu2x_ncnn_vulkan_legacy_file_name = os.path.join(
                self.waifu2x_ncnn_vulkan_path,
                self.waifu2x_ncnn_vulkan_file_name)

        if self.config_yaml['dandere2x']['usersettings'][
                'waifu2x_type'] == "vulkan_legacy":
            self.waifu2x_ncnn_vulkan_legacy_path = self.config_yaml[
                'waifu2x_ncnn_vulkan_legacy'][
                    'waifu2x_ncnn_vulkan_legacy_path']
            self.waifu2x_ncnn_vulkan_legacy_file_name = self.config_yaml[
                'waifu2x_ncnn_vulkan_legacy'][
                    'waifu2x_ncnn_vulkan_legacy_file_name']
            self.waifu2x_ncnn_vulkan_legacy_file_path = os.path.join(
                self.waifu2x_ncnn_vulkan_legacy_path,
                self.waifu2x_ncnn_vulkan_legacy_file_name)

        if self.config_yaml['dandere2x']['usersettings'][
                'waifu2x_type'] == "caffe":
            self.waifu2x_caffe_cui_dir = self.config_yaml['waifu2x_caffe'][
                'waifu2x_caffe_path']

        self.workspace = self.config_yaml['dandere2x']['developer_settings'][
            'workspace']
        self.workspace_use_temp = self.config_yaml['dandere2x'][
            'developer_settings']['workspace_use_temp']

        # if we're using a temporary workspace, assign workspace to be in the temp folder
        if self.workspace_use_temp:
            self.workspace = os.path.join(pathlib.Path(tempfile.gettempdir()),
                                          'dandere2x') + os.path.sep

        # setup directories
        self.input_frames_dir = self.workspace + "inputs" + os.path.sep
        self.residual_images_dir = self.workspace + "residual_images" + os.path.sep
        self.residual_upscaled_dir = self.workspace + "residual_upscaled" + os.path.sep
        self.residual_data_dir = self.workspace + "residual_data" + os.path.sep
        self.pframe_data_dir = self.workspace + "pframe_data" + os.path.sep
        self.correction_data_dir = self.workspace + "correction_data" + os.path.sep
        self.merged_dir = self.workspace + "merged" + os.path.sep
        self.fade_data_dir = self.workspace + "fade_data" + os.path.sep
        self.debug_dir = self.workspace + "debug" + os.path.sep
        self.log_dir = self.workspace + "logs" + os.path.sep
        self.compressed_static_dir = self.workspace + "compressed_static" + os.path.sep
        self.compressed_moving_dir = self.workspace + "compressed_moving" + os.path.sep
        self.encoded_dir = self.workspace + "encoded" + os.path.sep
        self.temp_image_folder = self.workspace + "temp_image_folder" + os.path.sep

        # put all the directories that need to be created into a list for creation / deleting.
        self.directories = {
            self.workspace, self.input_frames_dir, self.correction_data_dir,
            self.residual_images_dir, self.residual_upscaled_dir,
            self.merged_dir, self.residual_data_dir, self.pframe_data_dir,
            self.debug_dir, self.log_dir, self.compressed_static_dir,
            self.compressed_moving_dir, self.fade_data_dir, self.encoded_dir,
            self.temp_image_folder
        }

        self.ffmpeg_dir = self.config_yaml['ffmpeg']['ffmpeg_path']
        self.ffprobe_dir = self.config_yaml['ffmpeg']['ffprobe_path']
        self.hwaccel = self.config_yaml['ffmpeg']['-hwaccel']

        ################################
        # Load Dandere2x User Settings #
        ################################

        # User Settings
        self.block_size = self.config_yaml['dandere2x']['usersettings'][
            'block_size']
        self.quality_minimum = self.config_yaml['dandere2x']['usersettings'][
            'quality_minimum']
        self.waifu2x_type = self.config_yaml['dandere2x']['usersettings'][
            'waifu2x_type']
        self.noise_level = self.config_yaml['dandere2x']['usersettings'][
            'denoise_level']
        self.scale_factor = self.config_yaml['dandere2x']['usersettings'][
            'scale_factor']
        self.input_file = self.config_yaml['dandere2x']['usersettings'][
            'input_file']
        self.output_file = self.config_yaml['dandere2x']['usersettings'][
            'output_file']

        # Developer Settings
        self.quality_moving_ratio = self.config_yaml['dandere2x'][
            'developer_settings']['quality_moving_ratio']
        self.step_size = self.config_yaml['dandere2x']['developer_settings'][
            'step_size']
        self.bleed = self.config_yaml['dandere2x']['developer_settings'][
            'bleed']
        self.extension_type = self.config_yaml['dandere2x'][
            'developer_settings']['extension_type']
        self.debug = self.config_yaml['dandere2x']['developer_settings'][
            'debug']
        self.dandere2x_cpp_dir = self.config_yaml['dandere2x'][
            'developer_settings']['dandere2x_cpp_dir']
        self.correction_block_size = 2

        # FFMPEG Pipe Encoding, NOTE: THIS OVERRIDES REALTIME ENCODING
        self.ffmpeg_pipe_encoding = self.config_yaml['dandere2x'][
            'developer_settings']['ffmpeg_pipe_encoding']
        self.ffmpeg_pipe_encoding_type = self.config_yaml['dandere2x'][
            'developer_settings']['ffmpeg_pipe_encoding_type']
        self.nosound_file = os.path.join(
            self.workspace,
            "nosound")  # missing an extension, will set it in a few

        if not self.ffmpeg_pipe_encoding:
            # Real Time Encoding, traditional way
            self.realtime_encoding_enabled = self.config_yaml['dandere2x'][
                'developer_settings']['realtime_encoding'][
                    'realtime_encoding_enabled']
            self.realtime_encoding_delete_files = \
            self.config_yaml['dandere2x']['developer_settings']['realtime_encoding']['realtime_encoding_delete_files']
            self.realtime_encoding_seconds_per_video = \
                self.config_yaml['dandere2x']['developer_settings']['realtime_encoding'][
                    'realtime_encoding_seconds_per_video']

        else:

            # disable traditional "RTE" because we're piping
            self.realtime_encoding_enabled = False

            # get the substring after the last dot in the output_file, e.g. "asd/aert/asd.mkv" --> "mkv".
            # An output like "important/rapgod" defaults to rapgod.mp4, because pipe_ext
            # would then equal the string itself ("important/rapgod"), which is not a valid format.

            supported_formats = [".mkv", ".mp4", ".avi"]

            pipe_ext = "." + self.output_file.split(".")[-1]

            # add the extension to nosound file
            self.nosound_file += ".mp4" if not pipe_ext in supported_formats else pipe_ext

        ##################
        # Video Settings #
        ##################

        # find out if the user trimmed the video by checking the 'time' part of the config. If there's nothing there,
        # then the user didn't trim anything.
        self.user_trim_video = False
        find_out_if_trim = get_options_from_section(
            self.config_yaml["ffmpeg"]["trim_video"]['time'])

        if find_out_if_trim:
            self.user_trim_video = True

        # load the needed video settings
        self.video_settings = VideoSettings(self.ffprobe_dir, self.input_file)

        self.frame_rate = math.ceil(self.video_settings.frame_rate)
        self.width, self.height = self.video_settings.width, self.video_settings.height
        self.frame_count = 0
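The directories set built above is presumably consumed elsewhere to create and tear down the workspace. A minimal sketch of that usage with hypothetical helper names:

# Hypothetical helpers showing how Context.directories might be used.
import os
import shutil

def create_directories_sketch(context) -> None:
    for directory in context.directories:
        os.makedirs(directory, exist_ok=True)

def delete_workspace_sketch(context) -> None:
    shutil.rmtree(context.workspace, ignore_errors=True)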