Example #1
    def __init__(self, context):
        self.context = context
        self.min_disk_demon = None
        self.merge_thread = Merge(self.context)
        self.residual_thread = Residual(self.context)
        self.waifu2x = self._get_waifu2x_class(self.context.waifu2x_type)
        self.compress_frames_thread = CompressFrames(self.context)
        self.dandere2x_cpp_thread = Dandere2xCppWrapper(self.context)
        self.status_thread = Status(context)

        # session specific
        self.resume_session = False
        self.first_frame = 1

        if self.context.config_yaml['resume_settings']['resume_session']:
            print("is resume session")
            self.resume_session = True
            self.first_frame = int(self.context.config_yaml['resume_settings']
                                   ['signal_merged_count'])
        else:
            print("is not resume session")

        # Threading Specific

        self.alive = True
        self.cancel_token = CancellationToken()
        self._stopevent = threading.Event()
        threading.Thread.__init__(self, name="dandere2x_thread")
Example #2
    def __init__(self, context: Context):

        self.context = context
        # load variables from context
        self.workspace = context.workspace
        self.upscaled_dir = context.residual_upscaled_dir
        self.merged_dir = context.merged_dir
        self.residual_data_dir = context.residual_data_dir
        self.pframe_data_dir = context.pframe_data_dir
        self.correction_data_dir = context.correction_data_dir
        self.fade_data_dir = context.fade_data_dir
        self.frame_count = context.frame_count
        self.extension_type = context.extension_type
        self.nosound_file = context.nosound_file
        self.preserve_frames = context.preserve_frames
        self.logger = logging.getLogger(__name__)
        self.start_frame = self.context.start_frame

        # setup the pipe for merging

        self.pipe = Pipe(context, self.nosound_file)

        # Threading Specific

        self.alive = True
        self.cancel_token = CancellationToken()
        self._stopevent = threading.Event()
        threading.Thread.__init__(self, name="MergeThread")
Example #3
    def __init__(self, context: Context):
        # load stuff from context
        self.workspace = context.workspace
        self.dandere2x_cpp_dir = context.dandere2x_cpp_dir
        self.frame_count = context.frame_count
        self.block_size = context.block_size
        self.step_size = context.step_size
        self.extension_type = context.extension_type
        self.residual_images_dir = context.residual_images_dir
        self.log_dir = context.console_output_dir
        self.dandere2x_cpp_subprocess = None

        self.exec_command = [self.dandere2x_cpp_dir,
                             self.workspace,
                             str(self.frame_count),
                             str(self.block_size),
                             str(self.step_size),
                             "n",
                             str(1),
                             self.extension_type]

        # Threading Specific

        self.alive = True
        self.cancel_token = CancellationToken()
        self._stopevent = threading.Event()
        threading.Thread.__init__(self, name="Dandere2xCpp")
Example #4
    def __init__(self, context):
        self.context = context

        # load variables from context
        self.workspace = context.workspace
        self.residual_upscaled_dir = context.residual_upscaled_dir
        self.residual_images_dir = context.residual_images_dir
        self.residual_data_dir = context.residual_data_dir
        self.pframe_data_dir = context.pframe_data_dir
        self.input_frames_dir = context.input_frames_dir
        self.frame_count = context.frame_count
        self.block_size = context.block_size
        self.extension_type = context.extension_type
        self.debug_dir = context.debug_dir
        self.debug = context.debug
        self.temp_image = context.temp_image_folder + "tempimage.jpg"
        self.logger = logging.getLogger(__name__)
        self.start_frame = 1

        # Threading Specific

        self.alive = True
        self.cancel_token = CancellationToken()
        self._stopevent = threading.Event()
        threading.Thread.__init__(self, name="ResidualThread")
Example #5
class Status(threading.Thread):

    def __init__(self, context: Context):
        self.context = context
        self.workspace = context.workspace
        self.extension_type = context.extension_type
        self.frame_count = context.frame_count
        self.is_alive = True
        self._is_stopped = False
        self.start_frame = 1

        # Threading Specific

        self.alive = True
        self.cancel_token = CancellationToken()
        self._stopevent = threading.Event()
        threading.Thread.__init__(self, name="StatusTHread")

    def join(self, timeout=None):
        threading.Thread.join(self, timeout)

    def kill(self):
        self.alive = False
        self.cancel_token.cancel()
        self._stopevent.set()

    def set_start_frame(self, start_frame: int):
        self.start_frame = start_frame

    def run(self):

        last_10 = [0]

        for x in range(self.start_frame, self.frame_count - 1):

            if not self.is_alive:
                break

            percent = int((x / self.frame_count) * 100)

            average = round(sum(last_10) / len(last_10), 2)

            sys.stdout.write('\r')
            sys.stdout.write("Frame: [%s] %i%%    Average of Last 10 Frames: %s sec / frame" % (x, percent, average))

            if len(last_10) == 10:
                last_10.pop(0)

            now = time.time()

            while x >= self.context.signal_merged_count and self.alive:
                time.sleep(.00001)

            later = time.time()
            difference = float(later - now)
            last_10.append(difference)
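The rolling average above can be written more compactly with collections.deque, which discards the oldest sample automatically. A minimal standalone sketch of the equivalent bookkeeping (not part of the dandere2x codebase):

from collections import deque

# maxlen=10 makes the deque drop its oldest entry on append,
# replacing the manual len()/pop(0) handling above
last_10 = deque([0], maxlen=10)

def push_sample(seconds_per_frame: float) -> float:
    """Record one frame time and return the rounded rolling average."""
    last_10.append(seconds_per_frame)
    return round(sum(last_10) / len(last_10), 2)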
Example #6
class CompressFrames(threading.Thread):

    def __init__(self, context: Context):

        # load context
        self.inputs_dir = context.input_frames_dir
        self.frame_count = context.frame_count
        self.quality_moving_ratio = context.quality_moving_ratio
        self.compressed_static_dir = context.compressed_static_dir
        self.compressed_moving_dir = context.compressed_moving_dir
        self.quality_minimum = context.quality_minimum
        self.extension_type = context.extension_type
        self.start_frame = 1

        # threading member variables
        self.cancel_token = CancellationToken()
        self.alive = True
        self._stopevent = threading.Event()
        threading.Thread.__init__(self, name="CompressFramesThread")

    def join(self, timeout=None):
        threading.Thread.join(self, timeout)

    def kill(self):
        self.cancel_token.cancel()
        self.alive = False
        self._stopevent.set()

    def set_start_frame(self, start_frame: int):
        self.start_frame = start_frame

    def run(self):
        # start from 1 because ffmpeg's extracted frames start from 1
        for x in range(self.start_frame, self.frame_count + 1):

            # loading files area
            frame = Frame()
            frame.load_from_string_wait(self.inputs_dir + "frame" + str(x) + self.extension_type, self.cancel_token)

            # stop if thread was killed
            if not self.alive:
                return

            # if the compressed frame already exists, don't compress it
            if os.path.exists(self.compressed_static_dir + "compressed_" + str(x) + ".jpg"):
                continue

            frame.save_image_quality(self.compressed_static_dir + "compressed_" + str(x) + ".jpg",
                                     self.quality_minimum)
            frame.save_image_quality(self.compressed_moving_dir + "compressed_" + str(x) + ".jpg",
                                     int(self.quality_minimum * self.quality_moving_ratio))
Example #7
    def __init__(self, context: Context):
        self.context = context
        self.workspace = context.workspace
        self.extension_type = context.extension_type
        self.frame_count = context.frame_count
        self.is_alive = True
        self._is_stopped = False
        self.start_frame = self.context.start_frame

        # Threading Specific

        self.alive = True
        self.cancel_token = CancellationToken()
        self._stopevent = threading.Event()
        threading.Thread.__init__(self, name="StatusTHread")
Example #8
    def __init__(self, context: Context):

        self.context = context
        self.max_frames_ahead = self.context.max_frames_ahead
        self.frame_count = context.frame_count
        self.progressive_frame_extractor = ProgressiveFramesExtractorCV2(
            self.context)
        self.start_frame = self.context.start_frame

        # Threading Specific

        self.alive = True
        self.cancel_token = CancellationToken()
        self._stopevent = threading.Event()
        threading.Thread.__init__(self, name="Min Disk Thread")
Example #9
    def load_from_string_wait(self,
                              input_string,
                              cancel_token=CancellationToken()):

        logger = logging.getLogger(__name__)
        exists = os.path.isfile(input_string)
        count = 0
        while not exists and not cancel_token.is_cancelled:
            if count % 10000 == 0:
                logger.info(input_string + " dne")
            exists = os.path.isfile(input_string)
            count += 1
            time.sleep(.2)

        loaded = False
        while not loaded and not cancel_token.is_cancelled:
            try:
                self.load_from_string(input_string)
                loaded = True
            except PermissionError:
                logger.info("Permission Error")
                loaded = False
            except ValueError:
                logger.info("Value Error")
                loaded = False
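A typical call site, as seen in the Residual thread (Example #20): the caller passes its own CancellationToken so that a kill() elsewhere can unblock the wait. A short sketch, assuming Frame and CancellationToken from the dandere2x codebase and an illustrative path:

frame = Frame()
cancel_token = CancellationToken()

# blocks until inputs/frame2.jpg exists on disk (or the token is cancelled),
# then retries the actual load until it succeeds
frame.load_from_string_wait("inputs/frame2.jpg", cancel_token)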
Example #10
    def __init__(self, input_image: str, cancel_token=CancellationToken()):
        # calling superclass init
        threading.Thread.__init__(self, name="asyncframeread")
        self.input_image = input_image
        self.loaded_image = Frame()
        self.load_complete = False
        self.cancel_token = cancel_token
Example #11
def get_list_from_file_wait(text_file: str, cancel=CancellationToken()):
    logger = logging.getLogger(__name__)
    exists = os.path.isfile(text_file)
    count = 0
    while not exists and not cancel.is_cancelled:
        if count % 500 == 0:
            logger.info(text_file + " does not exist, waiting")
        exists = os.path.isfile(text_file)
        count += 1
        time.sleep(.01)

    if cancel.is_cancelled:
        return

    file = None
    while not file:
        try:
            file = open(text_file, "r")
        except PermissionError:
            logger.info("permission error on file " + text_file)

    text_list = file.read().split('\n')
    file.close()

    if len(text_list) == 1:
        return []

    return text_list
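One caveat: the default cancel=CancellationToken() is evaluated once at definition time, so every caller that omits the argument shares a single token, and cancelling it once would affect all later default-argument calls. The usual Python idiom, sketched here as a suggestion rather than the project's actual code, is a None sentinel:

def get_list_from_file_wait(text_file: str, cancel=None):
    # create a fresh token per call instead of sharing one default instance
    if cancel is None:
        cancel = CancellationToken()
    ...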
Example #12
    def __init__(self, context: Context):

        self.context = context
        self.max_frames_ahead = self.context.max_frames_ahead
        self.frame_count = context.frame_count
        self.progressive_frame_extractor = ProgressiveFramesExtractorFFMPEG(
            self.context, self.context.input_file)
        self.start_frame = 1

        self.progressive_frame_extractor.start_task()
        # Threading Specific

        self.alive = True
        self.cancel_token = CancellationToken()
        self._stopevent = threading.Event()
        threading.Thread.__init__(self, name="ResidualThread")
Example #13
    def __init__(self, context: Context):

        # load context
        self.inputs_dir = context.input_frames_dir
        self.frame_count = context.frame_count
        self.quality_moving_ratio = context.quality_moving_ratio
        self.compressed_static_dir = context.compressed_static_dir
        self.compressed_moving_dir = context.compressed_moving_dir
        self.quality_minimum = context.quality_minimum
        self.extension_type = context.extension_type
        self.start_frame = 1

        # threading member variables
        self.cancel_token = CancellationToken()
        self.alive = True
        self._stopevent = threading.Event()
        threading.Thread.__init__(self, name="CompressFramesThread")
Example #14
def wait_on_file(file_string: str, cancel=CancellationToken()):
    logger = logging.getLogger(__name__)
    exists = os.path.isfile(file_string)
    count = 0
    while not exists and not cancel.is_cancelled:
        if count % 500 == 0:
            logger.info(file_string + " does not exist, waiting")
        exists = os.path.isfile(file_string)
        count += 1
        time.sleep(.001)
Example #15
def wait_on_either_file(file_1: str, file_2: str, cancel=CancellationToken()):
    logger = logging.getLogger(__name__)
    exists_1 = os.path.isfile(file_1)
    exists_2 = os.path.isfile(file_2)
    count = 0
    while not (exists_1 or exists_2) and not cancel.is_cancelled:
        if count % 500 == 0:
            logger.info(file_1 + " and " + file_2 + " do not exist, waiting")
        exists_1 = os.path.isfile(file_1)
        exists_2 = os.path.isfile(file_2)

        count += 1
        time.sleep(.001)
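This helper suits the case where a producer may emit one of two filenames; for instance, the Residual thread in Example #20 writes either a real residual .jpg or a 'fake' pre-upscaled .png when two frames are identical. A sketch with illustrative paths:

cancel = CancellationToken()

# return once either the residual or the pre-made upscale shows up on disk
wait_on_either_file("residuals/output_000042.jpg",
                    "upscaled/output_000042.png",
                    cancel)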
Example #16
    def __init__(self, context: Context):
        self.frame_count = context.frame_count
        self.waifu2x_caffe_cui_dir = context.waifu2x_caffe_cui_dir
        self.residual_images_dir = context.residual_images_dir
        self.residual_upscaled_dir = context.residual_upscaled_dir
        self.noise_level = context.noise_level
        self.scale_factor = context.scale_factor
        self.workspace = context.workspace
        self.context = context
        self.signal_upscale = True
        self.active_waifu2x_subprocess = None
        self.start_frame = 1

        # Create Caffe Command
        self.waifu2x_caffe_upscale_frame = [
            self.waifu2x_caffe_cui_dir, "-i", "[input_file]", "-n",
            str(self.noise_level), "-s",
            str(self.scale_factor)
        ]

        waifu2x_caffe_options = get_options_from_section(
            context.config_yaml["waifu2x_caffe"]["output_options"])

        for element in waifu2x_caffe_options:
            self.waifu2x_caffe_upscale_frame.append(element)

        self.waifu2x_caffe_upscale_frame.extend(["-o", "[output_file]"])

        # Threading Specific

        self.alive = True
        self.cancel_token = CancellationToken()
        self._stopevent = threading.Event()
        threading.Thread.__init__(self, name="Waifu2xCaffeThread")

        logging.basicConfig(filename=self.workspace + 'waifu2x.log',
                            level=logging.INFO)
Example #17
    def __init__(self, context: Context):
        # load context
        self.frame_count = context.frame_count
        self.waifu2x_ncnn_vulkan_file_path = context.waifu2x_ncnn_vulkan_legacy_file_name
        self.waifu2x_ncnn_vulkan_path = context.waifu2x_ncnn_vulkan_path
        self.residual_images_dir = context.residual_images_dir
        self.residual_upscaled_dir = context.residual_upscaled_dir
        self.noise_level = context.noise_level
        self.scale_factor = context.scale_factor
        self.workspace = context.workspace
        self.context = context
        self.signal_upscale = True
        self.active_waifu2x_subprocess = None
        self.start_frame = 1

        self.waifu2x_vulkan_upscale_frame = [
            self.waifu2x_ncnn_vulkan_file_path, "-i", "[input_file]", "-n",
            str(self.noise_level), "-s",
            str(self.scale_factor)
        ]

        waifu2x_vulkan_options = get_options_from_section(
            self.context.config_yaml["waifu2x_ncnn_vulkan"]["output_options"])

        # add custom options to waifu2x_vulkan
        for element in waifu2x_vulkan_options:
            self.waifu2x_vulkan_upscale_frame.append(element)

        self.waifu2x_vulkan_upscale_frame.extend(["-o", "[output_file]"])

        # Threading Specific

        self.alive = True
        self.cancel_token = CancellationToken()
        self._stopevent = threading.Event()
        threading.Thread.__init__(self, name="Waifu2xVulkanThread")
Example #18
class Waifu2xCaffe(threading.Thread):
    """
    Note: This is legacy at the moment; it may or may not still work, but the class isn't up to standards.

    Let me know if you have intentions to use this so I can update it.
    """
    def __init__(self, context: Context):
        self.frame_count = context.frame_count
        self.waifu2x_caffe_cui_dir = context.waifu2x_caffe_cui_dir
        self.residual_images_dir = context.residual_images_dir
        self.residual_upscaled_dir = context.residual_upscaled_dir
        self.noise_level = context.noise_level
        self.scale_factor = context.scale_factor
        self.workspace = context.workspace
        self.context = context
        self.signal_upscale = True
        self.active_waifu2x_subprocess = None
        self.start_frame = 1

        # Create Caffe Command
        self.waifu2x_caffe_upscale_frame = [
            self.waifu2x_caffe_cui_dir, "-i", "[input_file]", "-n",
            str(self.noise_level), "-s",
            str(self.scale_factor)
        ]

        waifu2x_caffe_options = get_options_from_section(
            context.config_yaml["waifu2x_caffe"]["output_options"])

        for element in waifu2x_caffe_options:
            self.waifu2x_caffe_upscale_frame.append(element)

        self.waifu2x_caffe_upscale_frame.extend(["-o", "[output_file]"])

        # Threading Specific

        self.alive = True
        self.cancel_token = CancellationToken()
        self._stopevent = threading.Event()
        threading.Thread.__init__(self, name="Waifu2xCaffeThread")

        logging.basicConfig(filename=self.workspace + 'waifu2x.log',
                            level=logging.INFO)

    def kill(self):
        self.alive = False
        self.cancel_token.cancel()
        self._stopevent.set()

        try:
            d2xcpp_psutil = psutil.Process(self.active_waifu2x_subprocess.pid)
            if psutil.pid_exists(d2xcpp_psutil.pid):
                d2xcpp_psutil.kill()
        except psutil.NoSuchProcess:
            pass

    def set_start_frame(self, start_frame):
        self.start_frame = start_frame

    def join(self, timeout=None):
        threading.Thread.join(self, timeout)

    # The current Dandere2x implementation requires files to be removed from the folder
    # during runtime. As the files produced by Dandere2x don't all exist during the initial
    # Waifu2x call, various workarounds are in place to allow Dandere2x and Waifu2x to work in real time.

    # Briefly, 1) Create a list of names that will be upscaled by waifu2x,
    #          2) Call waifu2x to upscale whatever images are in 'differences' folder
    #          3) After waifu2x call is finished, delete whatever files were upscaled, and remove those names from list.
    #             (this is to prevent Waifu2x from re-upscaling the same image again)
    #          4) Repeat this process until all the names are removed.
    def run(self):
        logger = logging.getLogger(__name__)
        console_output = open(
            self.context.console_output_dir +
            "waifu2x_caffe_upscale_frame_all.txt", "w")

        residual_images_dir = self.context.residual_images_dir
        residual_upscaled_dir = self.context.residual_upscaled_dir
        exec_command = copy.copy(self.waifu2x_caffe_upscale_frame)

        # replace the exec command's placeholders with the files we're concerned with
        for x in range(len(exec_command)):
            if exec_command[x] == "[input_file]":
                exec_command[x] = residual_images_dir

            if exec_command[x] == "[output_file]":
                exec_command[x] = residual_upscaled_dir

        remove_when_upscaled_thread = threading.Thread(
            target=self.__remove_once_upscaled_then_stop)
        remove_when_upscaled_thread.start()

        # while there are pictures that have yet to be upscaled, keep calling the upscale command
        while self.signal_upscale and self.alive:
            console_output.write(str(exec_command))
            self.active_waifu2x_subprocess = subprocess.Popen(
                exec_command,
                shell=False,
                stderr=console_output,
                stdout=console_output)
            self.active_waifu2x_subprocess.wait()

    def upscale_file(self, input_file: str, output_file: str):

        exec_command = copy.copy(self.waifu2x_caffe_upscale_frame)

        # replace the exec command's placeholders with the files we're concerned with
        for x in range(len(exec_command)):
            if exec_command[x] == "[input_file]":
                exec_command[x] = input_file

            if exec_command[x] == "[output_file]":
                exec_command[x] = output_file

        print(exec_command)

        console_output = open(
            self.context.console_output_dir +
            "waifu2x_caffe_upscale_frame_single.txt", "w")
        console_output.write(str(exec_command))

        self.active_waifu2x_subprocess = subprocess.Popen(
            exec_command,
            shell=False,
            stderr=console_output,
            stdout=console_output)
        self.active_waifu2x_subprocess.wait()

    def __remove_once_upscaled_then_stop(self):
        self.__remove_once_upscaled()
        self.signal_upscale = False

    def __remove_once_upscaled(self):

        # make a list of names that will eventually (past or future) be upscaled
        list_of_names = []
        for x in range(self.start_frame, self.frame_count):
            list_of_names.append("output_" + get_lexicon_value(6, x) + ".png")

        for x in range(len(list_of_names)):

            name = list_of_names[x]

            residual_file = self.residual_images_dir + name.replace(
                ".png", ".jpg")
            residual_upscaled_file = self.residual_upscaled_dir + name

            wait_on_file(residual_upscaled_file, self.cancel_token)

            if not self.alive:
                return

            if os.path.exists(residual_file):
                os.remove(residual_file)
Example #19
class MinDiskUsage(threading.Thread):
    """
    A class to facilitate the actions needed to operate min_disk_usage.

    The main operations of min_disk_usage are:
    - Signalling to the progressive frame extractor to extract more frames from the video.
    - Deleting files no longer needed to be kept on disk (after the 'merged' image has been piped into ffmpeg,
      we no longer need the relevant files).
    """
    def __init__(self, context: Context):

        self.context = context
        self.max_frames_ahead = self.context.max_frames_ahead
        self.frame_count = context.frame_count
        self.progressive_frame_extractor = ProgressiveFramesExtractorFFMPEG(
            self.context, self.context.input_file)
        self.start_frame = 1

        self.progressive_frame_extractor.start_task()
        # Threading Specific

        self.alive = True
        self.cancel_token = CancellationToken()
        self._stopevent = threading.Event()
        threading.Thread.__init__(self, name="ResidualThread")

    def join(self, timeout=None):
        threading.Thread.join(self, timeout)

    def kill(self):
        self.alive = False
        self.progressive_frame_extractor.kill_task()
        self.cancel_token.cancel()
        self._stopevent.set()

    def set_start_frame(self, start_frame):
        self.start_frame = start_frame

    """
    todo:
    - Rather than extracting frame by frame, look into extracting every N frames rather than every
      single frame. I conjecture this would reduce the number of times these functions are called, which should
      increase performance.
    """

    def run(self):
        """
        Waits on the 'signal_merged_count' to change, which originates from the merge.py class.
        When it does, delete the used files and extract the needed frame.
        """
        logger = logging.getLogger(__name__)
        for x in range(self.start_frame,
                       self.frame_count - self.context.max_frames_ahead + 1):
            logger.info("on frame x: " + str(x))
            # wait for signal to get ahead of MinDiskUsage
            while x >= self.context.signal_merged_count and self.alive:
                time.sleep(.00001)

            if not self.alive:
                return

            # when it does get ahead, extract the next frame
            self.progressive_frame_extractor.next_frame()
            self.__delete_used_files(x)

    def extract_initial_frames(self):
        """
        Extract 'max_frames_ahead' needed for Dandere2x to start with.

        Author: Tremex. 
        """
        max_frames_ahead = self.context.max_frames_ahead

        for x in range(max_frames_ahead):
            self.progressive_frame_extractor.next_frame()

    def __delete_used_files(self, remove_before):
        """
        Delete the files produced by dandere2x up to index_to_remove.

        Author: Tremex
        """

        # load context

        pframe_data_dir = self.context.pframe_data_dir
        residual_data_dir = self.context.residual_data_dir
        correction_data_dir = self.context.correction_data_dir
        fade_data_dir = self.context.fade_data_dir
        input_frames_dir = self.context.input_frames_dir
        compressed_static_dir = self.context.compressed_static_dir
        compressed_moving_dir = self.context.compressed_moving_dir
        residual_upscaled_dir = self.context.residual_upscaled_dir

        # get the files to delete "_r(emove)"

        index_to_remove = str(remove_before - 2)

        prediction_data_file_r = pframe_data_dir + "pframe_" + index_to_remove + ".txt"
        residual_data_file_r = residual_data_dir + "residual_" + index_to_remove + ".txt"
        correction_data_file_r = correction_data_dir + "correction_" + index_to_remove + ".txt"
        fade_data_file_r = fade_data_dir + "fade_" + index_to_remove + ".txt"

        input_image_r = input_frames_dir + "frame" + index_to_remove + ".jpg"

        compressed_file_static_r = compressed_static_dir + "compressed_" + index_to_remove + ".jpg"
        compressed_file_moving_r = compressed_moving_dir + "compressed_" + index_to_remove + ".jpg"

        # "mark" them
        remove = [
            prediction_data_file_r,
            residual_data_file_r,
            correction_data_file_r,
            fade_data_file_r,
            input_image_r,  # upscaled_file_r,
            compressed_file_static_r,
            compressed_file_moving_r
        ]

        upscaled_file_r = residual_upscaled_dir + "output_" + get_lexicon_value(
            6, int(remove_before)) + ".png"
        remove.append(upscaled_file_r)

        # remove
        threading.Thread(target=self.__delete_files_from_list,
                         args=(remove, ),
                         daemon=True,
                         name="mindiskusage").start()

    def __delete_files_from_list(self, files):
        """
        Delete all the files in a given list.

        Author: Tremex.
        """
        for item in files:
            c = 0
            while self.alive:
                if os.path.isfile(item):
                    try:
                        os.remove(item)
                        break
                    except OSError:
                        c += 1
                else:
                    c += 1
                if c == 20:
                    break
                time.sleep(0.1)
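A rough, untested sketch of the batched extraction the todo above proposes, written as a hypothetical replacement for the loop body of run(). N and last_frame_to_handle are illustrative stand-ins (last_frame_to_handle corresponds to the bound used in run()), and the sketch ignores the tail when the frame count isn't divisible by N:

        N = 10  # hypothetical batch size
        for x in range(self.start_frame, last_frame_to_handle, N):
            # wait until merging is a full batch ahead, then catch up in one burst
            while x + N - 1 >= self.context.signal_merged_count and self.alive:
                time.sleep(.00001)
            for i in range(N):
                self.progressive_frame_extractor.next_frame()
                self.__delete_used_files(x + i)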
Example #20
class Residual(threading.Thread):
    def __init__(self, context):
        self.context = context

        # load variables from context
        self.workspace = context.workspace
        self.residual_upscaled_dir = context.residual_upscaled_dir
        self.residual_images_dir = context.residual_images_dir
        self.residual_data_dir = context.residual_data_dir
        self.pframe_data_dir = context.pframe_data_dir
        self.input_frames_dir = context.input_frames_dir
        self.frame_count = context.frame_count
        self.block_size = context.block_size
        self.extension_type = context.extension_type
        self.debug_dir = context.debug_dir
        self.debug = context.debug
        self.temp_image = context.temp_image_folder + "tempimage.jpg"
        self.logger = logging.getLogger(__name__)
        self.start_frame = 1

        # Threading Specific

        self.alive = True
        self.cancel_token = CancellationToken()
        self._stopevent = threading.Event()
        threading.Thread.__init__(self, name="ResidualThread")

    def join(self, timeout=None):
        threading.Thread.join(self, timeout)

    def kill(self):
        self.alive = False
        self.cancel_token.cancel()
        self._stopevent.set()

    def set_start_frame(self, start_frame: int):
        self.start_frame = start_frame

    def run(self):

        # for every frame in the video, create a residual_frame given the text files.
        for x in range(self.start_frame, self.frame_count):

            # loading files area
            # stop if thread is killed
            if not self.alive:
                return

            f1 = Frame()
            f1.load_from_string_wait(
                self.input_frames_dir + "frame" + str(x + 1) +
                self.extension_type, self.cancel_token)

            # Load the necessary lists to compute this iteration of residual making
            residual_data = get_list_from_file_wait(
                self.residual_data_dir + "residual_" + str(x) + ".txt",
                self.cancel_token)
            prediction_data = get_list_from_file_wait(
                self.pframe_data_dir + "pframe_" + str(x) + ".txt",
                self.cancel_token)

            # stop if thread is killed
            if not self.alive:
                return

            # Create the output files.
            debug_output_file = self.debug_dir + "debug" + str(
                x + 1) + self.extension_type
            output_file = self.residual_images_dir + "output_" + get_lexicon_value(
                6, x) + ".jpg"

            # Save to a temp folder so waifu2x-vulkan doesn't try reading it, then move it
            out_image = self.make_residual_image(self.context, f1,
                                                 residual_data,
                                                 prediction_data)

            if out_image.get_res() == (1, 1):
                """
                If out_image is (1,1) in size, then frame_x and frame_x+1 are identical.

                We still need to save an output image for the sake of having N output images for N input images,
                so we save these meaningless files anyway.

                However, these 1x1 images can slow whatever waifu2x implementation down, so we 'cheat' d2x
                by 'faking' the upscale, so that they don't need to be processed by waifu2x.
                """

                # Location of the 'fake' upscaled image.
                out_image = Frame()
                out_image.create_new(2, 2)
                output_file = self.residual_upscaled_dir + "output_" + get_lexicon_value(
                    6, x) + ".png"
                out_image.save_image(output_file)

            else:
                # This image has things to upscale, continue normally
                out_image.save_image_temp(output_file, self.temp_image)

            # With this change, the wrappers must be modified so they don't try to delete the nonexistent residual file

            if self.context.debug == 1:
                self.debug_image(self.block_size, f1, prediction_data,
                                 residual_data, debug_output_file)

    @staticmethod
    def make_residual_image(context: Context, raw_frame: Frame,
                            list_residual: list, list_predictive: list):
        """
        This section can best be explained through pictures. A visual way of expressing what 'make_residual_image'
        is doing is this section in the wiki.

        https://github.com/aka-katto/dandere2x/wiki/How-Dandere2x-Works#observation_3

        Inputs:
            - frame(x)
            - Residual vectors mapping frame(x)_residual -> frame(x)

        Output:
            - frame(x)_residual
        """

        residual_vectors = []
        buffer = 5
        block_size = context.block_size
        bleed = context.bleed

        # first make a 'bleeded' version of input_frame, as we need to create a buffer in the event the 'bleed'
        # ends up going out of bounds.
        bleed_frame = raw_frame.create_bleeded_image(buffer)

        # if there are no items in 'list_residual' but there are items in 'list_predictive',
        # then the two frames are identical, so no residual image is needed.
        if not list_residual and list_predictive:
            residual_image = Frame()
            residual_image.create_new(1, 1)
            return residual_image

        # if there are neither predictive vectors nor residuals,
        # then the frame is a brand new frame with no resemblance to the previous frame.
        # in this case copy the entire frame over
        if not list_residual and not list_predictive:
            residual_image = Frame()
            residual_image.create_new(raw_frame.width, raw_frame.height)
            residual_image.copy_image(raw_frame)
            return residual_image

        # size of output image is determined based off how many residuals there are
        image_size = int(math.sqrt(len(list_residual) / 4) + 1) * (block_size +
                                                                   bleed * 2)
        residual_image = Frame()
        residual_image.create_new(image_size, image_size)

        for x in range(int(len(list_residual) / 4)):
            # load every element in the list into a vector
            vector = DisplacementVector(int(list_residual[x * 4 + 0]),
                                        int(list_residual[x * 4 + 1]),
                                        int(list_residual[x * 4 + 2]),
                                        int(list_residual[x * 4 + 3]))

            # apply that vector to the image
            residual_image.copy_block(bleed_frame, block_size + bleed * 2,
                                      vector.x_1 + buffer - bleed,
                                      vector.y_1 + buffer - bleed,
                                      vector.x_2 * (block_size + bleed * 2),
                                      vector.y_2 * (block_size + bleed * 2))

        return residual_image

    @staticmethod
    def debug_image(block_size, frame_base, list_predictive, list_differences,
                    output_location):
        """
        Note:
            I haven't made an effort to maintain this method, as it's only for debugging.

        This section can best be explained through pictures. A visual way of expressing what 'debug'
        is doing is this section in the wiki.

        https://github.com/aka-katto/dandere2x/wiki/How-Dandere2x-Works#part-1-identifying-what-needs-to-be-drawn

        In other words, this method shows where residuals are, and is useful for finding good settings to use for a video.

        Inputs:
            - frame(x)
            - Residual vectors mapping frame(x)_residual -> frame(x)

        Output:
            - frame(x) minus frame(x)_residuals = debug_image
        """
        logger = logging.getLogger(__name__)

        difference_vectors = []
        predictive_vectors = []
        out_image = Frame()
        out_image.create_new(frame_base.width, frame_base.height)
        out_image.copy_image(frame_base)

        black_image = Frame()
        black_image.create_new(frame_base.width, frame_base.height)

        if not list_predictive and not list_differences:
            out_image.save_image(output_location)
            return

        if list_predictive and not list_differences:
            out_image.copy_image(frame_base)
            out_image.save_image(output_location)
            return

        # load list into vector displacements
        for x in range(int(len(list_differences) / 4)):
            difference_vectors.append(
                DisplacementVector(int(list_differences[x * 4]),
                                   int(list_differences[x * 4 + 1]),
                                   int(list_differences[x * 4 + 2]),
                                   int(list_differences[x * 4 + 3])))
        for x in range(int(len(list_predictive) / 4)):
            if (int(list_predictive[x * 4 + 0]) != int(list_predictive[x * 4 + 1])) and \
                    (int(list_predictive[x * 4 + 2]) != int(list_predictive[x * 4 + 3])):
                predictive_vectors.append(
                    DisplacementVector(int(list_predictive[x * 4 + 0]),
                                       int(list_predictive[x * 4 + 1]),
                                       int(list_predictive[x * 4 + 2]),
                                       int(list_predictive[x * 4 + 3])))

        # black out the blocks marked as differences (residuals) in the output image
        for vector in difference_vectors:
            out_image.copy_block(black_image, block_size, vector.x_1,
                                 vector.y_1, vector.x_1, vector.y_1)

        out_image.save_image_quality(output_location, 25)
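To make the sizing formula in make_residual_image concrete: the residual image is a square grid of (block_size + 2 * bleed)-pixel cells, sized so the grid holds at least len(list_residual) / 4 blocks. A small worked check with illustrative values:

import math

block_size, bleed = 30, 1   # example values; the real ones come from Context
n_blocks = 10               # len(list_residual) / 4

cell = block_size + bleed * 2         # 32 px per block cell
side = int(math.sqrt(n_blocks) + 1)   # 4 cells per side
image_size = side * cell              # 128 px square
assert side * side >= n_blocks        # 16 cells, enough for 10 blocks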
Example #21
class Dandere2xCppWrapper(threading.Thread):
    """
    A wrapper for the dandere2x_cpp module. It simply calls the module using information used from the context.
    """

    def __init__(self, context: Context):
        # load stuff from context
        self.workspace = context.workspace
        self.dandere2x_cpp_dir = context.dandere2x_cpp_dir
        self.frame_count = context.frame_count
        self.block_size = context.block_size
        self.step_size = context.step_size
        self.extension_type = context.extension_type
        self.residual_images_dir = context.residual_images_dir
        self.log_dir = context.console_output_dir
        self.dandere2x_cpp_subprocess = None

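        # Positional arguments for the dandere2x_cpp binary. Judging from
        # set_start_frame() below, the sixth argument appears to toggle resume
        # mode ("n" = new session, "r" = resume) and the seventh is the frame
        # to start from.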
        self.exec_command = [self.dandere2x_cpp_dir,
                             self.workspace,
                             str(self.frame_count),
                             str(self.block_size),
                             str(self.step_size),
                             "n",
                             str(1),
                             self.extension_type]

        # Threading Specific

        self.alive = True
        self.cancel_token = CancellationToken()
        self._stopevent = threading.Event()
        threading.Thread.__init__(self, name="Dandere2xCpp")

    def join(self, timeout=None):
        print("dandere2xcpp killed")
        threading.Thread.join(self, timeout)

    def kill(self):
        self.alive = False
        self.cancel_token.cancel()
        self._stopevent.set()

        d2xcpp_psutil = psutil.Process(self.dandere2x_cpp_subprocess.pid)
        d2xcpp_psutil.kill()

    def set_start_frame(self, start_frame):
        self.exec_command = [self.dandere2x_cpp_dir,
                             self.workspace,
                             str(self.frame_count),
                             str(self.block_size),
                             str(self.step_size),
                             "r",
                             str(start_frame),
                             self.extension_type]

    def run(self):
        logger = logging.getLogger(__name__)

        logger.info(self.exec_command)

        # On Linux, we can't use subprocess.CREATE_NEW_CONSOLE, so we just write
        # the dandere2x_cpp output to a text file.
        if get_operating_system() == 'win32':
            self.dandere2x_cpp_subprocess = subprocess.Popen(self.exec_command,
                                                             creationflags=subprocess.CREATE_NEW_CONSOLE)

        elif get_operating_system() == 'linux':
            console_output = open(self.log_dir + "dandere2x_cpp.txt", "w")
            console_output.write(str(self.exec_command))
            self.dandere2x_cpp_subprocess = subprocess.Popen(self.exec_command, shell=False, stderr=console_output,
                                                             stdout=console_output)

        # wait for the subprocess to finish, so returncode is actually populated
        self.dandere2x_cpp_subprocess.wait()

        if self.dandere2x_cpp_subprocess.returncode == 0:
            logger.info("d2xcpp finished correctly")
        else:
            logger.info("d2xcpp ended unexpectedly")
Example #22
class Dandere2x(threading.Thread):
    """
    The main driver that can be called in a variety of circumstances - for example, dandere2x can be started
    from dandere2x_gui_wrapper.py, raw_config_driver.py, or raw_config_gui_driver.py. In each scenario, this is the
    class that is called when Dandere2x ultimately needs to start.
    """
    def __init__(self, context):
        self.context = context
        self.min_disk_demon = None
        self.merge_thread = Merge(self.context)
        self.residual_thread = Residual(self.context)
        self.waifu2x = self._get_waifu2x_class(self.context.waifu2x_type)
        self.compress_frames_thread = CompressFrames(self.context)
        self.dandere2x_cpp_thread = Dandere2xCppWrapper(self.context)
        self.status_thread = Status(context)

        # session specific
        self.resume_session = False
        self.first_frame = 1

        if self.context.config_yaml['resume_settings']['resume_session']:
            print("is resume session")
            self.resume_session = True
            self.first_frame = int(self.context.config_yaml['resume_settings']
                                   ['signal_merged_count'])
        else:
            print("is not resume session")

        # Threading Specific

        self.alive = True
        self.cancel_token = CancellationToken()
        self._stopevent = threading.Event()
        threading.Thread.__init__(self, name="dandere2x_thread")

    def __extract_frames(self):
        """Extract the initial frames needed for a dandere2x to run depending on session type."""

        if self.context.use_min_disk:
            if self.resume_session:
                self.min_disk_demon.progressive_frame_extractor.extract_frames_to(
                    int(self.context.config_yaml['resume_settings']
                        ['signal_merged_count']))

            self.min_disk_demon.extract_initial_frames()
        else:
            extract_frames(self.context, self.context.input_file)

    def __setup_jobs(self):
        """This method is somewhat deprecated, will be moved somewhere else in the future."""
        if self.context.use_min_disk:
            self.min_disk_demon = MinDiskUsage(self.context)

    def __upscale_first_frame(self):
        """The first frame of any dandere2x session needs to be upscaled fully, and this is done as it's own
        process. Ensuring the first frame can get upscaled also provides a source of error checking for the user."""

        # measure the time to upscale a single frame for printing purposes
        one_frame_time = time.time()
        self.waifu2x.upscale_file(
            input_file=self.context.input_frames_dir + "frame" +
            str(self.first_frame) + self.context.extension_type,
            output_file=self.context.merged_dir + "merged_" +
            str(self.first_frame) + self.context.extension_type)

        if not file_exists(self.context.merged_dir + "merged_" +
                           str(self.first_frame) +
                           self.context.extension_type):
            """ 
            Ensure the first file was able to get upscaled. We literally cannot continue if it doesn't.
            """

            print(
                "Could not upscale the first file; check the log file to see what's wrong"
            )
            logging.info(
                "Could not upscale the first file; check the log file to see what's wrong"
            )
            logging.info("Exiting Dandere2x...")
            sys.exit(1)

        print("\n Time to upscale an uncompressed frame: " +
              str(round(time.time() - one_frame_time, 2)))

    def join(self, timeout=None):

        logging.info("dandere2x joined called")

        # due to a weird quirk, prevent dandere2x from being joined until nosound.mkv exists (at least).
        wait_on_file(self.context.nosound_file)

        logging.info("joining residual")
        self.residual_thread.join()

        if self.context.use_min_disk:
            logging.info("joining min disk demon")
            self.min_disk_demon.join()

        logging.info("joining merge")
        self.merge_thread.join()
        logging.info("joining waifu2x")
        self.waifu2x.join()
        logging.info("joining dandere2x")
        self.dandere2x_cpp_thread.join()
        logging.info("joining status")
        self.status_thread.join()
        logging.info("joining compress")
        self.compress_frames_thread.join()

        self.context.logger.info("All threaded processes have finished")
        print("All threaded processes have been finished")

        if self.resume_session:
            print("Session is a resume session, concatenating two videos")
            logging.info(
                "Session is a resume session, concatenating two videos")
            file_to_be_concat = self.context.workspace + "file_to_be_concat.mp4"

            rename_file(self.context.nosound_file, file_to_be_concat)
            concat_two_videos(
                self.context,
                self.context.config_yaml['resume_settings']['nosound_file'],
                file_to_be_concat, self.context.nosound_file)

        # if this became a suspended dandere2x session, kill it.
        if not self.alive:
            logging.info("Invoking suspend exit conditions")
            self.__suspend_exit_conditions()

        elif self.alive:
            logging.info("Migrating tracks")
            migrate_tracks(self.context, self.context.nosound_file,
                           self.context.sound_file, self.context.output_file)

    def __suspend_exit_conditions(self):
        """This is called when dandere2x session is suspended midway through completition, need to save
        meta data and needed files to be resumable."""

        suspended_file = self.context.workspace + str(
            self.context.signal_merged_count + 1) + ".mp4"
        os.rename(self.context.nosound_file, suspended_file)
        self.context.nosound_file = suspended_file
        self.__leave_killed_message()

    def __leave_killed_message(self):
        """
        Write the yaml file for the next resume session. The next dandere2x session will resume in the same folder
        where the previous one left off, but inside a subfolder named after the last upscaled frame (e.g. "30").
        :return:
        """
        import yaml
        file = open(self.context.workspace + "suspended_session_data.yaml",
                    "a")

        config_file_unparsed = self.context.config_file_unparsed
        config_file_unparsed['resume_settings'][
            'signal_merged_count'] = self.context.signal_merged_count
        config_file_unparsed['resume_settings'][
            'nosound_file'] = self.context.nosound_file
        config_file_unparsed['resume_settings']['resume_session'] = True

        config_file_unparsed['dandere2x']['developer_settings']['workspace'] = \
            config_file_unparsed['dandere2x']['developer_settings']['workspace'] + \
            str(self.context.signal_merged_count + 1) + os.path.sep

        yaml.dump(config_file_unparsed, file, sort_keys=False)

    def kill(self):
        self.alive = False
        self.cancel_token.cancel()
        self._stopevent.set()

        self.merge_thread.kill()
        self.waifu2x.kill()
        self.residual_thread.kill()
        self.compress_frames_thread.kill()

        if self.context.use_min_disk:
            self.min_disk_demon.kill()
        self.dandere2x_cpp_thread.kill()
        self.status_thread.kill()

    def __set_first_frame(self):
        """
        Set the first frame for the relevant dandere2x threads when doing a resume session
        """
        self.compress_frames_thread.set_start_frame(self.first_frame)
        self.dandere2x_cpp_thread.set_start_frame(self.first_frame)
        self.merge_thread.set_start_frame(self.first_frame)
        self.residual_thread.set_start_frame(self.first_frame)
        self.waifu2x.set_start_frame(self.first_frame)
        self.status_thread.set_start_frame(self.first_frame)

        if self.context.use_min_disk:
            self.min_disk_demon.set_start_frame(self.first_frame)

    def run(self):
        """
        Starts the dandere2x_python process at large.
        """

        print("threading at start of runtime")
        print(threading.enumerate())

        # directories need to be created before we do anything
        create_directories(self.context.workspace, self.context.directories)

        # dandere2x needs the width and height to share a common factor with the block size,
        # so append a video filter if needed to make the size conform
        if not valid_input_resolution(self.context.width, self.context.height,
                                      self.context.block_size):
            append_video_resize_filter(self.context)

        # create the list of threads to use for dandere2x
        self.__setup_jobs()

        if self.resume_session:
            self.__set_first_frame()

        # extract the initial frames needed for execution depending on type (min_disk_usage / non min_disk_usage )
        self.__extract_frames()

        # first frame needs to be upscaled manually before dandere2x process starts.
        self.__upscale_first_frame()

        self.compress_frames_thread.start()
        self.dandere2x_cpp_thread.start()
        self.merge_thread.start()
        self.residual_thread.start()
        self.waifu2x.start()
        self.status_thread.start()

        if self.context.use_min_disk:
            self.min_disk_demon.start()

    def _get_waifu2x_class(self, name: str):
        """
        Returns a waifu2x object depending on what the user selected
        """

        if name == "caffe":
            return Waifu2xCaffe(self.context)

        elif name == "converter_cpp":
            return Waifu2xConverterCpp(self.context)

        elif name == "vulkan":
            return Waifu2xVulkan(self.context)

        elif name == "vulkan_legacy":
            return Waifu2xVulkanLegacy(self.context)

        else:
            logging.info("no valid waifu2x selected")
            print("no valid waifu2x selected")
            exit(1)

    def delete_workspace_files(self):
        """
        Delete the files produced by dandere2x (besides logs) if this method is called.
        """
        delete_directories(self.context.directories)
        no_sound = os.path.join(self.context.workspace, "nosound.mkv")

        try:
            os.remove(no_sound)

        except OSError as e:
            print("Deletion of the file %s failed" % no_sound)
            print(e.strerror)
        else:
            print("Successfully deleted the file %s " % no_sound)
Example #23
class Merge(threading.Thread):
    """
    Description:
        - This class is the driver for merging all the files that need to be merged together.
          Essentially, it calls the 'make_merge_image' method for every image that needs to be upscaled.
        - Other tasks are to ensure the files exist, async writing for optimizations, as well
          as signalling to other parts of Dandere2x we've finished upscaling.
    """
    def __init__(self, context: Context):

        self.context = context
        # load variables from context
        self.workspace = context.workspace
        self.upscaled_dir = context.residual_upscaled_dir
        self.merged_dir = context.merged_dir
        self.residual_data_dir = context.residual_data_dir
        self.pframe_data_dir = context.pframe_data_dir
        self.correction_data_dir = context.correction_data_dir
        self.fade_data_dir = context.fade_data_dir
        self.frame_count = context.frame_count
        self.extension_type = context.extension_type
        self.nosound_file = context.nosound_file
        self.preserve_frames = context.preserve_frames
        self.logger = logging.getLogger(__name__)
        self.start_frame = self.context.start_frame

        # setup the pipe for merging

        self.pipe = Pipe(context, self.nosound_file)

        # Threading Specific

        self.alive = True
        self.cancel_token = CancellationToken()
        self._stopevent = threading.Event()
        threading.Thread.__init__(self, name="MergeThread")

    def join(self, timeout=None):
        self.pipe.join()
        threading.Thread.join(self, timeout)

    def kill(self):
        self.alive = False
        self.pipe.kill()
        self.cancel_token.cancel()
        self._stopevent.set()

    def set_start_frame(self, start_frame):
        self.start_frame = start_frame

    @staticmethod
    def make_merge_image(context: Context, frame_residual: Frame,
                         frame_previous: Frame, list_predictive: list,
                         list_residual: list, list_corrections: list,
                         list_fade: list):
        """
        This section can best be explained through pictures. A visual way of expressing what 'merging'
        is doing is this section in the wiki.

        https://github.com/aka-katto/dandere2x/wiki/How-Dandere2x-Works#part-2-using-observations-to-save-time

        Inputs:
            - frame(x)
            - frame(x+1)_residual
            - Residual vectors mapping frame(x+1)_residual -> frame(x+1)
            - Predictive vectors mapping frame(x) -> frame(x+1)

        Output:
            - frame(x+1)
        """

        # Load context
        logger = logging.getLogger(__name__)

        out_image = Frame()
        out_image.create_new(frame_previous.width, frame_previous.height)

        # If list_predictive is empty, then the residual frame is simply the newly
        # produced image.
        if not list_predictive:
            out_image.copy_image(frame_residual)
            return out_image

        # By copying the image first as the first step, all the predictive elements of the form (x,y) -> (x,y)
        # are also copied. This allows us to ignore copying vectors (x,y) -> (x,y), which prevents redundant copying,
        # thus saving valuable computational time.
        out_image.copy_image(frame_previous)

        ###################
        # Plugins Section #
        ###################

        # Note: Run the plugins in the SAME order they were run in dandere2x_cpp. If not, it won't work correctly.
        out_image = pframe_image(context, out_image, frame_previous,
                                 frame_residual, list_residual,
                                 list_predictive)
        out_image = fade_image(context, out_image, list_fade)
        out_image = correct_image(context, out_image, list_corrections)

        return out_image

    def run(self):

        self.pipe.start()
        # Load the genesis image + the first upscaled image.
        frame_previous = Frame()
        frame_previous.load_from_string_controller(
            self.merged_dir + "merged_" + str(self.start_frame) +
            self.extension_type, self.context.controller)

        self.pipe.save(frame_previous)

        f1 = Frame()
        f1.load_from_string_controller(
            self.upscaled_dir + "output_" +
            get_lexicon_value(6, self.start_frame) + ".png",
            self.context.controller)

        last_frame = False
        for x in range(self.start_frame, self.frame_count):
            ###################################
            # Loop-iteration pre-requirements #
            ###################################
            # Check if we're at the last image, which affects the behaviour of the loop.
            if x == self.frame_count - 1:
                last_frame = True

            # Pre-load the next iteration of the loop image ahead of time, if we're not on the last frame.
            if not last_frame:
                background_frame_load = \
                    AsyncFrameRead(
                        self.upscaled_dir + "output_" + get_lexicon_value(6, x + 1) + ".png", self.context.controller)

                background_frame_load.start()

            #######################
            # Loop-iteration Core #
            #######################
            # Load the needed vectors to create the merged image.
            prediction_data_list = get_list_from_file_wait_controller(
                self.pframe_data_dir + "pframe_" + str(x) + ".txt",
                self.context.controller)
            residual_data_list = get_list_from_file_wait_controller(
                self.residual_data_dir + "residual_" + str(x) + ".txt",
                self.context.controller)
            correction_data_list = get_list_from_file_wait_controller(
                self.correction_data_dir + "correction_" + str(x) + ".txt",
                self.context.controller)
            fade_data_list = get_list_from_file_wait_controller(
                self.fade_data_dir + "fade_" + str(x) + ".txt",
                self.context.controller)

            if not self.context.controller.is_alive():
                self.logger.info("Merge.py killed at frame " + str(x))
                break

            self.logger.info("Upscaling frame " + str(x))
            # Create the actual image itself.
            frame_next = self.make_merge_image(
                self.context, f1, frame_previous, prediction_data_list,
                residual_data_list, correction_data_list, fade_data_list)

            ###############
            # Saving Area #
            ###############

            # Directly write the image to the ffmpeg pipe line.
            self.pipe.save(frame_next)

            # Manually write the image if we're preserving frames (this is for enthusiasts / debugging).
            if self.preserve_frames:
                output_file = self.workspace + "merged/merged_" + str(
                    x + 1) + self.extension_type
                background_frame_write = AsyncFrameWrite(
                    frame_next, output_file)
                background_frame_write.start()

            #######################################
            # Assign variables for next iteration #
            #######################################
            # The frame after the last one does not exist, so don't load it.
            if not last_frame:
                # We need to wait until the next upscaled image exists before we move on.
                while not background_frame_load.load_complete:
                    wait_on_file_controller(
                        self.upscaled_dir + "output_" +
                        get_lexicon_value(6, x + 1) + ".png",
                        self.context.controller)

                f1 = background_frame_load.loaded_image

            frame_previous = frame_next

            # Signal to the rest of the dandere2x process we've finished upscaling frame 'x'.
            self.context.controller.update_frame_count(x)

        self.pipe.kill()
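
# A minimal, self-contained sketch of the double-buffering pattern used in
# Merge.run() above: frame x+1 is read on a background thread while frame x is
# being merged. This is not dandere2x code; AsyncRead and process() are
# illustrative stand-ins for AsyncFrameRead and the merge + pipe.save work.
import threading


class AsyncRead(threading.Thread):
    """Read one file on a background thread, mirroring AsyncFrameRead."""

    def __init__(self, path):
        threading.Thread.__init__(self, daemon=True)
        self.path = path
        self.loaded_image = None
        self.load_complete = False

    def run(self):
        with open(self.path, "rb") as f:
            self.loaded_image = f.read()
        self.load_complete = True


def process(frame):
    """Stand-in for the per-frame make_merge_image + pipe.save work."""
    print("merged %d bytes" % len(frame))


def merge_all(paths):
    # Load the first frame up front, then keep one read in flight at all times.
    current = AsyncRead(paths[0])
    current.start()
    current.join()
    frame = current.loaded_image

    for i in range(len(paths)):
        preload = None
        if i + 1 < len(paths):
            preload = AsyncRead(paths[i + 1])
            preload.start()

        process(frame)  # merge frame i while frame i+1 loads concurrently

        if preload is not None:
            preload.join()  # blocks only if the background read is still running
            frame = preload.loaded_image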
Beispiel #24
0
class Waifu2xVulkan(threading.Thread):
    """
    The waifu2x-vulkan wrapper, with custom functions written that are specific for dandere2x to work.
    """
    def __init__(self, context: Context):
        # load context
        self.frame_count = context.frame_count
        self.waifu2x_ncnn_vulkan_file_path = context.waifu2x_ncnn_vulkan_legacy_file_name
        self.waifu2x_ncnn_vulkan_path = context.waifu2x_ncnn_vulkan_path
        self.residual_images_dir = context.residual_images_dir
        self.residual_upscaled_dir = context.residual_upscaled_dir
        self.noise_level = context.noise_level
        self.scale_factor = context.scale_factor
        self.workspace = context.workspace
        self.context = context
        self.signal_upscale = True
        self.active_waifu2x_subprocess = None
        self.start_frame = 1

        self.waifu2x_vulkan_upscale_frame = [
            self.waifu2x_ncnn_vulkan_file_path, "-i", "[input_file]", "-n",
            str(self.noise_level), "-s",
            str(self.scale_factor)
        ]

        waifu2x_vulkan_options = get_options_from_section(
            self.context.config_yaml["waifu2x_ncnn_vulkan"]["output_options"])

        # add custom options to waifu2x_vulkan
        for element in waifu2x_vulkan_options:
            self.waifu2x_vulkan_upscale_frame.append(element)

        self.waifu2x_vulkan_upscale_frame.extend(["-o", "[output_file]"])

        # Threading Specific

        self.alive = True
        self.cancel_token = CancellationToken()
        self._stopevent = threading.Event()
        threading.Thread.__init__(self, name="Waifu2xVulkanThread")

    def kill(self):
        self.alive = False
        self.cancel_token.cancel()
        self._stopevent.set()

        try:
            d2xcpp_psutil = psutil.Process(self.active_waifu2x_subprocess.pid)
            if psutil.pid_exists(d2xcpp_psutil.pid):
                d2xcpp_psutil.kill()
        except psutil.NoSuchProcess:
            pass

    def set_start_frame(self, start_frame):
        self.start_frame = start_frame

    def join(self, timeout=None):
        threading.Thread.join(self, timeout)

    def run(self):
        """
        Input:
            - Files made by residuals.py appearing in the /residual_images/ folder.

        Output:
            - Files upscaled in /residual_upscaled/

        Code Description:

        The current Dandere2x implementation requires files to be removed from the 'residual_images' folder
        during runtime. When waifu2x-ncnn-vulkan is told to upscale a folder, it only upscales what's in the
        folder at that moment, and it would re-upscale images it had already processed in a previous iteration.

        Because the residual images produced by Dandere2x don't all exist at the time of the initial
        waifu2x call, the 'upscale folder' command has to be invoked multiple times. To prevent waifu2x from
        re-upscaling the same image twice, several workarounds are in place so Dandere2x and waifu2x can work
        in real time.

        Briefly: 1) Create a list of the names that waifu2x will upscale.
                 2) Call waifu2x to upscale whatever images are currently in the 'residual_images' folder.
                 3) After the waifu2x call finishes, delete the files that were upscaled and remove their
                    names from the list (this prevents waifu2x from re-upscaling the same image again).
                 4) Repeat until every name has been removed.
        """

        logger = logging.getLogger(__name__)

        residual_images_dir = self.context.residual_images_dir
        residual_upscaled_dir = self.context.residual_upscaled_dir
        exec_command = copy.copy(self.waifu2x_vulkan_upscale_frame)

        console_output = open(
            self.context.console_output_dir + "vulkan_upscale_frames.txt", "w")

        # replace the placeholder arguments in the exec command with the directories we're working with
        for x in range(len(exec_command)):
            if exec_command[x] == "[input_file]":
                exec_command[x] = residual_images_dir

            if exec_command[x] == "[output_file]":
                exec_command[x] = residual_upscaled_dir

        # we need to os.chdir into the waifu2x directory, or else waifu2x-ncnn-vulkan won't work.
        os.chdir(self.waifu2x_ncnn_vulkan_path)

        logger.info("waifu2x_vulkan session")
        logger.info(exec_command)

        fix_names_forever_thread = threading.Thread(
            target=self.__fix_names_all, name="fix_names_forever")
        fix_names_forever_thread.start()

        remove_when_upscaled_thread = threading.Thread(
            target=self.__remove_once_upscaled_then_stop,
            name="remove_once_upscaled")
        remove_when_upscaled_thread.start()

        # while there are pictures that have yet to be upscaled, keep calling the upscale command
        while self.signal_upscale and self.alive:
            console_output.write(str(exec_command))
            self.active_waifu2x_subprocess = subprocess.Popen(
                exec_command,
                shell=False,
                stderr=console_output,
                stdout=console_output)
            self.active_waifu2x_subprocess.wait()

        console_output.close()

    def upscale_file(self, input_file: str, output_file: str):
        """
        Manually upscale a file using the wrapper.
        """

        # load context
        waifu2x_ncnn_vulkan_path = self.context.waifu2x_ncnn_vulkan_path
        exec_command = copy.copy(self.waifu2x_vulkan_upscale_frame)

        # replace the placeholder arguments in the exec command with the files we're working with
        for x in range(len(exec_command)):
            if exec_command[x] == "[input_file]":
                exec_command[x] = input_file

            if exec_command[x] == "[output_file]":
                exec_command[x] = output_file

        # waifu2x-ncnn-vulkan must be run from its own directory, so use os.chdir to work out of that dir.
        os.chdir(waifu2x_ncnn_vulkan_path)

        console_output = open(
            self.context.console_output_dir + "vulkan_upscale_frame.txt", "w")
        console_output.write(str(exec_command))
        self.active_waifu2x_subprocess = subprocess.Popen(
            exec_command,
            shell=False,
            stderr=console_output,
            stdout=console_output)
        self.active_waifu2x_subprocess.wait()
        console_output.close()

    def __remove_once_upscaled_then_stop(self):
        self.__remove_once_upscaled()
        self.signal_upscale = False

    def __remove_once_upscaled(self):

        # make a list of every name that will be upscaled at some point in this session
        list_of_names = []
        for x in range(self.start_frame, self.frame_count):
            list_of_names.append("output_" + get_lexicon_value(6, x) + ".png")

        for x in range(len(list_of_names)):

            if not self.alive:
                return

            name = list_of_names[x]

            residual_file = self.residual_images_dir + name.replace(
                ".png", ".jpg")
            residual_upscaled_file = self.residual_upscaled_dir + name

            wait_on_file(residual_upscaled_file, self.cancel_token)

            if os.path.exists(residual_file):
                os.remove(residual_file)

    def __fix_names_all(self):
        """
        Waifu2x-ncnn-vulkan will accept a file as "file.jpg" and output as "file.jpg.png".

        Unfortunately, dandere2x wouldn't recognize this, so this function renames each name to the correct naming
        convention. This function will iteratiate through every file needing to be upscaled waifu2x-ncnn-vulkan,
        and change it's name after it's done saving

        Comments:

        - There's a really complicated try / except that exists because, even though a file may exist,
          the file handle may still be used by waifu2x-ncnn-vulkan (it hasn't released it yet). As a result,
          we need to try / except it until it's released, allowing us to rename it.

        """

        file_names = []
        for x in range(self.start_frame, self.frame_count):
            file_names.append("output_" + get_lexicon_value(6, x))

        for file in file_names:
            dirty_name = self.residual_upscaled_dir + file + ".jpg.png"
            clean_name = self.residual_upscaled_dir + file + ".png"

            wait_on_either_file(clean_name, dirty_name, self.cancel_token)

            if not self.alive:
                return

            if file_exists(clean_name):
                pass  # already renamed; nothing to do

            elif file_exists(dirty_name):
                while file_exists(dirty_name):
                    try:
                        rename_file(dirty_name, clean_name)
                    except PermissionError:
                        pass