def next_frame(self):
    """
    Advance to the next extracted frame, producing it on demand if needed.

    Resumes the paused ffmpeg-extraction thread until the frame file for
    ``self.count`` exists, is non-empty, and is fully loadable, then pauses
    extraction again and increments ``self.count``.
    """

    # Build the frame path once instead of re-concatenating it four times.
    frame_file = self.input_frames_dir + "frame" + str(self.count) + self.extension_type

    # FFMPEG extracts images pretty fast depending on hardware, so although
    # we've only instructed ffmpeg to extract N frames, N + k (for some k)
    # may already be extracted. In that case there is nothing to produce.
    if file_exists(frame_file):
        self.count += 1
        return

    # Resume the extraction thread in order to produce a new frame.
    self.pause_resume.resume()

    # Although the file may appear, there are niche conditions in which the
    # file on disk is not yet processable. Busy-wait (with a tiny sleep)
    # until the image exists and is non-empty.
    while not file_exists(frame_file):
        time.sleep(.00001)

    while file_is_empty(frame_file):
        time.sleep(.00001)

    # Make sure the image is actually loadable before pausing the ffmpeg
    # thread, by using the blocking load/wait function.
    f = Frame()
    f.load_from_string_wait(frame_file)

    # Pause the extraction thread again until the next frame is requested.
    self.pause_resume.suspend()

    self.count += 1
Esempio n. 2
0
class AsyncFrameRead(threading.Thread):
    """
    Background thread that loads a single image file from disk.

    Poll 'load_complete' to know when 'loaded_image' is ready to use.
    """

    def __init__(self, input_image: str):
        super().__init__()
        self.input_image = input_image
        self.loaded_image = Frame()
        self.load_complete = False

    def run(self):
        # Blocks until the image file is readable, then flags completion.
        self.loaded_image.load_from_string_wait(self.input_image)
        self.load_complete = True
Esempio n. 3
0
def residual_loop(context):
    """
    Call the 'make_residual_image' method for every image that needs to be made into a residual.

    Method Tasks:
        - Load and wait for the files needed to create a residual image.
        - Call 'make_residual_image' once the needed files exist
    """

    # Pull everything this loop needs out of the shared context object.
    workspace = context.workspace
    residual_images_dir = context.residual_images_dir
    residual_data_dir = context.residual_data_dir
    pframe_data_dir = context.pframe_data_dir
    input_frames_dir = context.input_frames_dir
    frame_count = context.frame_count
    block_size = context.block_size
    extension_type = context.extension_type
    debug_dir = context.debug_dir
    debug = context.debug

    temp_image = context.temp_image_folder + "tempimage.jpg"

    logger = logging.getLogger(__name__)
    logger.info((workspace, 1, frame_count, block_size))

    # Create one residual frame per video frame, driven by the text files
    # produced by the motion-estimation stage.
    for frame_number in range(1, frame_count):
        # The residual for frame N is computed against frame N + 1.
        next_frame = Frame()
        next_frame.load_from_string_wait(f"{input_frames_dir}frame{frame_number + 1}{extension_type}")

        # Vectors required for this iteration of residual making.
        residual_data = get_list_from_file(f"{residual_data_dir}residual_{frame_number}.txt")
        prediction_data = get_list_from_file(f"{pframe_data_dir}pframe_{frame_number}.txt")

        # Destination files for this iteration.
        debug_output_file = f"{debug_dir}debug{frame_number + 1}{extension_type}"
        output_file = f"{residual_images_dir}output_{get_lexicon_value(6, frame_number)}.jpg"

        # Save to a temp folder so waifu2x-vulkan doesn't try reading it, then move it.
        out_image = make_residual_image(context, next_frame, residual_data, prediction_data)
        out_image.save_image_temp(output_file, temp_image)

        if debug == 1:
            debug_image(block_size, next_frame, prediction_data, residual_data, debug_output_file)
Esempio n. 4
0
class AsyncFrameRead(threading.Thread):
    """
    Read an image asynchronously.

    Loads 'input_image' on a background thread; poll 'load_complete' to know
    when 'loaded_image' is ready to use. The load can be aborted through
    'cancel_token'.
    """

    def __init__(self, input_image: str, cancel_token=None):
        # calling superclass init
        threading.Thread.__init__(self, name="asyncframeread")
        self.input_image = input_image
        self.loaded_image = Frame()
        self.load_complete = False
        # Bug fix: the old signature used `cancel_token=CancellationToken()`,
        # a mutable default evaluated once at class-definition time, so every
        # reader created without an explicit token shared (and could cancel)
        # the very same token. Create a fresh token per instance instead.
        self.cancel_token = cancel_token if cancel_token is not None else CancellationToken()

    def run(self):
        # Blocks until the image is readable or the token is cancelled.
        self.loaded_image.load_from_string_wait(self.input_image,
                                                self.cancel_token)
        self.load_complete = True
Esempio n. 5
0
def difference_loop(context, start_frame: int):
    """
    Create a difference image for every frame from 'start_frame' onward,
    using the inversion / pframe text files produced earlier.
    """

    # Pull the values this loop needs out of the shared context object.
    workspace = context.workspace
    differences_dir = context.differences_dir
    inversion_data_dir = context.inversion_data_dir
    pframe_data_dir = context.pframe_data_dir
    input_frames_dir = context.input_frames_dir
    frame_count = context.frame_count
    block_size = context.block_size
    extension_type = context.extension_type
    debug = context.debug

    temp_image = context.temp_image_folder + "tempimage.jpg"

    logger = logging.getLogger(__name__)
    logger.info((workspace, start_frame, frame_count, block_size))

    # For every frame in the video, create a difference_frame given the text files.
    for frame_number in range(start_frame, frame_count):
        # The difference for frame N is computed against frame N + 1.
        next_frame = Frame()
        next_frame.load_from_string_wait(f"{input_frames_dir}frame{frame_number + 1}{extension_type}")

        # Vectors needed to compute this iteration of difference making.
        difference_data = get_list_from_file(f"{inversion_data_dir}inversion_{frame_number}.txt")
        prediction_data = get_list_from_file(f"{pframe_data_dir}pframe_{frame_number}.txt")

        # Destination files for this iteration.
        debug_output_file = f"{workspace}debug/debug{frame_number + 1}{extension_type}"
        output_file = f"{differences_dir}output_{get_lexicon_value(6, frame_number)}.jpg"

        # Save to a temp folder so waifu2x-vulkan doesn't try reading it, then move it.
        out_image = make_difference_image(context, next_frame, difference_data, prediction_data)
        out_image.save_image_temp(output_file, temp_image)

        if debug == 1:
            debug_image(block_size, next_frame, prediction_data, difference_data, debug_output_file)
Esempio n. 6
0
    def run(self):
        """
        Compress every extracted frame into the 'static' and 'moving' quality
        variants used later for quality comparisons.
        """
        # start from 1 because ffmpeg's extracted frames starts from 1
        for x in range(self.start_frame, self.frame_count + 1):

            # stop if thread was killed
            if not self.alive:
                return

            # If the compressed frame already exists (e.g. when resuming a
            # previous session), skip it *before* blocking on the source
            # frame load — the original code loaded the frame first, wasting
            # a potentially long wait for a frame it would then discard.
            if os.path.exists(self.compressed_static_dir + "compressed_" + str(x) + ".jpg"):
                continue

            # loading files area
            frame = Frame()
            frame.load_from_string_wait(self.inputs_dir + "frame" + str(x) + self.extension_type, self.cancel_token)

            # stop if thread was killed while waiting on the load above
            if not self.alive:
                return

            frame.save_image_quality(self.compressed_static_dir + "compressed_" + str(x) + ".jpg",
                                     self.quality_minimum)
            frame.save_image_quality(self.compressed_moving_dir + "compressed_" + str(x) + ".jpg",
                                     int(self.quality_minimum * self.quality_moving_ratio))
Esempio n. 7
0
def merge_loop(context: Context):
    """
    Call the 'make_merge_image' method for every image that needs to be upscaled.

    This method is sort of the driver for that, and has tasks needed to keep merging running smoothly.

    This method became a bit messy due to optimization-hunting, but the most important calls of the loop can be read in
    the 'Loop-iteration Core' area.

    Method Tasks:

        - Read / Write files that are used by merge asynchronously.
        - Load the text files containing the vectors needed for 'make_merge_image'

    """

    # load variables from context
    workspace = context.workspace
    upscaled_dir = context.residual_upscaled_dir
    merged_dir = context.merged_dir
    residual_data_dir = context.residual_data_dir
    pframe_data_dir = context.pframe_data_dir
    correction_data_dir = context.correction_data_dir
    fade_data_dir = context.fade_data_dir
    frame_count = context.frame_count
    extension_type = context.extension_type
    logger = logging.getLogger(__name__)

    # # # ffmpeg piping stuff # # #

    # When pipe encoding is enabled, merged frames are streamed straight into
    # an ffmpeg subprocess instead of each being written to disk.
    ffmpeg_pipe_encoding = context.ffmpeg_pipe_encoding

    if ffmpeg_pipe_encoding:
        nosound_file = context.nosound_file
        frame_rate = str(context.frame_rate)
        input_file = context.input_file
        output_file = context.output_file
        ffmpeg_dir = context.ffmpeg_dir
        ffmpeg_pipe_encoding_type = context.ffmpeg_pipe_encoding_type

        # Map the configured intermediate image format onto the matching
        # ffmpeg vcodec and PIL save format.
        if ffmpeg_pipe_encoding_type in ["jpeg", "jpg"]:
            vcodec = "mjpeg"
            pipe_format = "JPEG"

        elif ffmpeg_pipe_encoding_type == "png":
            vcodec = "png"
            pipe_format = "PNG"

        else:
            print("  Error: no valid ffmpeg_pipe_encoding_type set. Using jpeg as default")
            vcodec = "mjpeg"
            pipe_format = "JPEG"

        print("\n    WARNING: EXPERIMENTAL FFMPEG PIPING IS ENABLED\n")

        # Encoder subprocess reading images from stdin ('-i -') at the video's
        # frame rate and writing the (still soundless) x264 output file.
        ffmpegpipe = subprocess.Popen([ffmpeg_dir, "-loglevel", "panic", '-y', '-f',
                                       'image2pipe', '-vcodec', vcodec, '-r', frame_rate,
                                       '-i', '-', '-vcodec', 'libx264', '-preset', 'medium',
                                       '-qscale', '5', '-crf', '17',
                                       '-vf', ' pp=hb/vb/dr/fq|32, deband=range=22:blur=false',
                                       '-r', frame_rate, nosound_file],
                                      stdin=subprocess.PIPE)

        # pipe the first merged image, as the main loop below starts at x = 1
        # and only ever pipes merged frame x + 1.
        wait_on_file(merged_dir + "merged_" + str(1) + extension_type)
        im = Image.open(merged_dir + "merged_" + str(1) + extension_type)

        # best jpeg quality since we won't be saving up disk space
        im.save(ffmpegpipe.stdin, format=pipe_format, quality=100)

    # # #  # # #  # # #  # # #

    # Load the genesis image + the first upscaled image.
    frame_previous = Frame()
    frame_previous.load_from_string_wait(merged_dir + "merged_" + str(1) + extension_type)

    f1 = Frame()
    f1.load_from_string_wait(upscaled_dir + "output_" + get_lexicon_value(6, 1) + ".png")

    # When upscaling every frame between start_frame to frame_count, there's obviously no x + 1 at frame_count - 1 .
    # So just make a small note not to load that image. Pretty much load images concurrently until we get to x - 1
    last_frame = False
    for x in range(1, frame_count):
        ###################################
        # Loop-iteration pre-requirements #
        ###################################

        # Check if we're at the last image
        if x == frame_count - 1:
            last_frame = True

        # Kick off loading the next upscaled image on a background thread
        # so it is (hopefully) ready by the end of this iteration.
        if not last_frame:
            background_frame_load = AsyncFrameRead(upscaled_dir + "output_" + get_lexicon_value(6, x + 1) + ".png")
            background_frame_load.start()

        #######################
        # Loop-iteration Core #
        #######################

        logger.info("Upscaling frame " + str(x))

        # Load the vector lists (motion prediction, residuals, corrections,
        # fades) that describe how to reassemble merged frame x + 1.
        prediction_data_list = get_list_from_file(pframe_data_dir + "pframe_" + str(x) + ".txt")
        residual_data_list = get_list_from_file(residual_data_dir + "residual_" + str(x) + ".txt")
        correction_data_list = get_list_from_file(correction_data_dir + "correction_" + str(x) + ".txt")
        fade_data_list = get_list_from_file(fade_data_dir + "fade_" + str(x) + ".txt")

        frame_next = make_merge_image(context, f1, frame_previous,
                                      prediction_data_list, residual_data_list,
                                      correction_data_list, fade_data_list)

        if not ffmpeg_pipe_encoding:  # ffmpeg piping is disabled, traditional way

            # Write the image in the background for the performance increase
            output_file_merged = workspace + "merged/merged_" + str(x + 1) + extension_type
            background_frame_write = AsyncFrameWrite(frame_next, output_file_merged)
            background_frame_write.start()

        else:  # ffmpeg piping is enabled

            # Write the image directly into the ffmpeg pipe.
            # NOTE(review): quality drops to 95 here versus 100 for the very
            # first frame above — presumably a deliberate size tradeoff; confirm.
            im = frame_next.get_pil_image()
            im.save(ffmpegpipe.stdin, format=pipe_format, quality=95)

        #######################################
        # Assign variables for next iteration #
        #######################################

        # Ensure the file is loaded for background_frame_load. If we're on the last frame, simply ignore this section
        # Because the frame_count + 1 does not exist.
        if not last_frame:
            while not background_frame_load.load_complete:
                wait_on_file(upscaled_dir + "output_" + get_lexicon_value(6, x + 1) + ".png")

            f1 = background_frame_load.loaded_image

        frame_previous = frame_next

    if ffmpeg_pipe_encoding:
        # Close stdin so ffmpeg sees EOF, then wait for encoding to finish.
        ffmpegpipe.stdin.close()
        ffmpegpipe.wait()

        # add the original file audio to the nosound file
        migrate_tracks(context, nosound_file, input_file, output_file)
Esempio n. 8
0
    def run(self):
        """
        Drive the merging process: for every frame, rebuild the full merged
        image from the upscaled residual plus the previous merged frame, and
        stream each result into the ffmpeg pipe.
        """

        self.pipe.start_pipe_thread()
        # Load the genesis image + the first upscaled image.
        frame_previous = Frame()
        frame_previous.load_from_string_wait(
            self.merged_dir + "merged_" + str(self.start_frame) +
            self.extension_type, self.cancel_token)

        # The genesis frame is piped out as-is; only later frames are merged.
        self.pipe.save(frame_previous)

        f1 = Frame()
        f1.load_from_string_wait(
            self.upscaled_dir + "output_" +
            get_lexicon_value(6, self.start_frame) + ".png", self.cancel_token)

        last_frame = False
        for x in range(self.start_frame, self.frame_count):

            ###################################
            # Loop-iteration pre-requirements #
            ###################################

            # Check if we're at the last image, which affects the behaviour of the loop.
            if x == self.frame_count - 1:
                last_frame = True

            # Pre-load the next iteration of the loop image ahead of time, if we're not on the last frame.
            if not last_frame:
                background_frame_load = AsyncFrameRead(
                    self.upscaled_dir + "output_" +
                    get_lexicon_value(6, x + 1) + ".png", self.cancel_token)
                background_frame_load.start()

            #######################
            # Loop-iteration Core #
            #######################

            # Load the needed vectors to create the merged image. The *_wait
            # variants block until each text file exists (or is cancelled).
            prediction_data_list = get_list_from_file_wait(
                self.pframe_data_dir + "pframe_" + str(x) + ".txt",
                self.cancel_token)
            residual_data_list = get_list_from_file_wait(
                self.residual_data_dir + "residual_" + str(x) + ".txt",
                self.cancel_token)
            correction_data_list = get_list_from_file_wait(
                self.correction_data_dir + "correction_" + str(x) + ".txt",
                self.cancel_token)
            fade_data_list = get_list_from_file_wait(
                self.fade_data_dir + "fade_" + str(x) + ".txt",
                self.cancel_token)

            # Bail out after the blocking waits if the thread was killed.
            if not self.alive:
                self.logger.info("Merge.py killed at frame " + str(x))
                return

            self.logger.info("Upscaling frame " + str(x))
            # Create the actual image itself.
            frame_next = self.make_merge_image(
                self.context, f1, frame_previous, prediction_data_list,
                residual_data_list, correction_data_list, fade_data_list)

            ###############
            # Saving Area #
            ###############

            # Directly write the image to the ffmpeg pipe line.
            self.pipe.save(frame_next)

            # Manually write the image if we're preserving frames (this is for enthusiasts / debugging).
            if self.preserve_frames:
                output_file = self.workspace + "merged/merged_" + str(
                    x + 1) + self.extension_type
                background_frame_write = AsyncFrameWrite(
                    frame_next, output_file)
                background_frame_write.start()

            #######################################
            # Assign variables for next iteration #
            #######################################

            # last_frame + 1 does not exist, so don't load.
            if not last_frame:
                # We need to wait until the next upscaled image exists before we move on.
                while not background_frame_load.load_complete:
                    wait_on_file(
                        self.upscaled_dir + "output_" +
                        get_lexicon_value(6, x + 1) + ".png",
                        self.cancel_token)

                f1 = background_frame_load.loaded_image

            frame_previous = frame_next

            # Signal to the rest of the dandere2x process we've finished upscaling frame 'x'.
            self.context.signal_merged_count = x

        # Flush the pipe and shut its worker thread down cleanly.
        self.pipe.wait_finish_stop_pipe()
Esempio n. 9
0
def residual_loop(context):
    """
    Call the 'make_residual_image' method for every image that needs to be made into a residual.

    Method Tasks:
        - Load and wait for the files needed to create a residual image.
        - Call 'make_residual_image' once the needed files exist
    """

    # Pull everything this loop needs out of the shared context object.
    workspace = context.workspace
    residual_upscaled_dir = context.residual_upscaled_dir
    residual_images_dir = context.residual_images_dir
    residual_data_dir = context.residual_data_dir
    pframe_data_dir = context.pframe_data_dir
    input_frames_dir = context.input_frames_dir
    frame_count = context.frame_count
    block_size = context.block_size
    extension_type = context.extension_type
    debug_dir = context.debug_dir
    debug = context.debug

    temp_image = context.temp_image_folder + "tempimage.jpg"

    logger = logging.getLogger(__name__)
    logger.info((workspace, 1, frame_count, block_size))

    # Create one residual frame per video frame, driven by the text files
    # produced by the motion-estimation stage.
    for frame_number in range(1, frame_count):
        # The residual for frame N is computed against frame N + 1.
        next_frame = Frame()
        next_frame.load_from_string_wait(f"{input_frames_dir}frame{frame_number + 1}{extension_type}")

        # Vectors required for this iteration of residual making.
        residual_data = get_list_from_file(f"{residual_data_dir}residual_{frame_number}.txt")
        prediction_data = get_list_from_file(f"{pframe_data_dir}pframe_{frame_number}.txt")

        # Destination files for this iteration.
        debug_output_file = f"{debug_dir}debug{frame_number + 1}{extension_type}"
        output_file = f"{residual_images_dir}output_{get_lexicon_value(6, frame_number)}.jpg"

        # Save to a temp folder so waifu2x-vulkan doesn't try reading it, then move it.
        out_image = make_residual_image(context, next_frame, residual_data, prediction_data)

        if out_image.get_res() == (1, 1):
            # A (1, 1) residual means frame_x and frame_x+1 are identical.
            # We still need to save an output image for the sake of having N
            # outputs for N inputs, but 1x1 images can slow whatever waifu2x
            # implementation down, so we 'cheat' and write a 'fake' 2x2
            # upscaled image directly, skipping waifu2x for this frame.
            out_image = Frame()
            out_image.create_new(2, 2)
            output_file = f"{residual_upscaled_dir}output_{get_lexicon_value(6, frame_number)}.png"
            out_image.save_image(output_file)
        else:
            # This image has things to upscale, continue normally.
            out_image.save_image_temp(output_file, temp_image)

        # With this change the wrappers must be modified to not try deleting
        # the non-existing residual file.

        if debug == 1:
            debug_image(block_size, next_frame, prediction_data, residual_data, debug_output_file)
Esempio n. 10
0
def merge_loop(context: Context):
    """
    Call the 'make_merge_image' method for every image that needs to be upscaled.

    This method is sort of the driver for that, and has tasks needed to keep merging running smoothly.

    Method Tasks:

        - Read / Write files that are used by merge asynchronously.
        - Load the text files containing the vectors needed for 'make_merge_image'
    """

    # Pull the values this loop needs out of the shared context object.
    workspace = context.workspace
    upscaled_dir = context.residual_upscaled_dir
    merged_dir = context.merged_dir
    residual_data_dir = context.residual_data_dir
    pframe_data_dir = context.pframe_data_dir
    correction_data_dir = context.correction_data_dir
    fade_data_dir = context.fade_data_dir
    frame_count = context.frame_count
    extension_type = context.extension_type
    logger = logging.getLogger(__name__)

    # Load the genesis image + the first upscaled image.
    frame_previous = Frame()
    frame_previous.load_from_string_wait(f"{merged_dir}merged_{1}{extension_type}")

    f1 = Frame()
    f1.load_from_string_wait(f"{upscaled_dir}output_{get_lexicon_value(6, 1)}.png")

    # There is no frame x + 1 once we reach frame_count - 1, so keep a flag
    # telling the loop not to pre-load past the end of the video.
    last_frame = False
    for x in range(1, frame_count):
        ###################################
        # Loop-iteration pre-requirements #
        ###################################

        # Check if we're at the last image.
        if x == frame_count - 1:
            last_frame = True

        # Kick off loading the next upscaled image on a background thread.
        if not last_frame:
            background_frame_load = AsyncFrameRead(f"{upscaled_dir}output_{get_lexicon_value(6, x + 1)}.png")
            background_frame_load.start()

        #######################
        # Loop-iteration Core #
        #######################

        logger.info(f"Upscaling frame {x}")

        # Vector lists (motion prediction, residuals, corrections, fades)
        # describing how to reassemble merged frame x + 1.
        prediction_data_list = get_list_from_file(f"{pframe_data_dir}pframe_{x}.txt")
        residual_data_list = get_list_from_file(f"{residual_data_dir}residual_{x}.txt")
        correction_data_list = get_list_from_file(f"{correction_data_dir}correction_{x}.txt")
        fade_data_list = get_list_from_file(f"{fade_data_dir}fade_{x}.txt")

        output_file = f"{workspace}merged/merged_{x + 1}{extension_type}"

        frame_next = make_merge_image(context, f1, frame_previous,
                                      prediction_data_list, residual_data_list,
                                      correction_data_list, fade_data_list)

        # Write the image in the background for the performance increase.
        background_frame_write = AsyncFrameWrite(frame_next, output_file)
        background_frame_write.start()

        #######################################
        # Assign variables for next iteration #
        #######################################

        # Ensure the pre-loaded file finished loading. On the last frame this
        # is skipped, because frame_count + 1 does not exist.
        if not last_frame:
            while not background_frame_load.load_complete:
                wait_on_file(f"{upscaled_dir}output_{get_lexicon_value(6, x + 1)}.png")

            f1 = background_frame_load.loaded_image

        frame_previous = frame_next
Esempio n. 11
0
    def run(self):
        """Create a residual image for every frame this thread is responsible for."""

        # For every frame in the video, create a residual_frame given the text files.
        for x in range(self.start_frame, self.frame_count):

            # Stop if the thread was killed.
            if not self.alive:
                return

            # The residual for frame x is computed against frame x + 1.
            f1 = Frame()
            f1.load_from_string_wait(
                f"{self.input_frames_dir}frame{x + 1}{self.extension_type}",
                self.cancel_token)

            # Load the lists needed to compute this iteration of residual making.
            residual_data = get_list_from_file_wait(
                f"{self.residual_data_dir}residual_{x}.txt", self.cancel_token)
            prediction_data = get_list_from_file_wait(
                f"{self.pframe_data_dir}pframe_{x}.txt", self.cancel_token)

            # Stop if the thread was killed while blocked on the waits above.
            if not self.alive:
                return

            # Destination files for this iteration.
            debug_output_file = f"{self.debug_dir}debug{x + 1}{self.extension_type}"
            output_file = f"{self.residual_images_dir}output_{get_lexicon_value(6, x)}.jpg"

            # Save to a temp folder so waifu2x-vulkan doesn't try reading it, then move it.
            out_image = self.make_residual_image(self.context, f1,
                                                 residual_data,
                                                 prediction_data)

            if out_image.get_res() == (1, 1):
                # A (1, 1) residual means frame_x and frame_x+1 are identical.
                # We still need to save an output image for the sake of having
                # N outputs for N inputs, but 1x1 images can slow whatever
                # waifu2x implementation down, so we 'cheat' and write a
                # 'fake' 2x2 upscaled image directly, skipping waifu2x.
                out_image = Frame()
                out_image.create_new(2, 2)
                output_file = f"{self.residual_upscaled_dir}output_{get_lexicon_value(6, x)}.png"
                out_image.save_image(output_file)
            else:
                # This image has things to upscale, continue normally.
                out_image.save_image_temp(output_file, self.temp_image)

            # With this change the wrappers must be modified to not try
            # deleting the non-existing residual file.

            if self.context.debug == 1:
                self.debug_image(self.block_size, f1, prediction_data,
                                 residual_data, debug_output_file)
Esempio n. 12
0
def merge_loop(context: Context, start_frame: int):
    """
    Merge every frame from 'start_frame' up to 'frame_count' back into a full
    image: load the vector text files for each frame, call 'make_merge_image',
    and write each result asynchronously, pre-loading the next upscaled image
    in a background thread.
    """

    # load variables from context
    workspace = context.workspace
    upscaled_dir = context.upscaled_dir
    merged_dir = context.merged_dir
    inversion_data_dir = context.inversion_data_dir
    pframe_data_dir = context.pframe_data_dir
    correction_data_dir = context.correction_data_dir
    fade_data_dir = context.fade_data_dir
    frame_count = context.frame_count
    extension_type = context.extension_type
    logger = logging.getLogger(__name__)

    # Load the genesis image + the first upscaled image.

    base = Frame()
    base.load_from_string_wait(merged_dir + "merged_" + str(start_frame) +
                               extension_type)

    # Bug fix: the first upscaled image must correspond to 'start_frame', not
    # a hard-coded frame 1 — the old `get_lexicon_value(6, 1)` merged against
    # the wrong residual whenever resuming from a later frame.
    f1 = Frame()
    f1.load_from_string_wait(upscaled_dir + "output_" +
                             get_lexicon_value(6, start_frame) + ".png")

    # When upscaling every frame between start_frame to frame_count, there's obviously no x + 1 at frame_count - 1 .
    # So just make a small note not to load that image. Pretty much load images concurrently until we get to x - 1
    last_frame = False
    for x in range(start_frame, frame_count):
        logger.info("Upscaling frame " + str(x))

        # Check if we're at the last image
        if x == frame_count - 1:
            last_frame = True

        # load the next image ahead of time on a background thread.
        if not last_frame:
            background_frame_load = AsyncFrameRead(upscaled_dir + "output_" +
                                                   get_lexicon_value(6, x + 1) +
                                                   ".png")
            background_frame_load.start()

        # load vectors needed to piece image back together
        prediction_data_list = get_list_from_file(pframe_data_dir + "pframe_" +
                                                  str(x) + ".txt")
        difference_data_list = get_list_from_file(inversion_data_dir +
                                                  "inversion_" + str(x) +
                                                  ".txt")
        correction_data_list = get_list_from_file(correction_data_dir +
                                                  "correction_" + str(x) +
                                                  ".txt")
        fade_data_list = get_list_from_file(fade_data_dir + "fade_" + str(x) +
                                            ".txt")

        output_file = workspace + "merged/merged_" + str(x + 1) + extension_type

        new_base = make_merge_image(context, f1, base, prediction_data_list,
                                    difference_data_list, correction_data_list,
                                    fade_data_list)

        # Write the image in the background for the performance increase
        background_frame_write = AsyncFrameWrite(new_base, output_file)
        background_frame_write.start()

        # Assign variables for next iteration.

        # Ensure the file is loaded for background_frame_load. If we're on the last frame, simply ignore this section
        # Because the frame_count + 1 does not exist.
        if not last_frame:
            while not background_frame_load.load_complete:
                wait_on_file(upscaled_dir + "output_" +
                             get_lexicon_value(6, x + 1) + ".png")

            f1 = background_frame_load.loaded_image

        base = new_base