Example #1
def fade_image(context, frame_base: Frame, list_correction: list):
    """
    Apply a flat scalar to the respective blocks in the image. See "fade.cpp" in dandere2x_cpp for more in depth
    documentation. Roughly

        frame_next = frame_next + scalar

    Although frame_residuals needs to also be transformed

    Method Tasks:
        - Load all the vectors and their scalars into a list
        - Apply the scalar to all the vectors in the image
    """

    # load context
    scale_factor = int(context.scale_factor)
    block_size = int(context.block_size)

    fade_data_size = 3

    for x in range(int(len(list_correction) / fade_data_size)):
        # load vector
        vector = FadeData(int(list_correction[x * fade_data_size + 0]),
                          int(list_correction[x * fade_data_size + 1]),
                          int(list_correction[x * fade_data_size + 2]))
        # apply vector
        frame_base.fade_block(vector.x * scale_factor, vector.y * scale_factor,
                              block_size * scale_factor, vector.scalar)

    # out_image.frame = np.clip(out_image.frame, 0, 255)

    return frame_base
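# A minimal parsing sketch for the flat fade list consumed above. The namedtuple here is a hypothetical
# stand-in for dandere2x's real FadeData class, used only to illustrate the (x, y, scalar) layout.
from collections import namedtuple

FadeDataSketch = namedtuple("FadeDataSketch", ["x", "y", "scalar"])

def parse_fade_list(list_correction: list) -> list:
    """Group a flat [x, y, scalar, x, y, scalar, ...] list into (x, y, scalar) triples."""
    fade_data_size = 3
    return [FadeDataSketch(int(list_correction[i * fade_data_size + 0]),
                           int(list_correction[i * fade_data_size + 1]),
                           int(list_correction[i * fade_data_size + 2]))
            for i in range(len(list_correction) // fade_data_size)]

# Example: two fade entries, one brightening a block by 12 and one darkening a block by 7.
print(parse_fade_list(["0", "0", "12", "1", "0", "-7"]))
# [FadeDataSketch(x=0, y=0, scalar=12), FadeDataSketch(x=1, y=0, scalar=-7)]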
Example #2
    def __init__(self, input_image: str, controller=Dandere2xController()):
        # calling superclass init
        threading.Thread.__init__(self, name="asyncframeread")
        self.input_image = input_image
        self.loaded_image = Frame()
        self.load_complete = False
        self.controller = controller
Example #3
def pframe_image(context: Dandere2xServiceContext,
                 frame_next: Frame, frame_previous: Frame, frame_residual: Frame,
                 list_residual: list, list_predictive: list):
    """
    Create a new image using residuals and predictive vectors.
    Roughly, we can describe this method as

        frame_next = Transform(frame_previous, list_predictive) + frame_residuals.

    Although frame_residuals needs to also be transformed.

    Method Tasks:
        - Move blocks from frame_previous into frame_next using list_predictive
        - Move blocks from frame_residual into frame_next using list_residuals
    """

    # load context
    scale_factor = int(context.service_request.scale_factor)
    block_size = context.service_request.block_size
    bleed = context.bleed

    for x in range(int(len(list_predictive) / 4)):
        """
        Neat optimization trick - there's no need for pframe to copy over a block if the vectors
        point to the same place. In merge.py we just need to load the previous frame into the current frame
        to reach this optimization.
        """

        if int(list_predictive[x * 4 + 0]) != int(list_predictive[x * 4 + 1]) \
                or \
                int(list_predictive[x * 4 + 2]) != int(list_predictive[x * 4 + 3]):
            # load the vector
            vector = DisplacementVector(int(list_predictive[x * 4 + 0]),
                                        int(list_predictive[x * 4 + 1]),
                                        int(list_predictive[x * 4 + 2]),
                                        int(list_predictive[x * 4 + 3]))

            # apply the vector
            frame_next.copy_block(frame_previous, block_size * scale_factor,
                                  vector.x_2 * scale_factor,
                                  vector.y_2 * scale_factor,
                                  vector.x_1 * scale_factor,
                                  vector.y_1 * scale_factor)

    for x in range(int(len(list_residual) / 4)):
        # load every element in the list into a vector
        vector = DisplacementVector(int(list_residual[x * 4 + 0]),
                                    int(list_residual[x * 4 + 1]),
                                    int(list_residual[x * 4 + 2]),
                                    int(list_residual[x * 4 + 3]))

        # apply that vector to the image
        frame_next.copy_block(frame_residual, block_size * scale_factor,
                              (vector.x_2 * (block_size + bleed * 2)) * scale_factor + (bleed * scale_factor),
                              (vector.y_2 * (block_size + bleed * 2)) * scale_factor + (bleed * scale_factor),
                              vector.x_1 * scale_factor,
                              vector.y_1 * scale_factor)

    return frame_next
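# Hedged sketch of what a single block copy like frame_next.copy_block(...) boils down to, assuming frames
# wrap numpy arrays indexed as [y, x, ...]. This is an illustration only, not dandere2x's Frame class, and
# the real copy_block may handle bounds checking, bleed, and argument order differently.
import numpy as np

def copy_block_sketch(dst: np.ndarray, src: np.ndarray, block_size: int,
                      src_x: int, src_y: int, dst_x: int, dst_y: int) -> None:
    """Copy a block_size x block_size block from (src_x, src_y) in src to (dst_x, dst_y) in dst."""
    dst[dst_y:dst_y + block_size, dst_x:dst_x + block_size] = \
        src[src_y:src_y + block_size, src_x:src_x + block_size]

# Example: move a 2x2 block from the top-left of the previous frame to (2, 2) in the next frame.
frame_previous_sketch = np.arange(16, dtype=np.uint8).reshape(4, 4)
frame_next_sketch = np.zeros((4, 4), dtype=np.uint8)
copy_block_sketch(frame_next_sketch, frame_previous_sketch, 2, 0, 0, 2, 2)
print(frame_next_sketch)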
Example #4
class AsyncFrameRead(threading.Thread):
    """
    Read an image asynchronously
    """
    def __init__(self, input_image: str, controller=Dandere2xController()):
        # calling superclass init
        threading.Thread.__init__(self, name="asyncframeread")
        self.input_image = input_image
        self.loaded_image = Frame()
        self.load_complete = False
        self.controller = controller

    def run(self):
        self.loaded_image.load_from_string_controller(self.input_image,
                                                      self.controller)
        self.load_complete = True
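# Hedged usage sketch for AsyncFrameRead: kick off the read, keep working, then poll load_complete
# (or join the thread) before touching loaded_image. The file path below is purely illustrative.
background_frame_load = AsyncFrameRead("workspace/residual_upscaled/output_000001.png")
background_frame_load.start()

# ... do other work while the image loads on the background thread ...

background_frame_load.join()            # alternatively, poll background_frame_load.load_complete
upscaled_frame = background_frame_load.loaded_image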
Example #5
    def verify_upscaling_works(self) -> None:
        """
        Verify the upscaler works by upscaling a very small frame, throwing a descriptive error if it doesn't.
        """
        test_file = self.context.service_request.workspace + "test_frame.jpg"
        test_file_upscaled = self.context.service_request.workspace + "test_frame_upscaled.jpg"

        test_frame = Frame()
        test_frame.create_new(2, 2)
        test_frame.save_image(test_file)

        self.log.info(
            "Attempting to upscale file %s into %s to ensure waifu2x is working..."
            % (test_file, test_file_upscaled))

        self.upscale_file(test_file, test_file_upscaled)

        if not file_exists(test_file_upscaled):
            self.log.error(
                "Your computer could not upscale a test image, which is required for dandere2x to work."
            )
            self.log.error(
                "This may be a hardware issue or a software issue - verify your computer is capable of upscaling "
                "images using the selected upscaler.")

            raise Exception("Your computer could not upscale the test file.")

        self.log.info(
            "Upscaling *seems* successful. Deleting files and continuing forward. "
        )

        os.remove(test_file)
        os.remove(test_file_upscaled)
Example #6
    @staticmethod
    def make_merge_image(context: Dandere2xServiceContext,
                         frame_residual: Frame, frame_previous: Frame,
                         list_predictive: list, list_residual: list,
                         list_corrections: list, list_fade: list):
        """
        This section can best be explained through pictures. A visual explanation of what 'merging'
        does can be found in this section of the wiki:

        https://github.com/aka-katto/dandere2x/wiki/How-Dandere2x-Works#part-2-using-observations-to-save-time

        Inputs:
            - frame(x)
            - frame(x+1)_residual
            - Residual vectors mapping frame(x+1)_residual -> frame(x+1)
            - Predictive vectors mapping frame(x) -> frame(x+1)

        Output:
            - frame(x+1)
        """
        out_image = Frame()
        out_image.create_new(frame_previous.width, frame_previous.height)

        # If list_predictive is empty, then the residual frame is simply the newly produced image.
        if not list_predictive:
            out_image.copy_image(frame_residual)
            return out_image
        """
        By copying the image first as the first step, all the predictive elements of the form (x,y) -> (x,y)
        are also copied. This allows us to ignore copying vectors (x,y) -> (x,y), which prevents redundant copying,
        thus saving valuable computational time.
        """
        out_image.copy_image(frame_previous)

        ###################
        # Plugins Section #
        ###################

        # Note: Run the residual plugins in the SAME order they were run in dandere2x_cpp, otherwise the output
        # will not be correct.
        out_image = pframe_image(context, out_image, frame_previous,
                                 frame_residual, list_residual,
                                 list_predictive)
        # out_image = fade_image(context, out_image, list_fade)
        # out_image = correct_image(context, out_image, list_corrections)

        return out_image
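# Conceptual sketch of the merge dataflow described in the docstring above, with plain numpy arrays
# standing in for Frame objects. This is an illustration of the idea only, not dandere2x's real pipeline:
# blocks that moved are re-used from frame(x), newly drawn blocks come from the upscaled residual image.
import numpy as np

def merge_sketch(frame_previous: np.ndarray, frame_residual: np.ndarray,
                 predictive_blocks: list, residual_blocks: list, block_size: int) -> np.ndarray:
    # Copying the previous frame first handles every (x, y) -> (x, y) vector for free.
    frame_next = frame_previous.copy()
    for (src_x, src_y, dst_x, dst_y) in predictive_blocks:      # blocks re-used from frame(x)
        frame_next[dst_y:dst_y + block_size, dst_x:dst_x + block_size] = \
            frame_previous[src_y:src_y + block_size, src_x:src_x + block_size]
    for (src_x, src_y, dst_x, dst_y) in residual_blocks:        # blocks drawn from the residual image
        frame_next[dst_y:dst_y + block_size, dst_x:dst_x + block_size] = \
            frame_residual[src_y:src_y + block_size, src_x:src_x + block_size]
    return frame_next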
Example #7
    @staticmethod
    def make_residual_image(context: Dandere2xServiceContext, raw_frame: Frame, list_residual: list,
                            list_predictive: list):
        """
        This section can best be explained through pictures. A visual explanation of what 'make_residual_image'
        does can be found in this section of the wiki:

        https://github.com/aka-katto/dandere2x/wiki/How-Dandere2x-Works#observation_3

        Inputs:
            - frame(x)
            - Residual vectors mapping frame(x)_residual -> frame(x)

        Output:
            - frame(x)_residual
        """

        # Some conditions to check before making a residual image. In both cases, no actual processing is
        # needed in this function call if the condition holds true.
        if not list_residual and list_predictive:
            """
            If 'list_residual' is empty but 'list_predictive' is not, the two frames are identical,
            so no residual image is needed.
            """
            residual_image = Frame()
            residual_image.create_new(1, 1)
            return residual_image

        if not list_residual and not list_predictive:
            """
            If there are neither predictive vectors nor residuals, then the frame is a brand-new frame with no
            resemblance to the previous frame. In this case, copy the entire frame over.
            """
            residual_image = Frame()
            residual_image.create_new(raw_frame.width, raw_frame.height)
            residual_image.copy_image(raw_frame)
            return residual_image

        buffer = 5
        block_size = context.service_request.block_size
        bleed = context.bleed
        """
        First make a 'bleeded' version of input_frame, as we need to create a buffer in the event the 'bleed'
        ends up going out of bounds. In other words, crop the image into an even larger image, so that if if we need
        to access out of bounds pixels, and place black pixels where it would be out of bounds. 
        """
        bleed_frame = raw_frame.create_bleeded_image(buffer)

        # size of output image is determined based off how many residuals there are
        image_size = int(math.sqrt(len(list_residual) / 4) + 1) * (block_size + bleed * 2)
        residual_image = Frame()
        residual_image.create_new(image_size, image_size)

        for x in range(int(len(list_residual) / 4)):
            # load every element in the list into a vector
            vector = DisplacementVector(int(list_residual[x * 4 + 0]),
                                        int(list_residual[x * 4 + 1]),
                                        int(list_residual[x * 4 + 2]),
                                        int(list_residual[x * 4 + 3]))

            # apply that vector to the image by copying over their respective blocks.
            residual_image.copy_block(bleed_frame, block_size + bleed * 2,
                                      vector.x_1 + buffer - bleed, vector.y_1 + buffer - bleed,
                                      vector.x_2 * (block_size + bleed * 2), vector.y_2 * (block_size + bleed * 2))

        return residual_image
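# Worked example of the residual-image sizing used above: N residual blocks are packed into a roughly
# square grid of (block_size + 2 * bleed)-sized cells. The values below are illustrative, not from a real run.
import math

block_size = 30
bleed = 1
list_residual = [0] * (4 * 7)                          # 7 residual vectors, 4 integers each

blocks = len(list_residual) // 4                       # 7 blocks to store
grid_side = int(math.sqrt(len(list_residual) / 4) + 1) # 3 cells per side (room for up to 9 blocks)
image_size = grid_side * (block_size + bleed * 2)      # 3 * 32 = 96 pixels per side
print(blocks, grid_side, image_size)                   # 7 3 96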
Example #8
    def run(self):
        self.log.info("Run called.")

        for x in range(1, self.con.frame_count):

            # Files needed to create a residual image
            f1 = Frame()
            f1.load_from_string_controller(self.con.input_frames_dir + "frame" + str(x + 1) + ".png",
                                           self.controller)
            # Load the necessary lists to compute this iteration of residual making
            residual_data = get_list_from_file_and_wait(self.con.residual_data_dir + "residual_" + str(x) + ".txt")

            prediction_data = get_list_from_file_and_wait(self.con.pframe_data_dir + "pframe_" + str(x) + ".txt")

            # Create the output files.
            debug_output_file = self.con.debug_dir + "debug" + str(x + 1) + ".png"
            output_file = self.con.residual_images_dir + "output_" + get_lexicon_value(6, x) + ".png"

            # Save to a temp folder so waifu2x-vulkan doesn't try reading it, then move it
            out_image = self.make_residual_image(self.con, f1, residual_data, prediction_data)

            if out_image.get_res() == (1, 1):
                """
                If out_image is (1,1) in size, then frame_x and frame_x+1 are identical.

                We still need to save an outimage for sake of having N output images for N input images, so we
                save these meaningless files anyways.

                However, these 1x1 can slow whatever waifu2x implementation down, so we 'cheat' d2x 
                but 'fake' upscaling them, so that they don't need to be processed by waifu2x.
                """

                # Location of the 'fake' upscaled image.
                out_image = Frame()
                out_image.create_new(2, 2)
                output_file = self.con.residual_upscaled_dir + "output_" + get_lexicon_value(6, x) + ".png"
                out_image.save_image(output_file)

            else:
                # This image has things to upscale, continue normally
                out_image.save_image_temp(out_location=output_file, temp_location=self.con.temp_image)

            # With this change, the wrappers must be modified to not try deleting the non-existent residual file
            if self.con.debug is True:
                self.debug_image(block_size=self.con.service_request.block_size, frame_base=f1,
                                 list_predictive=prediction_data, list_residuals=residual_data,
                                 output_location=debug_output_file)
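# The residual images above are named with get_lexicon_value(6, x). A plausible minimal equivalent is
# zero-padding the frame index to a fixed width; this is an assumption about the helper's behaviour,
# and the real dandere2x utility may differ in its details.
def get_lexicon_value_sketch(width: int, value: int) -> str:
    return str(value).zfill(width)

print("output_" + get_lexicon_value_sketch(6, 42) + ".png")    # output_000042.png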
Example #9
    @staticmethod
    def debug_image(block_size, frame_base, list_predictive, list_residuals, output_location):
        """
        Note:
            I haven't made an effort to maintain this method, as it's only for debugging.

        This section can best be explained through pictures. A visual explanation of what 'debug'
        does can be found in this section of the wiki:

        https://github.com/aka-katto/dandere2x/wiki/How-Dandere2x-Works#part-1-identifying-what-needs-to-be-drawn

        In other words, this method shows where residuals are, and is useful for finding good settings to use for a video.

        Inputs:
            - frame(x)
            - Residual vectors mapping frame(x)_residual -> frame(x)

        Output:
            - frame(x) minus frame(x)_residuals = debug_image
        """
        logger = logging.getLogger(__name__)

        difference_vectors = []
        predictive_vectors = []
        out_image = Frame()
        out_image.create_new(frame_base.width, frame_base.height)
        out_image.copy_image(frame_base)

        black_image = Frame()
        black_image.create_new(frame_base.width, frame_base.height)

        if not list_predictive and not list_residuals:
            out_image.save_image(output_location)
            return

        if list_predictive and not list_residuals:
            out_image.copy_image(frame_base)
            out_image.save_image(output_location)
            return

        # load list into vector displacements
        for x in range(int(len(list_residuals) / 4)):
            difference_vectors.append(DisplacementVector(int(list_residuals[x * 4]),
                                                         int(list_residuals[x * 4 + 1]),
                                                         int(list_residuals[x * 4 + 2]),
                                                         int(list_residuals[x * 4 + 3])))
        for x in range(int(len(list_predictive) / 4)):
            if (int(list_predictive[x * 4 + 0]) != int(list_predictive[x * 4 + 1])) and \
                    (int(list_predictive[x * 4 + 2]) != int(list_predictive[x * 4 + 3])):
                predictive_vectors.append(DisplacementVector(int(list_predictive[x * 4 + 0]),
                                                             int(list_predictive[x * 4 + 1]),
                                                             int(list_predictive[x * 4 + 2]),
                                                             int(list_predictive[x * 4 + 3])))

        # black out the blocks flagged as residuals (difference vectors) in the output image
        for vector in difference_vectors:
            out_image.copy_block(black_image, block_size,
                                 vector.x_1, vector.y_1,
                                 vector.x_1, vector.y_1)

        out_image.save_image(output_location)
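# Hedged sketch of what debug_image produces: the base frame with every block flagged as a residual
# blacked out, making it easy to see how much work the upscaler has to redo per frame. Toy numpy
# arrays stand in for Frame objects here; this is not dandere2x's actual implementation.
import numpy as np

frame_base_sketch = np.full((8, 8), 255, dtype=np.uint8)   # toy all-white 8x8 frame
block_size_sketch = 2
residual_block_positions = [(0, 0), (4, 2)]                # (x, y) of blocks flagged as residuals

debug_sketch = frame_base_sketch.copy()
for (bx, by) in residual_block_positions:
    debug_sketch[by:by + block_size_sketch, bx:bx + block_size_sketch] = 0   # black out the block
print(debug_sketch)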
Example #10
    def run(self):
        self.log.info("Started")
        self.pipe.start()

        # Load and pipe the genesis image before we start the for-loop procedure, since all the other
        # images will inductively build off this first frame.
        frame_previous = Frame()
        frame_previous.load_from_string_controller(
            self.context.merged_dir + "merged_" + str(1) + ".jpg",
            self.controller)
        self.pipe.save(frame_previous)

        current_upscaled_residuals = Frame()
        current_upscaled_residuals.load_from_string_controller(
            self.context.residual_upscaled_dir + "output_" +
            get_lexicon_value(6, 1) + ".png", self.controller)

        last_frame = False
        for x in range(1, self.context.frame_count):
            ########################################
            # Pre-loop logic checks and conditions #
            ########################################

            # Check if we're at the last image, which affects the behaviour of the loop.
            if x == self.context.frame_count - 1:
                last_frame = True

            # Pre-load the next iteration of the loop image ahead of time, if we're not on the last frame.
            if not last_frame:
                """ 
                By asynchronously loading frames ahead of time, this provides a small but meaningful
                boost in performance when spanned over N frames. There's some code over head but 
                it's well worth it. 
                """
                background_frame_load = AsyncFrameRead(
                    self.context.residual_upscaled_dir + "output_" +
                    get_lexicon_value(6, x + 1) + ".png", self.controller)
                background_frame_load.start()

            ######################
            # Core Logic of Loop #
            ######################

            # Load the needed vectors to create the merged image.

            prediction_data_list = get_list_from_file_and_wait(
                self.context.pframe_data_dir + "pframe_" + str(x) + ".txt")
            residual_data_list = get_list_from_file_and_wait(
                self.context.residual_data_dir + "residual_" + str(x) + ".txt")
            correction_data_list = get_list_from_file_and_wait(
                self.context.correction_data_dir + "correction_" + str(x) +
                ".txt")
            fade_data_list = get_list_from_file_and_wait(
                self.context.fade_data_dir + "fade_" + str(x) + ".txt")

            # Create the actual image itself.
            current_frame = self.make_merge_image(
                self.context, current_upscaled_residuals, frame_previous,
                prediction_data_list, residual_data_list, correction_data_list,
                fade_data_list)
            ###############
            # Saving Area #
            ###############
            # Directly write the image to the ffmpeg pipe line.
            self.pipe.save(current_frame)

            # Manually write the image if we're preserving frames (this is for enthusiasts / debugging).
            # if self.preserve_frames:
            # if True:
            #     output_file = self.context.merged_dir + "merged_" + str(x + 1) + ".jpg"
            #     background_frame_write = AsyncFrameWrite(current_frame, output_file)
            #     background_frame_write.start()

            #######################################
            # Assign variables for next iteration #
            #######################################
            if not last_frame:
                # We need to wait until the next upscaled image exists before we move on.
                while not background_frame_load.load_complete:
                    wait_on_file(self.context.residual_upscaled_dir +
                                 "output_" + get_lexicon_value(6, x + 1) +
                                 ".png")
            """
            Now that we're all done with the current frame, the current `current_frame` is now the frame_previous
            (with respect to the next iteration). We could obviously manually load frame_previous = Frame(n-1) each
            time, but this is an optimization that makes a substantial difference over N frames.
            """
            frame_previous = current_frame
            current_upscaled_residuals = background_frame_load.loaded_image
            self.controller.update_frame_count(x)

        self.pipe.kill()
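# The loop above overlaps disk I/O with merging: frame x+1 is read on a background thread while frame x
# is merged and piped out. Stripped of dandere2x specifics, the pattern looks roughly like this sketch
# (plain file reading stands in for Frame loading; the real classes and waiting logic differ).
import threading

class PrefetchSketch(threading.Thread):
    """Load one file on a background thread and stash the result."""

    def __init__(self, path: str):
        super().__init__(daemon=True)
        self.path = path
        self.result = None

    def run(self):
        with open(self.path, "rb") as f:
            self.result = f.read()

def process_all_sketch(paths: list, process) -> None:
    first = PrefetchSketch(paths[0])
    first.start()
    first.join()
    data = first.result

    for i in range(len(paths)):
        prefetch = None
        if i + 1 < len(paths):
            prefetch = PrefetchSketch(paths[i + 1])
            prefetch.start()                 # overlap the next read with the current work

        process(data)                        # stands in for make_merge_image + pipe.save

        if prefetch is not None:
            prefetch.join()                  # stands in for waiting on load_complete
            data = prefetch.result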