def next_frame(self):
        """
        Call and save the next frame.
        """

        # FFMPEG extracts images pretty fast depending on hardware, so in some cases, although
        # we've only instructed ffmpeg to extract N frames, N + k (for some k) may already be extracted.
        if file_exists(self.input_frames_dir + "frame" + str(self.count) + self.extension_type):
            self.count += 1
            return

        # Resume the thread in order to produce a new frame.
        self.pause_resume.resume()

        # Although the file may exist, there are niche conditions in which the file on disk is
        # not yet fully written and therefore not processable. Make sure the image is loadable
        # before suspending the ffmpeg thread.
        while not file_exists(self.input_frames_dir + "frame" + str(self.count) + self.extension_type):
            time.sleep(.00001)

        while file_is_empty(self.input_frames_dir + "frame" + str(self.count) + self.extension_type):
            time.sleep(.00001)

        # Make sure the image is actually loadable before stopping the ffmpeg thread by using
        # the wait variant of the load function.
        f = Frame()
        f.load_from_string_wait(self.input_frames_dir + "frame" + str(self.count) + self.extension_type)

        # Pause the thread.
        self.pause_resume.suspend()

        self.count += 1
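
# A minimal sketch of the "wait until the frame is fully written" polling above, assuming only
# the standard library. wait_for_complete_file is a hypothetical helper, not part of dandere2x;
# file_exists / file_is_empty used above are presumably thin wrappers over os.path.
import os
import time

def wait_for_complete_file(path: str, poll_interval: float = 0.00001) -> None:
    # Block until the file both exists and has a non-zero size on disk.
    while not os.path.isfile(path) or os.path.getsize(path) == 0:
        time.sleep(poll_interval)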
Example n. 2
def fade_image(context, frame_base: Frame, list_correction: list):
    """
    Apply a flat scalar to the respective blocks in the image. See "fade.cpp" in dandere2x_cpp for more in-depth
    documentation. Roughly,

        frame_next = frame_next + scalar

    applied to each block listed in list_correction.

    Method Tasks:
        - Load all the vectors and their scalars into a list
        - Apply the scalar to all the vectors in the image
    """

    # load context
    scale_factor = int(context.scale_factor)
    block_size = int(context.block_size)

    fade_data_size = 3

    for x in range(int(len(list_correction) / fade_data_size)):
        # load vector
        vector = FadeData(int(list_correction[x * fade_data_size + 0]),
                          int(list_correction[x * fade_data_size + 1]),
                          int(list_correction[x * fade_data_size + 2]))
        # apply vector
        frame_base.fade_block(vector.x * scale_factor, vector.y * scale_factor,
                              block_size * scale_factor, vector.scalar)

    # out_image.frame = np.clip(out_image.frame, 0, 255)

    return frame_base
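
# To make the indexing arithmetic above concrete: each fade entry occupies three consecutive
# slots, (x, y, scalar). The values below are hypothetical, and the string encoding is an
# assumption based on how get_list_from_file is used elsewhere in these examples.
list_correction_example = ["0", "0", "12",   # block at (0, 0) brightened by +12
                           "1", "0", "-7"]   # block at (1, 0) darkened by -7

# faded = fade_image(context, frame_base, list_correction_example)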
Example n. 3
 def __init__(self, input_image: str, cancel_token=CancellationToken()):
     # calling superclass init
     threading.Thread.__init__(self, name="asyncframeread")
     self.input_image = input_image
     self.loaded_image = Frame()
     self.load_complete = False
     self.cancel_token = cancel_token
Example n. 4
def make_merge_image(context: Context, frame_inversion: Frame,
                     frame_base: Frame, list_predictive: list,
                     list_differences: list, list_corrections: list,
                     list_fade: list):
    # Load context
    logger = logging.getLogger(__name__)

    out_image = Frame()
    out_image.create_new(frame_base.width, frame_base.height)

    # handle the two special cases, where the output image is either an entirely new frame or a
    # duplicate of the previous frame
    if not list_predictive and not list_differences:
        out_image.copy_image(frame_inversion)
        return out_image

    if list_predictive and not list_differences:
        out_image.copy_image(frame_base)
        return out_image

    # by copying the image first as the first step, all the predictive elements like
    # (0,0) -> (0,0) are also copied
    out_image.copy_image(frame_base)

    # run the image through the same plugins, IN THE SAME ORDER they were run in d2x_cpp
    out_image = pframe_image(context, out_image, frame_base, frame_inversion,
                             list_differences, list_predictive)
    out_image = fade_image(context, out_image, list_fade)
    out_image = correct_image(context, out_image, list_corrections)

    return out_image
Example n. 5
def verify_user_settings(context):
    from wrappers.frame.frame import Frame

    input_frames_dir = context.input_frames_dir
    extension_type = context.extension_type
    block_size = context.block_size

    f1 = Frame()
    f1.load_from_string(input_frames_dir + "frame1" + extension_type)

    valid = True

    # a block size is only valid if it evenly divides both dimensions
    if f1.width % block_size != 0 or f1.height % block_size != 0:
        print("----------------------ERROR---------------------------------------")
        print("Your block size is incompatible with the resolution you provided. ")
        print("Valid Block sizes are:")
        print("------------------------------------------------------------------")
        valid_sizes = []

        # any common divisor of the two dimensions is a valid block size
        larger_val = max(f1.width, f1.height)

        for x in range(1, larger_val + 1):
            if f1.width % x == 0 and f1.height % x == 0:
                valid_sizes.append(x)

        print(valid_sizes)
        print("------------------------------------------------------------------")
        new_block_size = int(input("Enter your value (recommended 25 or greater): "))

        while new_block_size not in valid_sizes:
            new_block_size = int(input("Invalid Choice! Re-Enter a correct value: "))

        context.block_size = new_block_size
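
# Equivalently, the valid sizes computed above are exactly the common divisors of width and
# height, i.e. the divisors of gcd(width, height). A sketch of that shortcut; valid_block_sizes
# is a hypothetical helper, not part of the codebase.
import math

def valid_block_sizes(width: int, height: int) -> list:
    g = math.gcd(width, height)
    return [x for x in range(1, g + 1) if g % x == 0]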
Example n. 6
 def __init__(self, input_image: str, controller=Controller()):
     # calling superclass init
     threading.Thread.__init__(self, name="asyncframeread")
     self.input_image = input_image
     self.loaded_image = Frame()
     self.load_complete = False
     self.controller = controller
Example n. 7
def pframe_image(context,
                 frame_next: Frame, frame_previous: Frame, frame_residual: Frame,
                 list_residual: list, list_predictive: list):
    """
    Create a new image using residuals and predictive vectors.
    Roughly, we can describe this method as

        frame_next = Transform(frame_previous, list_predictive) + frame_residual

    Note that frame_residual also needs to be transformed.

    Method Tasks:
        - Move blocks from frame_previous into frame_next using list_predictive
        - Move blocks from frame_residual into frame_next using list_residuals
    """

    # load context
    predictive_vectors = []
    residual_vectors = []
    scale_factor = int(context.scale_factor)
    block_size = context.block_size
    bleed = context.bleed

    # load lists into vector displacements
    for x in range(int(len(list_residual) / 4)):
        residual_vectors.append(DisplacementVector(int(list_residual[x * 4 + 0]),
                                                   int(list_residual[x * 4 + 1]),
                                                   int(list_residual[x * 4 + 2]),
                                                   int(list_residual[x * 4 + 3])))

    # Neat optimization trick - there's no need for pframe to copy over a block if the vector
    # points to the same place. In merge.py we just need to load the previous frame into the
    # current frame to benefit from this optimization.
    for x in range(int(len(list_predictive) / 4)):
        if int(list_predictive[x * 4 + 0]) != int(list_predictive[x * 4 + 1]) and \
                int(list_predictive[x * 4 + 2]) != int(list_predictive[x * 4 + 3]):
            predictive_vectors.append(DisplacementVector(int(list_predictive[x * 4 + 0]),
                                                         int(list_predictive[x * 4 + 1]),
                                                         int(list_predictive[x * 4 + 2]),
                                                         int(list_predictive[x * 4 + 3])))

    # copy over blocks from one image to the other using the vectors generated by dandere2x_cpp
    for vector in predictive_vectors:
        frame_next.copy_block(frame_previous, block_size * scale_factor,
                              vector.x_2 * scale_factor,
                              vector.y_2 * scale_factor,
                              vector.x_1 * scale_factor,
                              vector.y_1 * scale_factor)

    # copy over the inversion blocks (the difference image) into the new image using vectors generated by dandere2x_cpp
    for vector in residual_vectors:
        frame_next.copy_block(frame_residual, block_size * scale_factor,
                              (vector.x_2 * (block_size + bleed * 2)) * scale_factor + (bleed * scale_factor),
                              (vector.y_2 * (block_size + bleed * 2)) * scale_factor + (bleed * scale_factor),
                              vector.x_1 * scale_factor,
                              vector.y_1 * scale_factor)

    return frame_next
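
# Worked instance of the residual source-coordinate arithmetic above: residual blocks live on
# their own grid, each cell padded by 'bleed' pixels per side. The numbers are hypothetical,
# not taken from a real run.
block_size = 30
bleed = 1
scale_factor = 2
x_2 = 3  # the block's column index on the residual grid

# top-left x of that block inside the (upscaled) residual image:
src_x = (x_2 * (block_size + bleed * 2)) * scale_factor + (bleed * scale_factor)
assert src_x == 194  # (3 * 32) * 2 + 2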
Example n. 8
def pframe_image(context,
                 frame_next: Frame, frame_previous: Frame, frame_residual: Frame,
                 list_residual: list, list_predictive: list):
    """
    Create a new image using residuals and predictive vectors.
    Roughly, we can describe this method as

        frame_next = Transform(frame_previous, list_predictive) + frame_residual

    Note that frame_residual also needs to be transformed.

    Method Tasks:
        - Move blocks from frame_previous into frame_next using list_predictive
        - Move blocks from frame_residual into frame_next using list_residuals
    """

    # load context
    scale_factor = int(context.scale_factor)
    block_size = context.block_size
    bleed = context.bleed

    for x in range(int(len(list_predictive) / 4)):

        # Neat optimization trick - there's no need for pframe to copy over a block if the vectors
        # point to the same place. In merge.py we just need to load the previous frame into the current frame
        # to reach this optimization.
        if int(list_predictive[x * 4 + 0]) != int(list_predictive[x * 4 + 1]) \
                and \
                int(list_predictive[x * 4 + 2]) != int(list_predictive[x * 4 + 3]):
            # load the vector
            vector = DisplacementVector(int(list_predictive[x * 4 + 0]),
                                        int(list_predictive[x * 4 + 1]),
                                        int(list_predictive[x * 4 + 2]),
                                        int(list_predictive[x * 4 + 3]))

            # apply the vector
            frame_next.copy_block(frame_previous, block_size * scale_factor,
                                  vector.x_2 * scale_factor,
                                  vector.y_2 * scale_factor,
                                  vector.x_1 * scale_factor,
                                  vector.y_1 * scale_factor)

    for x in range(int(len(list_residual) / 4)):
        # load every element in the list into a vector
        vector = DisplacementVector(int(list_residual[x * 4 + 0]),
                                    int(list_residual[x * 4 + 1]),
                                    int(list_residual[x * 4 + 2]),
                                    int(list_residual[x * 4 + 3]))

        # apply that vector to the image
        frame_next.copy_block(frame_residual, block_size * scale_factor,
                              (vector.x_2 * (block_size + bleed * 2)) * scale_factor + (bleed * scale_factor),
                              (vector.y_2 * (block_size + bleed * 2)) * scale_factor + (bleed * scale_factor),
                              vector.x_1 * scale_factor,
                              vector.y_1 * scale_factor)

    return frame_next
Example n. 9
def main():
    block_size = 4
    scale_factor = 2

    frame_base = Frame()
    frame_base.load_from_string("C:\\Users\\windwoz\\Desktop\\image_research\\shelter\\merged2x.jpg")
    list_predictive = get_list_from_file("C:\\Users\\windwoz\\Desktop\\image_research\\shelter\\correction.txt")
    out_location = "C:\\Users\\windwoz\\Desktop\\image_research\\shelter\\new_correction.jpg"

    correct_image(block_size, scale_factor, frame_base, list_predictive, out_location)
Example n. 10
class AsyncFrameRead(threading.Thread):
    def __init__(self, input_image: str):
        # calling superclass init
        threading.Thread.__init__(self)
        self.input_image = input_image
        self.loaded_image = Frame()
        self.load_complete = False

    def run(self):
        self.loaded_image.load_from_string_wait(self.input_image)
        self.load_complete = True
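
# A minimal usage sketch (the path is hypothetical): start the read, do other work, then make
# sure the load finished before touching loaded_image.
reader = AsyncFrameRead("workspace/residual_upscaled/output_000001.png")
reader.start()

# ... other work here ...

reader.join()  # or poll reader.load_complete, as merge_loop does below
upscaled = reader.loaded_image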
Example n. 11
def residual_loop(context):
    """
    Call the 'make_residual_image' method for every image that needs to be made into a residual.

    Method Tasks:
        - Load and wait for the files needed to create a residual image.
        - Call 'make_residual_image' once the needed files exist
    """

    # load variables from context
    workspace = context.workspace
    residual_images_dir = context.residual_images_dir
    residual_data_dir = context.residual_data_dir
    pframe_data_dir = context.pframe_data_dir
    input_frames_dir = context.input_frames_dir
    frame_count = context.frame_count
    block_size = context.block_size
    extension_type = context.extension_type
    debug_dir = context.debug_dir
    debug = context.debug

    temp_image = context.temp_image_folder + "tempimage.jpg"

    logger = logging.getLogger(__name__)
    logger.info((workspace, 1, frame_count, block_size))

    # for every frame in the video, create a residual_frame given the text files.
    for x in range(1, frame_count):
        f1 = Frame()
        f1.load_from_string_wait(input_frames_dir + "frame" + str(x + 1) +
                                 extension_type)

        # Load the necessary lists to compute this iteration of residual making
        residual_data = get_list_from_file(residual_data_dir + "residual_" +
                                           str(x) + ".txt")
        prediction_data = get_list_from_file(pframe_data_dir + "pframe_" +
                                             str(x) + ".txt")

        # Create the output files.
        debug_output_file = debug_dir + "debug" + str(x + 1) + extension_type
        output_file = residual_images_dir + "output_" + get_lexicon_value(
            6, x) + ".jpg"

        # Save to a temp folder so waifu2x-vulkan doesn't try reading it, then move it
        out_image = make_residual_image(context, f1, residual_data,
                                        prediction_data)
        out_image.save_image_temp(output_file, temp_image)

        if debug == 1:
            debug_image(block_size, f1, prediction_data, residual_data,
                        debug_output_file)
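
# The residual filenames above rely on get_lexicon_value(6, x). Judging purely from its usage
# with fixed-width output names in these examples, it appears to produce a zero-padded decimal
# index; a sketch of that assumption (the real implementation may differ):
def get_lexicon_value_sketch(width: int, value: int) -> str:
    return str(value).zfill(width)

assert get_lexicon_value_sketch(6, 42) == "000042"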
Example n. 12
def correct_image(context, frame_base: Frame, list_correction: list):
    logger = logging.getLogger(__name__)

    # load context
    scale_factor = context.scale_factor

    predictive_vectors = []
    out_image = Frame()
    out_image.create_new(frame_base.width, frame_base.height)
    out_image.copy_image(frame_base)
    scale_factor = int(scale_factor)
    block_size = context.correction_block_size

    for x in range(int(len(list_correction) /
                       4)):  # / 4 because there are 4 data points per block
        predictive_vectors.append(
            DisplacementVector(int(list_correction[x * 4 + 0]),
                               int(list_correction[x * 4 + 1]),
                               int(list_correction[x * 4 + 2]),
                               int(list_correction[x * 4 + 3])))
    # copy over predictive vectors into new image
    for vector in predictive_vectors:
        out_image.copy_block(frame_base, block_size * scale_factor,
                             vector.x_2 * scale_factor,
                             vector.y_2 * scale_factor,
                             vector.x_1 * scale_factor,
                             vector.y_1 * scale_factor)

    return out_image
Example n. 13
def pframe_image(context, out_image: Frame, frame_base: Frame,
                 frame_inversion: Frame, list_differences: list,
                 list_predictive: list):
    # load context
    predictive_vectors = []
    difference_vectors = []
    scale_factor = int(context.scale_factor)
    block_size = context.block_size
    bleed = context.bleed

    # load lists into vector displacements
    for x in range(int(len(list_differences) / 4)):
        difference_vectors.append(
            DisplacementVector(int(list_differences[x * 4 + 0]),
                               int(list_differences[x * 4 + 1]),
                               int(list_differences[x * 4 + 2]),
                               int(list_differences[x * 4 + 3])))

    # Neat optimization trick - there's no need for pframe to copy over a block if the vector
    # points to the same place. In merge.py we just need to load the previous frame into the
    # current frame to benefit from this optimization.
    for x in range(int(len(list_predictive) / 4)):
        if int(list_predictive[x * 4 + 0]) != int(list_predictive[x * 4 + 1]) and \
                int(list_predictive[x * 4 + 2]) != int(list_predictive[x * 4 + 3]):
            predictive_vectors.append(
                DisplacementVector(int(list_predictive[x * 4 + 0]),
                                   int(list_predictive[x * 4 + 1]),
                                   int(list_predictive[x * 4 + 2]),
                                   int(list_predictive[x * 4 + 3])))

    # copy over blocks from one image to the other using the vectors generated by dandere2x_cpp
    for vector in predictive_vectors:
        out_image.copy_block(frame_base, block_size * scale_factor,
                             vector.x_2 * scale_factor,
                             vector.y_2 * scale_factor,
                             vector.x_1 * scale_factor,
                             vector.y_1 * scale_factor)

    # copy over the inversion blocks (the difference image) into the new image using vectors generated by dandere2x_cpp
    for vector in difference_vectors:
        out_image.copy_block(
            frame_inversion, block_size * scale_factor,
            (vector.x_2 *
             (block_size + bleed * 2)) * scale_factor + (bleed * scale_factor),
            (vector.y_2 *
             (block_size + bleed * 2)) * scale_factor + (bleed * scale_factor),
            vector.x_1 * scale_factor, vector.y_1 * scale_factor)

    return out_image
Example n. 14
class AsyncFrameRead(threading.Thread):
    """
    Read an image asynchronously
    """
    def __init__(self, input_image: str, controller=Controller()):
        # calling superclass init
        threading.Thread.__init__(self, name="asyncframeread")
        self.input_image = input_image
        self.loaded_image = Frame()
        self.load_complete = False
        self.controller = controller

    def run(self):
        self.loaded_image.load_from_string_controller(self.input_image,
                                                      self.controller)
        self.load_complete = True
Example n. 15
class AsyncFrameRead(threading.Thread):
    """
    Read an image asynchronously
    """
    def __init__(self, input_image: str, cancel_token=CancellationToken()):
        # calling superclass init
        threading.Thread.__init__(self, name="asyncframeread")
        self.input_image = input_image
        self.loaded_image = Frame()
        self.load_complete = False
        self.cancel_token = cancel_token

    def run(self):
        self.loaded_image.load_from_string_wait(self.input_image,
                                                self.cancel_token)
        self.load_complete = True
Example n. 16
def difference_loop(context, start_frame: int):
    # load variables from context
    workspace = context.workspace
    differences_dir = context.differences_dir
    inversion_data_dir = context.inversion_data_dir
    pframe_data_dir = context.pframe_data_dir
    input_frames_dir = context.input_frames_dir
    frame_count = context.frame_count
    block_size = context.block_size
    extension_type = context.extension_type
    debug = context.debug

    temp_image = context.temp_image_folder + "tempimage.jpg"

    logger = logging.getLogger(__name__)
    logger.info((workspace, start_frame, frame_count, block_size))

    # for every frame in the video, create a difference_frame given the text files.
    for x in range(start_frame, frame_count):
        f1 = Frame()
        f1.load_from_string_wait(input_frames_dir + "frame" + str(x + 1) +
                                 extension_type)

        # Load the necessary lists to compute this iteration of difference making
        difference_data = get_list_from_file(inversion_data_dir +
                                             "inversion_" + str(x) + ".txt")
        prediction_data = get_list_from_file(pframe_data_dir + "pframe_" +
                                             str(x) + ".txt")

        # Create the output files.
        debug_output_file = workspace + "debug/debug" + str(x +
                                                            1) + extension_type
        output_file = differences_dir + "output_" + get_lexicon_value(
            6, x) + ".jpg"

        # Save to a temp folder so waifu2x-vulkan doesn't try reading it, then move it
        out_image = make_difference_image(context, f1, difference_data,
                                          prediction_data)
        out_image.save_image_temp(output_file, temp_image)

        if debug == 1:
            debug_image(block_size, f1, prediction_data, difference_data,
                        debug_output_file)
Example n. 17
    def verify_upscaling_works(self) -> None:
        """
        Verify the upscaler works by upscaling a very small frame, raising a descriptive error if it doesn't.
        """
        test_file = self.context.workspace + "test_frame.jpg"
        test_file_upscaled = self.context.workspace + "test_frame_upscaled.jpg"

        test_frame = Frame()
        test_frame.create_new(2, 2)
        test_frame.save_image(test_file)

        self.log.info(
            "Attempting to upscale file %s into %s to ensure waifu2x is working..."
            % (test_file, test_file_upscaled))

        self.upscale_file(test_file, test_file_upscaled)

        if not file_exists(test_file_upscaled):
            self.log.error(
                "Your computer could not upscale a test image, which is required for dandere2x to work."
            )
            self.log.error(
                "This may be a hardware issue or a software issue - verify your computer is capable of upscaling "
                "images using the selected upscaler.")

            raise Exception("Your computer could not upscale the test file.")

        self.log.info(
            "Upscaling *seems* successful. Deleting files and continuing forward. "
        )

        os.remove(test_file)
        os.remove(test_file_upscaled)
Example n. 18
    def verify_upscaling_works(self) -> None:
        """
        Verify the upscaler works by upscaling a very small frame, raising a descriptive error if it doesn't.
        """
        test_file = self.context.workspace + "test_frame.jpg"
        test_file_upscaled = self.context.workspace + "test_frame_upscaled.jpg"

        test_frame = Frame()
        test_frame.create_new(2, 2)
        test_frame.save_image(test_file)

        self.upscale_file(test_file, test_file_upscaled)

        if not file_exists(test_file_upscaled):
            print(
                "Your computer could not upscale a test image, which is required for dandere2x to work."
            )
            print(
                "This may be a hardware issue or a software issue - verify your computer is capable of upscaling "
                "images using the selected upscaler.")

            raise Exception("Your computer could not upscale the test file.")

        os.remove(test_file)
        os.remove(test_file_upscaled)
Example n. 19
    def make_merge_image(context: Context, frame_residual: Frame,
                         frame_previous: Frame, list_predictive: list,
                         list_residual: list, list_corrections: list,
                         list_fade: list):
        """
        This section can best be explained through pictures. A visual explanation of what 'merging'
        does can be found in this section of the wiki:

        https://github.com/aka-katto/dandere2x/wiki/How-Dandere2x-Works#part-2-using-observations-to-save-time

        Inputs:
            - frame(x)
            - frame(x+1)_residual
            - Residual vectors mapping frame(x+1)_residual -> frame(x+1)
            - Predictive vectors mapping frame(x) -> frame(x+1)

        Output:
            - frame(x+1)
        """

        # Load context
        logger = logging.getLogger(__name__)

        out_image = Frame()
        out_image.create_new(frame_previous.width, frame_previous.height)

        # If list_predictive is empty, then the residual frame is simply the newly
        # produced image.
        if not list_predictive:
            out_image.copy_image(frame_residual)
            return out_image

        # By copying the image first as the first step, all the predictive elements of the form (x,y) -> (x,y)
        # are also copied. This allows us to ignore copying vectors (x,y) -> (x,y), which prevents redundant copying,
        # thus saving valuable computational time.
        out_image.copy_image(frame_previous)

        ###################
        # Plugins Section #
        ###################

        # Note: Run the plugins in the SAME order it was ran in dandere2x_cpp. If not, it won't work correctly.
        out_image = pframe_image(context, out_image, frame_previous,
                                 frame_residual, list_residual,
                                 list_predictive)
        out_image = fade_image(context, out_image, list_fade)
        out_image = correct_image(context, out_image, list_corrections)

        return out_image
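
# The induction the docstring describes, sketched as a loop over hypothetical in-memory lists:
# residuals[x] and vectors[x] stand in for the files the real driver reads (see merge_loop in
# example n. 29). merge_all is illustrative only, not part of the project's API.
def merge_all(context, genesis_frame, residuals, vectors, frame_count):
    frame = genesis_frame
    merged = [frame]
    for x in range(1, frame_count):
        predictive, residual_vecs, corrections, fades = vectors[x]
        frame = make_merge_image(context, residuals[x], frame,
                                 predictive, residual_vecs, corrections, fades)
        merged.append(frame)
    return merged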
Example n. 20
def fade_image(context, out_image: Frame, list_correction: list):
    # load context
    scale_factor = int(context.scale_factor)
    logger = logging.getLogger(__name__)

    fade_list = []
    block_size = int(context.block_size)

    fade_data_size = 3

    for x in range(int(len(list_correction) / fade_data_size)):
        fade_list.append(
            FadeData(int(list_correction[x * fade_data_size + 0]),
                     int(list_correction[x * fade_data_size + 1]),
                     int(list_correction[x * fade_data_size + 2])))

    # copy over predictive vectors into new image
    for vector in fade_list:
        out_image.fade_block(vector.x * scale_factor, vector.y * scale_factor,
                             block_size * scale_factor, vector.scalar)

    # out_image.frame = np.clip(out_image.frame, 0, 255)

    return out_image
Example n. 21
def make_merge_image(context: Context, frame_residual: Frame,
                     frame_previous: Frame, list_predictive: list,
                     list_residual: list, list_corrections: list,
                     list_fade: list):
    """
    This section can best be explained through pictures. A visual explanation of what 'merging'
    does can be found in this section of the wiki:

    https://github.com/aka-katto/dandere2x/wiki/How-Dandere2x-Works#part-2-using-observations-to-save-time

    Inputs:
        - frame(x)
        - frame(x+1)_residual
        - Residual vectors mapping frame(x+1)_residual -> frame(x+1)
        - Predictive vectors mapping frame(x) -> frame(x+1)

    Output:
        - frame(x+1)
    """

    # Load context
    logger = logging.getLogger(__name__)

    out_image = Frame()
    out_image.create_new(frame_previous.width, frame_previous.height)

    # If list_predictive and list_residual are both empty, then the residual frame
    # is simply the new image.
    if not list_predictive and not list_residual:
        out_image.copy_image(frame_residual)
        return out_image

    # by copying the image first as the first step, all the predictive elements like
    # (0,0) -> (0,0) are also copied
    out_image.copy_image(frame_previous)

    # run the image through the same plugins, IN THE SAME ORDER they were run in d2x_cpp
    out_image = pframe_image(context, out_image, frame_previous,
                             frame_residual, list_residual, list_predictive)
    out_image = fade_image(context, out_image, list_fade)
    out_image = correct_image(context, out_image, list_corrections)

    return out_image
Example n. 22
    def run(self):
        # start from 1 because ffmpeg's extracted frames start from 1
        for x in range(self.start_frame, self.frame_count + 1):

            # loading files area
            frame = Frame()
            frame.load_from_string_wait(self.inputs_dir + "frame" + str(x) + self.extension_type, self.cancel_token)

            # stop if thread was killed
            if not self.alive:
                return

            # if the compressed frame already exists, don't compress it
            if os.path.exists(self.compressed_static_dir + "compressed_" + str(x) + ".jpg"):
                continue

            frame.save_image_quality(self.compressed_static_dir + "compressed_" + str(x) + ".jpg",
                                     self.quality_minimum)
            frame.save_image_quality(self.compressed_moving_dir + "compressed_" + str(x) + ".jpg",
                                     int(self.quality_minimum * self.quality_moving_ratio))
Example n. 23
def compress_frames(context: Context):
    inputs_dir = context.input_frames_dir
    frame_count = context.frame_count
    quality_moving_ratio = context.quality_moving_ratio
    compressed_static_dir = context.compressed_static_dir
    compressed_moving_dir = context.compressed_moving_dir
    quality_minimum = context.quality_minimum
    extension_type = context.extension_type

    for x in range(1, frame_count + 1):
        if os.path.exists(compressed_static_dir + "compressed_" + str(x) +
                          ".jpg"):
            continue

        frame = Frame()
        frame.load_from_string(inputs_dir + "frame" + str(x) + extension_type)
        frame.save_image_quality(
            compressed_static_dir + "compressed_" + str(x) + ".jpg",
            quality_minimum)
        frame.save_image_quality(
            compressed_moving_dir + "compressed_" + str(x) + ".jpg",
            int(quality_minimum * quality_moving_ratio))
Example n. 24
def compress_frames(context: Context):
    """
    Use frame's save_image_quality function to save a series of compressed images, which are used in
    Dandere2x_Cpp as a loss function. This function on its own is a bit esoteric - I recommend reading
    the white paper to understand why we need to compress these frames.

    Input:
        - context

    Output:
        - All the images in 'input_frames' compressed into two different folders, each with their own
          level of compression.
    """

    inputs_dir = context.input_frames_dir
    frame_count = context.frame_count
    quality_moving_ratio = context.quality_moving_ratio
    compressed_static_dir = context.compressed_static_dir
    compressed_moving_dir = context.compressed_moving_dir
    quality_minimum = context.quality_minimum
    extension_type = context.extension_type

    # start from 1 because ffmpeg's extracted frames start from 1
    for x in range(1, frame_count + 1):

        # if the compressed frame already exists, don't compress it
        if os.path.exists(compressed_static_dir + "compressed_" + str(x) +
                          ".jpg"):
            continue

        frame = Frame()
        frame.load_from_string(inputs_dir + "frame" + str(x) + extension_type)
        frame.save_image_quality(
            compressed_static_dir + "compressed_" + str(x) + ".jpg",
            quality_minimum)
        frame.save_image_quality(
            compressed_moving_dir + "compressed_" + str(x) + ".jpg",
            int(quality_minimum * quality_moving_ratio))
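
# For intuition, a worked instance of the two quality levels computed above. The numbers are
# hypothetical; the real values come from the user's configuration.
quality_minimum = 85
quality_moving_ratio = 0.95

static_quality = quality_minimum                              # 85
moving_quality = int(quality_minimum * quality_moving_ratio)  # int(80.75) == 80
assert (static_quality, moving_quality) == (85, 80)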
Example n. 25
def correct_image(context, frame_base: Frame, list_correction: list):
    """
    Try to fix some residual artifacts by using the same image as a reference.

    Method Tasks:
        - Load all the vectors for blocks pointing to a block with lower MSE
        - Apply all the vectors to the image to produce a more 'correct' image
    """

    logger = logging.getLogger(__name__)

    # load context
    scale_factor = context.scale_factor

    out_image = Frame()
    out_image.create_new(frame_base.width, frame_base.height)
    out_image.copy_image(frame_base)
    scale_factor = int(scale_factor)
    block_size = context.correction_block_size

    for x in range(int(len(list_correction) /
                       4)):  # / 4 because there are 4 data points per block

        # load vector
        vector = DisplacementVector(int(list_correction[x * 4 + 0]),
                                    int(list_correction[x * 4 + 1]),
                                    int(list_correction[x * 4 + 2]),
                                    int(list_correction[x * 4 + 3]))

        # apply vector
        out_image.copy_block(frame_base, block_size * scale_factor,
                             vector.x_2 * scale_factor,
                             vector.y_2 * scale_factor,
                             vector.x_1 * scale_factor,
                             vector.y_1 * scale_factor)

    return out_image
Example n. 26
    def run(self):
        self.log.info("Run called.")

        for x in range(self.start_frame, self.frame_count):

            # Stop if thread is killed
            if not self.context.controller.is_alive():
                break

            # Files needed to create a residual image
            f1 = Frame()
            f1.load_from_string_controller(
                self.input_frames_dir + "frame" + str(x + 1) +
                self.extension_type, self.context.controller)
            # Load the necessary lists to compute this iteration of residual making
            residual_data = get_list_from_file_and_wait(
                self.residual_data_dir + "residual_" + str(x) + ".txt",
                self.context.controller)

            prediction_data = get_list_from_file_and_wait(
                self.pframe_data_dir + "pframe_" + str(x) + ".txt",
                self.context.controller)

            # stop if thread is killed
            if not self.context.controller.is_alive():
                break

            # Create the output files.
            debug_output_file = self.debug_dir + "debug" + str(
                x + 1) + self.extension_type
            output_file = self.residual_images_dir + "output_" + get_lexicon_value(
                6, x) + ".jpg"

            # Save to a temp folder so waifu2x-vulkan doesn't try reading it, then move it
            out_image = self.make_residual_image(self.context, f1,
                                                 residual_data,
                                                 prediction_data)

            if out_image.get_res() == (1, 1):
                """
                If out_image is (1,1) in size, then frame_x and frame_x+1 are identical.

                We still need to save an outimage for sake of having N output images for N input images, so we
                save these meaningless files anyways.

                However, these 1x1 can slow whatever waifu2x implementation down, so we 'cheat' d2x 
                but 'fake' upscaling them, so that they don't need to be processed by waifu2x.
                """

                # Location of the 'fake' upscaled image.
                out_image = Frame()
                out_image.create_new(2, 2)
                output_file = self.residual_upscaled_dir + "output_" + get_lexicon_value(
                    6, x) + ".png"
                out_image.save_image(output_file)

            else:
                # This image has things to upscale, continue normally
                out_image.save_image_temp(output_file, self.temp_image)

            # With this change, the wrappers must be modified to not try deleting the non-existent residual file
            if self.context.debug == 1:
                self.debug_image(self.block_size, f1, prediction_data,
                                 residual_data, debug_output_file)
Example n. 27
    def debug_image(block_size, frame_base, list_predictive, list_differences,
                    output_location):
        """
        Note:
            I haven't made an effort to maintain this method, as it's only for debugging.

        This section can best be explained through pictures. A visual explanation of what 'debug'
        does can be found in this section of the wiki:

        https://github.com/aka-katto/dandere2x/wiki/How-Dandere2x-Works#part-1-identifying-what-needs-to-be-drawn

        In other words, this method shows where residuals are, and is useful for finding good settings to use for a video.

        Inputs:
            - frame(x)
            - Residual vectors mapping frame(x)_residual -> frame(x)

        Output:
            - frame(x) minus frame(x)_residuals = debug_image
        """
        logger = logging.getLogger(__name__)

        difference_vectors = []
        predictive_vectors = []
        out_image = Frame()
        out_image.create_new(frame_base.width, frame_base.height)
        out_image.copy_image(frame_base)

        black_image = Frame()
        black_image.create_new(frame_base.width, frame_base.height)

        if not list_predictive and not list_differences:
            out_image.save_image(output_location)
            return

        if list_predictive and not list_differences:
            out_image.copy_image(frame_base)
            out_image.save_image(output_location)
            return

        # load list into vector displacements
        for x in range(int(len(list_differences) / 4)):
            difference_vectors.append(
                DisplacementVector(int(list_differences[x * 4]),
                                   int(list_differences[x * 4 + 1]),
                                   int(list_differences[x * 4 + 2]),
                                   int(list_differences[x * 4 + 3])))
        for x in range(int(len(list_predictive) / 4)):
            if (int(list_predictive[x * 4 + 0]) != int(list_predictive[x * 4 + 1])) and \
                    (int(list_predictive[x * 4 + 2]) != int(list_predictive[x * 4 + 3])):
                predictive_vectors.append(
                    DisplacementVector(int(list_predictive[x * 4 + 0]),
                                       int(list_predictive[x * 4 + 1]),
                                       int(list_predictive[x * 4 + 2]),
                                       int(list_predictive[x * 4 + 3])))

        # black out the blocks that contain residuals by copying from the black image
        for vector in difference_vectors:
            out_image.copy_block(black_image, block_size, vector.x_1,
                                 vector.y_1, vector.x_1, vector.y_1)

        out_image.save_image_quality(output_location, 25)
Example n. 28
    def run(self):
        self.log.info("Started")
        self.pipe.start()

        # Load and pipe the 'genesis' image before we start the for-loop procedure, since all
        # the other images will inductively build off this first frame.
        frame_previous = Frame()
        frame_previous.load_from_string_controller(
            self.merged_dir + "merged_" + str(self.start_frame) +
            self.extension_type, self.context.controller)
        self.pipe.save(frame_previous)

        current_upscaled_residuals = Frame()
        current_upscaled_residuals.load_from_string_controller(
            self.upscaled_dir + "output_" +
            get_lexicon_value(6, self.start_frame) + ".png",
            self.context.controller)

        last_frame = False
        for x in range(self.start_frame, self.frame_count):
            ########################################
            # Pre-loop logic checks and conditions #
            ########################################

            # Check if we're at the last image, which affects the behaviour of the loop.
            if x == self.frame_count - 1:
                last_frame = True

            # Pre-load the next iteration of the loop image ahead of time, if we're not on the last frame.
            if not last_frame:
                """ 
                By asynchronously loading frames ahead of time, this provides a small but meaningful
                boost in performance when spanned over N frames. There's some code overhead, but
                it's well worth it.
                """
                background_frame_load = AsyncFrameRead(
                    self.upscaled_dir + "output_" +
                    get_lexicon_value(6, x + 1) + ".png",
                    self.context.controller)
                background_frame_load.start()

            ######################
            # Core Logic of Loop #
            ######################

            # Load the needed vectors to create the merged image.

            prediction_data_list = get_list_from_file_and_wait(
                self.pframe_data_dir + "pframe_" + str(x) + ".txt",
                self.context.controller)
            residual_data_list = get_list_from_file_and_wait(
                self.residual_data_dir + "residual_" + str(x) + ".txt",
                self.context.controller)
            correction_data_list = get_list_from_file_and_wait(
                self.correction_data_dir + "correction_" + str(x) + ".txt",
                self.context.controller)
            fade_data_list = get_list_from_file_and_wait(
                self.fade_data_dir + "fade_" + str(x) + ".txt",
                self.context.controller)

            if not self.context.controller.is_alive():
                self.log.info(" Merge thread killed at frame %s " % str(x))
                break

            # Create the actual image itself.
            current_frame = self.make_merge_image(
                self.context, current_upscaled_residuals, frame_previous,
                prediction_data_list, residual_data_list, correction_data_list,
                fade_data_list)
            ###############
            # Saving Area #
            ###############
            # Directly write the image to the ffmpeg pipe line.
            self.pipe.save(current_frame)

            # Manually write the image if we're preserving frames (this is for enthusiasts / debugging).
            if self.preserve_frames:
                output_file = self.workspace + "merged/merged_" + str(
                    x + 1) + self.extension_type
                background_frame_write = AsyncFrameWrite(
                    current_frame, output_file)
                background_frame_write.start()

            #######################################
            # Assign variables for next iteration #
            #######################################
            if not last_frame:
                # We need to wait until the next upscaled image exists before we move on.
                while not background_frame_load.load_complete:
                    wait_on_file(
                        self.upscaled_dir + "output_" +
                        get_lexicon_value(6, x + 1) + ".png",
                        self.context.controller)
            """
            Now that we're all done with the current frame, the current `current_frame` is now the frame_previous
            (with respect to the next iteration). We could obviously manually load frame_previous = Frame(n-1) each
            time, but this is an optimization that makes a substantial difference over N frames.
            """
            frame_previous = current_frame
            current_upscaled_residuals = background_frame_load.loaded_image
            self.context.controller.update_frame_count(x)

        self.pipe.kill()
Example n. 29
def merge_loop(context: Context):
    """
    Call the 'make_merge_image' method for every image that needs to be upscaled.

    This method is sort of the driver for that, and has tasks needed to keep merging running smoothly.

    This method became a bit messy due to optimization-hunting, but the most important calls of the loop can be read in
    the 'Loop-iteration Core' area.

    Method Tasks:

        - Read / Write files that are used by merge asynchronously.
        - Load the text files containing the vectors needed for 'make_merge_image'

    """

    # load variables from context
    workspace = context.workspace
    upscaled_dir = context.residual_upscaled_dir
    merged_dir = context.merged_dir
    residual_data_dir = context.residual_data_dir
    pframe_data_dir = context.pframe_data_dir
    correction_data_dir = context.correction_data_dir
    fade_data_dir = context.fade_data_dir
    frame_count = context.frame_count
    extension_type = context.extension_type
    logger = logging.getLogger(__name__)

    # # # ffmpeg piping stuff # # #

    ffmpeg_pipe_encoding = context.ffmpeg_pipe_encoding

    if ffmpeg_pipe_encoding:
        nosound_file = context.nosound_file
        frame_rate = str(context.frame_rate)
        input_file = context.input_file
        output_file = context.output_file
        ffmpeg_dir = context.ffmpeg_dir
        ffmpeg_pipe_encoding_type = context.ffmpeg_pipe_encoding_type

        if ffmpeg_pipe_encoding_type in ["jpeg", "jpg"]:
            vcodec = "mjpeg"
            pipe_format = "JPEG"

        elif ffmpeg_pipe_encoding_type == "png":
            vcodec = "png"
            pipe_format = "PNG"

        else:
            print("  Error: no valid ffmpeg_pipe_encoding_type set. Using jpeg as default")
            vcodec = "mjpeg"
            pipe_format = "JPEG"

        print("\n    WARNING: EXPERIMENTAL FFMPEG PIPING IS ENABLED\n")

        ffmpegpipe = subprocess.Popen([ffmpeg_dir, "-loglevel", "panic", '-y', '-f',
                                       'image2pipe', '-vcodec', vcodec, '-r', frame_rate,
                                       '-i', '-', '-vcodec', 'libx264', '-preset', 'medium',
                                       '-qscale', '5', '-crf', '17',
                                       '-vf', ' pp=hb/vb/dr/fq|32, deband=range=22:blur=false',
                                       '-r', frame_rate, nosound_file],
                                      stdin=subprocess.PIPE)

        # pipe the first merged image as it will not be done afterwards
        wait_on_file(merged_dir + "merged_" + str(1) + extension_type)
        im = Image.open(merged_dir + "merged_" + str(1) + extension_type)

        # best jpeg quality, since we aren't trying to save disk space here
        im.save(ffmpegpipe.stdin, format=pipe_format, quality=100)

    # # #  # # #  # # #  # # #

    # Load the genesis image + the first upscaled image.
    frame_previous = Frame()
    frame_previous.load_from_string_wait(merged_dir + "merged_" + str(1) + extension_type)

    f1 = Frame()
    f1.load_from_string_wait(upscaled_dir + "output_" + get_lexicon_value(6, 1) + ".png")

    # When upscaling every frame from start_frame to frame_count, there's no frame x + 1 at
    # x = frame_count - 1, so keep a flag to avoid pre-loading an image that doesn't exist.
    last_frame = False
    for x in range(1, frame_count):
        ###################################
        # Loop-iteration pre-requirements #
        ###################################

        # Check if we're at the last image
        if x == frame_count - 1:
            last_frame = True

        # load the next image ahead of time.
        if not last_frame:
            background_frame_load = AsyncFrameRead(upscaled_dir + "output_" + get_lexicon_value(6, x + 1) + ".png")
            background_frame_load.start()

        #######################
        # Loop-iteration Core #
        #######################

        logger.info("Upscaling frame " + str(x))

        prediction_data_list = get_list_from_file(pframe_data_dir + "pframe_" + str(x) + ".txt")
        residual_data_list = get_list_from_file(residual_data_dir + "residual_" + str(x) + ".txt")
        correction_data_list = get_list_from_file(correction_data_dir + "correction_" + str(x) + ".txt")
        fade_data_list = get_list_from_file(fade_data_dir + "fade_" + str(x) + ".txt")

        frame_next = make_merge_image(context, f1, frame_previous,
                                      prediction_data_list, residual_data_list,
                                      correction_data_list, fade_data_list)

        if not ffmpeg_pipe_encoding:  # ffmpeg piping is disabled, traditional way

            # Write the image in the background for the performance increase
            output_file_merged = workspace + "merged/merged_" + str(x + 1) + extension_type
            background_frame_write = AsyncFrameWrite(frame_next, output_file_merged)
            background_frame_write.start()

        else:  # ffmpeg piping is enabled

            # Write the image directly into ffmpeg pipe
            im = frame_next.get_pil_image()
            im.save(ffmpegpipe.stdin, format=pipe_format, quality=95)

        #######################################
        # Assign variables for next iteration #
        #######################################

        if not last_frame:
            while not background_frame_load.load_complete:
                wait_on_file(upscaled_dir + "output_" + get_lexicon_value(6, x + 1) + ".png")

            f1 = background_frame_load.loaded_image

        frame_previous = frame_next

        # Note: the background pre-load above is skipped on the last frame, because frame
        # frame_count + 1 does not exist.

    if ffmpeg_pipe_encoding:
        ffmpegpipe.stdin.close()
        ffmpegpipe.wait()

        # add the original file audio to the nosound file
        migrate_tracks(context, nosound_file, input_file, output_file)
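
# Stripped-down sketch of the experimental image2pipe branch above: frames never touch disk,
# they stream straight into an ffmpeg process as JPEGs. Paths and frame rate are hypothetical;
# the flags mirror the ones used above, minus the filtering.
import subprocess
from PIL import Image

pipe = subprocess.Popen(["ffmpeg", "-y", "-f", "image2pipe", "-vcodec", "mjpeg",
                         "-r", "24", "-i", "-",
                         "-vcodec", "libx264", "-r", "24", "nosound.mp4"],
                        stdin=subprocess.PIPE)

for path in ["merged_1.jpg", "merged_2.jpg"]:  # hypothetical frame files
    Image.open(path).save(pipe.stdin, format="JPEG", quality=95)

pipe.stdin.close()
pipe.wait()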
Example n. 30
#
# from dandere2x import Dandere2x
# import json
# from context import Context
#
# start = time.time()
#
# # resume only works if
#
# with open("dandere2x_win32.json", "r") as read_file:
#     config_json = json.load(read_file)
#
# context = Context(config_json)
#
# d = Dandere2x(context)
# d.resume_concurrent()
#
# end = time.time()
#
# print("\n duration: " + str(time.time() - start))
#
#

from wrappers.frame.frame import Frame

f = Frame()

f.load_from_string(
    "C:\\Users\\windwoz\\Documents\\github_projects\\src\\workspace\\workspace1\\inputs\\frame1.jpg"
)
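
# Once loaded, a Frame exposes width and height (the verify_user_settings example above relies
# on these), so a quick sanity check might look like:
print("resolution: %sx%s" % (f.width, f.height))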