Exemple #1
0
    def resume_run(self):
        """
        Resume a previously interrupted dandere2x_cpp session.

        Scans pframe_data backwards for the newest vector file already
        produced, deletes the (possibly partial) most recent outputs, then
        re-launches dandere2x_cpp in resume mode from one frame earlier.
        """
        logger = logging.getLogger(__name__)
        last_found = int(self.frame_count)

        # count how many vector files have already been produced by dandere2xcpp
        logger.info("looking for previous frames...")
        while last_found > 0:
            pframe_file = os.path.join(self.workspace, "pframe_data",
                                       "pframe_" + str(last_found) + ".txt")
            if os.path.isfile(pframe_file):
                break
            last_found -= 1

        # Delete the most recent files produced. Not all 3 files may exist (we
        # only know the pframe_data exists); there are cases where inversion
        # data or the difference image didn't save, so each delete is
        # tolerated independently (the original single try skipped the
        # remaining deletes after the first miss).
        partial_outputs = (
            os.path.join(self.workspace, "pframe_data",
                         "pframe_" + str(last_found) + ".txt"),
            os.path.join(self.workspace, "inversion_data",
                         "inversion_" + str(last_found) + ".txt"),
            self.differences_dir + "output_" +
            get_lexicon_value(6, last_found) + ".png",
        )
        for partial in partial_outputs:
            try:
                os.remove(partial)
            except FileNotFoundError:
                pass

        # start one lower because we deleted the file(s). All 3 of these
        # files are expected to exist, so failures here should surface.
        last_found -= 1
        logger.info("Resuming at p_frame # " + str(last_found))

        os.remove(os.path.join(self.workspace, "pframe_data",
                               "pframe_" + str(last_found) + ".txt"))
        os.remove(os.path.join(self.workspace, "inversion_data",
                               "inversion_" + str(last_found) + ".txt"))
        os.remove(self.differences_dir + "output_" +
                  get_lexicon_value(6, last_found) + ".png")

        # 'exec' is a builtin; use a distinct name for the command list.
        exec_command = [
            self.dandere2x_cpp_dir, self.workspace,
            str(self.frame_count),
            str(self.block_size),
            str(self.step_size), "r",
            str(last_found), self.extension_type
        ]

        logger.info(exec_command)
        # NOTE(review): CREATE_NEW_CONSOLE is Windows-only.
        return_val = subprocess.run(
            exec_command,
            creationflags=subprocess.CREATE_NEW_CONSOLE).returncode

        if return_val == 0:
            logger.info("d2xcpp finished correctly")
        else:
            logger.info("d2xcpp ended unexpectedly")
    def run(self):
        """
        Upscale every residual image as it appears in differences_dir,
        writing each result into upscaled_dir.

        Blocks on each residual file until dandere2x has produced it.
        """
        # NOTE: the original assigned an unused logger and an unused copy of
        # self.waifu2x_vulkan_upscale_frame (shadowing the builtin 'exec');
        # both removed.
        differences_dir = self.context.differences_dir
        upscaled_dir = self.context.upscaled_dir

        for frame in range(1, self.frame_count):
            residual = (differences_dir + "output_" +
                        get_lexicon_value(6, frame) + ".jpg")
            upscaled = (upscaled_dir + "output_" +
                        get_lexicon_value(6, frame) + ".png")

            # Block until dandere2x has produced this frame's residual.
            wait_on_file(residual)
            self.upscale_file(residual, upscaled)
Exemple #3
0
    def run(self):
        """
        Upscale every image that will *eventually* appear in the residuals_dir folder by waifu2x.

        For each frame, wait until either the residual exists (needs
        upscaling) or the upscaled output already exists (skip it).
        """
        # NOTE: the original assigned an unused logger and an unused copy of
        # self.waifu2x_vulkan_legacy_upscale_frame (shadowing the builtin
        # 'exec'); both removed.
        differences_dir = self.context.residual_images_dir
        upscaled_dir = self.context.residual_upscaled_dir

        for frame in range(1, self.frame_count):
            residual = (differences_dir + "output_" +
                        get_lexicon_value(6, frame) + ".jpg")
            upscaled = (upscaled_dir + "output_" +
                        get_lexicon_value(6, frame) + ".png")

            wait_on_either_file(residual, upscaled)

            # Only upscale when the residual exists and the output doesn't;
            # if the upscaled image is already there, nothing to do.
            if file_exists(residual) and not file_exists(upscaled):
                self.upscale_file(residual, upscaled)
    def __fix_waifu2x_converter_cpp_names(self):
        """
        Rename waifu2x-converter-cpp (legacy) outputs to the plain
        "output_XXXXXX.png" naming that dandere2x expects. Later converter
        versions reportedly fix this on their own, hence the TODO.
        """
        # The suffix the legacy converter appends is invariant per session.
        suffix = ('_[NS-L' + str(self.noise_level) + '][x' +
                  str(self.scale_factor) + '.000000]' + ".png")

        for frame in range(1, self.frame_count):
            base = (self.residual_upscaled_dir + "output_" +
                    get_lexicon_value(6, frame))
            dirty_name = base + suffix
            clean_name = base + ".png"

            wait_on_either_file_controller(clean_name, dirty_name,
                                           self.controller)

            # Keep retrying the rename while the converter still holds the
            # file handle open (surfaces as PermissionError on Windows).
            while file_exists(dirty_name) and not file_exists(clean_name):
                try:
                    rename_file(dirty_name, clean_name)
                except PermissionError:
                    pass
    def fix_names_all(self):
        """
        Waifu2x-ncnn-vulkan accepts "file.jpg" and writes "file.jpg.png";
        dandere2x expects "file.png", so rename each output as it lands.

        Comments:

        - The retry loop exists because a file can exist on disk while
          waifu2x-ncnn-vulkan still holds its handle (it hasn't released it
          yet); renaming then raises PermissionError until it is released.
        """
        for frame in range(1, self.frame_count):
            base = (self.residual_upscaled_dir + "output_" +
                    get_lexicon_value(6, frame))
            dirty_name = base + ".jpg.png"
            clean_name = base + ".png"

            wait_on_either_file(clean_name, dirty_name)

            # Already renamed (e.g. by a previous session) — nothing to do.
            if file_exists(clean_name):
                continue

            while file_exists(dirty_name):
                try:
                    rename_file(dirty_name, clean_name)
                except PermissionError:
                    pass
Exemple #6
0
def print_status(context: Context):
    """
    Print a live progress line while frames are being upscaled.

    For each frame, waits for its upscaled output to appear on disk and
    reports percent complete plus the average wait time of the last 10
    frames.
    """
    workspace = context.workspace
    frame_count = context.frame_count

    # Rolling window of per-frame wait times, seeded with 0 so the first
    # average is defined before any frame has completed.
    last_10 = [0]

    for x in range(1, frame_count):
        percent = int((x / frame_count) * 100)
        average = round(sum(last_10) / len(last_10), 2)

        sys.stdout.write('\r')
        sys.stdout.write("Frame: [%s] %i%%    Average of Last 10 Frames: %s sec / frame" % (x, percent, average))
        sys.stdout.flush()  # '\r'-style updates may not show without a flush

        file_dir = workspace + "residual_upscaled/output_" + get_lexicon_value(6, x) + ".png"
        if len(last_10) == 10:
            last_10.pop(0)

        now = time.time()

        # Busy-wait (with a short sleep) until this frame's upscale exists.
        while not os.path.isfile(file_dir):
            time.sleep(.01)

        last_10.append(float(time.time() - now))
Exemple #7
0
    def run(self):
        """
        Drive waifu2x-converter-cpp over the whole residuals folder,
        re-invoking it in batches until every frame has been upscaled.

        A background thread continually fixes the converter's output names
        while this loop runs.
        """
        logger = logging.getLogger(__name__)

        # if there are pre-existing files, fix them (this occurs during a resume session)
        self.fix_names()

        fix_names_forever_thread = threading.Thread(target=self.fix_names_all)
        fix_names_forever_thread.start()

        # we need to os.chdir or else waifu2x-converter won't work.
        os.chdir(self.waifu2x_converter_cpp_path)

        # 'exec' is a builtin; use a distinct name for the command list.
        exec_command = copy.copy(self.waifu2x_conv_upscale_frame)

        # replace the placeholder tokens with the files we're concerned with
        for i in range(len(exec_command)):
            if exec_command[i] == "[input_file]":
                exec_command[i] = self.differences_dir
            if exec_command[i] == "[output_file]":
                exec_command[i] = self.upscaled_dir

        logger.info("waifu2xconv session")
        logger.info(exec_command)

        # make a list of names that will eventually (past or future) be upscaled
        names = ["output_" + get_lexicon_value(6, x) + ".png"
                 for x in range(1, self.frame_count)]

        # remove from the list images that have already been upscaled
        already_upscaled = [name for name in names
                            if os.path.isfile(self.upscaled_dir + name)]
        for name in already_upscaled:
            names.remove(name)

        if already_upscaled:
            logger.info("Already have " + str(len(already_upscaled)) +
                        " upscaled")

        # 'with' closes the log handle even if a subprocess call raises (the
        # original leaked the handle).
        with open(self.context.log_dir + "waifu2x_upscale_frames_command.txt",
                  "w") as console_output:
            # while there are pictures that have yet to be upscaled, keep
            # calling the upscale command
            while names:
                logger.info("Frames remaining before batch: ")
                logger.info(len(names))

                console_output.write(str(exec_command))
                # NOTE(review): shell=True with a list is platform-dependent;
                # kept as-is to preserve existing (Windows) behavior.
                subprocess.call(exec_command,
                                shell=True,
                                stderr=console_output,
                                stdout=console_output)

                # Delete each residual whose upscale now exists, and drop it
                # from the work list (iterate a reversed copy so removal is
                # safe).
                for name in names[::-1]:
                    if os.path.isfile(self.upscaled_dir + name):
                        os.remove(self.differences_dir +
                                  name.replace(".png", ".jpg"))
                        names.remove(name)
def delete_digit_files_in_range(context: Context, file_prefix, extension,
                                lexiconic_digits, start, end):
    """Delete files named file_prefix + lexicon(N) + extension for start <= N < end."""
    context.logger.info("Deleting files " + file_prefix + extension + " from " +
                        str(start) + " to " + str(end))

    for frame in range(start, end):
        target = (file_prefix + str(get_lexicon_value(lexiconic_digits, frame)) +
                  extension)
        os.remove(target)
Exemple #9
0
    def __init__(self, context):
        """
        Collect the name of every residual image that will eventually be
        upscaled, so this thread can remove them as their upscales appear.
        """
        # Initialize the Thread base exactly once. The original called
        # Thread.__init__(name=...) and then super().__init__(), which re-ran
        # Thread's initializer and clobbered the thread name.
        super().__init__(name="Remove Upscale Files Thread")

        self.context = context

        # make a list of names that will eventually (past or future) be upscaled
        self.list_of_names = []
        for x in range(self.context.start_frame, self.context.frame_count):
            self.list_of_names.append("output_" + get_lexicon_value(6, x) +
                                      ".jpg")
Exemple #10
0
    def run(self):
        """
        Wait for every residual to be upscaled and delete each residual
        image once its upscaled counterpart exists on disk.
        """
        logger = logging.getLogger(__name__)

        # Create/truncate the log file; nothing is written to it in this
        # method, but close the handle instead of leaking it as the original
        # did.
        open(self.context.log_dir + "waifu2x_caffe_upscale_frame_all.txt",
             "w").close()

        residual_images_dir = self.context.residual_images_dir
        residual_upscaled_dir = self.context.residual_upscaled_dir
        exec_command = copy.copy(self.waifu2x_caffe_upscale_frame)

        # replace the placeholder tokens with the files we're concerned with.
        # NOTE(review): exec_command is built here but never executed in this
        # method — presumably the subprocess is launched elsewhere; confirm.
        for i in range(len(exec_command)):
            if exec_command[i] == "[input_file]":
                exec_command[i] = residual_images_dir
            if exec_command[i] == "[output_file]":
                exec_command[i] = residual_upscaled_dir

        # make a list of names that will eventually (past or future) be upscaled
        upscaled_names = ["output_" + get_lexicon_value(6, x) + ".png"
                          for x in range(1, self.frame_count)]

        # drop images that have already been upscaled (resumed session)
        already_done = [name for name in upscaled_names
                        if os.path.isfile(self.residual_upscaled_dir + name)]
        for name in already_done:
            upscaled_names.remove(name)

        if already_done:
            logger.info("Already have " + str(len(already_done)) + " upscaled")

        # While frames remain, delete each residual once its upscale exists.
        while upscaled_names:
            for name in upscaled_names[::-1]:
                if os.path.exists(self.residual_upscaled_dir + name):

                    residual_file = self.residual_images_dir + name

                    # In residuals.py we created fake 'upscaled' images by
                    # saving them to 'residuals_upscaled' and never saved the
                    # residuals file. In that case there is nothing to delete
                    # here.
                    if os.path.exists(residual_file):
                        os.remove(residual_file)

                    upscaled_names.remove(name)
            # Brief sleep so the wait loop doesn't spin at 100% CPU.
            time.sleep(0.01)
Exemple #11
0
    def __delete_used_files(self, remove_before):
        """
        Queue deletion of the dandere2x working files that are no longer
        needed once processing has passed 'remove_before'.

        Author: Tremex
        """
        ctx = self.context

        # Everything except the upscaled residual is indexed two frames back.
        stale_index = str(remove_before - 2)

        # "mark" the files to remove
        files_to_remove = [
            ctx.pframe_data_dir + "pframe_" + stale_index + ".txt",
            ctx.residual_data_dir + "residual_" + stale_index + ".txt",
            ctx.correction_data_dir + "correction_" + stale_index + ".txt",
            ctx.fade_data_dir + "fade_" + stale_index + ".txt",
            ctx.input_frames_dir + "frame" + stale_index + ".jpg",
            ctx.compressed_static_dir + "compressed_" + stale_index + ".jpg",
            ctx.compressed_moving_dir + "compressed_" + stale_index + ".jpg",
        ]

        # The upscaled residual is named by lexicon value at remove_before
        # itself (not remove_before - 2).
        files_to_remove.append(ctx.residual_upscaled_dir + "output_" +
                               get_lexicon_value(6, int(remove_before)) +
                               ".png")

        # Delete on a daemon thread so the caller isn't blocked on disk I/O.
        threading.Thread(target=self.__delete_files_from_list,
                         args=(files_to_remove, ),
                         daemon=True,
                         name="mindiskusage").start()
def delete_digit_files_in_range(context: Context, file_prefix, extension,
                                lexiconic_digits, start, end):
    """
    Delete files that come in the form filename_1.png, filename_2.png ...
    i.e. file_prefix + lexicon(N) + extension for every N in [start, end).
    """
    message = ("Deleting files " + file_prefix + extension + " from " +
               str(start) + " to " + str(end))
    context.logger.info(message)

    for n in range(start, end):
        os.remove(file_prefix + str(get_lexicon_value(lexiconic_digits, n)) +
                  extension)
Exemple #13
0
def residual_loop(context):
    """
    Call the 'make_residual_image' method for every image that needs to be
    made into a residual.

    Method Tasks:
        - Load and wait for the files needed to create a residual image.
        - Call 'make_residual_image' once the needed files exist.
    """
    # load variables from context
    residual_images_dir = context.residual_images_dir
    residual_data_dir = context.residual_data_dir
    pframe_data_dir = context.pframe_data_dir
    input_frames_dir = context.input_frames_dir
    frame_count = context.frame_count
    block_size = context.block_size
    extension_type = context.extension_type
    debug_dir = context.debug_dir
    debug = context.debug
    temp_image = context.temp_image_folder + "tempimage.jpg"

    logger = logging.getLogger(__name__)
    logger.info((context.workspace, 1, frame_count, block_size))

    # for every frame in the video, create a residual frame from the text files
    for x in range(1, frame_count):
        # wait for (and load) the *next* input frame
        next_frame = Frame()
        next_frame.load_from_string_wait(input_frames_dir + "frame" +
                                         str(x + 1) + extension_type)

        # vector lists needed to compute this iteration's residual
        residual_data = get_list_from_file(
            residual_data_dir + "residual_" + str(x) + ".txt")
        prediction_data = get_list_from_file(
            pframe_data_dir + "pframe_" + str(x) + ".txt")

        output_file = (residual_images_dir + "output_" +
                       get_lexicon_value(6, x) + ".jpg")

        # Save to a temp folder so waifu2x-vulkan doesn't try reading a
        # half-written image, then move it into place.
        out_image = make_residual_image(context, next_frame, residual_data,
                                        prediction_data)
        out_image.save_image_temp(output_file, temp_image)

        if debug == 1:
            debug_image(block_size, next_frame, prediction_data,
                        residual_data,
                        debug_dir + "debug" + str(x + 1) + extension_type)
Exemple #14
0
    def run(self):
        """
        Repeatedly invoke waifu2x-caffe over the differences folder until
        every frame has been upscaled, deleting each residual once its
        upscaled output exists.
        """
        logger = logging.getLogger(__name__)

        differences_dir = self.context.differences_dir
        upscaled_dir = self.context.upscaled_dir

        # 'exec' is a builtin; use a distinct name for the command list.
        exec_command = copy.copy(self.waifu2x_caffe_upscale_frame)

        # replace the placeholder tokens with the files we're concerned with
        for i in range(len(exec_command)):
            if exec_command[i] == "[input_file]":
                exec_command[i] = differences_dir
            if exec_command[i] == "[output_file]":
                exec_command[i] = upscaled_dir

        # make a list of names that will eventually (past or future) be upscaled
        names = ["output_" + get_lexicon_value(6, x) + ".png"
                 for x in range(1, self.frame_count)]

        # remove from the list images that have already been upscaled
        already_upscaled = [name for name in names
                            if os.path.isfile(self.upscaled_dir + name)]
        for name in already_upscaled:
            names.remove(name)

        if already_upscaled:
            logger.info("Already have " + str(len(already_upscaled)) +
                        " upscaled")

        # 'with' closes the log handle even if a subprocess call raises (the
        # original leaked the handle).
        with open(self.context.log_dir + "waifu2x_caffe_upscale_frame_all.txt",
                  "w") as console_output:
            # while there are pictures that have yet to be upscaled, keep
            # calling the upscale command
            while names:
                logger.info("Frames remaining before batch: ")
                logger.info(len(names))

                console_output.write(str(exec_command))
                # NOTE(review): shell=True with a list is platform-dependent;
                # kept as-is to preserve existing (Windows) behavior.
                subprocess.call(exec_command,
                                shell=True,
                                stderr=console_output,
                                stdout=console_output)

                # Delete each residual whose upscale now exists, and drop it
                # from the work list (iterate a reversed copy so removal is
                # safe).
                for name in names[::-1]:
                    if os.path.isfile(self.upscaled_dir + name):
                        os.remove(self.differences_dir +
                                  name.replace(".png", ".jpg"))
                        names.remove(name)
Exemple #15
0
def difference_loop(context, start_frame: int):
    """
    Create a difference image for every frame from start_frame onward.

    Each iteration waits for the next input frame plus the vector text
    files, builds the difference image, and writes it via a temp file so
    waifu2x never sees a partially saved image.
    """
    # load variables from context
    workspace = context.workspace
    differences_dir = context.differences_dir
    inversion_data_dir = context.inversion_data_dir
    pframe_data_dir = context.pframe_data_dir
    input_frames_dir = context.input_frames_dir
    frame_count = context.frame_count
    block_size = context.block_size
    extension_type = context.extension_type
    debug = context.debug
    temp_image = context.temp_image_folder + "tempimage.jpg"

    logger = logging.getLogger(__name__)
    logger.info((workspace, start_frame, frame_count, block_size))

    # for every frame in the video, create a difference frame from the text files
    for frame_number in range(start_frame, frame_count):
        # wait for (and load) the *next* input frame
        next_frame = Frame()
        next_frame.load_from_string_wait(input_frames_dir + "frame" +
                                         str(frame_number + 1) +
                                         extension_type)

        # vector lists needed to compute this iteration's difference image
        difference_data = get_list_from_file(
            inversion_data_dir + "inversion_" + str(frame_number) + ".txt")
        prediction_data = get_list_from_file(
            pframe_data_dir + "pframe_" + str(frame_number) + ".txt")

        output_file = (differences_dir + "output_" +
                       get_lexicon_value(6, frame_number) + ".jpg")

        # Save to a temp folder so waifu2x-vulkan doesn't try reading it,
        # then move it into place.
        out_image = make_difference_image(context, next_frame,
                                          difference_data, prediction_data)
        out_image.save_image_temp(output_file, temp_image)

        if debug == 1:
            debug_image(block_size, next_frame, prediction_data,
                        difference_data,
                        workspace + "debug/debug" + str(frame_number + 1) +
                        extension_type)
    def fix_names_all(self):
        """
        Rename waifu2x outputs from "output_XXXXXX.jpg.png" to the
        "output_XXXXXX.png" form dandere2x expects, retrying while the
        upscaler still holds the file handle (PermissionError).
        """
        for frame in range(1, self.frame_count):
            base = self.upscaled_dir + "output_" + get_lexicon_value(6, frame)
            dirty_name = base + ".jpg.png"
            clean_name = base + ".png"

            wait_on_either_file(clean_name, dirty_name)

            # Already renamed — nothing to do for this frame.
            if file_exists(clean_name):
                continue

            while file_exists(dirty_name):
                try:
                    rename_file(dirty_name, clean_name)
                except PermissionError:
                    pass
Exemple #17
0
    def __remove_once_upscaled(self):
        """
        Delete each residual image as soon as its upscaled counterpart
        appears on disk.

        The residual may legitimately not exist: presumably a fake
        'upscaled' image was written without a residual being saved —
        in that case there is simply nothing to delete.
        """
        # Iterate the frames directly instead of building a list just to
        # index it with range(len(...)).
        for x in range(1, self.frame_count):
            name = "output_" + get_lexicon_value(6, x) + ".png"

            residual_file = self.residual_images_dir + name.replace(
                ".png", ".jpg")
            residual_upscaled_file = self.residual_upscaled_dir + name

            # Spin (with a tiny sleep) until the upscaled frame exists.
            while not file_exists(residual_upscaled_file):
                time.sleep(.00001)

            if os.path.exists(residual_file):
                os.remove(residual_file)
    def fix_names_all(self):
        """
        Strip the "_[NS-L<noise>][x<scale>.000000]" suffix the legacy
        converter appends, renaming outputs to "output_XXXXXX.png". Retries
        while the upscaler still holds the file handle (PermissionError).
        """
        # The appended suffix is invariant per session.
        suffix = ('_[NS-L' + str(self.noise_level) + '][x' +
                  str(self.scale_factor) + '.000000]' + ".png")

        for frame in range(1, self.frame_count):
            base = (self.residual_upscaled_dir + "output_" +
                    get_lexicon_value(6, frame))
            dirty_name = base + suffix
            clean_name = base + ".png"

            wait_on_either_file(clean_name, dirty_name)

            # Already renamed — nothing to do for this frame.
            if file_exists(clean_name):
                continue

            while file_exists(dirty_name):
                try:
                    rename_file(dirty_name, clean_name)
                except PermissionError:
                    pass
Exemple #19
0
    def __remove_once_upscaled(self):
        """
        Delete each residual image once its upscaled counterpart exists,
        stopping early if this worker is no longer alive.

        The residual may legitimately not exist: presumably a fake
        'upscaled' image was written without a residual being saved —
        in that case there is simply nothing to delete.
        """
        # Iterate the frames directly instead of building a list just to
        # index it with range(len(...)).
        for x in range(self.start_frame, self.frame_count):

            # Bail out promptly when the session is cancelled.
            if not self.alive:
                return

            name = "output_" + get_lexicon_value(6, x) + ".png"

            residual_file = self.residual_images_dir + name.replace(
                ".png", ".jpg")
            residual_upscaled_file = self.residual_upscaled_dir + name

            wait_on_file(residual_upscaled_file, self.cancel_token)

            if os.path.exists(residual_file):
                os.remove(residual_file)
Exemple #20
0
def difference_loop_resume(context):
    """
    Resume difference-image creation after an interruption.

    Scans the upscaled outputs backwards for the newest one on disk, then
    restarts difference_loop one frame earlier (the newest frame may be
    incomplete).
    """
    # load variables from context
    frame_count = context.frame_count
    upscaled_dir = context.upscaled_dir

    logger = logging.getLogger(__name__)

    last_found = frame_count

    # Walk backwards until an already-upscaled output is found.
    while last_found > 1:
        if os.path.isfile(upscaled_dir + "output_" +
                          get_lexicon_value(6, last_found) + ".png"):
            break
        last_found -= 1

    # start one frame lower, since the newest frame may be incomplete
    last_found -= 1
    logger.info("difference loop last frame found: " + str(last_found))

    difference_loop(context, start_frame=last_found)
def run_realtime_encoding(context: Context, output_file: str):
    """
    Encode the merged frames into small videos as they are produced, then
    concatenate them (and migrate audio/tracks) into output_file.

    Every 'realtime_encoding_seconds_per_video' seconds worth of frames is
    encoded into its own .mkv; list.txt records each section for ffmpeg's
    concat. Optionally deletes intermediate files once a section is encoded.
    """
    logger = context.logger
    logger.info("Real time encoding process started")

    # Load context
    workspace = context.workspace
    frames_per_video = int(context.frame_rate *
                           context.realtime_encoding_seconds_per_video)
    frame_count = int(context.frame_count)
    realtime_encoding_delete_files = context.realtime_encoding_delete_files
    extension_type = context.extension_type
    input_file = context.input_file

    # directories
    merged_files_prefix = context.merged_dir + "merged_"
    upscaled_files_prefix = context.upscaled_dir + "output_"
    compressed_files_prefix = context.compressed_static_dir + "compressed_"
    input_frames_prefix = context.input_frames_dir + "frame"

    # Text file listing every encoded section, used by ffmpeg to concat the
    # videos together. Opened once here: the original re-opened it on every
    # loop iteration (leaking handles) and raised NameError if the remainder
    # branch ran without the main loop having executed.
    text_file = open(workspace + "encoded" + os.path.sep + "list.txt", 'a+')

    # Create an encoded video for every frames_per_video frames.
    for x in range(0, int(frame_count / frames_per_video)):
        encoded_vid = workspace + "encoded" + os.path.sep + "encoded_" + str(
            x) + ".mkv"

        if file_exists(encoded_vid):
            logger.info(encoded_vid + " already exists: skipping iteration")
            continue

        # Wait for the first and last merged frames of this section.
        wait_on_file(merged_files_prefix + str(x * frames_per_video + 1) +
                     extension_type)
        wait_on_file(merged_files_prefix +
                     str(x * frames_per_video + frames_per_video) +
                     extension_type)

        # create a video for frames in this section
        create_video_from_specific_frames(context, merged_files_prefix,
                                          encoded_vid,
                                          x * frames_per_video + 1,
                                          frames_per_video)

        # ensure ffmpeg video exists before deleting files
        wait_on_file(encoded_vid)

        # write to text file video for ffmpeg to concat vids with
        text_file.write("file " + "'" + encoded_vid + "'" + "\n")

        # put files to delete inside of here.
        if realtime_encoding_delete_files:
            delete_digit_files_in_range(
                context, merged_files_prefix, extension_type, 0,
                x * frames_per_video + 1,
                x * frames_per_video + frames_per_video + 1)

            delete_digit_files_in_range(
                context, compressed_files_prefix, extension_type, 0,
                x * frames_per_video + 1,
                x * frames_per_video + frames_per_video + 1)

            delete_digit_files_in_range(
                context, input_frames_prefix, extension_type, 0,
                x * frames_per_video + 1,
                x * frames_per_video + frames_per_video + 1)

            # upscaled files end on a different number than merged files.
            if x == int(frame_count / frames_per_video) - 1:

                wait_on_file(upscaled_files_prefix +
                             get_lexicon_value(6, x * frames_per_video + 1) +
                             ".png")
                wait_on_file(upscaled_files_prefix + get_lexicon_value(
                    6, x * frames_per_video + frames_per_video) + ".png")

                delete_digit_files_in_range(
                    context, upscaled_files_prefix, ".png", 6,
                    x * frames_per_video + 1,
                    x * frames_per_video + frames_per_video)

            else:

                wait_on_file(upscaled_files_prefix +
                             get_lexicon_value(6, x * frames_per_video + 1) +
                             ".png")
                wait_on_file(upscaled_files_prefix + get_lexicon_value(
                    6, x * frames_per_video + frames_per_video + 1) + ".png")

                delete_digit_files_in_range(
                    context, upscaled_files_prefix, ".png", 6,
                    x * frames_per_video + 1,
                    x * frames_per_video + frames_per_video + 1)

    # Because we divided the video into int(frame_count / frames_per_video) videos, and
    # int(frame_count / frames_per_video) != frame_count / frames_per_video, there's still frames that are left out.
    # We need to now encode those separately.
    if frame_count - int(
            frame_count / frames_per_video) * frames_per_video > 0:
        x = int(frame_count / frames_per_video)
        encoded_vid = workspace + "encoded" + os.path.sep + "encoded_" + str(
            x) + ".mkv"

        wait_on_file(merged_files_prefix + str(x * frames_per_video + 1) +
                     extension_type)
        # NOTE(review): this wait target computes
        # frame_count - x*frames_per_video + frames_per_video, which does not
        # obviously equal the last merged frame (frame_count). Kept as-is;
        # confirm against the merger's numbering.
        wait_on_file(merged_files_prefix +
                     str(frame_count - x * frames_per_video +
                         frames_per_video) + extension_type)

        # create a video for frames in this section
        create_video_from_specific_frames(context, merged_files_prefix,
                                          encoded_vid,
                                          x * frames_per_video + 1,
                                          frames_per_video)

        # ensure ffmpeg video exists before deleting files
        wait_on_file(encoded_vid)

        # write to text file video for ffmpeg to concat vids with
        text_file.write("file " + "'" + encoded_vid + "'" + "\n")

    text_file.close()

    concat_encoded_vids(context, workspace + "nosound.mkv")
    migrate_tracks(context, workspace + "nosound.mkv", input_file, output_file)
Exemple #22
0
def merge_loop(context: Context):
    """
    Drive 'make_merge_image' across every frame of the video.

    Each merged frame is built inductively from the previous merged frame
    plus the upscaled residual for the current frame, so the loop carries
    both forward between iterations.

    Per-iteration tasks:
        - Asynchronously pre-load the next upscaled residual image.
        - Load the vector text files needed by 'make_merge_image'.
        - Build the merged image and write it to disk in the background.
    """

    # Pull everything we need out of the shared context object.
    workspace = context.workspace
    upscaled_dir = context.residual_upscaled_dir
    merged_dir = context.merged_dir
    residual_data_dir = context.residual_data_dir
    pframe_data_dir = context.pframe_data_dir
    correction_data_dir = context.correction_data_dir
    fade_data_dir = context.fade_data_dir
    frame_count = context.frame_count
    extension_type = context.extension_type
    logger = logging.getLogger(__name__)

    # Seed the induction: the genesis merged frame and its first upscaled
    # residual must both exist before the loop can start.
    previous_frame = Frame()
    previous_frame.load_from_string_wait(f"{merged_dir}merged_1{extension_type}")

    upscaled_residual = Frame()
    upscaled_residual.load_from_string_wait(
        f"{upscaled_dir}output_{get_lexicon_value(6, 1)}.png")

    for frame_index in range(1, frame_count):
        ###################################
        # Loop-iteration pre-requirements #
        ###################################

        # Frame 'frame_count' has no successor, so there is nothing to
        # prefetch once we reach the final iteration.
        is_final_frame = frame_index == frame_count - 1

        if not is_final_frame:
            prefetch = AsyncFrameRead(
                f"{upscaled_dir}output_{get_lexicon_value(6, frame_index + 1)}.png")
            prefetch.start()

        #######################
        # Loop-iteration Core #
        #######################

        logger.info(f"Upscaling frame {frame_index}")

        # Vector lists describing how the previous frame becomes this one.
        prediction_data_list = get_list_from_file(
            f"{pframe_data_dir}pframe_{frame_index}.txt")
        residual_data_list = get_list_from_file(
            f"{residual_data_dir}residual_{frame_index}.txt")
        correction_data_list = get_list_from_file(
            f"{correction_data_dir}correction_{frame_index}.txt")
        fade_data_list = get_list_from_file(
            f"{fade_data_dir}fade_{frame_index}.txt")

        merged_frame = make_merge_image(context, upscaled_residual,
                                        previous_frame, prediction_data_list,
                                        residual_data_list,
                                        correction_data_list, fade_data_list)

        # Write asynchronously so disk latency doesn't stall the merge loop.
        writer = AsyncFrameWrite(
            merged_frame,
            f"{workspace}merged/merged_{frame_index + 1}{extension_type}")
        writer.start()

        #######################################
        # Assign variables for next iteration #
        #######################################

        if not is_final_frame:
            # Block until the prefetched upscaled residual is fully in memory.
            while not prefetch.load_complete:
                wait_on_file(
                    f"{upscaled_dir}output_{get_lexicon_value(6, frame_index + 1)}.png")

            upscaled_residual = prefetch.loaded_image

        previous_frame = merged_frame
Exemple #23
0
    def run(self):
        """
        Merge frames start_frame .. frame_count - 1 and stream each merged
        image into the ffmpeg pipe.

        Per-iteration work:
            - Asynchronously pre-load the next upscaled residual image.
            - Load the vector text files describing frame-to-frame changes.
            - Build the merged image and write it to the pipe (and to disk,
              when preserve_frames is enabled).
            - Report progress through the controller.

        Exits early if the controller reports the session was killed.
        """
        self.log.info("Started")
        self.pipe.start()

        # Load and pipe the 'genesis' merged frame before we start the for
        # loop procedure, since all the other images will inductively build
        # off this first frame.
        # BUGFIX: this frame was previously loaded twice in a row from the
        # same file; the redundant second load has been removed.
        frame_previous = Frame()
        frame_previous.load_from_string_controller(
            self.merged_dir + "merged_" + str(self.start_frame) +
            self.extension_type, self.context.controller)
        self.pipe.save(frame_previous)

        current_upscaled_residuals = Frame()
        current_upscaled_residuals.load_from_string_controller(
            self.upscaled_dir + "output_" +
            get_lexicon_value(6, self.start_frame) + ".png",
            self.context.controller)

        last_frame = False
        for x in range(self.start_frame, self.frame_count):
            ########################################
            # Pre-loop logic checks and conditions #
            ########################################

            # Check if we're at the last image, which affects the behaviour of the loop.
            if x == self.frame_count - 1:
                last_frame = True

            # Pre-load the next iteration of the loop image ahead of time, if we're not on the last frame.
            if not last_frame:
                """ 
                By asynchronously loading frames ahead of time, this provides a small but meaningful
                boost in performance when spanned over N frames. There's some code over head but 
                it's well worth it. 
                """
                background_frame_load = AsyncFrameRead(
                    self.upscaled_dir + "output_" +
                    get_lexicon_value(6, x + 1) + ".png",
                    self.context.controller)
                background_frame_load.start()

            ######################
            # Core Logic of Loop #
            ######################

            # Load the needed vectors to create the merged image.

            prediction_data_list = get_list_from_file_and_wait(
                self.pframe_data_dir + "pframe_" + str(x) + ".txt",
                self.context.controller)
            residual_data_list = get_list_from_file_and_wait(
                self.residual_data_dir + "residual_" + str(x) + ".txt",
                self.context.controller)
            correction_data_list = get_list_from_file_and_wait(
                self.correction_data_dir + "correction_" + str(x) + ".txt",
                self.context.controller)
            fade_data_list = get_list_from_file_and_wait(
                self.fade_data_dir + "fade_" + str(x) + ".txt",
                self.context.controller)

            # The blocking reads above may have returned because the session
            # died rather than because the data arrived — check before using it.
            if not self.context.controller.is_alive():
                self.log.info(" Merge thread killed at frame %s " % str(x))
                break

            # Create the actual image itself.
            current_frame = self.make_merge_image(
                self.context, current_upscaled_residuals, frame_previous,
                prediction_data_list, residual_data_list, correction_data_list,
                fade_data_list)
            ###############
            # Saving Area #
            ###############
            # Directly write the image to the ffmpeg pipe line.
            self.pipe.save(current_frame)

            # Manually write the image if we're preserving frames (this is for enthusiasts / debugging).
            if self.preserve_frames:
                output_file = self.workspace + "merged/merged_" + str(
                    x + 1) + self.extension_type
                background_frame_write = AsyncFrameWrite(
                    current_frame, output_file)
                background_frame_write.start()

            #######################################
            # Assign variables for next iteration #
            #######################################
            if not last_frame:
                # We need to wait until the next upscaled image exists before we move on.
                while not background_frame_load.load_complete:
                    wait_on_file(
                        self.upscaled_dir + "output_" +
                        get_lexicon_value(6, x + 1) + ".png",
                        self.context.controller)

                # BUGFIX: only consume the prefetched image when a load was
                # actually started this iteration. Previously this ran
                # unconditionally, which raised NameError whenever the very
                # first iteration was also the last frame.
                current_upscaled_residuals = background_frame_load.loaded_image
            """
            Now that we're all done with the current frame, the current `current_frame` is now the frame_previous
            (with respect to the next iteration). We could obviously manually load frame_previous = Frame(n-1) each
            time, but this is an optimization that makes a substantial difference over N frames.
            """
            frame_previous = current_frame
            self.context.controller.update_frame_count(x)

        # Close the pipe so ffmpeg can finalize its output.
        self.pipe.kill()
Exemple #24
0
def residual_loop(context):
    """
    Produce a residual image for every frame of the video.

    Method Tasks:
        - Wait for and load the frame / vector files each residual needs.
        - Call 'make_residual_image' once those files exist.
        - Short-circuit identical frames with a tiny 'fake' upscale so
          waifu2x never has to process them.
    """

    # Unpack the context fields this loop reads.
    workspace = context.workspace
    residual_upscaled_dir = context.residual_upscaled_dir
    residual_images_dir = context.residual_images_dir
    residual_data_dir = context.residual_data_dir
    pframe_data_dir = context.pframe_data_dir
    input_frames_dir = context.input_frames_dir
    frame_count = context.frame_count
    block_size = context.block_size
    extension_type = context.extension_type
    debug_dir = context.debug_dir
    debug = context.debug

    temp_image = context.temp_image_folder + "tempimage.jpg"

    logger = logging.getLogger(__name__)
    logger.info((workspace, 1, frame_count, block_size))

    # For every frame in the video, create a residual frame from the
    # corresponding vector text files.
    for frame_number in range(1, frame_count):
        # The residual for frame N is computed against frame N + 1.
        next_frame = Frame()
        next_frame.load_from_string_wait(
            input_frames_dir + "frame" + str(frame_number + 1) + extension_type)

        # Vector lists describing what changed between the two frames.
        residual_data = get_list_from_file(
            residual_data_dir + "residual_" + str(frame_number) + ".txt")
        prediction_data = get_list_from_file(
            pframe_data_dir + "pframe_" + str(frame_number) + ".txt")

        # Destination paths for this iteration's outputs.
        debug_output_file = (debug_dir + "debug" + str(frame_number + 1) +
                             extension_type)
        output_file = (residual_images_dir + "output_" +
                       get_lexicon_value(6, frame_number) + ".jpg")

        residual_frame = make_residual_image(context, next_frame,
                                             residual_data, prediction_data)

        if residual_frame.get_res() == (1, 1):
            # A 1x1 residual means frame N and frame N + 1 are identical.
            # We still need one output per input, but a 1x1 image slows
            # waifu2x down, so we 'cheat': write a 2x2 'fake' upscale
            # straight into the upscaled directory and skip waifu2x.
            residual_frame = Frame()
            residual_frame.create_new(2, 2)
            output_file = (residual_upscaled_dir + "output_" +
                           get_lexicon_value(6, frame_number) + ".png")
            residual_frame.save_image(output_file)
        else:
            # Real residual: stage it in a temp file so waifu2x-vulkan never
            # reads a half-written image, then move it into place.
            residual_frame.save_image_temp(output_file, temp_image)

        # Note: in the 'fake' branch no residual file exists, so downstream
        # wrappers must not assume one is there to delete.
        if debug == 1:
            debug_image(block_size, next_frame, prediction_data,
                        residual_data, debug_output_file)
    def run(self):
        """
        Supervise waifu2x-converter-cpp upscaling of the residual images.

        Builds the converter command from its placeholder template, then
        polls the upscaled-output directory, deleting each source residual
        as its upscaled counterpart appears, until every expected frame has
        been produced.
        """
        console_output = open(
            self.context.log_dir + "waifu2x_upscale_frames_command.txt", "w")
        logger = logging.getLogger(__name__)
        # if there are pre-existing files, fix them (this occurs during a resume session)
        self.fix_names()

        fix_names_forever_thread = threading.Thread(target=self.fix_names_all)
        fix_names_forever_thread.start()

        # we need to os.chdir or else waifu2x-conveter won't work.
        os.chdir(self.waifu2x_converter_cpp_path)

        exec_command = copy.copy(self.waifu2x_conv_upscale_frame)

        # Substitute the placeholder tokens with the directories we're using.
        for x in range(len(exec_command)):
            if exec_command[x] == "[input_file]":
                exec_command[x] = self.residual_images_dir

            if exec_command[x] == "[output_file]":
                exec_command[x] = self.residual_upscaled_dir

        logger.info("waifu2xconv session")
        logger.info(exec_command)
        # NOTE(review): exec_command is built and logged but never executed
        # in this block — confirm the subprocess invocation wasn't lost.

        # make a list of names that will eventually (past or future) be upscaled
        names = []
        for x in range(1, self.frame_count):
            names.append("output_" + get_lexicon_value(6, x) + ".png")

        count_removed = 0

        # remove from the list images that have already been upscaled
        for already_upscaled in names[::-1]:
            if os.path.isfile(self.residual_upscaled_dir + already_upscaled):
                names.remove(already_upscaled)
                count_removed += 1

        if count_removed:
            logger.info("Already have " + str(count_removed) + " upscaled")

        # Poll until every pending name has appeared in the upscaled
        # directory, deleting the corresponding residual as each one lands.
        # BUGFIX: this loop previously iterated 'upscaled_names' — the stale
        # loop variable from the removal pass above, holding one filename
        # string (or unbound when 'names' started empty) — so it walked the
        # characters of a filename instead of the pending-file list.
        while names:
            for name in names[::-1]:
                if os.path.exists(self.residual_upscaled_dir + name):

                    residual_file = self.residual_images_dir + name

                    if os.path.exists(residual_file):
                        os.remove(residual_file)
                    else:
                        '''
                        In residuals.py we created fake 'upscaled' images by saving them to the 'residuals_upscaled', 
                        and never saved the residuals file. In that case, only remove the 'residuals_upscaled' 
                        since 'residuals' never existed. 
                        '''
                        pass

                    names.remove(name)
Exemple #26
0
    def run(self):
        """
        Merge frames start_frame .. frame_count - 1 and stream each merged
        image straight into the ffmpeg pipe.

        Each iteration:
            - pre-loads the next upscaled residual asynchronously,
            - loads the vector text files for the current frame,
            - builds the merged image and pipes it out (optionally also
              writing it to disk when preserve_frames is set),
            - reports progress through the controller.

        Exits early if the controller reports the session was killed.
        """

        self.pipe.start()
        # Load the genesis image + the first upscaled image.
        frame_previous = Frame()
        frame_previous.load_from_string_controller(
            self.merged_dir + "merged_" + str(self.start_frame) +
            self.extension_type, self.context.controller)

        # The genesis frame is already complete, so pipe it out immediately.
        self.pipe.save(frame_previous)

        f1 = Frame()
        f1.load_from_string_controller(
            self.upscaled_dir + "output_" +
            get_lexicon_value(6, self.start_frame) + ".png",
            self.context.controller)

        last_frame = False
        for x in range(self.start_frame, self.frame_count):
            ###################################
            # Loop-iteration pre-requirements #
            ###################################
            # Check if we're at the last image, which affects the behaviour of the loop.
            if x == self.frame_count - 1:
                last_frame = True

            # Pre-load the next iteration of the loop image ahead of time, if we're not on the last frame
            # (the final frame has no successor, so there is nothing to prefetch then).
            if not last_frame:
                background_frame_load = \
                    AsyncFrameRead(
                        self.upscaled_dir + "output_" + get_lexicon_value(6, x + 1) + ".png", self.context.controller)

                background_frame_load.start()

            #######################
            # Loop-iteration Core #
            #######################
            # Load the needed vectors to create the merged image.
            prediction_data_list = get_list_from_file_wait_controller(
                self.pframe_data_dir + "pframe_" + str(x) + ".txt",
                self.context.controller)
            residual_data_list = get_list_from_file_wait_controller(
                self.residual_data_dir + "residual_" + str(x) + ".txt",
                self.context.controller)
            correction_data_list = get_list_from_file_wait_controller(
                self.correction_data_dir + "correction_" + str(x) + ".txt",
                self.context.controller)
            fade_data_list = get_list_from_file_wait_controller(
                self.fade_data_dir + "fade_" + str(x) + ".txt",
                self.context.controller)

            # The blocking reads above may have returned because the session
            # died rather than because the data arrived — check before using it.
            if not self.context.controller.is_alive():
                self.logger.info("Merge.py killed at frame " + str(x))
                break

            self.logger.info("Upscaling frame " + str(x))
            # Create the actual image itself.
            frame_next = self.make_merge_image(
                self.context, f1, frame_previous, prediction_data_list,
                residual_data_list, correction_data_list, fade_data_list)

            ###############
            # Saving Area #
            ###############

            # Directly write the image to the ffmpeg pipe line.
            self.pipe.save(frame_next)

            # Manually write the image if we're preserving frames (this is for enthusiasts / debugging).
            if self.preserve_frames:
                output_file = self.workspace + "merged/merged_" + str(
                    x + 1) + self.extension_type
                background_frame_write = AsyncFrameWrite(
                    frame_next, output_file)
                background_frame_write.start()

            #######################################
            # Assign variables for next iteration #
            #######################################
            # last_frame + 1 does not exist, so don't load.
            if not last_frame:
                # We need to wait until the next upscaled image exists before we move on.
                while not background_frame_load.load_complete:
                    wait_on_file_controller(
                        self.upscaled_dir + "output_" +
                        get_lexicon_value(6, x + 1) + ".png",
                        self.context.controller)

                f1 = background_frame_load.loaded_image

            # The merged frame we just produced becomes the prediction base
            # for the next iteration.
            frame_previous = frame_next

            # Signal to the rest of the dandere2x process we've finished upscaling frame 'x'.
            self.context.controller.update_frame_count(x)

        # Close the pipe so ffmpeg can finalize its output.
        self.pipe.kill()
Exemple #27
0
def merge_loop(context: Context):
    """
    Call the 'make_merge_image' method for every image that needs to be upscaled.

    This method is sort of the driver for that, and has tasks needed to keep merging running smoothly.

    This method became a bit messy due to optimization-hunting, but the most important calls of the loop can be read in
    the 'Loop-iteration Core' area.

    Method Tasks:

        - Read / Write files that are used by merge asynchronously.
        - Load the text files containing the vectors needed for 'make_merge_image'
        - When ffmpeg_pipe_encoding is enabled, stream merged frames straight
          into an ffmpeg subprocess instead of writing them to disk.

    """

    # load variables from context
    workspace = context.workspace
    upscaled_dir = context.residual_upscaled_dir
    merged_dir = context.merged_dir
    residual_data_dir = context.residual_data_dir
    pframe_data_dir = context.pframe_data_dir
    correction_data_dir = context.correction_data_dir
    fade_data_dir = context.fade_data_dir
    frame_count = context.frame_count
    extension_type = context.extension_type
    logger = logging.getLogger(__name__)

    # # # ffmpeg piping stuff # # #

    ffmpeg_pipe_encoding = context.ffmpeg_pipe_encoding

    if ffmpeg_pipe_encoding:
        nosound_file = context.nosound_file
        frame_rate = str(context.frame_rate)
        input_file = context.input_file
        output_file = context.output_file
        ffmpeg_dir = context.ffmpeg_dir
        ffmpeg_pipe_encoding_type = context.ffmpeg_pipe_encoding_type

        # Pick the intermediate codec / PIL format used for the frames sent
        # over the pipe.
        if ffmpeg_pipe_encoding_type in ["jpeg", "jpg"]:
            vcodec = "mjpeg"
            pipe_format = "JPEG"

        elif ffmpeg_pipe_encoding_type == "png":
            vcodec = "png"
            pipe_format = "PNG"

        else:
            print("  Error: no valid ffmpeg_pipe_encoding_type set. Using jpeg as default")
            vcodec = "mjpeg"
            pipe_format = "JPEG"

        print("\n    WARNING: EXPERIMENTAL FFMPEG PIPING IS ENABLED\n")

        # Long-lived ffmpeg process: reads images from stdin and encodes
        # them into the (audio-less) nosound_file.
        ffmpegpipe = subprocess.Popen([ffmpeg_dir, "-loglevel", "panic", '-y', '-f',
                                       'image2pipe', '-vcodec', vcodec, '-r', frame_rate,
                                       '-i', '-', '-vcodec', 'libx264', '-preset', 'medium',
                                       '-qscale', '5', '-crf', '17',
                                       '-vf', ' pp=hb/vb/dr/fq|32, deband=range=22:blur=false',
                                       '-r', frame_rate, nosound_file],
                                      stdin=subprocess.PIPE)

        # pipe the first merged image as it will not be done afterwards
        wait_on_file(merged_dir + "merged_" + str(1) + extension_type)
        im = Image.open(merged_dir + "merged_" + str(1) + extension_type)

        # best jpeg quality since we won't be saving up disk space
        # NOTE(review): later frames are piped at quality=95 — confirm the
        # mismatch with quality=100 here is intentional.
        im.save(ffmpegpipe.stdin, format=pipe_format, quality=100)

    # # #  # # #  # # #  # # #

    # Load the genesis image + the first upscaled image.
    frame_previous = Frame()
    frame_previous.load_from_string_wait(merged_dir + "merged_" + str(1) + extension_type)

    f1 = Frame()
    f1.load_from_string_wait(upscaled_dir + "output_" + get_lexicon_value(6, 1) + ".png")

    # When upscaling every frame between start_frame to frame_count, there's obviously no x + 1 at frame_count - 1 .
    # So just make a small note not to load that image. Pretty much load images concurrently until we get to x - 1
    last_frame = False
    for x in range(1, frame_count):
        ###################################
        # Loop-iteration pre-requirements #
        ###################################

        # Check if we're at the last image
        if x == frame_count - 1:
            last_frame = True

        # load the next image ahead of time.
        if not last_frame:
            background_frame_load = AsyncFrameRead(upscaled_dir + "output_" + get_lexicon_value(6, x + 1) + ".png")
            background_frame_load.start()

        #######################
        # Loop-iteration Core #
        #######################

        logger.info("Upscaling frame " + str(x))

        # Vector lists describing how frame x turns into frame x + 1.
        prediction_data_list = get_list_from_file(pframe_data_dir + "pframe_" + str(x) + ".txt")
        residual_data_list = get_list_from_file(residual_data_dir + "residual_" + str(x) + ".txt")
        correction_data_list = get_list_from_file(correction_data_dir + "correction_" + str(x) + ".txt")
        fade_data_list = get_list_from_file(fade_data_dir + "fade_" + str(x) + ".txt")

        frame_next = make_merge_image(context, f1, frame_previous,
                                      prediction_data_list, residual_data_list,
                                      correction_data_list, fade_data_list)

        if not ffmpeg_pipe_encoding:  # ffmpeg piping is disabled, traditional way

            # Write the image in the background for the preformance increase
            output_file_merged = workspace + "merged/merged_" + str(x + 1) + extension_type
            background_frame_write = AsyncFrameWrite(frame_next, output_file_merged)
            background_frame_write.start()

        else:  # ffmpeg piping is enabled

            # Write the image directly into ffmpeg pipe
            im = frame_next.get_pil_image()
            im.save(ffmpegpipe.stdin, format=pipe_format, quality=95)

        #######################################
        # Assign variables for next iteration #
        #######################################

        # Ensure the file is loaded for background_frame_load. If we're on the last frame, simply ignore this section
        # Because the frame_count + 1 does not exist.
        if not last_frame:
            while not background_frame_load.load_complete:
                wait_on_file(upscaled_dir + "output_" + get_lexicon_value(6, x + 1) + ".png")

            f1 = background_frame_load.loaded_image

        frame_previous = frame_next

    if ffmpeg_pipe_encoding:
        # Close stdin so ffmpeg sees end-of-stream, then wait for it to
        # finish encoding before muxing the audio back in.
        ffmpegpipe.stdin.close()
        ffmpegpipe.wait()

        # add the original file audio to the nosound file
        migrate_tracks(context, nosound_file, input_file, output_file)
    def run(self):
        """
        Generate a residual image for every frame from start_frame until
        frame_count, stopping early if the controller kills the session.
        """
        self.log.info("Run called.")

        for frame_number in range(self.start_frame, self.frame_count):

            # Bail out as soon as the controller reports a kill.
            if not self.context.controller.is_alive():
                break

            # The residual for frame N is computed against frame N + 1.
            next_frame = Frame()
            next_frame.load_from_string_controller(
                self.input_frames_dir + "frame" + str(frame_number + 1) +
                self.extension_type, self.context.controller)

            # Vector lists describing the frame-to-frame differences.
            residual_data = get_list_from_file_and_wait(
                self.residual_data_dir + "residual_" + str(frame_number) +
                ".txt", self.context.controller)

            prediction_data = get_list_from_file_and_wait(
                self.pframe_data_dir + "pframe_" + str(frame_number) + ".txt",
                self.context.controller)

            # Re-check after the potentially long blocking reads above.
            if not self.context.controller.is_alive():
                break

            # Destination paths for this iteration's outputs.
            debug_output_file = (self.debug_dir + "debug" +
                                 str(frame_number + 1) + self.extension_type)
            output_file = (self.residual_images_dir + "output_" +
                           get_lexicon_value(6, frame_number) + ".jpg")

            residual = self.make_residual_image(self.context, next_frame,
                                                residual_data,
                                                prediction_data)

            if residual.get_res() == (1, 1):
                # A 1x1 residual means the two frames are identical. We still
                # write one output per input, but since 1x1 images slow
                # waifu2x down, we 'cheat': write a 2x2 'fake' upscale
                # directly into the upscaled directory instead.
                residual = Frame()
                residual.create_new(2, 2)
                output_file = (self.residual_upscaled_dir + "output_" +
                               get_lexicon_value(6, frame_number) + ".png")
                residual.save_image(output_file)
            else:
                # Real residual: stage it in a temp file so waifu2x-vulkan
                # never reads a half-written image, then move it into place.
                residual.save_image_temp(output_file, self.temp_image)

            # Note: in the 'fake' branch no residual file exists, so wrappers
            # must not assume one is there to delete.
            if self.context.debug == 1:
                self.debug_image(self.block_size, next_frame, prediction_data,
                                 residual_data, debug_output_file)
Exemple #29
0
def merge_loop(context: Context, start_frame: int):
    """
    Reconstruct every full merged frame from start_frame up to frame_count - 1.

    For each frame x, the previous merged frame ('base') is combined with the
    upscaled residual image for x plus the vector data files (pframe, inversion,
    correction, fade) produced earlier in the pipeline, and the result is written
    as merged_{x+1}. The next upscaled residual is loaded asynchronously while
    the current frame is being merged.

    Args:
        context: Shared pipeline configuration (directories, frame count, etc.).
        start_frame: First frame to merge; > 1 when resuming a previous session.
    """
    # load variables from context
    upscaled_dir = context.upscaled_dir
    merged_dir = context.merged_dir
    inversion_data_dir = context.inversion_data_dir
    pframe_data_dir = context.pframe_data_dir
    correction_data_dir = context.correction_data_dir
    fade_data_dir = context.fade_data_dir
    frame_count = context.frame_count
    extension_type = context.extension_type
    logger = logging.getLogger(__name__)

    # Load the genesis image + the upscaled residual belonging to start_frame.
    base = Frame()
    base.load_from_string_wait(merged_dir + "merged_" + str(start_frame) +
                               extension_type)

    f1 = Frame()
    # BUGFIX: this was hardcoded to frame 1, which merged against the wrong
    # upscaled residual whenever we resumed with start_frame > 1.
    f1.load_from_string_wait(upscaled_dir + "output_" +
                             get_lexicon_value(6, start_frame) + ".png")

    # When merging every frame between start_frame and frame_count, there's no
    # x + 1 at frame_count - 1, so note when we reach the last frame and skip
    # the look-ahead load for it.
    last_frame = False
    for x in range(start_frame, frame_count):
        logger.info("Merging frame " + str(x))

        # Check if we're at the last image
        if x == frame_count - 1:
            last_frame = True

        # load the next image ahead of time.
        if not last_frame:
            background_frame_load = AsyncFrameRead(upscaled_dir + "output_" +
                                                   get_lexicon_value(6, x + 1) +
                                                   ".png")
            background_frame_load.start()

        # load vectors needed to piece the image back together
        prediction_data_list = get_list_from_file(pframe_data_dir + "pframe_" +
                                                  str(x) + ".txt")
        difference_data_list = get_list_from_file(inversion_data_dir +
                                                  "inversion_" + str(x) +
                                                  ".txt")
        correction_data_list = get_list_from_file(correction_data_dir +
                                                  "correction_" + str(x) +
                                                  ".txt")
        fade_data_list = get_list_from_file(fade_data_dir + "fade_" + str(x) +
                                            ".txt")

        # Write to the same directory the genesis image was read from, using
        # the same merged_dir convention (previously built ad hoc from
        # workspace + "merged/").
        output_file = merged_dir + "merged_" + str(x + 1) + extension_type

        new_base = make_merge_image(context, f1, base, prediction_data_list,
                                    difference_data_list, correction_data_list,
                                    fade_data_list)

        # Write the image in the background for the performance increase
        background_frame_write = AsyncFrameWrite(new_base, output_file)
        background_frame_write.start()

        # Ensure the look-ahead frame finished loading before using it. On the
        # last frame there is nothing to wait for (frame_count + 1 never exists).
        if not last_frame:
            while not background_frame_load.load_complete:
                wait_on_file(upscaled_dir + "output_" +
                             get_lexicon_value(6, x + 1) + ".png")

            f1 = background_frame_load.loaded_image

        # Assign variables for the next iteration.
        base = new_base
Exemple #30
0
    def run(self):
        """
        Input:
            - Files made by residuals.py appearing in the /residual_images/ folder.

        Output:
            - Files upscaled in /residual_upscaled/

        Code Description:

        The current Dandere2x implementation requires files to be removed from the 'residual_images' folder
        during runtime. When waifu2x-ncnn-vulkan calls 'upscale folder', it will only upscale what's in the folder
        at that moment, and it'll re-upscale the images that it already upscaled in a previous iteration.

        Considering that residual_images produced by Dandere2x don't all exist during the initial
        Waifu2x call, we need to call the 'upscale folder' command multiple times. To prevent waifu2x from re-upscaling
        the same image twice, various work arounds are in place to allow Dandere2x and Waifu2x to work in real time.

        Briefly, 1) Create a list of names that will be upscaled by waifu2x,
                 2) Call waifu2x to upscale whatever images are in 'differences' folder
                 3) After waifu2x call is finished, delete whatever files were upscaled, and remove those names from list.
                   (this is to prevent Waifu2x from re-upscaling the same image again)
                 4) Repeat this process until all the names are removed.
        """

        logger = logging.getLogger(__name__)

        residual_images_dir = self.context.residual_images_dir
        residual_upscaled_dir = self.context.residual_upscaled_dir
        exec_command = copy.copy(self.waifu2x_vulkan_upscale_frame)

        # Open the process log here (before the chdir below) so a relative
        # log_dir still resolves against the original working directory.
        console_output = open(self.context.log_dir + "vulkan_upscale_frames.txt", "w")

        try:
            # Substitute the command-template placeholders with the concrete
            # input/output folders for this session.
            for position, token in enumerate(exec_command):
                if token == "[input_file]":
                    exec_command[position] = residual_images_dir

                if token == "[output_file]":
                    exec_command[position] = residual_upscaled_dir

            # we need to os.chdir to set the directory or else waifu2x-vulkan won't work.
            os.chdir(self.waifu2x_ncnn_vulkan_path)

            logger.info("waifu2x_vulkan session")
            logger.info(exec_command)

            # make a list of names that will eventually (past or future) be upscaled
            upscaled_names = [
                "output_" + get_lexicon_value(6, frame) + ".png"
                for frame in range(1, self.frame_count)
            ]

            fix_names_forever_thread = threading.Thread(target=self.fix_names_all)
            fix_names_forever_thread.start()

            # Remove from the list any images already upscaled by a previous
            # (resumed) session. Iterate a reversed copy so removal is safe.
            count_removed = 0
            for name in upscaled_names[::-1]:
                if os.path.isfile(self.residual_upscaled_dir + name):
                    upscaled_names.remove(name)
                    count_removed += 1

            if count_removed:
                logger.info("Already have " + str(count_removed) + " upscaled")

            # while there are pictures that have yet to be upscaled, keep calling the upscale command
            while upscaled_names:

                logger.info("Frames remaining before batch: ")
                logger.info(len(upscaled_names))

                console_output.write(str(exec_command))
                subprocess.call(exec_command, shell=False, stderr=console_output, stdout=console_output)

                # Drop every name waifu2x finished this batch, deleting the
                # source residual so it isn't re-upscaled next iteration.
                for name in upscaled_names[::-1]:
                    if os.path.exists(self.residual_upscaled_dir + name):

                        residual_file = self.residual_images_dir + name.replace(".png", ".jpg")

                        if os.path.exists(residual_file):
                            os.remove(residual_file)
                        else:
                            '''
                            In residuals.py we created fake 'upscaled' images by saving them to the 'residuals_upscaled', 
                            and never saved the residuals file. In that case, only remove the 'residuals_upscaled' 
                            since 'residuals' never existed. 
                            '''
                            pass

                        upscaled_names.remove(name)
        finally:
            # BUGFIX: previously the handle leaked if subprocess.call (or
            # anything else above) raised; always close it.
            console_output.close()