def next_frame(self):
        """
        Produce (and sanity-check) the next ffmpeg-extracted frame, then
        advance the internal frame counter.
        """

        frame_path = self.input_frames_dir + "frame" + str(self.count) + self.extension_type

        # ffmpeg extracts frames faster than we consume them on some hardware,
        # so even though we only instructed it to extract N frames, frame
        # N + k may already be on disk. If so, just advance the counter.
        if file_exists(frame_path):
            self.count += 1
            return

        # Wake the (suspended) extraction thread so it produces a new frame.
        self.pause_resume.resume()

        # A file can exist on disk while still being written, so wait until it
        # both exists and is non-empty before trusting it.
        while not file_exists(frame_path):
            time.sleep(.00001)

        while file_is_empty(frame_path):
            time.sleep(.00001)

        # Make sure the image is actually loadable before pausing the ffmpeg
        # thread, using the blocking load.
        frame = Frame()
        frame.load_from_string_wait(frame_path)

        # Put the extraction thread back to sleep.
        self.pause_resume.suspend()

        self.count += 1
    def __fix_waifu2x_converter_cpp_names(self):
        """
        Waifu2x-Converter-Cpp (legacy) outputs upscaled files with a suffix of
        the form "_[NS-L<noise>][x<scale>.000000].png"; dandere2x expects the
        plain "output_XXXXXX.png" name, so rename each file as it appears.
        I believe this is fixed in later versions, hence the TODO.
        """

        file_names = []
        for x in range(1, self.frame_count):
            file_names.append("output_" + get_lexicon_value(6, x))

        for file in file_names:
            dirty_name = self.residual_upscaled_dir + file + '_[NS-L' + str(
                self.noise_level) + '][x' + str(
                    self.scale_factor) + '.000000]' + ".png"
            clean_name = self.residual_upscaled_dir + file + ".png"

            # Block until either the renamed (clean) or raw-output (dirty)
            # file shows up, or the controller stops the session.
            wait_on_either_file_controller(clean_name, dirty_name,
                                           self.controller)

            if file_exists(clean_name):
                # Already renamed - nothing to do for this frame.
                pass

            elif file_exists(dirty_name):
                # The upscaler may still hold the file handle, making the
                # rename raise PermissionError; retry until it's released.
                while file_exists(dirty_name):
                    try:
                        rename_file(dirty_name, clean_name)
                    except PermissionError:
                        # Bug fix: back off briefly instead of busy-spinning
                        # a full CPU core while waiting for the handle.
                        time.sleep(.001)
# ---- Esempio n. 3 ----
    def fix_names_all(self):
        """
        Waifu2x-ncnn-vulkan will accept a file as "file.jpg" and output as "file.jpg.png".

        Unfortunately, dandere2x wouldn't recognize this, so this function renames each name to the correct naming
        convention. This function will iterate through every file needing to be upscaled by waifu2x-ncnn-vulkan,
        and change its name after it's done saving.

        Comments:

        - There's a really complicated try / except that exists because, even though a file may exist,
          the file handle may still be used by waifu2x-ncnn-vulkan (it hasn't released it yet). As a result,
          we need to try / except it until it's released, allowing us to rename it.
        """

        file_names = []
        for x in range(1, self.frame_count):
            file_names.append("output_" + get_lexicon_value(6, x))

        for file in file_names:
            dirty_name = self.residual_upscaled_dir + file + ".jpg.png"
            clean_name = self.residual_upscaled_dir + file + ".png"

            # Block until either the renamed or raw-output file appears.
            wait_on_either_file(clean_name, dirty_name)

            if file_exists(clean_name):
                # Already renamed - nothing to do for this frame.
                pass

            elif file_exists(dirty_name):
                while file_exists(dirty_name):
                    try:
                        rename_file(dirty_name, clean_name)
                    except PermissionError:
                        # Bug fix: back off briefly instead of busy-spinning
                        # a CPU core while waifu2x holds the file handle.
                        time.sleep(.001)
# ---- Esempio n. 4 ----
    def run(self):
        """
        Upscale every image that will *eventually* appear in the residuals_dir
        folder by waifu2x.
        """
        # Bug fix: removed the unused local `exec` (which shadowed the builtin
        # of the same name) and the unused module logger.
        differences_dir = self.context.residual_images_dir
        upscaled_dir = self.context.residual_upscaled_dir

        for x in range(1, self.frame_count):
            # Build both file names once per frame instead of re-deriving the
            # lexicon value for every check below.
            difference_file = differences_dir + "output_" + get_lexicon_value(6, x) + ".jpg"
            upscaled_file = upscaled_dir + "output_" + get_lexicon_value(6, x) + ".png"

            # Block until the residual appears (needs upscaling) or the
            # upscaled output appears (already done, e.g. a resumed session).
            wait_on_either_file(difference_file, upscaled_file)

            if file_exists(difference_file) and not file_exists(upscaled_file):
                self.upscale_file(difference_file, upscaled_file)

            elif not file_exists(difference_file) and file_exists(upscaled_file):
                # Already upscaled on a previous run - skip this frame.
                continue
# ---- Esempio n. 5 ----
    def verify_upscaling_works(self) -> None:
        """
        Verify the upscaler works by upscaling a very small frame, and throws a
        descriptive error if it doesn't.
        """
        test_file = self.context.workspace + "test_frame.jpg"
        test_file_upscaled = self.context.workspace + "test_frame_upscaled.jpg"

        # Build a tiny 2x2 throw-away image to push through the upscaler.
        sample = Frame()
        sample.create_new(2, 2)
        sample.save_image(test_file)

        self.upscale_file(test_file, test_file_upscaled)

        if file_exists(test_file_upscaled):
            # Success - remove the throw-away files and return.
            os.remove(test_file)
            os.remove(test_file_upscaled)
            return

        print(
            "Your computer could not upscale a test image, which is required for dandere2x to work."
        )
        print(
            "This may be a hardware issue or a software issue - verify your computer is capable of upscaling "
            "images using the selected upscaler.")

        raise Exception("Your computer could not upscale the test file.")
# ---- Esempio n. 6 ----
    def __upscale_first_frame(self):
        """The first frame of any dandere2x session needs to be upscaled fully, and this is done as it's own
        process. Ensuring the first frame can get upscaled also provides a source of error checking for the user."""

        input_frame = (self.context.input_frames_dir + "frame" +
                       str(self.first_frame) + self.context.extension_type)
        merged_frame = (self.context.merged_dir + "merged_" +
                        str(self.first_frame) + self.context.extension_type)

        # measure the time to upscale a single frame for printing purposes
        one_frame_time = time.time()
        self.waifu2x.upscale_file(input_file=input_frame,
                                  output_file=merged_frame)

        # Ensure the first file was able to get upscaled. We literally cannot
        # continue if it doesn't.
        if not file_exists(merged_frame):
            print(
                "Could not upscale first file.. check logs file to see what's wrong"
            )
            logging.info(
                "Could not upscale first file.. check logs file to see what's wrong"
            )
            logging.info("Exiting Dandere2x...")
            sys.exit(1)

        print("\n Time to upscale an uncompressed frame: " +
              str(round(time.time() - one_frame_time, 2)))
# ---- Esempio n. 7 ----
def cli_start(args):
    """
    Start Dandere2x using command line

    :param args: args loaded from load_parser()
    :return: none
    """

    # get config based on OS
    configfile = "dandere2x_%s.yaml" % get_operating_system()

    # load yaml
    with open(configfile, "r") as read_file:
        config = yaml.safe_load(read_file)

    # overwrite the yaml defaults with the user's command-line arguments
    config['dandere2x']['usersettings']['output_file'] = args.output_file
    config['dandere2x']['usersettings']['input_file'] = args.input_file

    config['dandere2x']['usersettings']['block_size'] = args.block_size
    config['dandere2x']['usersettings']['quality_minimum'] = args.image_quality
    config['dandere2x']['usersettings']['waifu2x_type'] = args.waifu2x_type
    config['dandere2x']['usersettings']['scale_factor'] = args.scale_factor
    config['dandere2x']['usersettings']['denoise_level'] = args.noise_level

    print("arg input file: " + args.input_file)
    if os.path.isdir(args.input_file):
        # Bug fix: this branch runs when the input IS a directory, but the
        # original message said "is not dir".
        print("input is a directory")
        if not os.path.isdir(args.output_file):
            print(
                "input is type 'directory' but output is not type 'directory'. Dandere2x exiting"
            )
            sys.exit(1)
        config['dandere2x']['usersettings']['input_folder'] = args.input_file
        config['dandere2x']['usersettings']['output_folder'] = args.output_file

        d2x = Dandere2xUpscaleFolder(config)
        d2x.start()

    else:
        context = Context(config)

        # Delete a pre-existing workspace so stale files can't corrupt the run.
        if dir_exists(context.workspace):
            print("Deleted Folder")

            # This is a recurring bug that seems to be popping up on other people's operating systems.
            # I'm unsure if this will fix it, but it could provide a solution for people who can't even get d2x to work.
            try:
                shutil.rmtree(context.workspace)
            except PermissionError:
                print(
                    "Trying to delete workspace via RM tree threw PermissionError - Dandere2x may not work."
                )

            # Wait for the (possibly asynchronous) deletion to land on disk.
            while file_exists(context.workspace):
                time.sleep(1)

        d2x = Dandere2x(context)
        d2x.start()
        d2x.join()
# ---- Esempio n. 8 ----
    def run(self):
        """
        Delete any stale workspace, run the wrapped dandere2x session, and
        wait for this thread to finish.
        """

        if dir_exists(self.dandere2x.context.workspace):
            print("Deleted Folder")

            # This is a recurring bug that seems to be popping up on other people's operating systems.
            # I'm unsure if this will fix it, but it could provide a solution for people who can't even get d2x to work.
            try:
                shutil.rmtree(self.dandere2x.context.workspace)
            except PermissionError:
                print(
                    "Trying to delete workspace via RM tree threw PermissionError - Dandere2x may not work."
                )

            # Wait for the deletion to actually land on disk.
            while file_exists(self.dandere2x.context.workspace):
                time.sleep(1)

        try:
            self.dandere2x.start()
        # Bug fix: the original bare "except:" also swallowed SystemExit and
        # KeyboardInterrupt; only genuine errors should count as a failure.
        except Exception:
            print("dandere2x failed to work correctly")
            sys.exit(1)

        self.join()
    def verify_upscaling_works(self) -> None:
        """
        Verify the upscaler works by upscaling a very small frame, and throws a
        descriptive error if it doesn't.
        """
        test_file = self.context.workspace + "test_frame.jpg"
        test_file_upscaled = self.context.workspace + "test_frame_upscaled.jpg"

        # Build a tiny 2x2 throw-away image to push through the upscaler.
        sample = Frame()
        sample.create_new(2, 2)
        sample.save_image(test_file)

        self.log.info(
            "Attempting to upscale file %s into %s to ensure waifu2x is working..."
            % (test_file, test_file_upscaled))

        self.upscale_file(test_file, test_file_upscaled)

        if file_exists(test_file_upscaled):
            # Success - log, remove the throw-away files, and return.
            self.log.info(
                "Upscaling *seems* successful. Deleting files and continuing forward. "
            )
            os.remove(test_file)
            os.remove(test_file_upscaled)
            return

        self.log.error(
            "Your computer could not upscale a test image, which is required for dandere2x to work."
        )
        self.log.error(
            "This may be a hardware issue or a software issue - verify your computer is capable of upscaling "
            "images using the selected upscaler.")

        raise Exception("Your computer could not upscale the test file.")
# ---- Esempio n. 10 ----
    def fix_names_all(self):
        """
        Rename waifu2x's "output_XXXXXX.jpg.png" outputs to the
        "output_XXXXXX.png" naming convention dandere2x expects, as each one
        finishes saving.
        """

        file_names = []
        for x in range(1, self.frame_count):
            file_names.append("output_" + get_lexicon_value(6, x))

        for file in file_names:
            dirty_name = self.upscaled_dir + file + ".jpg.png"
            clean_name = self.upscaled_dir + file + ".png"

            # Block until either the renamed or raw-output file appears.
            wait_on_either_file(clean_name, dirty_name)

            if file_exists(clean_name):
                # Already renamed - nothing to do for this frame.
                pass

            elif file_exists(dirty_name):
                while file_exists(dirty_name):
                    try:
                        rename_file(dirty_name, clean_name)
                    except PermissionError:
                        # Bug fix: back off briefly instead of busy-spinning
                        # a CPU core while waifu2x holds the file handle.
                        time.sleep(.001)
    def extract_frames_to(self, stop_frame: int):
        """
        Advance extraction until the frame numbered `stop_frame` exists on
        disk and is non-empty.
        """
        self.count = stop_frame
        frame_path = self.input_frames_dir + "frame" + str(self.count) + self.extension_type

        # Resume the thread in order to produce a new frame.
        self.pause_resume.resume()

        # Although the file may exist, there are niche conditions in which the
        # file on disk is not processable yet - wait until it both exists and
        # is non-empty.
        while not file_exists(frame_path):
            time.sleep(.00001)

        while file_is_empty(frame_path):
            time.sleep(.00001)
# ---- Esempio n. 12 ----
    def fix_names_all(self):
        """
        Rename waifu2x-converter-cpp's "output_XXXXXX_[NS-L...][x....].png"
        outputs to the plain "output_XXXXXX.png" naming convention dandere2x
        expects, as each one finishes saving.
        """

        file_names = []
        for x in range(1, self.frame_count):
            file_names.append("output_" + get_lexicon_value(6, x))

        for file in file_names:
            dirty_name = self.residual_upscaled_dir + file + '_[NS-L' + str(
                self.noise_level) + '][x' + str(
                    self.scale_factor) + '.000000]' + ".png"
            clean_name = self.residual_upscaled_dir + file + ".png"

            # Block until either the renamed or raw-output file appears.
            wait_on_either_file(clean_name, dirty_name)

            if file_exists(clean_name):
                # Already renamed - nothing to do for this frame.
                pass

            elif file_exists(dirty_name):
                while file_exists(dirty_name):
                    try:
                        rename_file(dirty_name, clean_name)
                    except PermissionError:
                        # Bug fix: back off briefly instead of busy-spinning
                        # a CPU core while waifu2x holds the file handle.
                        time.sleep(.001)
    def start(self):
        """
        Upscale every video in the input folder, giving each its own numbered
        workspace and writing "upscaled_<name>.mp4" into the output folder.
        """

        files_in_folder = []
        for file in glob.glob(os.path.join(self.input_folder, "*")):
            files_in_folder.append(os.path.basename(file))

        # Idiom fix: enumerate instead of range(len(...)); the original also
        # computed an always-empty `path` via os.path.split on a basename.
        for x, base_name in enumerate(files_in_folder):
            # Cycle through each file

            iteration_yaml = copy.copy(self.config_yaml)

            file_name = os.path.join(self.input_folder, base_name)

            # Everything before the first '.' is treated as the name.
            name_only = base_name.split(".")[0]

            # Set the output name to be 'upscaled + original name'
            output_name = os.path.join(self.output_folder,
                                       "upscaled_" + name_only + ".mp4")

            # change the yaml to contain the data for this iteration of dandere2x
            iteration_yaml['dandere2x']['usersettings'][
                'input_file'] = file_name
            iteration_yaml['dandere2x']['usersettings'][
                'output_file'] = output_name
            iteration_yaml['dandere2x']['developer_settings'][
                'workspace'] = self.workspace + str(x) + os.path.sep

            context = Context(iteration_yaml)

            # Delete the workspace if it already exists to prevent bugs
            if dir_exists(context.workspace):
                print("Deleted Folder")

                try:
                    shutil.rmtree(context.workspace)
                except PermissionError:
                    print(
                        "Trying to delete workspace via RM tree threw PermissionError - Dandere2x may not work."
                    )

                while file_exists(context.workspace):
                    time.sleep(1)

            d2x = Dandere2x(context)
            d2x.start()
            d2x.join()
# ---- Esempio n. 14 ----
    def __upscale_first_frame(self):
        """ The first frame of any dandere2x session needs to be upscaled fully, and this is done as it's own
        process. Ensuring the first frame can get upscaled also provides a source of error checking for the user. """

        input_image = (self.context.input_frames_dir + "frame" +
                       str(self.context.start_frame) + self.context.extension_type)
        output_image = (self.context.merged_dir + "merged_" +
                        str(self.context.start_frame) + self.context.extension_type)

        # measure the time to upscale a single frame for printing purposes
        one_frame_time = time.time()
        self.waifu2x.upscale_file(input_image=input_image,
                                  output_image=output_image)

        if not file_exists(output_image):
            """ Ensure the first file was able to get upscaled. We literally cannot continue if it doesn't. """
            self.log.error("Could not upscale first file. Dandere2x CANNOT continue.")
            self.log.error("Have you tried making sure your waifu2x works?")

            raise Exception("Could not upscale first file.. check logs file to see what's wrong")

        self.log.info("Time to upscale a single frame: %s " % str(round(time.time() - one_frame_time, 2)))
# ---- Esempio n. 15 ----
    def __remove_once_upscaled(self):
        """
        Delete each residual image once its upscaled counterpart exists, to
        keep disk usage down during a session.
        """

        # make a list of names that will eventually (past or future) be upscaled
        list_of_names = []
        for x in range(1, self.frame_count):
            list_of_names.append("output_" + get_lexicon_value(6, x) + ".png")

        # Idiom fix: iterate names directly instead of range(len(...)).
        for name in list_of_names:

            residual_file = self.residual_images_dir + name.replace(
                ".png", ".jpg")
            residual_upscaled_file = self.residual_upscaled_dir + name

            # Wait for the upscaled output before discarding its input.
            while not file_exists(residual_upscaled_file):
                time.sleep(.00001)

            # The residual may already be gone (e.g. never written for a
            # static frame), in which case there is nothing to delete.
            if os.path.exists(residual_file):
                os.remove(residual_file)
# ---- Esempio n. 16 ----
def run_realtime_encoding(context: Context, output_file: str):
    """
    Encode the merged frames into a series of small videos as they become
    available, then concatenate those videos and migrate the source's audio
    tracks into `output_file`.
    """
    logger = context.logger
    logger.info("Real time encoding process started")

    # Load context
    workspace = context.workspace
    frames_per_video = int(context.frame_rate *
                           context.realtime_encoding_seconds_per_video)
    frame_count = int(context.frame_count)
    realtime_encoding_delete_files = context.realtime_encoding_delete_files
    extension_type = context.extension_type
    input_file = context.input_file

    # directories
    merged_files_prefix = context.merged_dir + "merged_"
    upscaled_files_prefix = context.upscaled_dir + "output_"
    compressed_files_prefix = context.compressed_static_dir + "compressed_"
    input_frames_prefix = context.input_frames_dir + "frame"

    # Text file for ffmpeg to use to concat vids together.
    # Bug fix: this used to be re-opened inside the loop on every iteration,
    # leaking one file handle per chunk (and leaving `text_file` undefined at
    # the final close() whenever the loop body never ran). Open it once.
    text_file = open(workspace + "encoded" + os.path.sep + "list.txt", 'a+')

    # Create an encoded video for every frames_per_video-sized chunk.
    for x in range(0, int(frame_count / frames_per_video)):
        encoded_vid = workspace + "encoded" + os.path.sep + "encoded_" + str(
            x) + ".mkv"

        if file_exists(encoded_vid):
            # Resuming a previous session: this chunk (and its list.txt line,
            # since the list is opened in append mode) is already handled.
            logger.info(encoded_vid + " already exists: skipping iteration")
            continue

        # Wait for the first and last merged frame of this chunk to exist.
        wait_on_file(merged_files_prefix + str(x * frames_per_video + 1) +
                     extension_type)
        wait_on_file(merged_files_prefix +
                     str(x * frames_per_video + frames_per_video) +
                     extension_type)

        # create a video for frames in this section
        create_video_from_specific_frames(context, merged_files_prefix,
                                          encoded_vid,
                                          x * frames_per_video + 1,
                                          frames_per_video)

        # ensure ffmpeg video exists before deleting files
        wait_on_file(encoded_vid)

        # write to text file video for ffmpeg to concat vids with
        text_file.write("file " + "'" + encoded_vid + "'" + "\n")

        # put files to delete inside of here.
        if realtime_encoding_delete_files:
            delete_digit_files_in_range(
                context, merged_files_prefix, extension_type, 0,
                x * frames_per_video + 1,
                x * frames_per_video + frames_per_video + 1)

            delete_digit_files_in_range(
                context, compressed_files_prefix, extension_type, 0,
                x * frames_per_video + 1,
                x * frames_per_video + frames_per_video + 1)

            delete_digit_files_in_range(
                context, input_frames_prefix, extension_type, 0,
                x * frames_per_video + 1,
                x * frames_per_video + frames_per_video + 1)

            # upscaled files end on a different number than merged files.
            if x == int(frame_count / frames_per_video) - 1:

                wait_on_file(upscaled_files_prefix +
                             get_lexicon_value(6, x * frames_per_video + 1) +
                             ".png")
                wait_on_file(upscaled_files_prefix + get_lexicon_value(
                    6, x * frames_per_video + frames_per_video) + ".png")

                delete_digit_files_in_range(
                    context, upscaled_files_prefix, ".png", 6,
                    x * frames_per_video + 1,
                    x * frames_per_video + frames_per_video)

            else:

                wait_on_file(upscaled_files_prefix +
                             get_lexicon_value(6, x * frames_per_video + 1) +
                             ".png")
                wait_on_file(upscaled_files_prefix + get_lexicon_value(
                    6, x * frames_per_video + frames_per_video + 1) + ".png")

                delete_digit_files_in_range(
                    context, upscaled_files_prefix, ".png", 6,
                    x * frames_per_video + 1,
                    x * frames_per_video + frames_per_video + 1)

    # Because we divided the video into int(frame_count / frames_per_video) videos, and
    # int(frame_count / frames_per_video) != frame_count / frames_per_video, there's still frames that are left out.
    # We need to now encode those separately

    if frame_count - int(
            frame_count / frames_per_video) * frames_per_video > 0:
        print("got in here")
        x = int(frame_count / frames_per_video)
        encoded_vid = workspace + "encoded" + os.path.sep + "encoded_" + str(
            x) + ".mkv"

        wait_on_file(merged_files_prefix + str(x * frames_per_video + 1) +
                     extension_type)
        # NOTE(review): `frame_count - x * frames_per_video + frames_per_video`
        # looks like it should simply be `frame_count` (the last merged frame);
        # left as-is pending confirmation.
        wait_on_file(merged_files_prefix +
                     str(frame_count - x * frames_per_video +
                         frames_per_video) + extension_type)

        # create a video for frames in this section
        create_video_from_specific_frames(context, merged_files_prefix,
                                          encoded_vid,
                                          x * frames_per_video + 1,
                                          frames_per_video)

        # ensure ffmpeg video exists before deleting files
        wait_on_file(encoded_vid)

        # write to text file video for ffmpeg to concat vids with
        text_file.write("file " + "'" + encoded_vid + "'" + "\n")

    text_file.close()

    # Stitch the chunk videos together, then copy audio/subtitle tracks over.
    concat_encoded_vids(context, workspace + "nosound.mkv")
    migrate_tracks(context, workspace + "nosound.mkv", input_file, output_file)
# ---- Esempio n. 17 ----
    def run_concurrent(self):
        """
        Starts the dandere2x_python process at large.

        Inputs:
        - context

        Pre-Reqs:
        'This is all the stuff that needs to be done before dandere2x can officially start'

        - creates workspaces needed for dandere2x to work
        - edits the video if it's needed to be trimmed or needs resolution needs to be resized.
        - extracts all the frames in the video into it's own folder.
        - upscales the first frame using waifu2x and ensuring the genesis image upscaled correctly.

        Threading Area:

        - calls a series of threads for dandere2x_python to work
          (residuals, merging, waifu2x, dandere2xcpp, realtime-encoding)
        """

        # load context
        output_file = self.context.output_file

        ############
        # PRE REQS #
        ############

        # The first thing to do is create the dirs we will need during runtime
        create_directories(self.context.directories)
        self.context.set_logger()

        # If the user wishes to trim the video, trim the video, then rename the file_dir to point to the trimmed video
        if self.context.user_trim_video:
            trimed_video = os.path.join(self.context.workspace, "trimmed.mkv")
            trim_video(self.context, trimed_video)
            # Point the rest of the pipeline at the trimmed copy.
            self.context.input_file = trimed_video

        # Before we extract all the frames, we need to ensure the settings are valid. If not, resize the video
        # To make the settings valid somehow.
        if not valid_input_resolution(self.context.width, self.context.height, self.context.block_size):
            self.append_video_resize_filter()

        # Extract all the frames
        print("extracting frames from video... this might take a while..")
        extract_frames(self.context, self.context.input_file)
        self.context.update_frame_count()

        # Assign the waifu2x object to whatever waifu2x we're using
        waifu2x = self.get_waifu2x_class(self.context.waifu2x_type)

        # Upscale the first file (the genesis file is treated different in Dandere2x)
        one_frame_time = time.time()  # This timer prints out how long it takes to upscale one frame
        waifu2x.upscale_file(input_file=self.context.input_frames_dir + "frame1" + self.context.extension_type,
                             output_file=self.context.merged_dir + "merged_1" + self.context.extension_type)

        # Ensure the first file was able to get upscaled. We literally cannot continue if it doesn't.
        if not file_exists(self.context.merged_dir + "merged_1" + self.context.extension_type):
            print("Could not upscale first file.. check logs file to see what's wrong")
            logging.info("Could not upscale first file.. check logs file to see what's wrong")
            logging.info("Exiting Dandere2x...")
            # NOTE(review): sys.exit raises SystemExit; if run_concurrent is
            # ever executed on a non-main thread this only ends that thread -
            # confirm the intended behavior.
            sys.exit(1)

        print("\n Time to upscale an uncompressed frame: " + str(round(time.time() - one_frame_time, 2)))

        ####################
        #  THREADING AREA  #
        ####################

        # This is where Dandere2x's core functions start. Each core function is divided into a series of threads,
        # All with their own segregated tasks and goals. Dandere2x starts all the threads, and lets it go from there.
        compress_frames_thread = threading.Thread(target=compress_frames, args=(self.context,))
        dandere2xcpp_thread = Dandere2xCppWrapper(self.context)
        merge_thread = threading.Thread(target=merge_loop, args=(self.context,))
        residual_thread = threading.Thread(target=residual_loop, args=(self.context,))
        status_thread = threading.Thread(target=print_status, args=(self.context,))
        realtime_encode_thread = threading.Thread(target=run_realtime_encoding, args=(self.context, output_file))

        logging.info("starting new d2x process")
        # waifu2x runs as its own worker and is started before the others.
        waifu2x.start()

        merge_thread.start()
        residual_thread.start()
        dandere2xcpp_thread.start()
        status_thread.start()
        compress_frames_thread.start()

        # The realtime encoder is optional and only runs when enabled in config.
        if self.context.realtime_encoding_enabled:
            realtime_encode_thread.start()

        # Each join blocks until that worker has processed its share of frames;
        # all workers must finish before the run is considered complete.
        compress_frames_thread.join()
        merge_thread.join()
        dandere2xcpp_thread.join()
        residual_thread.join()
        waifu2x.join()
        status_thread.join()

        if self.context.realtime_encoding_enabled:
            realtime_encode_thread.join()

        self.context.logger.info("Threaded Processes Finished succcesfully")
# ---- Esempio n. 18 ----
    "Please verify that %s is your complete upscaled video, just has no audio"
    % nosound_file)
# NOTE(review): this is the tail of a manual track-migration script;
# `nosound_file`, `dandere2x`, `log`, and `pre_processed_file` are defined
# earlier in the file (outside this view).
time.sleep(0.1)
input("Press Enter to continue...")

# Re-use the no-sound video's container extension for the final output file.
output_extension = os.path.splitext(nosound_file)[1]
output_file = dandere2x.context.workspace + "outputfile" + output_extension
log.info("We will now begin to try to manually migrate the tracks... standby")
log.info("Output video will be at %s " % output_file)

# Copy the audio (and other) tracks from the pre-processed input onto the
# upscaled, silent video.
migrate_tracks(context=dandere2x.context,
               no_audio=nosound_file,
               file_dir=pre_processed_file,
               output_file=output_file)

if file_exists(output_file):
    log.info("It seems migration succeeded? Check %s to see if it finished." %
             output_file)
else:
    log.warning(
        "It seems the file is not there.. this is indicative of a migration failure somewhere"
    )
    log.warning(
        "You can try migrating yourself (above you should see an output called 'Migrate Command:' or something"
    )
    log.warning(
        "From ffmmpeg.py, and you can try changing the flags until it migrates correctly, but tbh beyond that"
    )
    log.warning("You may need to goto forums to answer this problem. ")

# Brief pause so the final log lines flush before the script exits.
time.sleep(0.1)