Beispiel #1
0
    def _pre_processing(self):
        """
        This MUST be the first thing `run` calls, or else dandere2x.py will not work!

        Description: This function is a series of instructions dandere2x MUST perform before the main threads
                     are able to be called, and serves as a preliminary "health checker" for dandere2x to diagnose
                     bugs before the main threads are called.
        """

        self.log.info("Beginning pre-processing stage.")
        self.log.info("Dandere2x will process your video in a way that attempts to remove ambiguities caused by"
                      " container formats.")

        # Start from a clean workspace; leftovers from a previous run could corrupt this session.
        force_delete_directory(self.context.workspace)

        try:
            # Probe the input with ffprobe so width / height / frame-rate are known before any threads start.
            self.context.load_video_settings_ffprobe(file=self.context.input_file)
        except FileNotFoundError as e:
            from sys import exit
            self.log.error("Caught FileNotFoundError. This is likeley caused by 'externals' missing a neccecary file.")
            self.log.error("Are you sure you hit the 'download externals' button?")
            exit(1)

        if not valid_input_resolution(self.context.width, self.context.height, self.context.block_size):
            """ 
            Dandere2x needs the width and height to be a share a common factor with the block size so append a video
            filter if needed to make the size conform. For example, 1921x1081 is not evenly divisalbe by 30, so we'd 
            need to resize the video in that scenario.
            """

            self.log.warning(
                "Input video needs to be resized to be compatible with block-size - this is expected behaviour.")
            append_video_resize_filter(self.context)

        create_directories(self.context.workspace, self.context.directories)
        self.set_file_logger(self.context.workspace + "log.txt")  # write to a log file in the workspace

        # Fail fast if the configured waifu2x backend can't actually upscale an image.
        self.waifu2x.verify_upscaling_works()

        """ 
        Re-encode the user input video. We do this because file container formats can be difficult to work with
        and can cause Dandere2x to not function properly (some videos resolutions are different, variable frame rate
        will cause video to have black spots, etc. 
        """
        workspace = self.context.workspace
        input_file = self.context.input_file
        unmigrated = workspace + "d2x_input_video_nonmigrated.mkv"
        pre_processed_video = self.context.pre_processed_video

        # have dandere2x load up the pre-processed video and re-assign video settings to use that instead
        re_encode_video(self.context, input_file, unmigrated, throw_exception=True)
        # Copy the original file's audio / subtitle tracks into the re-encoded video.
        migrate_tracks(self.context, unmigrated, input_file, pre_processed_video, copy_if_failed=True)
        os.remove(unmigrated)
        wait_on_file(pre_processed_video, controller=self.context.controller)
        self.context.load_pre_processed_video(file=pre_processed_video)
Beispiel #2
0
    def save_image_temp(self, out_location, temp_location):
        """
        Write the frame to ``temp_location`` first, then rename it to ``out_location``.

        Writing to a temporary path and renaming afterwards guarantees that no
        other part of dandere2x ever observes a partially-written image at
        ``out_location``.
        """
        self.save_image(temp_location)
        # Block until the bytes are fully flushed to disk before exposing the file.
        wait_on_file(temp_location)
        rename_file(temp_location, out_location)
Beispiel #3
0
    def join(self, timeout=None):
        """
        Block until every dandere2x worker thread has finished.

        Threads are joined one at a time; if this was a resume session the
        partial videos are concatenated, and on a normal (non-suspended)
        finish the audio tracks are migrated into the final output file.
        """
        # NOTE(review): `timeout` matches the Thread.join signature but is not
        # forwarded to any of the joins below - confirm whether that's intended.

        start = time.time()  # for printing out total runtime

        logging.info("dandere2x joined called")

        # due to a weird quirk, prevent dandere2x from being joined until nosound.mkv exists (at least).
        wait_on_file(self.context.nosound_file)

        logging.info("joining residual")
        self.residual_thread.join()

        if self.context.use_min_disk:
            logging.info("joining min disk demon")
            self.min_disk_demon.join()

        logging.info("joining merge")
        self.merge_thread.join()
        logging.info("joining waifu2x")
        self.waifu2x.join()
        logging.info("joining dandere2x")
        self.dandere2x_cpp_thread.join()
        logging.info("joining status")
        self.status_thread.join()
        logging.info("joining compress")
        self.compress_frames_thread.join()

        self.context.logger.info("All threaded processes have finished")
        print("All threaded processes have been finished")

        if self.resume_session:
            print("Session is a resume session, concatenating two videos")
            logging.info(
                "Session is a resume session, concatenating two videos")
            file_to_be_concat = self.context.workspace + "file_to_be_concat.mp4"

            # Stitch the previous session's video together with this session's video.
            rename_file(self.context.nosound_file, file_to_be_concat)
            concat_two_videos(
                self.context,
                self.context.config_yaml['resume_settings']['nosound_file'],
                file_to_be_concat, self.context.nosound_file)

        # if this became a suspended dandere2x session, kill it.
        if not self.alive:
            logging.info("Invoking suspend exit conditions")
            self.__suspend_exit_conditions()

        elif self.alive:
            logging.info("Migrating tracks")
            migrate_tracks(self.context, self.context.nosound_file,
                           self.context.sound_file, self.context.output_file)

        print("Total runtime : ", time.time() - start)
Beispiel #4
0
    def save_image_quality(self, out_location, quality_per):
        """
        Save the current frame to ``out_location`` at the given JPEG quality.

        The image is first written to a sibling temp file and then renamed
        into place, so readers never see a half-written image.
        """
        extension = os.path.splitext(os.path.basename(out_location))[1]
        temp_file = out_location + "temp" + extension

        if 'jpg' in extension:
            pil_image = Image.fromarray(self.frame.astype(np.uint8))
            pil_image.save(temp_file, format='JPEG', subsampling=0, quality=quality_per)
        else:
            # Non-JPEG extensions ignore the quality setting entirely.
            misc.imsave(temp_file, self.frame)

        wait_on_file(temp_file)
        rename_file(temp_file, out_location)
    def run(self):
        """
        Upscale each residual image produced by dandere2x, in frame order.

        For every frame, wait for the residual jpg to appear on disk (it is
        produced asynchronously by another thread), then hand it to waifu2x
        to upscale into the corresponding png.
        """
        # Fixed: the previous revision created an unused `logger` and an
        # unused `exec` local (shadowing the `exec` builtin); both removed.
        differences_dir = self.context.differences_dir
        upscaled_dir = self.context.upscaled_dir

        for x in range(1, self.frame_count):
            # get_lexicon_value zero-pads the frame number (e.g. output_000001).
            frame_id = get_lexicon_value(6, x)
            input_image = differences_dir + "output_" + frame_id + ".jpg"
            output_image = upscaled_dir + "output_" + frame_id + ".png"

            wait_on_file(input_image)
            self.upscale_file(input_image, output_image)
    def run(self):
        """
        Upscale every image that will *eventually* appear in the residuals_dir folder by waifu2x.

        For each frame, block until the residual jpg exists on disk, then
        upscale it into the corresponding png in the upscaled directory.
        """
        # Fixed: the previous revision created an unused `logger` and an
        # unused `exec` local (shadowing the `exec` builtin); both removed.
        differences_dir = self.context.residual_images_dir
        upscaled_dir = self.context.residual_upscaled_dir

        for x in range(1, self.frame_count):
            # get_lexicon_value zero-pads the frame number (e.g. output_000001).
            frame_id = get_lexicon_value(6, x)
            input_image = differences_dir + "output_" + frame_id + ".jpg"
            output_image = upscaled_dir + "output_" + frame_id + ".png"

            wait_on_file(input_image)
            self.upscale_file(input_image, output_image)
    def run(self) -> None:
        """
        Delete each residual jpg once its upscaled counterpart exists.

        This keeps disk usage low: as soon as the upscaled png for a residual
        has been produced, the residual jpg is no longer needed. Exits early
        if the session controller is no longer alive.
        """
        # Idiom fix: iterate the names directly instead of indexing via
        # range(len(...)); the dead `else: pass` branch was also removed.
        for name in self.list_of_names:
            residual_file = self.context.residual_images_dir + name.replace(
                ".png", ".jpg")
            residual_upscaled_file = self.context.residual_upscaled_dir + name.replace(
                ".jpg", ".png")

            wait_on_file(residual_upscaled_file, self.context.controller)
            if not self.context.controller.is_alive():
                # Session cancelled - stop deleting immediately.
                return

            # The residual may already have been cleaned up elsewhere.
            if os.path.exists(residual_file):
                os.remove(residual_file)
Beispiel #8
0
    def save_image(self, out_location):
        """
        Save the frame to ``out_location``, choosing the format from its extension.

        The image is written to a temp file beside the target and then renamed
        into place, so no reader ever sees a partially written file.
        """
        extension = os.path.splitext(os.path.basename(out_location))[1]
        temp_file = out_location + "temp" + extension
        pil_image = self.get_pil_image()

        if 'jpg' in extension:
            # Highest-quality JPEG with chroma subsampling disabled.
            pil_image.save(temp_file, format='JPEG', subsampling=0, quality=100)
        else:
            pil_image.save(temp_file, format='PNG')

        wait_on_file(temp_file)
        rename_file(temp_file, out_location)
Beispiel #9
0
    def save_image_quality(self, out_location, quality_per):
        """
        Save the frame as JPEG at the given quality ratio (100 best, 0 worst).

        Non-jpg extensions fall back to scipy's writer and ignore the quality
        setting. In both cases the file is written to a temp path and renamed
        into place so readers never observe a partial image.
        """
        extension = os.path.splitext(os.path.basename(out_location))[1]
        temp_file = out_location + "temp" + extension

        if 'jpg' in extension:
            pil_image = Image.fromarray(self.frame.astype(np.uint8))
            pil_image.save(temp_file, format='JPEG', subsampling=0, quality=quality_per)
        else:
            misc.imsave(temp_file, self.frame)

        wait_on_file(temp_file)
        rename_file(temp_file, out_location)
Beispiel #10
0
    def start(self):
        """
        Upscale every file in ``self.input_folder``, one dandere2x session at a time.

        For each input file, a per-file yaml config is derived from
        ``self.config_yaml`` (input/output/workspace paths), any stale
        workspace is deleted, and a Dandere2x session is run to completion
        before the next file begins.
        """
        # Collect just the basenames of every entry in the input folder.
        files_in_folder = [os.path.basename(file)
                           for file in glob.glob(os.path.join(self.input_folder, "*"))]

        for x, file_basename in enumerate(files_in_folder):
            # Fixed: copy.copy is shallow, so writing into the nested
            # usersettings dicts mutated the shared template across
            # iterations - deepcopy gives each iteration its own tree.
            iteration_yaml = copy.deepcopy(self.config_yaml)

            file_name = os.path.join(self.input_folder, file_basename)

            path, name = os.path.split(file_basename)
            # Note: split(".")[0] truncates at the FIRST dot (kept as-is for
            # backward compatibility with existing output names).
            name_only = name.split(".")[0]

            # Set the output name to be 'upscaled + original name'
            output_name = os.path.join(self.output_folder, "upscaled_" + name_only + ".mp4")

            # change the yaml to contain the data for this iteration of dandere2x
            iteration_yaml['dandere2x']['usersettings']['input_file'] = file_name
            iteration_yaml['dandere2x']['usersettings']['output_file'] = output_name
            iteration_yaml['dandere2x']['developer_settings']['workspace'] = self.workspace + str(x) + os.path.sep

            context = Context(iteration_yaml)

            # Delete the workspace if it already exists to prevent bugs
            if dir_exists(context.workspace):
                print("Deleted Folder")

                try:
                    shutil.rmtree(context.workspace)
                except PermissionError:
                    # A file in the workspace may still be held open (common
                    # on Windows); warn rather than crash the whole batch.
                    print("Trying to delete workspace via RM tree threw PermissionError - Dandere2x may not work.")

                # Poll until the directory is actually gone before re-creating it.
                while file_exists(context.workspace):
                    time.sleep(1)

            d2x = Dandere2x(context)
            d2x.start()

            # Don't start the next file until this session's video exists and
            # all of its worker threads have joined.
            wait_on_file(d2x.context.nosound_file)
            d2x.join()
Beispiel #11
0
    def save_image_quality(self, out_location, quality_per):
        """
        Save the frame as JPEG at the given quality ratio (100 best, 0 worst).

        Only jpg targets are supported; any other extension raises ValueError
        after logging an explanation.
        """
        extension = os.path.splitext(os.path.basename(out_location))[1]

        # Guard clause: everything that isn't jpg is currently unsupported.
        if 'jpg' not in extension:
            # todo, fix this
            self.logger.error(
                "Aka-katto has removed this customization you added - he's going to re-add it later."
            )
            self.logger.error('Sorry about that : \\')
            raise ValueError('See Console')

        temp_file = out_location + "temp" + extension
        frame_as_image = Image.fromarray(self.frame.astype(np.uint8))
        frame_as_image.save(temp_file,
                            format='JPEG',
                            subsampling=0,
                            quality=quality_per)
        # Write-then-rename keeps readers from ever seeing a partial file.
        wait_on_file(temp_file)
        rename_file(temp_file, out_location)
Beispiel #12
0
    def __remove_once_upscaled(self):
        """
        Delete each residual jpg once its upscaled png exists on disk.

        Walks frames start_frame..frame_count-1 in order, waiting for each
        upscaled image before removing the matching residual; bails out as
        soon as the session is no longer alive.
        """
        for frame_number in range(self.start_frame, self.frame_count):
            if not self.alive:
                return

            name = "output_" + get_lexicon_value(6, frame_number) + ".png"

            residual_file = self.residual_images_dir + name.replace(".png", ".jpg")
            residual_upscaled_file = self.residual_upscaled_dir + name

            wait_on_file(residual_upscaled_file, self.cancel_token)

            # The residual may already have been cleaned up by another path.
            if os.path.exists(residual_file):
                os.remove(residual_file)
Beispiel #13
0
    def run(self):
        """
        Reconstruct every merged frame and stream it to the ffmpeg pipe.

        Starting from the "genesis" merged frame, each frame x is rebuilt from
        the previous merged frame plus the upscaled residual for x, guided by
        the vector lists (pframe / residual / correction / fade) read from
        disk. Every finished frame is piped to ffmpeg, and optionally also
        written to disk when frame preservation is enabled.

        Fixes: the genesis frame was previously loaded twice back-to-back
        (now loaded once), and `background_frame_load.loaded_image` was read
        unconditionally at the end of the loop, which is a NameError when the
        loop's only iteration is the last frame (now guarded).
        """
        self.log.info("Started")
        self.pipe.start()

        # Load and pipe the 'first' image before we start the for loop procedure, since all the other images will
        # inductively build off this first frame.
        frame_previous = Frame()
        frame_previous.load_from_string_controller(
            self.merged_dir + "merged_" + str(self.start_frame) +
            self.extension_type, self.context.controller)
        self.pipe.save(frame_previous)

        current_upscaled_residuals = Frame()
        current_upscaled_residuals.load_from_string_controller(
            self.upscaled_dir + "output_" +
            get_lexicon_value(6, self.start_frame) + ".png",
            self.context.controller)

        last_frame = False
        for x in range(self.start_frame, self.frame_count):
            ########################################
            # Pre-loop logic checks and conditions #
            ########################################

            # Check if we're at the last image, which affects the behaviour of the loop.
            if x == self.frame_count - 1:
                last_frame = True

            # Pre-load the next iteration of the loop image ahead of time, if we're not on the last frame.
            if not last_frame:
                """
                By asynchronously loading frames ahead of time, this provides a small but meaningful
                boost in performance when spanned over N frames. There's some code over head but
                it's well worth it.
                """
                background_frame_load = AsyncFrameRead(
                    self.upscaled_dir + "output_" +
                    get_lexicon_value(6, x + 1) + ".png",
                    self.context.controller)
                background_frame_load.start()

            ######################
            # Core Logic of Loop #
            ######################

            # Load the needed vectors to create the merged image.
            prediction_data_list = get_list_from_file_and_wait(
                self.pframe_data_dir + "pframe_" + str(x) + ".txt",
                self.context.controller)
            residual_data_list = get_list_from_file_and_wait(
                self.residual_data_dir + "residual_" + str(x) + ".txt",
                self.context.controller)
            correction_data_list = get_list_from_file_and_wait(
                self.correction_data_dir + "correction_" + str(x) + ".txt",
                self.context.controller)
            fade_data_list = get_list_from_file_and_wait(
                self.fade_data_dir + "fade_" + str(x) + ".txt",
                self.context.controller)

            if not self.context.controller.is_alive():
                self.log.info(" Merge thread killed at frame %s " % str(x))
                break

            # Create the actual image itself.
            current_frame = self.make_merge_image(
                self.context, current_upscaled_residuals, frame_previous,
                prediction_data_list, residual_data_list, correction_data_list,
                fade_data_list)

            ###############
            # Saving Area #
            ###############
            # Directly write the image to the ffmpeg pipe line.
            self.pipe.save(current_frame)

            # Manually write the image if we're preserving frames (this is for enthusiasts / debugging).
            if self.preserve_frames:
                output_file = self.workspace + "merged/merged_" + str(
                    x + 1) + self.extension_type
                background_frame_write = AsyncFrameWrite(
                    current_frame, output_file)
                background_frame_write.start()

            #######################################
            # Assign variables for next iteration #
            #######################################
            if not last_frame:
                # We need to wait until the next upscaled image exists before we move on.
                while not background_frame_load.load_complete:
                    wait_on_file(
                        self.upscaled_dir + "output_" +
                        get_lexicon_value(6, x + 1) + ".png",
                        self.context.controller)
                # Only pick up the asynchronously-loaded residual when one was
                # actually requested this iteration (i.e. not the last frame).
                current_upscaled_residuals = background_frame_load.loaded_image
            """
            Now that we're all done with the current frame, the current `current_frame` is now the frame_previous
            (with respect to the next iteration). We could obviously manually load frame_previous = Frame(n-1) each
            time, but this is an optimization that makes a substantial difference over N frames.
            """
            frame_previous = current_frame
            self.context.controller.update_frame_count(x)

        self.pipe.kill()
Beispiel #14
0
 def save_image_temp(self, out_location, temp_location):
     """
     Save the image to temp_location, then rename it to out_location.

     Writing to a temporary file first prevents other parts of dandere2x
     from reading a partially-written image at out_location.
     """
     self.save_image(temp_location)
     wait_on_file(temp_location)
     rename_file(temp_location, out_location)
Beispiel #15
0
def merge_loop(context: Context):
    """
    Call the 'make_merge_image' method for every image that needs to be upscaled.

    This method is sort of the driver for that, and has tasks needed to keep merging running smoothly.

    This method became a bit messy due to optimization-hunting, but the most important calls of the loop can be read in
    the 'Loop-iteration Core' area.

    Method Tasks:

        - Read / Write files that are used by merge asynchronously.
        - Load the text files containing the vectors needed for 'make_merge_image'

    """

    # load variables from context
    workspace = context.workspace
    upscaled_dir = context.residual_upscaled_dir
    merged_dir = context.merged_dir
    residual_data_dir = context.residual_data_dir
    pframe_data_dir = context.pframe_data_dir
    correction_data_dir = context.correction_data_dir
    fade_data_dir = context.fade_data_dir
    frame_count = context.frame_count
    extension_type = context.extension_type
    logger = logging.getLogger(__name__)

    # # # ffmpeg piping stuff # # #

    ffmpeg_pipe_encoding = context.ffmpeg_pipe_encoding

    if ffmpeg_pipe_encoding:
        nosound_file = context.nosound_file
        frame_rate = str(context.frame_rate)
        input_file = context.input_file
        output_file = context.output_file
        ffmpeg_dir = context.ffmpeg_dir
        ffmpeg_pipe_encoding_type = context.ffmpeg_pipe_encoding_type

        if ffmpeg_pipe_encoding_type in ["jpeg", "jpg"]:
            vcodec = "mjpeg"
            pipe_format = "JPEG"

        elif ffmpeg_pipe_encoding_type == "png":
            vcodec = "png"
            pipe_format = "PNG"

        else:
            print("  Error: no valid ffmpeg_pipe_encoding_type set. Using jpeg as default")
            vcodec = "mjpeg"
            pipe_format = "JPEG"

        print("\n    WARNING: EXPERIMENTAL FFMPEG PIPING IS ENABLED\n")

        # Spawn ffmpeg reading raw encoded images from stdin and encoding them
        # into the (video-only) nosound file at the input's frame rate.
        # NOTE(review): both '-qscale' and '-crf' are passed to libx264 here -
        # confirm which one is actually intended to control quality.
        ffmpegpipe = subprocess.Popen([ffmpeg_dir, "-loglevel", "panic", '-y', '-f',
                                       'image2pipe', '-vcodec', vcodec, '-r', frame_rate,
                                       '-i', '-', '-vcodec', 'libx264', '-preset', 'medium',
                                       '-qscale', '5', '-crf', '17',
                                       '-vf', ' pp=hb/vb/dr/fq|32, deband=range=22:blur=false',
                                       '-r', frame_rate, nosound_file],
                                      stdin=subprocess.PIPE)

        # pipe the first merged image as it will not be done afterwards
        wait_on_file(merged_dir + "merged_" + str(1) + extension_type)
        im = Image.open(merged_dir + "merged_" + str(1) + extension_type)

        # best jpeg quality since we won't be saving up disk space
        # NOTE(review): this first frame uses quality=100 while frames in the
        # loop below use quality=95 - confirm the mismatch is intentional.
        im.save(ffmpegpipe.stdin, format=pipe_format, quality=100)

    # # #  # # #  # # #  # # #

    # Load the genesis image + the first upscaled image.
    frame_previous = Frame()
    frame_previous.load_from_string_wait(merged_dir + "merged_" + str(1) + extension_type)

    f1 = Frame()
    f1.load_from_string_wait(upscaled_dir + "output_" + get_lexicon_value(6, 1) + ".png")

    # When upscaling every frame between start_frame to frame_count, there's obviously no x + 1 at frame_count - 1 .
    # So just make a small note not to load that image. Pretty much load images concurrently until we get to x - 1
    last_frame = False
    for x in range(1, frame_count):
        ###################################
        # Loop-iteration pre-requirements #
        ###################################

        # Check if we're at the last image
        if x == frame_count - 1:
            last_frame = True

        # load the next image ahead of time.
        if not last_frame:
            background_frame_load = AsyncFrameRead(upscaled_dir + "output_" + get_lexicon_value(6, x + 1) + ".png")
            background_frame_load.start()

        #######################
        # Loop-iteration Core #
        #######################

        logger.info("Upscaling frame " + str(x))

        # Vector lists written by the dandere2x C++ side for this frame.
        prediction_data_list = get_list_from_file(pframe_data_dir + "pframe_" + str(x) + ".txt")
        residual_data_list = get_list_from_file(residual_data_dir + "residual_" + str(x) + ".txt")
        correction_data_list = get_list_from_file(correction_data_dir + "correction_" + str(x) + ".txt")
        fade_data_list = get_list_from_file(fade_data_dir + "fade_" + str(x) + ".txt")

        frame_next = make_merge_image(context, f1, frame_previous,
                                      prediction_data_list, residual_data_list,
                                      correction_data_list, fade_data_list)

        if not ffmpeg_pipe_encoding:  # ffmpeg piping is disabled, traditional way

            # Write the image in the background for the preformance increase
            output_file_merged = workspace + "merged/merged_" + str(x + 1) + extension_type
            background_frame_write = AsyncFrameWrite(frame_next, output_file_merged)
            background_frame_write.start()

        else:  # ffmpeg piping is enabled

            # Write the image directly into ffmpeg pipe
            im = frame_next.get_pil_image()
            im.save(ffmpegpipe.stdin, format=pipe_format, quality=95)

        #######################################
        # Assign variables for next iteration #
        #######################################

        if not last_frame:
            while not background_frame_load.load_complete:
                wait_on_file(upscaled_dir + "output_" + get_lexicon_value(6, x + 1) + ".png")

            f1 = background_frame_load.loaded_image

        frame_previous = frame_next

        # Ensure the file is loaded for background_frame_load. If we're on the last frame, simply ignore this section
        # Because the frame_count + 1 does not exist.

    if ffmpeg_pipe_encoding:
        # Close stdin so ffmpeg sees EOF, then wait for it to finish encoding.
        ffmpegpipe.stdin.close()
        ffmpegpipe.wait()

        # add the original file audio to the nosound file
        migrate_tracks(context, nosound_file, input_file, output_file)
Beispiel #16
0
    def run(self):
        """
        Merge loop: rebuild each frame from the previous frame plus its
        upscaled residual, and stream every finished frame to the ffmpeg pipe.

        Vector lists (pframe / residual / correction / fade) are read from
        disk per frame; the next upscaled residual is pre-loaded in a
        background thread for performance.
        """

        self.pipe.start_pipe_thread()
        # Load the genesis image + the first upscaled image.
        frame_previous = Frame()
        frame_previous.load_from_string_wait(
            self.merged_dir + "merged_" + str(self.start_frame) +
            self.extension_type, self.cancel_token)

        self.pipe.save(frame_previous)

        f1 = Frame()
        f1.load_from_string_wait(
            self.upscaled_dir + "output_" +
            get_lexicon_value(6, self.start_frame) + ".png", self.cancel_token)

        last_frame = False
        for x in range(self.start_frame, self.frame_count):

            ###################################
            # Loop-iteration pre-requirements #
            ###################################

            # Check if we're at the last image, which affects the behaviour of the loop.
            if x == self.frame_count - 1:
                last_frame = True

            # Pre-load the next iteration of the loop image ahead of time, if we're not on the last frame.
            if not last_frame:
                background_frame_load = AsyncFrameRead(
                    self.upscaled_dir + "output_" +
                    get_lexicon_value(6, x + 1) + ".png", self.cancel_token)
                background_frame_load.start()

            #######################
            # Loop-iteration Core #
            #######################

            # Load the needed vectors to create the merged image.
            prediction_data_list = get_list_from_file_wait(
                self.pframe_data_dir + "pframe_" + str(x) + ".txt",
                self.cancel_token)
            residual_data_list = get_list_from_file_wait(
                self.residual_data_dir + "residual_" + str(x) + ".txt",
                self.cancel_token)
            correction_data_list = get_list_from_file_wait(
                self.correction_data_dir + "correction_" + str(x) + ".txt",
                self.cancel_token)
            fade_data_list = get_list_from_file_wait(
                self.fade_data_dir + "fade_" + str(x) + ".txt",
                self.cancel_token)

            # Bail out if the session was cancelled while we were reading vectors.
            if not self.alive:
                self.logger.info("Merge.py killed at frame " + str(x))
                return

            self.logger.info("Upscaling frame " + str(x))
            # Create the actual image itself.
            frame_next = self.make_merge_image(
                self.context, f1, frame_previous, prediction_data_list,
                residual_data_list, correction_data_list, fade_data_list)

            ###############
            # Saving Area #
            ###############

            # Directly write the image to the ffmpeg pipe line.
            self.pipe.save(frame_next)

            # Manually write the image if we're preserving frames (this is for enthusiasts / debugging).
            if self.preserve_frames:
                output_file = self.workspace + "merged/merged_" + str(
                    x + 1) + self.extension_type
                background_frame_write = AsyncFrameWrite(
                    frame_next, output_file)
                background_frame_write.start()

            #######################################
            # Assign variables for next iteration #
            #######################################

            # last_frame + 1 does not exist, so don't load.
            if not last_frame:
                # We need to wait until the next upscaled image exists before we move on.
                while not background_frame_load.load_complete:
                    wait_on_file(
                        self.upscaled_dir + "output_" +
                        get_lexicon_value(6, x + 1) + ".png",
                        self.cancel_token)

                f1 = background_frame_load.loaded_image

            frame_previous = frame_next

            # Signal to the rest of the dandere2x process we've finished upscaling frame 'x'.
            self.context.signal_merged_count = x

        self.pipe.wait_finish_stop_pipe()
Beispiel #17
0
def merge_loop(context: Context):
    """
    Call the 'make_merge_image' method for every image that needs to be upscaled.

    This method is sort of the driver for that, and has tasks needed to keep merging running smoothly.

    This method became a bit messy due to optimization-hunting, but the most important calls of the loop can be read in
    the 'Loop-iteration Core' area.

    Method Tasks:

        - Read / Write files that are used by merge asynchronously.
        - Load the text files containing the vectors needed for 'make_merge_image'

    """

    # load variables from context
    workspace = context.workspace
    upscaled_dir = context.residual_upscaled_dir
    merged_dir = context.merged_dir
    residual_data_dir = context.residual_data_dir
    pframe_data_dir = context.pframe_data_dir
    correction_data_dir = context.correction_data_dir
    fade_data_dir = context.fade_data_dir
    frame_count = context.frame_count
    extension_type = context.extension_type
    logger = logging.getLogger(__name__)

    # Load the genesis image + the first upscaled image.
    frame_previous = Frame()
    frame_previous.load_from_string_wait(merged_dir + "merged_" + str(1) +
                                         extension_type)

    f1 = Frame()
    f1.load_from_string_wait(upscaled_dir + "output_" +
                             get_lexicon_value(6, 1) + ".png")

    # When upscaling every frame between start_frame to frame_count, there's obviously no x + 1 at frame_count - 1 .
    # So just make a small note not to load that image. Pretty much load images concurrently until we get to x - 1
    last_frame = False
    for x in range(1, frame_count):
        ###################################
        # Loop-iteration pre-requirements #
        ###################################

        # Check if we're at the last image
        if x == frame_count - 1:
            last_frame = True

        # load the next image ahead of time.
        if not last_frame:
            background_frame_load = AsyncFrameRead(upscaled_dir + "output_" +
                                                   get_lexicon_value(6, x +
                                                                     1) +
                                                   ".png")
            background_frame_load.start()

        #######################
        # Loop-iteration Core #
        #######################

        logger.info("Upscaling frame " + str(x))

        # Vector lists written by the dandere2x C++ side for this frame.
        prediction_data_list = get_list_from_file(pframe_data_dir + "pframe_" +
                                                  str(x) + ".txt")
        residual_data_list = get_list_from_file(residual_data_dir +
                                                "residual_" + str(x) + ".txt")
        correction_data_list = get_list_from_file(correction_data_dir +
                                                  "correction_" + str(x) +
                                                  ".txt")
        fade_data_list = get_list_from_file(fade_data_dir + "fade_" + str(x) +
                                            ".txt")

        output_file = workspace + "merged/merged_" + str(x +
                                                         1) + extension_type

        frame_next = make_merge_image(context, f1, frame_previous,
                                      prediction_data_list, residual_data_list,
                                      correction_data_list, fade_data_list)

        # Write the image in the background for the preformance increase
        background_frame_write = AsyncFrameWrite(frame_next, output_file)
        background_frame_write.start()

        #######################################
        # Assign variables for next iteration #
        #######################################

        # Ensure the file is loaded for background_frame_load. If we're on the last frame, simply ignore this section
        # Because the frame_count + 1 does not exist.
        if not last_frame:
            while not background_frame_load.load_complete:
                wait_on_file(upscaled_dir + "output_" +
                             get_lexicon_value(6, x + 1) + ".png")

            f1 = background_frame_load.loaded_image

        frame_previous = frame_next
def run_realtime_encoding(context: Context, output_file: str):
    """
    Encode the merged frames into small per-segment videos while dandere2x is
    still producing frames, then concatenate the segments and migrate the
    audio tracks from the original input into the final output file.

    Args:
        context: Dandere2x Context holding workspace paths and encode settings.
        output_file: Path of the final encoded video to produce.
    """
    logger = context.logger
    logger.info("Real time encoding process started")

    # Load context
    workspace = context.workspace
    frames_per_video = int(context.frame_rate *
                           context.realtime_encoding_seconds_per_video)
    frame_count = int(context.frame_count)
    realtime_encoding_delete_files = context.realtime_encoding_delete_files
    extension_type = context.extension_type
    input_file = context.input_file

    # directories
    merged_files_prefix = context.merged_dir + "merged_"
    upscaled_files_prefix = context.upscaled_dir + "output_"
    compressed_files_prefix = context.compressed_static_dir + "compressed_"
    input_frames_prefix = context.input_frames_dir + "frame"

    # Text file listing every encoded segment, later fed to ffmpeg's concat
    # demuxer. Opened exactly once (the original re-opened it on every loop
    # iteration, leaking file handles, and crashed with NameError when the
    # loop body never ran) and in append mode so entries written by a
    # previous, interrupted session survive a restart.
    with open(workspace + "encoded" + os.path.sep + "list.txt",
              'a+') as text_file:

        # Create one encoded video every frames_per_video frames.
        for x in range(0, int(frame_count / frames_per_video)):
            encoded_vid = (workspace + "encoded" + os.path.sep + "encoded_" +
                           str(x) + ".mkv")

            # Allow resuming: skip segments already encoded by a prior run.
            if file_exists(encoded_vid):
                logger.info(encoded_vid + " already exists: skipping iteration")
                continue

            # Block until the first and last merged frame of this segment
            # have been produced by the merge thread.
            wait_on_file(merged_files_prefix + str(x * frames_per_video + 1) +
                         extension_type)
            wait_on_file(merged_files_prefix +
                         str(x * frames_per_video + frames_per_video) +
                         extension_type)

            # create a video for frames in this section
            create_video_from_specific_frames(context, merged_files_prefix,
                                              encoded_vid,
                                              x * frames_per_video + 1,
                                              frames_per_video)

            # ensure ffmpeg video exists before deleting files
            wait_on_file(encoded_vid)

            # write to text file video for ffmpeg to concat vids with
            text_file.write("file " + "'" + encoded_vid + "'" + "\n")

            # Reclaim disk space for the frames that are now safely encoded.
            if realtime_encoding_delete_files:
                _delete_encoded_segment_files(context, x, frames_per_video,
                                              frame_count, extension_type,
                                              merged_files_prefix,
                                              compressed_files_prefix,
                                              input_frames_prefix,
                                              upscaled_files_prefix)

        # Because we divided the video into int(frame_count / frames_per_video)
        # segments and that division truncates, there may be leftover frames
        # at the tail of the video that still need to be encoded separately.
        if frame_count - int(
                frame_count / frames_per_video) * frames_per_video > 0:
            logger.info("Encoding leftover frames")  # replaces stray debug print
            x = int(frame_count / frames_per_video)
            encoded_vid = (workspace + "encoded" + os.path.sep + "encoded_" +
                           str(x) + ".mkv")

            wait_on_file(merged_files_prefix + str(x * frames_per_video + 1) +
                         extension_type)
            # NOTE(review): this waits on frame
            # (frame_count - x*frames_per_video + frames_per_video) rather
            # than on frame_count itself, which looks off by design or by
            # accident. Preserved as-is — confirm intent upstream.
            wait_on_file(merged_files_prefix +
                         str(frame_count - x * frames_per_video +
                             frames_per_video) + extension_type)

            # create a video for frames in this section
            create_video_from_specific_frames(context, merged_files_prefix,
                                              encoded_vid,
                                              x * frames_per_video + 1,
                                              frames_per_video)

            # ensure ffmpeg video exists before deleting files
            wait_on_file(encoded_vid)

            # write to text file video for ffmpeg to concat vids with
            text_file.write("file " + "'" + encoded_vid + "'" + "\n")

    # Stitch the per-segment videos together, then copy the audio (and any
    # other) tracks from the original input into the final output file.
    concat_encoded_vids(context, workspace + "nosound.mkv")
    migrate_tracks(context, workspace + "nosound.mkv", input_file, output_file)


def _delete_encoded_segment_files(context, x, frames_per_video, frame_count,
                                  extension_type, merged_files_prefix,
                                  compressed_files_prefix, input_frames_prefix,
                                  upscaled_files_prefix):
    """Delete the already-encoded frames of segment ``x`` to free disk space."""
    segment_start = x * frames_per_video + 1
    segment_end = x * frames_per_video + frames_per_video + 1  # exclusive

    # Merged / compressed / raw input frames share plain-digit numbering.
    for prefix in (merged_files_prefix, compressed_files_prefix,
                   input_frames_prefix):
        delete_digit_files_in_range(context, prefix, extension_type, 0,
                                    segment_start, segment_end)

    # Upscaled files use zero-padded (lexicon) names and, on the final
    # segment, end on a different number than the merged files do.
    if x == int(frame_count / frames_per_video) - 1:
        wait_on_file(upscaled_files_prefix +
                     get_lexicon_value(6, x * frames_per_video + 1) + ".png")
        wait_on_file(upscaled_files_prefix + get_lexicon_value(
            6, x * frames_per_video + frames_per_video) + ".png")

        delete_digit_files_in_range(context, upscaled_files_prefix, ".png", 6,
                                    segment_start,
                                    x * frames_per_video + frames_per_video)
    else:
        wait_on_file(upscaled_files_prefix +
                     get_lexicon_value(6, x * frames_per_video + 1) + ".png")
        wait_on_file(upscaled_files_prefix + get_lexicon_value(
            6, x * frames_per_video + frames_per_video + 1) + ".png")

        delete_digit_files_in_range(context, upscaled_files_prefix, ".png", 6,
                                    segment_start, segment_end)
# Beispiel #19 (scraped example marker — kept as a comment so it is inert)
# 0
def merge_loop(context: Context, start_frame: int):
    """
    Reconstruct every merged frame from start_frame up to frame_count.

    Each output frame is produced by combining the matching upscaled residual
    image with the previously merged frame, guided by the vector data files
    (pframe / inversion / correction / fade) emitted for that frame. Reads of
    the next upscaled image and writes of the finished merge both happen on
    background threads to overlap I/O with computation.
    """
    # Pull everything we need out of the context object once, up front.
    workspace = context.workspace
    upscaled_dir = context.upscaled_dir
    merged_dir = context.merged_dir
    inversion_data_dir = context.inversion_data_dir
    pframe_data_dir = context.pframe_data_dir
    correction_data_dir = context.correction_data_dir
    fade_data_dir = context.fade_data_dir
    frame_count = context.frame_count
    extension_type = context.extension_type
    logger = logging.getLogger(__name__)

    # Load the genesis image plus the first upscaled image.
    # NOTE(review): the upscaled image index is hardcoded to 1 rather than
    # derived from start_frame — looks intentional but worth confirming.
    previous_frame = Frame()
    previous_frame.load_from_string_wait(
        f"{merged_dir}merged_{start_frame}{extension_type}")

    upscaled_frame = Frame()
    upscaled_frame.load_from_string_wait(
        f"{upscaled_dir}output_{get_lexicon_value(6, 1)}.png")

    # Frame (frame_count - 1) has no successor to prefetch, so remember when
    # we reach the final iteration and skip the read-ahead there.
    is_final_frame = False
    for frame_number in range(start_frame, frame_count):
        logger.info(f"Upscaling frame {frame_number}")

        if frame_number == frame_count - 1:
            is_final_frame = True

        # Kick off loading the next upscaled image on a background thread.
        if not is_final_frame:
            next_upscaled_path = (
                f"{upscaled_dir}output_"
                f"{get_lexicon_value(6, frame_number + 1)}.png")
            frame_loader = AsyncFrameRead(next_upscaled_path)
            frame_loader.start()

        # Vector lists needed to piece the image back together.
        prediction_data_list = get_list_from_file(
            f"{pframe_data_dir}pframe_{frame_number}.txt")
        difference_data_list = get_list_from_file(
            f"{inversion_data_dir}inversion_{frame_number}.txt")
        correction_data_list = get_list_from_file(
            f"{correction_data_dir}correction_{frame_number}.txt")
        fade_data_list = get_list_from_file(
            f"{fade_data_dir}fade_{frame_number}.txt")

        output_file = (
            f"{workspace}merged/merged_{frame_number + 1}{extension_type}")

        merged_frame = make_merge_image(context, upscaled_frame,
                                        previous_frame, prediction_data_list,
                                        difference_data_list,
                                        correction_data_list, fade_data_list)

        # Write the finished image in the background for a performance gain.
        frame_writer = AsyncFrameWrite(merged_frame, output_file)
        frame_writer.start()

        # Hand the prefetched image to the next iteration. On the final frame
        # there is nothing left to load, because frame_count + 1 never exists.
        if not is_final_frame:
            while not frame_loader.load_complete:
                wait_on_file(next_upscaled_path)

            upscaled_frame = frame_loader.loaded_image

        previous_frame = merged_frame