def _successful_completion(self):
    """Finalize a successful Dandere2x run.

    Stitches the current session's video onto a previous incomplete one
    (when resuming), migrates the audio/subtitle tracks into the output
    file, releases the log file handler, and optionally deletes the
    workspace.
    """
    ctx = self.context
    self.log.info("It seems Dandere2x has finished successfully. Starting the final steps to complete your video.")

    if ctx.resume_session:
        # A resume session leaves behind an `incomplete_video` (see yaml);
        # merge it with this session's video to make a complete video.
        self.log.info("This session is a resume session. Dandere2x will need to merge the two videos. ")
        concat_target = ctx.workspace + "file_to_be_concat.mp4"
        rename_file(ctx.nosound_file, concat_target)
        concat_two_videos(ctx, ctx.incomplete_video, concat_target, ctx.nosound_file)
        self.log.info("Merging the two videos is done. ")

    migrate_tracks(ctx, ctx.nosound_file, ctx.sound_file, ctx.output_file)

    # The workspace (and the log file inside it) may be deleted below, so
    # detach and close the file handler first.
    self.log.info("Release log file... ")
    self.fh.close()
    self.log.removeHandler(self.fh)
    del self.fh

    if ctx.delete_workspace_after:
        self.log.info("Dandere2x will now delete the workspace it used.")
        force_delete_directory(ctx.workspace)
def _pre_processing(self):
    """Run mandatory setup and health checks before the main threads start.

    This MUST be the first thing `run` calls, or else dandere2x.py will not
    work! It wipes and recreates the workspace, validates the input video,
    verifies waifu2x works, and re-encodes the input into a pre-processed
    video that the rest of the pipeline consumes.
    """
    self.log.info("Beginning pre-processing stage.")
    self.log.info("Dandere2x will process your video in a way that attempts to remove ambiguities caused by"
                  " container formats.")

    force_delete_directory(self.context.workspace)

    try:
        self.context.load_video_settings_ffprobe(file=self.context.input_file)
    except FileNotFoundError:
        # Fix: corrected typos in the user-facing error message
        # ("likeley" -> "likely", "neccecary" -> "necessary") and use
        # sys.exit instead of importing `exit` into the local namespace.
        import sys
        self.log.error("Caught FileNotFoundError. This is likely caused by 'externals' missing a necessary file.")
        self.log.error("Are you sure you hit the 'download externals' button?")
        sys.exit(1)

    if not valid_input_resolution(self.context.width, self.context.height, self.context.block_size):
        # Dandere2x needs the width and height to share a common factor with
        # the block size, so append a video filter if needed to make the size
        # conform. For example, 1921x1081 is not evenly divisible by 30, so
        # the video has to be resized in that scenario.
        self.log.warning(
            "Input video needs to be resized to be compatible with block-size - this is expected behaviour.")
        append_video_resize_filter(self.context)

    create_directories(self.context.workspace, self.context.directories)
    self.set_file_logger(self.context.workspace + "log.txt")  # write to a log file in the workspace
    self.waifu2x.verify_upscaling_works()

    # Re-encode the user input video. We do this because file container
    # formats can be difficult to work with and can cause Dandere2x to not
    # function properly (some video resolutions are different, variable
    # frame rate will cause video to have black spots, etc.).
    workspace = self.context.workspace
    input_file = self.context.input_file
    unmigrated = workspace + "d2x_input_video_nonmigrated.mkv"
    pre_processed_video = self.context.pre_processed_video

    # Re-encode, migrate the original tracks onto the re-encode, then drop
    # the intermediate file.
    re_encode_video(self.context, input_file, unmigrated, throw_exception=True)
    migrate_tracks(self.context, unmigrated, input_file, pre_processed_video, copy_if_failed=True)
    os.remove(unmigrated)

    # Have dandere2x load up the pre-processed video and re-assign the video
    # settings to use that instead.
    wait_on_file(pre_processed_video, controller=self.context.controller)
    self.context.load_pre_processed_video(file=pre_processed_video)
def join(self, timeout=None):
    """Block until every Dandere2x worker thread finishes, then run the
    post-join steps (resume-session concat, suspend exit or track migration).

    :param timeout: accepted for signature compatibility; not used.
    """
    start = time.time()  # for printing out total runtime
    logging.info("dandere2x joined called")

    # due to a weird quirk, prevent dandere2x from being joined until nosound.mkv exists (at least).
    wait_on_file(self.context.nosound_file)

    # Join every worker, in the same fixed order as always.
    workers = [("residual", self.residual_thread)]
    if self.context.use_min_disk:
        workers.append(("min disk demon", self.min_disk_demon))
    workers.extend([
        ("merge", self.merge_thread),
        ("waifu2x", self.waifu2x),
        ("dandere2x", self.dandere2x_cpp_thread),
        ("status", self.status_thread),
        ("compress", self.compress_frames_thread),
    ])
    for label, worker in workers:
        logging.info("joining " + label)
        worker.join()

    self.context.logger.info("All threaded processes have finished")
    print("All threaded processes have been finished")

    if self.resume_session:
        print("Session is a resume session, concatenating two videos")
        logging.info(
            "Session is a resume session, concatenating two videos")
        concat_target = self.context.workspace + "file_to_be_concat.mp4"
        rename_file(self.context.nosound_file, concat_target)
        concat_two_videos(self.context,
                          self.context.config_yaml['resume_settings']['nosound_file'],
                          concat_target,
                          self.context.nosound_file)

    # if this became a suspended dandere2x session, kill it.
    if not self.alive:
        logging.info("Invoking suspend exit conditions")
        self.__suspend_exit_conditions()
    elif self.alive:
        logging.info("Migrating tracks")
        migrate_tracks(self.context, self.context.nosound_file,
                       self.context.sound_file, self.context.output_file)

    print("Total runtime : ", time.time() - start)
def _successful_completion(self):
    """Finish a successful run: merge resume footage when needed, migrate
    the source tracks into the output file, and optionally remove the
    workspace."""
    ctx = self.context

    if ctx.resume_session:
        print("Session is a resume session, concatenating two videos")
        # Stitch the previously produced incomplete video onto this
        # session's video.
        concat_target = ctx.workspace + "file_to_be_concat.mp4"
        rename_file(ctx.nosound_file, concat_target)
        concat_two_videos(ctx, ctx.incomplete_video, concat_target, ctx.nosound_file)

    migrate_tracks(ctx, ctx.nosound_file, ctx.sound_file, ctx.output_file)

    if ctx.delete_workspace_after:
        force_delete_directory(ctx.workspace)
def _pre_processing(self):
    """Prepare the workspace and re-encode the input video.

    This MUST be the first thing `run` calls, or else dandere2x.py will not
    work!
    """
    ctx = self.context

    # Start from a clean workspace, then inspect the input video.
    force_delete_directory(ctx.workspace)
    ctx.load_video_settings(file=ctx.input_file)

    # Width and height need to share a common factor with the block size;
    # append a video filter when they do not, to make the size conform.
    if not valid_input_resolution(ctx.width, ctx.height, ctx.block_size):
        append_video_resize_filter(ctx)

    create_directories(ctx.workspace, ctx.directories)
    self.waifu2x.verify_upscaling_works()

    # Re-encode the user input video: container formats can be difficult to
    # work with and can cause Dandere2x to not function properly (odd
    # resolutions, variable frame rate causing black spots, etc.).
    raw_reencode = ctx.workspace + "d2x_input_video_nonmigrated.mkv"
    re_encode_video(ctx, ctx.input_file, raw_reencode, throw_exception=True)
    migrate_tracks(ctx, raw_reencode, ctx.input_file, ctx.pre_processed_video, copy_if_failed=True)
    os.remove(raw_reencode)

    # From here on, the pre-processed video (and its settings) is the input.
    wait_on_file_controller(ctx.pre_processed_video, controller=ctx.controller)
    ctx.load_video_settings(file=ctx.pre_processed_video)
def _delete_chunk_source_files(context, x, frames_per_video, frame_count,
                               merged_files_prefix, compressed_files_prefix,
                               input_frames_prefix, upscaled_files_prefix,
                               extension_type):
    """Delete the merged / compressed / input / upscaled frames consumed by chunk `x`."""
    delete_digit_files_in_range(context, merged_files_prefix, extension_type, 0,
                                x * frames_per_video + 1,
                                x * frames_per_video + frames_per_video + 1)
    delete_digit_files_in_range(context, compressed_files_prefix, extension_type, 0,
                                x * frames_per_video + 1,
                                x * frames_per_video + frames_per_video + 1)
    delete_digit_files_in_range(context, input_frames_prefix, extension_type, 0,
                                x * frames_per_video + 1,
                                x * frames_per_video + frames_per_video + 1)

    # upscaled files end on a different number than merged files.
    if x == int(frame_count / frames_per_video) - 1:
        wait_on_file(upscaled_files_prefix + get_lexicon_value(6, x * frames_per_video + 1) + ".png")
        wait_on_file(upscaled_files_prefix + get_lexicon_value(6, x * frames_per_video + frames_per_video) + ".png")
        delete_digit_files_in_range(context, upscaled_files_prefix, ".png", 6,
                                    x * frames_per_video + 1,
                                    x * frames_per_video + frames_per_video)
    else:
        wait_on_file(upscaled_files_prefix + get_lexicon_value(6, x * frames_per_video + 1) + ".png")
        wait_on_file(upscaled_files_prefix + get_lexicon_value(6, x * frames_per_video + frames_per_video + 1) + ".png")
        delete_digit_files_in_range(context, upscaled_files_prefix, ".png", 6,
                                    x * frames_per_video + 1,
                                    x * frames_per_video + frames_per_video + 1)


def run_realtime_encoding(context: Context, output_file: str):
    """Encode merged frames into small .mkv chunks as they become available,
    then concatenate the chunks and migrate the source audio into the output.

    Every `realtime_encoding_seconds_per_video` seconds worth of merged
    frames becomes one chunk; each chunk's path is appended to a list file
    that ffmpeg later uses to concatenate everything.

    :param context: runtime configuration / directory layout.
    :param output_file: path of the final video (with audio) to produce.
    """
    logger = context.logger
    logger.info("Real time encoding process started")

    # Load context
    workspace = context.workspace
    frames_per_video = int(context.frame_rate * context.realtime_encoding_seconds_per_video)
    frame_count = int(context.frame_count)
    realtime_encoding_delete_files = context.realtime_encoding_delete_files
    extension_type = context.extension_type
    input_file = context.input_file

    # directories
    merged_files_prefix = context.merged_dir + "merged_"
    upscaled_files_prefix = context.upscaled_dir + "output_"
    compressed_files_prefix = context.compressed_static_dir + "compressed_"
    input_frames_prefix = context.input_frames_dir + "frame"

    # Text file listing every encoded chunk, used by ffmpeg to concat them.
    # Fix: the original re-opened this file on every loop iteration (leaking
    # file handles) and raised NameError at close() when the loop body never
    # ran. Open it exactly once; the `with` block guarantees the close.
    with open(workspace + "encoded" + os.path.sep + "list.txt", 'a+') as text_file:

        # Create an encoded video every `frames_per_video` frames.
        for x in range(0, int(frame_count / frames_per_video)):
            encoded_vid = workspace + "encoded" + os.path.sep + "encoded_" + str(x) + ".mkv"

            if file_exists(encoded_vid):
                logger.info(encoded_vid + " already exists: skipping iteration")
                continue

            # Wait for the first and last merged frame of this chunk to exist.
            wait_on_file(merged_files_prefix + str(x * frames_per_video + 1) + extension_type)
            wait_on_file(merged_files_prefix + str(x * frames_per_video + frames_per_video) + extension_type)

            # create a video for frames in this section
            create_video_from_specific_frames(context, merged_files_prefix, encoded_vid,
                                              x * frames_per_video + 1, frames_per_video)

            # ensure ffmpeg video exists before deleting files
            wait_on_file(encoded_vid)

            # write to text file video for ffmpeg to concat vids with
            text_file.write("file " + "'" + encoded_vid + "'" + "\n")

            if realtime_encoding_delete_files:
                _delete_chunk_source_files(context, x, frames_per_video, frame_count,
                                           merged_files_prefix, compressed_files_prefix,
                                           input_frames_prefix, upscaled_files_prefix,
                                           extension_type)

        # Because we divided the video into int(frame_count / frames_per_video)
        # videos and that division truncates, frames may be left over. Encode
        # those separately. (Fix: removed a stray debug print here.)
        if frame_count - int(frame_count / frames_per_video) * frames_per_video > 0:
            x = int(frame_count / frames_per_video)
            encoded_vid = workspace + "encoded" + os.path.sep + "encoded_" + str(x) + ".mkv"

            wait_on_file(merged_files_prefix + str(x * frames_per_video + 1) + extension_type)
            # NOTE(review): this frame number looks wrong — the last merged
            # frame of the leftover section should simply be `frame_count`.
            # Preserved as-is pending confirmation against the merge pipeline.
            wait_on_file(merged_files_prefix + str(frame_count - x * frames_per_video + frames_per_video)
                         + extension_type)

            # create a video for frames in this section
            # NOTE(review): only `frame_count - x * frames_per_video` frames
            # remain here, yet a full `frames_per_video` is requested — verify.
            create_video_from_specific_frames(context, merged_files_prefix, encoded_vid,
                                              x * frames_per_video + 1, frames_per_video)

            # ensure ffmpeg video exists before deleting files
            wait_on_file(encoded_vid)

            # write to text file video for ffmpeg to concat vids with
            text_file.write("file " + "'" + encoded_vid + "'" + "\n")

    concat_encoded_vids(context, workspace + "nosound.mkv")
    migrate_tracks(context, workspace + "nosound.mkv", input_file, output_file)
# NOTE(review): this is the tail of a recovery routine whose definition starts
# outside this view — presumably a manual "rescue the upscaled video" tool that
# re-attaches audio to a finished no-sound file; confirm against the full file.
time.sleep(0.1)
print("-------------------------------------------------")
log.warning(
    "Please verify that %s is your complete upscaled video, just has no audio" % nosound_file)
time.sleep(0.1)
# Pause so the user can inspect the no-sound video before migration runs.
input("Press Enter to continue...")

# Build the output path next to the workspace, keeping the input's extension.
output_extension = os.path.splitext(nosound_file)[1]
output_file = dandere2x.context.workspace + "outputfile" + output_extension

log.info("We will now begin to try to manually migrate the tracks... standby")
log.info("Output video will be at %s " % output_file)

# Copy the audio/subtitle tracks from the pre-processed file onto the
# no-sound upscale, producing output_file.
migrate_tracks(context=dandere2x.context, no_audio=nosound_file,
               file_dir=pre_processed_file, output_file=output_file)

# migrate_tracks gives no return value we can check here, so success is
# inferred from the output file existing on disk.
if file_exists(output_file):
    log.info("It seems migration succeeded? Check %s to see if it finished." % output_file)
else:
    log.warning(
        "It seems the file is not there.. this is indicative of a migration failure somewhere"
    )
    log.warning(
        "You can try migrating yourself (above you should see an output called 'Migrate Command:' or something"
    )
    log.warning(
        "From ffmmpeg.py, and you can try changing the flags until it migrates correctly, but tbh beyond that"
    )
def merge_loop(context: Context):
    """
    Call the 'make_merge_image' method for every image that needs to be upscaled.

    This method is sort of the driver for that, and has tasks needed to keep
    merging running smoothly. This method became a bit messy due to
    optimization-hunting, but the most important calls of the loop can be read
    in the 'Loop-iteration Core' area.

    Method Tasks:
        - Read / Write files that are used by merge asynchronously.
        - Load the text files containing the vectors needed for 'make_merge_image'
    """

    # load variables from context
    workspace = context.workspace
    upscaled_dir = context.residual_upscaled_dir
    merged_dir = context.merged_dir
    residual_data_dir = context.residual_data_dir
    pframe_data_dir = context.pframe_data_dir
    correction_data_dir = context.correction_data_dir
    fade_data_dir = context.fade_data_dir
    frame_count = context.frame_count
    extension_type = context.extension_type
    logger = logging.getLogger(__name__)

    # # # ffmpeg piping stuff # # #

    ffmpeg_pipe_encoding = context.ffmpeg_pipe_encoding

    if ffmpeg_pipe_encoding:
        # When piping is on, merged frames are streamed straight into a
        # long-lived ffmpeg process instead of being written to disk.
        nosound_file = context.nosound_file
        frame_rate = str(context.frame_rate)
        input_file = context.input_file
        output_file = context.output_file
        ffmpeg_dir = context.ffmpeg_dir
        ffmpeg_pipe_encoding_type = context.ffmpeg_pipe_encoding_type

        # Pick the intermediate image codec for the pipe; jpeg is the fallback.
        if ffmpeg_pipe_encoding_type in ["jpeg", "jpg"]:
            vcodec = "mjpeg"
            pipe_format = "JPEG"
        elif ffmpeg_pipe_encoding_type == "png":
            vcodec = "png"
            pipe_format = "PNG"
        else:
            print(" Error: no valid ffmpeg_pipe_encoding_type set. Using jpeg as default")
            vcodec = "mjpeg"
            pipe_format = "JPEG"

        print("\n WARNING: EXPERIMENTAL FFMPEG PIPING IS ENABLED\n")

        # ffmpeg reads images from stdin ('-i -') and encodes nosound_file.
        ffmpegpipe = subprocess.Popen([ffmpeg_dir, "-loglevel", "panic", '-y', '-f',
                                       'image2pipe', '-vcodec', vcodec, '-r', frame_rate,
                                       '-i', '-', '-vcodec', 'libx264', '-preset', 'medium',
                                       '-qscale', '5', '-crf', '17',
                                       '-vf', ' pp=hb/vb/dr/fq|32, deband=range=22:blur=false',
                                       '-r', frame_rate, nosound_file],
                                      stdin=subprocess.PIPE)

        # pipe the first merged image as it will not be done afterwards
        wait_on_file(merged_dir + "merged_" + str(1) + extension_type)
        im = Image.open(merged_dir + "merged_" + str(1) + extension_type)

        # best jpeg quality since we won't be saving up disk space
        im.save(ffmpegpipe.stdin, format=pipe_format, quality=100)

    # # # # # # # # # # # # #

    # Load the genesis image + the first upscaled image.
    frame_previous = Frame()
    frame_previous.load_from_string_wait(merged_dir + "merged_" + str(1) + extension_type)

    f1 = Frame()
    f1.load_from_string_wait(upscaled_dir + "output_" + get_lexicon_value(6, 1) + ".png")

    # When upscaling every frame between start_frame to frame_count, there's obviously no
    # x + 1 at frame_count - 1. So just make a small note not to load that image; pretty
    # much load images concurrently until we get to x - 1.
    last_frame = False
    for x in range(1, frame_count):

        ###################################
        # Loop-iteration pre-requirements #
        ###################################

        # Check if we're at the last image
        if x == frame_count - 1:
            last_frame = True

        # load the next image ahead of time (asynchronously, overlapping with merge work).
        if not last_frame:
            background_frame_load = AsyncFrameRead(upscaled_dir + "output_" + get_lexicon_value(6, x + 1) + ".png")
            background_frame_load.start()

        #######################
        # Loop-iteration Core #
        #######################

        logger.info("Upscaling frame " + str(x))

        # Per-frame vector/metadata files produced by the C++ side.
        prediction_data_list = get_list_from_file(pframe_data_dir + "pframe_" + str(x) + ".txt")
        residual_data_list = get_list_from_file(residual_data_dir + "residual_" + str(x) + ".txt")
        correction_data_list = get_list_from_file(correction_data_dir + "correction_" + str(x) + ".txt")
        fade_data_list = get_list_from_file(fade_data_dir + "fade_" + str(x) + ".txt")

        frame_next = make_merge_image(context, f1, frame_previous,
                                      prediction_data_list, residual_data_list,
                                      correction_data_list, fade_data_list)

        if not ffmpeg_pipe_encoding:  # ffmpeg piping is disabled, traditional way
            # Write the image in the background for the performance increase
            output_file_merged = workspace + "merged/merged_" + str(x + 1) + extension_type
            background_frame_write = AsyncFrameWrite(frame_next, output_file_merged)
            background_frame_write.start()
        else:  # ffmpeg piping is enabled
            # Write the image directly into ffmpeg pipe
            im = frame_next.get_pil_image()
            im.save(ffmpegpipe.stdin, format=pipe_format, quality=95)

        #######################################
        # Assign variables for next iteration #
        #######################################

        # Ensure the file is loaded for background_frame_load. If we're on the
        # last frame, simply ignore this section, because frame_count + 1 does
        # not exist.
        if not last_frame:
            while not background_frame_load.load_complete:
                wait_on_file(upscaled_dir + "output_" + get_lexicon_value(6, x + 1) + ".png")
            f1 = background_frame_load.loaded_image

        frame_previous = frame_next

    if ffmpeg_pipe_encoding:
        # Signal EOF to ffmpeg and wait for the encode to finish.
        ffmpegpipe.stdin.close()
        ffmpegpipe.wait()

        # add the original file audio to the nosound file
        migrate_tracks(context, nosound_file, input_file, output_file)