def residual_loop(context):
    """
    Produce a residual image for every frame of the video.

    Method Tasks:
        - Load and wait for the files needed to create a residual image.
        - Call 'make_residual_image' once the needed files exist.
    """
    # Pull the settings this loop needs out of the context object.
    workspace = context.workspace
    residual_images_dir = context.residual_images_dir
    residual_data_dir = context.residual_data_dir
    pframe_data_dir = context.pframe_data_dir
    input_frames_dir = context.input_frames_dir
    frame_count = context.frame_count
    block_size = context.block_size
    extension_type = context.extension_type
    debug_dir = context.debug_dir
    debug = context.debug
    temp_image = context.temp_image_folder + "tempimage.jpg"

    logger = logging.getLogger(__name__)
    logger.info((workspace, 1, frame_count, block_size))

    # Walk every frame and turn its vector data into a residual image.
    for frame_number in range(1, frame_count):
        # Block until the source frame for this iteration exists on disk.
        source_frame = Frame()
        source_frame.load_from_string_wait(input_frames_dir + "frame" + str(frame_number + 1) + extension_type)

        # Vector lists produced by the matching stage for this frame.
        residual_data = get_list_from_file(residual_data_dir + "residual_" + str(frame_number) + ".txt")
        prediction_data = get_list_from_file(pframe_data_dir + "pframe_" + str(frame_number) + ".txt")

        # Output destinations for this iteration.
        debug_output_file = debug_dir + "debug" + str(frame_number + 1) + extension_type
        output_file = residual_images_dir + "output_" + get_lexicon_value(6, frame_number) + ".jpg"

        # Save via a temp folder so waifu2x-vulkan doesn't try reading a
        # half-written file, then move it into place.
        out_image = make_residual_image(context, source_frame, residual_data, prediction_data)
        out_image.save_image_temp(output_file, temp_image)

        if debug == 1:
            debug_image(block_size, source_frame, prediction_data, residual_data, debug_output_file)
def main(shelter_dir="C:\\Users\\windwoz\\Desktop\\image_research\\shelter\\",
         block_size=4, scale_factor=2):
    """
    Ad-hoc driver that applies 'correct_image' to one merged frame.

    Generalized: the previously hard-coded working directory, block size, and
    scale factor are now defaulted parameters, so the same driver can be pointed
    at another dataset without editing the source. Defaults reproduce the
    original hard-coded behavior exactly.

    :param shelter_dir:  directory containing merged2x.jpg / correction.txt,
                         and where new_correction.jpg is written.
    :param block_size:   size of the correction blocks.
    :param scale_factor: upscale factor the corrections were computed for.
    """
    # Base image the corrections are applied onto.
    frame_base = Frame()
    frame_base.load_from_string(shelter_dir + "merged2x.jpg")

    # Correction vectors computed elsewhere for this image.
    list_predictive = get_list_from_file(shelter_dir + "correction.txt")

    # Destination for the corrected result.
    out_location = shelter_dir + "new_correction.jpg"

    correct_image(block_size, scale_factor, frame_base, list_predictive, out_location)
def difference_loop(context, start_frame: int):
    """
    Build a difference image for every frame from start_frame up to frame_count.

    Waits on each iteration's inputs (the next source frame plus the inversion
    and pframe vector files) before calling 'make_difference_image'.
    """
    # load variables from context
    workspace = context.workspace
    differences_dir = context.differences_dir
    inversion_data_dir = context.inversion_data_dir
    pframe_data_dir = context.pframe_data_dir
    input_frames_dir = context.input_frames_dir
    frame_count = context.frame_count
    block_size = context.block_size
    extension_type = context.extension_type
    debug = context.debug
    temp_image = context.temp_image_folder + "tempimage.jpg"

    logger = logging.getLogger(__name__)
    logger.info((workspace, start_frame, frame_count, block_size))

    for frame_number in range(start_frame, frame_count):
        # Block until the source frame for this iteration is on disk.
        source_frame = Frame()
        source_frame.load_from_string_wait(input_frames_dir + "frame" + str(frame_number + 1) + extension_type)

        # Vector lists required to assemble this difference image.
        difference_data = get_list_from_file(inversion_data_dir + "inversion_" + str(frame_number) + ".txt")
        prediction_data = get_list_from_file(pframe_data_dir + "pframe_" + str(frame_number) + ".txt")

        # Output destinations for this iteration.
        debug_output_file = workspace + "debug/debug" + str(frame_number + 1) + extension_type
        output_file = differences_dir + "output_" + get_lexicon_value(6, frame_number) + ".jpg"

        # Save into a temp folder so waifu2x-vulkan can't read a partial file,
        # then move it into place.
        out_image = make_difference_image(context, source_frame, difference_data, prediction_data)
        out_image.save_image_temp(output_file, temp_image)

        if debug == 1:
            debug_image(block_size, source_frame, prediction_data, difference_data, debug_output_file)
def merge_loop(context: Context):
    """
    Call the 'make_merge_image' method for every image that needs to be upscaled.

    This method is sort of the driver for that, and has tasks needed to keep
    merging running smoothly. This method became a bit messy due to
    optimization-hunting, but the most important calls of the loop can be read
    in the 'Loop-iteration Core' area.

    Method Tasks:
        - Read / Write files that are used by merge asynchronously.
        - Load the text files containing the vectors needed for 'make_merge_image'.
        - Optionally stream every merged frame straight into an ffmpeg pipe
          instead of writing it to disk.
    """
    # load variables from context
    workspace = context.workspace
    upscaled_dir = context.residual_upscaled_dir
    merged_dir = context.merged_dir
    residual_data_dir = context.residual_data_dir
    pframe_data_dir = context.pframe_data_dir
    correction_data_dir = context.correction_data_dir
    fade_data_dir = context.fade_data_dir
    frame_count = context.frame_count
    extension_type = context.extension_type
    logger = logging.getLogger(__name__)

    # # # ffmpeg piping stuff # # #

    ffmpeg_pipe_encoding = context.ffmpeg_pipe_encoding

    if ffmpeg_pipe_encoding:
        nosound_file = context.nosound_file
        frame_rate = str(context.frame_rate)
        input_file = context.input_file
        output_file = context.output_file
        ffmpeg_dir = context.ffmpeg_dir
        ffmpeg_pipe_encoding_type = context.ffmpeg_pipe_encoding_type

        if ffmpeg_pipe_encoding_type in ["jpeg", "jpg"]:
            vcodec = "mjpeg"
            pipe_format = "JPEG"
        elif ffmpeg_pipe_encoding_type == "png":
            vcodec = "png"
            pipe_format = "PNG"
        else:
            # Bug fix: this message was previously a broken (unterminated)
            # string literal split across two lines, which is a syntax error.
            print(" Error: no valid ffmpeg_pipe_encoding_type set. Using jpeg as default")
            vcodec = "mjpeg"
            pipe_format = "JPEG"

        print("\n WARNING: EXPERIMENTAL FFMPEG PIPING IS ENABLED\n")

        ffmpegpipe = subprocess.Popen([ffmpeg_dir, "-loglevel", "panic", '-y', '-f',
                                       'image2pipe', '-vcodec', vcodec, '-r', frame_rate,
                                       '-i', '-', '-vcodec', 'libx264', '-preset', 'medium',
                                       '-qscale', '5', '-crf', '17', '-vf',
                                       ' pp=hb/vb/dr/fq|32, deband=range=22:blur=false',
                                       '-r', frame_rate, nosound_file],
                                      stdin=subprocess.PIPE)

        # pipe the first merged image as it will not be done afterwards
        wait_on_file(merged_dir + "merged_" + str(1) + extension_type)
        im = Image.open(merged_dir + "merged_" + str(1) + extension_type)

        # best jpeg quality since we won't be saving up disk space
        im.save(ffmpegpipe.stdin, format=pipe_format, quality=100)

    # # # # # # # # # # # # #

    # Load the genesis image + the first upscaled image.
    frame_previous = Frame()
    frame_previous.load_from_string_wait(merged_dir + "merged_" + str(1) + extension_type)

    f1 = Frame()
    f1.load_from_string_wait(upscaled_dir + "output_" + get_lexicon_value(6, 1) + ".png")

    # When upscaling every frame between start_frame to frame_count, there's obviously
    # no x + 1 at frame_count - 1. So just make a small note not to load that image.
    # Pretty much load images concurrently until we get to x - 1.
    last_frame = False
    for x in range(1, frame_count):

        ###################################
        # Loop-iteration pre-requirements #
        ###################################

        # Check if we're at the last image
        if x == frame_count - 1:
            last_frame = True

        # load the next image ahead of time.
        if not last_frame:
            background_frame_load = AsyncFrameRead(upscaled_dir + "output_" + get_lexicon_value(6, x + 1) + ".png")
            background_frame_load.start()

        #######################
        # Loop-iteration Core #
        #######################

        logger.info("Upscaling frame " + str(x))

        prediction_data_list = get_list_from_file(pframe_data_dir + "pframe_" + str(x) + ".txt")
        residual_data_list = get_list_from_file(residual_data_dir + "residual_" + str(x) + ".txt")
        correction_data_list = get_list_from_file(correction_data_dir + "correction_" + str(x) + ".txt")
        fade_data_list = get_list_from_file(fade_data_dir + "fade_" + str(x) + ".txt")

        frame_next = make_merge_image(context, f1, frame_previous,
                                      prediction_data_list, residual_data_list,
                                      correction_data_list, fade_data_list)

        if not ffmpeg_pipe_encoding:
            # ffmpeg piping is disabled, traditional way:
            # write the image in the background for the performance increase.
            output_file_merged = workspace + "merged/merged_" + str(x + 1) + extension_type
            background_frame_write = AsyncFrameWrite(frame_next, output_file_merged)
            background_frame_write.start()
        else:
            # ffmpeg piping is enabled: write the image directly into the ffmpeg pipe.
            im = frame_next.get_pil_image()
            im.save(ffmpegpipe.stdin, format=pipe_format, quality=95)

        #######################################
        # Assign variables for next iteration #
        #######################################

        # Ensure the file is loaded for background_frame_load. If we're on the
        # last frame, simply ignore this section because frame_count + 1 does not exist.
        if not last_frame:
            while not background_frame_load.load_complete:
                wait_on_file(upscaled_dir + "output_" + get_lexicon_value(6, x + 1) + ".png")
            f1 = background_frame_load.loaded_image

        frame_previous = frame_next

    if ffmpeg_pipe_encoding:
        # Flush / finalize the pipe encoder.
        ffmpegpipe.stdin.close()
        ffmpegpipe.wait()

        # add the original file audio to the nosound file
        migrate_tracks(context, nosound_file, input_file, output_file)
def residual_loop(context):
    """
    Create a residual image for every frame of the video.

    Method Tasks:
        - Load and wait for the files needed to create a residual image.
        - Call 'make_residual_image' once the needed files exist.
        - When two consecutive frames are identical (1x1 residual), write a
          pre-made 'fake' upscaled image so waifu2x never has to process it.
    """
    # load variables from context
    workspace = context.workspace
    residual_upscaled_dir = context.residual_upscaled_dir
    residual_images_dir = context.residual_images_dir
    residual_data_dir = context.residual_data_dir
    pframe_data_dir = context.pframe_data_dir
    input_frames_dir = context.input_frames_dir
    frame_count = context.frame_count
    block_size = context.block_size
    extension_type = context.extension_type
    debug_dir = context.debug_dir
    debug = context.debug
    temp_image = context.temp_image_folder + "tempimage.jpg"

    logger = logging.getLogger(__name__)
    logger.info((workspace, 1, frame_count, block_size))

    for frame_number in range(1, frame_count):
        # Wait for the source frame this residual is computed against.
        source_frame = Frame()
        source_frame.load_from_string_wait(input_frames_dir + "frame" + str(frame_number + 1) + extension_type)

        # Vector lists needed to compute this iteration of residual making.
        residual_data = get_list_from_file(residual_data_dir + "residual_" + str(frame_number) + ".txt")
        prediction_data = get_list_from_file(pframe_data_dir + "pframe_" + str(frame_number) + ".txt")

        # Output destinations for this iteration.
        debug_output_file = debug_dir + "debug" + str(frame_number + 1) + extension_type
        output_file = residual_images_dir + "output_" + get_lexicon_value(6, frame_number) + ".jpg"

        out_image = make_residual_image(context, source_frame, residual_data, prediction_data)

        if out_image.get_res() == (1, 1):
            # A 1x1 residual means frame_number and frame_number + 1 are identical.
            # We still need one output image per input image, but 1x1 files can
            # slow a waifu2x implementation down, so 'cheat' d2x by 'faking' the
            # upscale: drop a tiny placeholder straight into the upscaled folder
            # so waifu2x never has to process it.
            out_image = Frame()
            out_image.create_new(2, 2)
            output_file = residual_upscaled_dir + "output_" + get_lexicon_value(6, frame_number) + ".png"
            out_image.save_image(output_file)
        else:
            # A real residual: save via a temp file so waifu2x-vulkan doesn't
            # read a half-written image, then move it into place.
            # With this change the wrappers must be modified to not try deleting
            # the non existing residual file.
            out_image.save_image_temp(output_file, temp_image)

        if debug == 1:
            debug_image(block_size, source_frame, prediction_data, residual_data, debug_output_file)
def merge_loop(context: Context):
    """
    Drive 'make_merge_image' for every frame that needs to be merged.

    Keeps merging running smoothly by reading the next upscaled image and
    writing the merged result asynchronously, while loading the per-frame
    vector text files 'make_merge_image' needs.
    """
    # load variables from context
    workspace = context.workspace
    upscaled_dir = context.residual_upscaled_dir
    merged_dir = context.merged_dir
    residual_data_dir = context.residual_data_dir
    pframe_data_dir = context.pframe_data_dir
    correction_data_dir = context.correction_data_dir
    fade_data_dir = context.fade_data_dir
    frame_count = context.frame_count
    extension_type = context.extension_type
    logger = logging.getLogger(__name__)

    # Load the genesis image + the first upscaled image.
    previous_frame = Frame()
    previous_frame.load_from_string_wait(merged_dir + "merged_" + str(1) + extension_type)

    upscaled_residual = Frame()
    upscaled_residual.load_from_string_wait(upscaled_dir + "output_" + get_lexicon_value(6, 1) + ".png")

    # There is no image x + 1 once we reach frame_count - 1, so keep a flag
    # telling the pre-loader when to stop reading ahead.
    last_frame = False
    for x in range(1, frame_count):

        # --- Loop-iteration pre-requirements ---------------------------------
        if x == frame_count - 1:
            last_frame = True

        # Kick off the read of the next upscaled image in the background.
        if not last_frame:
            background_frame_load = AsyncFrameRead(upscaled_dir + "output_" + get_lexicon_value(6, x + 1) + ".png")
            background_frame_load.start()

        # --- Loop-iteration Core ---------------------------------------------
        logger.info("Upscaling frame " + str(x))

        # Vector lists needed to piece this frame back together.
        prediction_data_list = get_list_from_file(pframe_data_dir + "pframe_" + str(x) + ".txt")
        residual_data_list = get_list_from_file(residual_data_dir + "residual_" + str(x) + ".txt")
        correction_data_list = get_list_from_file(correction_data_dir + "correction_" + str(x) + ".txt")
        fade_data_list = get_list_from_file(fade_data_dir + "fade_" + str(x) + ".txt")

        output_file = workspace + "merged/merged_" + str(x + 1) + extension_type

        merged_frame = make_merge_image(context, upscaled_residual, previous_frame,
                                        prediction_data_list, residual_data_list,
                                        correction_data_list, fade_data_list)

        # Write the merged image in the background for the performance increase.
        background_frame_write = AsyncFrameWrite(merged_frame, output_file)
        background_frame_write.start()

        # --- Assign variables for next iteration -----------------------------
        # Ensure the background read finished; skipped on the last frame because
        # frame_count + 1 does not exist.
        if not last_frame:
            while not background_frame_load.load_complete:
                wait_on_file(upscaled_dir + "output_" + get_lexicon_value(6, x + 1) + ".png")
            upscaled_residual = background_frame_load.loaded_image

        previous_frame = merged_frame
def merge_loop(context: Context, start_frame: int):
    """
    Merge every frame from start_frame up to frame_count back into full images.

    Loads the per-frame vector text files, reads the next upscaled image
    asynchronously, and writes each merged result in the background.

    :param context:     shared session configuration (directories, frame_count, ...).
    :param start_frame: first frame to merge — supports resuming a session
                        partway through.
    """
    # load variables from context
    workspace = context.workspace
    upscaled_dir = context.upscaled_dir
    merged_dir = context.merged_dir
    inversion_data_dir = context.inversion_data_dir
    pframe_data_dir = context.pframe_data_dir
    correction_data_dir = context.correction_data_dir
    fade_data_dir = context.fade_data_dir
    frame_count = context.frame_count
    extension_type = context.extension_type
    logger = logging.getLogger(__name__)

    # Load the genesis image + the first upscaled image.
    base = Frame()
    base.load_from_string_wait(merged_dir + "merged_" + str(start_frame) + extension_type)

    f1 = Frame()
    # Bug fix: the first upscaled image must correspond to start_frame, not to
    # frame 1 (previously hard-coded) — otherwise resuming a session merged the
    # genesis image against the wrong residual.
    f1.load_from_string_wait(upscaled_dir + "output_" + get_lexicon_value(6, start_frame) + ".png")

    # When upscaling every frame between start_frame to frame_count, there's obviously
    # no x + 1 at frame_count - 1, so keep a flag telling the pre-loader when to stop
    # reading ahead.
    last_frame = False
    for x in range(start_frame, frame_count):
        logger.info("Upscaling frame " + str(x))

        # Check if we're at the last image
        if x == frame_count - 1:
            last_frame = True

        # load the next image ahead of time.
        if not last_frame:
            background_frame_load = AsyncFrameRead(upscaled_dir + "output_" + get_lexicon_value(6, x + 1) + ".png")
            background_frame_load.start()

        # load vectors needed to piece image back together
        prediction_data_list = get_list_from_file(pframe_data_dir + "pframe_" + str(x) + ".txt")
        difference_data_list = get_list_from_file(inversion_data_dir + "inversion_" + str(x) + ".txt")
        correction_data_list = get_list_from_file(correction_data_dir + "correction_" + str(x) + ".txt")
        fade_data_list = get_list_from_file(fade_data_dir + "fade_" + str(x) + ".txt")

        output_file = workspace + "merged/merged_" + str(x + 1) + extension_type

        new_base = make_merge_image(context, f1, base,
                                    prediction_data_list, difference_data_list,
                                    correction_data_list, fade_data_list)

        # Write the image in the background for the performance increase.
        background_frame_write = AsyncFrameWrite(new_base, output_file)
        background_frame_write.start()

        # Assign variables for next iteration.
        # Ensure the file is loaded for background_frame_load; on the last frame
        # this is skipped because frame_count + 1 does not exist.
        if not last_frame:
            while not background_frame_load.load_complete:
                wait_on_file(upscaled_dir + "output_" + get_lexicon_value(6, x + 1) + ".png")
            f1 = background_frame_load.loaded_image

        base = new_base