def _upscale_frames(self): """ Upscale video frames with waifu2x-caffe This function upscales all the frames extracted by ffmpeg using the waifu2x-caffe binary. Arguments: w2 {Waifu2x Object} -- initialized waifu2x object """ # progress bar process exit signal self.progress_bar_exit_signal = False # initialize waifu2x driver if self.waifu2x_driver not in AVAILABLE_DRIVERS: raise UnrecognizedDriverError( f'Unrecognized driver: {self.waifu2x_driver}') # create a container for all upscaler processes upscaler_processes = [] # list all images in the extracted frames frames = [(self.extracted_frames / f) for f in self.extracted_frames.iterdir() if f.is_file] # if we have less images than processes, # create only the processes necessary if len(frames) < self.processes: self.processes = len(frames) # create a directory for each process and append directory # name into a list process_directories = [] for process_id in range(self.processes): process_directory = self.extracted_frames / str(process_id) process_directories.append(process_directory) # delete old directories and create new directories if process_directory.is_dir(): shutil.rmtree(process_directory) process_directory.mkdir(parents=True, exist_ok=True) # waifu2x-converter-cpp will perform multi-threading within its own process if self.waifu2x_driver in ['waifu2x_converter', 'anime4k']: process_directories = [self.extracted_frames] else: # evenly distribute images into each directory # until there is none left in the directory for image in frames: # move image image.rename(process_directories[0] / image.name) # rotate list process_directories = process_directories[ -1:] + process_directories[:-1] # create threads and start them for process_directory in process_directories: # if the driver being used is waifu2x-caffe if self.waifu2x_driver == 'waifu2x_caffe': driver = Waifu2xCaffe(copy.deepcopy(self.driver_settings), self.method, self.model_dir, self.bit_depth) if self.scale_ratio: upscaler_processes.append( driver.upscale(process_directory, self.upscaled_frames, self.scale_ratio, False, False, self.image_format)) else: upscaler_processes.append( driver.upscale(process_directory, self.upscaled_frames, False, self.scale_width, self.scale_height, self.image_format)) # if the driver being used is waifu2x-converter-cpp elif self.waifu2x_driver == 'waifu2x_converter': driver = Waifu2xConverter(self.driver_settings, self.model_dir) upscaler_processes.append( driver.upscale(process_directory, self.upscaled_frames, self.scale_ratio, self.processes, self.image_format)) # if the driver being used is waifu2x-ncnn-vulkan elif self.waifu2x_driver == 'waifu2x_ncnn_vulkan': driver = Waifu2xNcnnVulkan(copy.deepcopy(self.driver_settings)) upscaler_processes.append( driver.upscale(process_directory, self.upscaled_frames, self.scale_ratio)) # if the driver being used is anime4k elif self.waifu2x_driver == 'anime4k': driver = Anime4k(copy.deepcopy(self.driver_settings)) upscaler_processes += driver.upscale(process_directory, self.upscaled_frames, self.scale_ratio, self.processes) # start progress bar in a different thread progress_bar = threading.Thread(target=self._progress_bar, args=(process_directories, )) progress_bar.start() # create the clearer and start it Avalon.debug_info('Starting upscaled image cleaner') image_cleaner = ImageCleaner(self.extracted_frames, self.upscaled_frames, len(upscaler_processes)) image_cleaner.start() # wait for all process to exit try: Avalon.debug_info('Main process waiting for subprocesses to exit') for process in 
upscaler_processes: Avalon.debug_info( f'Subprocess {process.pid} exited with code {process.wait()}' ) except (KeyboardInterrupt, SystemExit): Avalon.warning('Exit signal received') Avalon.warning('Killing processes') for process in upscaler_processes: process.terminate() # cleanup and exit with exit code 1 Avalon.debug_info('Killing upscaled image cleaner') image_cleaner.stop() self.progress_bar_exit_signal = True sys.exit(1) # if the driver is waifu2x-converter-cpp # images need to be renamed to be recognizable for FFmpeg if self.waifu2x_driver == 'waifu2x_converter': for image in [ f for f in self.upscaled_frames.iterdir() if f.is_file() ]: renamed = re.sub( f'_\\[.*\\]\\[x(\\d+(\\.\\d+)?)\\]\\.{self.image_format}', f'.{self.image_format}', str(image.name)) (self.upscaled_frames / image).rename(self.upscaled_frames / renamed) # upscaling done, kill the clearer Avalon.debug_info('Killing upscaled image cleaner') image_cleaner.stop() # pass exit signal to progress bar thread self.progress_bar_exit_signal = True
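
# The filename cleanup above relies on a regular expression that strips the
# suffix waifu2x-converter-cpp appends to its output files. Below is a minimal,
# self-contained sketch of that substitution; the helper name and the sample
# filename are hypothetical and only illustrate the pattern being matched.
def _demo_converter_rename(image_format: str = 'png') -> str:
    import re
    # hypothetical output name in the style matched by the pattern above
    sample = f'frame_000001_[NS-L3][x2.000000].{image_format}'
    # same substitution as in _upscale_frames: drop the "_[...][xN.N]" suffix
    return re.sub(f'_\\[.*\\]\\[x(\\d+(\\.\\d+)?)\\]\\.{image_format}',
                  f'.{image_format}',
                  sample)
# _demo_converter_rename() would return 'frame_000001.png'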
def run(self): """Main controller for Video2X This function controls the flow of video conversion and handles all necessary functions. """ # parse arguments for waifu2x # check argument sanity self._check_arguments() # convert paths to absolute paths self.input_video = os.path.abspath(self.input_video) self.output_video = os.path.abspath(self.output_video) # initialize objects for ffmpeg and waifu2x-caffe fm = Ffmpeg(self.ffmpeg_settings, self.image_format) # initialize waifu2x driver if self.waifu2x_driver == 'waifu2x_caffe': w2 = Waifu2xCaffe(self.waifu2x_settings, self.method, self.model_dir) elif self.waifu2x_driver == 'waifu2x_converter': w2 = Waifu2xConverter(self.waifu2x_settings, self.model_dir) else: raise Exception(f'Unrecognized waifu2x driver: {self.waifu2x_driver}') # extract frames from video fm.extract_frames(self.input_video, self.extracted_frames) Avalon.info('Reading video information') video_info = fm.get_video_info(self.input_video) # analyze original video with ffprobe and retrieve framerate # width, height = info['streams'][0]['width'], info['streams'][0]['height'] # find index of video stream video_stream_index = None for stream in video_info['streams']: if stream['codec_type'] == 'video': video_stream_index = stream['index'] break # exit if no video stream found if video_stream_index is None: Avalon.error('Aborting: No video stream found') exit(1) # get average frame rate of video stream framerate = float(Fraction(video_info['streams'][video_stream_index]['avg_frame_rate'])) Avalon.info(f'Framerate: {framerate}') # width/height will be coded width/height x upscale factor if self.scale_ratio: original_width = video_info['streams'][video_stream_index]['width'] original_height = video_info['streams'][video_stream_index]['height'] self.scale_width = int(self.scale_ratio * original_width) self.scale_height = int(self.scale_ratio * original_height) # upscale images one by one using waifu2x Avalon.info('Starting to upscale extracted images') self._upscale_frames(w2) Avalon.info('Upscaling completed') # frames to Video Avalon.info('Converting extracted frames into video') # use user defined output size fm.convert_video(framerate, f'{self.scale_width}x{self.scale_height}', self.upscaled_frames) Avalon.info('Conversion completed') # migrate audio tracks and subtitles Avalon.info('Migrating audio tracks and subtitles to upscaled video') fm.migrate_audio_tracks_subtitles(self.input_video, self.output_video, self.upscaled_frames)
def _upscale_frames(self): """ Upscale video frames with waifu2x-caffe This function upscales all the frames extracted by ffmpeg using the waifu2x-caffe binary. Arguments: w2 {Waifu2x Object} -- initialized waifu2x object """ # progress bar thread exit signal self.progress_bar_exit_signal = False # create a container for exceptions in threads # if this thread is not empty, then an exception has occured self.upscaler_exceptions = [] # initialize waifu2x driver drivers = ['waifu2x_caffe', 'waifu2x_converter', 'waifu2x_ncnn_vulkan'] if self.waifu2x_driver not in drivers: raise Exception(f'Unrecognized waifu2x driver: {self.waifu2x_driver}') # it's easier to do multi-threading with waifu2x_converter # the number of threads can be passed directly to waifu2x_converter if self.waifu2x_driver == 'waifu2x_converter': w2 = Waifu2xConverter(self.waifu2x_settings, self.model_dir) progress_bar = threading.Thread(target=self._progress_bar, args=([self.extracted_frames],)) progress_bar.start() w2.upscale(self.extracted_frames, self.upscaled_frames, self.scale_ratio, self.threads, self.image_format, self.upscaler_exceptions) for image in [f for f in os.listdir(self.upscaled_frames) if os.path.isfile(os.path.join(self.upscaled_frames, f))]: renamed = re.sub(f'_\[.*-.*\]\[x(\d+(\.\d+)?)\]\.{self.image_format}', f'.{self.image_format}', image) shutil.move(os.path.join(self.upscaled_frames, image), os.path.join(self.upscaled_frames, renamed)) self.progress_bar_exit_signal = True progress_bar.join() return else: # create a container for all upscaler threads upscaler_threads = [] # list all images in the extracted frames frames = [os.path.join(self.extracted_frames, f) for f in os.listdir(self.extracted_frames) if os.path.isfile(os.path.join(self.extracted_frames, f))] # if we have less images than threads, # create only the threads necessary if len(frames) < self.threads: self.threads = len(frames) # create a directory for each thread and append directory # name into a list thread_pool = [] thread_directories = [] for thread_id in range(self.threads): thread_directory = os.path.join(self.extracted_frames, str(thread_id)) thread_directories.append(thread_directory) # delete old directories and create new directories if os.path.isdir(thread_directory): shutil.rmtree(thread_directory) os.mkdir(thread_directory) # append directory path into list thread_pool.append((thread_directory, thread_id)) # evenly distribute images into each directory # until there is none left in the directory for image in frames: # move image shutil.move(image, thread_pool[0][0]) # rotate list thread_pool = thread_pool[-1:] + thread_pool[:-1] # create threads and start them for thread_info in thread_pool: # create a separate w2 instance for each thread if self.waifu2x_driver == 'waifu2x_caffe': w2 = Waifu2xCaffe(copy.deepcopy(self.waifu2x_settings), self.method, self.model_dir, self.bit_depth) if self.scale_ratio: thread = threading.Thread(target=w2.upscale, args=(thread_info[0], self.upscaled_frames, self.scale_ratio, False, False, self.image_format, self.upscaler_exceptions)) else: thread = threading.Thread(target=w2.upscale, args=(thread_info[0], self.upscaled_frames, False, self.scale_width, self.scale_height, self.image_format, self.upscaler_exceptions)) # if the driver being used is waifu2x_ncnn_vulkan elif self.waifu2x_driver == 'waifu2x_ncnn_vulkan': w2 = Waifu2xNcnnVulkan(copy.deepcopy(self.waifu2x_settings)) thread = threading.Thread(target=w2.upscale, args=(thread_info[0], self.upscaled_frames, self.scale_ratio, 
self.upscaler_exceptions)) # create thread thread.name = thread_info[1] # add threads into the pool upscaler_threads.append(thread) # start progress bar in a different thread progress_bar = threading.Thread(target=self._progress_bar, args=(thread_directories,)) progress_bar.start() # create the clearer and start it Avalon.debug_info('Starting upscaled image cleaner') image_cleaner = ImageCleaner(self.extracted_frames, self.upscaled_frames, len(upscaler_threads)) image_cleaner.start() # start all threads for thread in upscaler_threads: thread.start() # wait for threads to finish for thread in upscaler_threads: thread.join() # upscaling done, kill the clearer Avalon.debug_info('Killing upscaled image cleaner') image_cleaner.stop() self.progress_bar_exit_signal = True if len(self.upscaler_exceptions) != 0: raise(self.upscaler_exceptions[0])
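
# Both _upscale_frames implementations above spread the extracted frames across
# per-worker directories by always moving the next frame into the first entry
# of a list and then rotating that list by one. Below is a self-contained
# sketch of that rotation pattern; the helper name and frame names are
# hypothetical, and plain lists stand in for directories so nothing touches
# the filesystem.
def _demo_round_robin(frames=None, workers: int = 3):
    # hypothetical frame names standing in for files on disk
    frames = frames or [f'frame_{i:06d}.png' for i in range(7)]
    # one bucket per worker, standing in for the per-thread/per-process directories
    buckets = [[] for _ in range(workers)]
    order = list(range(workers))
    for frame in frames:
        # "move" the frame into the directory currently at the front of the list
        buckets[order[0]].append(frame)
        # rotate the list so the next frame goes to a different directory
        order = order[-1:] + order[:-1]
    return buckets
# _demo_round_robin() distributes 7 frames over 3 buckets as 3/2/2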
def run(self): """Main controller for Video2X This function controls the flow of video conversion and handles all necessary functions. """ # Parse arguments for waifu2x # Check argument sanity self._check_model_type(self.model_type) self._check_arguments() # Convert paths to absolute paths self.input_video = os.path.abspath(self.input_video) self.output_video = os.path.abspath(self.output_video) # Add a forward slash to directory if not present # otherwise there will be a format error if self.ffmpeg_path[-1] != '/' and self.ffmpeg_path[-1] != '\\': self.ffmpeg_path = '{}/'.format(self.ffmpeg_path) # Check if FFMPEG and waifu2x are present if not os.path.isdir(self.ffmpeg_path): raise FileNotFoundError(self.ffmpeg_path) if not os.path.isfile(self.waifu2x_path) and not os.path.isdir(self.waifu2x_path): raise FileNotFoundError(self.waifu2x_path) # Initialize objects for ffmpeg and waifu2x-caffe fm = Ffmpeg(self.ffmpeg_path, self.ffmpeg_arguments) # Initialize waifu2x driver if self.waifu2x_driver == 'waifu2x_caffe': w2 = Waifu2xCaffe(self.waifu2x_path, self.method, self.model_type) elif self.waifu2x_driver == 'waifu2x_converter': w2 = Waifu2xConverter(self.waifu2x_path) else: raise Exception('Unrecognized waifu2x driver: {}'.format(self.waifu2x_driver)) # Extract frames from video fm.extract_frames(self.input_video, self.extracted_frames) Avalon.info('Reading video information') video_info = fm.get_video_info(self.input_video) # Analyze original video with ffprobe and retrieve framerate # width, height = info['streams'][0]['width'], info['streams'][0]['height'] # Find index of video stream video_stream_index = None for stream in video_info['streams']: if stream['codec_type'] == 'video': video_stream_index = stream['index'] break # Exit if no video stream found if video_stream_index is None: Avalon.error('Aborting: No video stream found') exit(1) # Get average frame rate of video stream framerate = float(Fraction(video_info['streams'][video_stream_index]['avg_frame_rate'])) Avalon.info('Framerate: {}'.format(framerate)) # Width/height will be coded width/height x upscale factor if self.ratio: coded_width = video_info['streams'][video_stream_index]['coded_width'] coded_height = video_info['streams'][video_stream_index]['coded_height'] self.output_width = self.ratio * coded_width self.output_height = self.ratio * coded_height # Upscale images one by one using waifu2x Avalon.info('Starting to upscale extracted images') self._upscale_frames(w2) Avalon.info('Upscaling completed') # Frames to Video Avalon.info('Converting extracted frames into video') # Use user defined output size fm.convert_video(framerate, '{}x{}'.format(self.output_width, self.output_height), self.upscaled_frames) Avalon.info('Conversion completed') # Migrate audio tracks and subtitles Avalon.info('Migrating audio tracks and subtitles to upscaled video') fm.migrate_audio_tracks_subtitles(self.input_video, self.output_video, self.upscaled_frames)