def run(self):
    MiscUtils.configure_worker_logger(self.__log_queue)
    if self.__initializer:
        initialization_result = self.__initializer(*self.__initializer_args)
    else:
        initialization_result = None
    self.__logger.debug("Starting task execution loop")
    while True:
        next_task = self.__task_queue.get()
        if next_task is None:
            # None is the poison pill that shuts this worker down.
            self.__logger.debug("Poison pill received")
            self.__task_queue.task_done()
            break
        if self.__stop_event is None or not self.__stop_event.is_set():
            try:
                args = next_task[0]
                task_id = next_task[1]
                result = self.__target(*args, initialization_result, task_id)
                if result and self.__result_queue is not None:
                    self.__result_queue.put(result)
            except Exception:
                self.__logger.exception("Uncaught exception while executing target")
        # Every get() must be acknowledged so task_queue.join() can return.
        self.__task_queue.task_done()
    self.__logger.debug("Exited task execution loop")
    if self.__terminator:
        self.__terminator(*self.__terminator_args, initialization_result)
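# Minimal, self-contained sketch of the poison-pill pattern the run() loop above
# relies on, using only the standard library. It is not the project's
# PyProcessPool API: the worker/target names and the (args, task_id) tuple shape
# are illustrative assumptions. The producer enqueues tasks, then one None per
# worker, and task_queue.join() returns once every get() has been acknowledged.
import multiprocessing


def _example_target(value, initialization_result, task_id):
    # Stand-in for the pool's target callable.
    return (task_id, value * 2)


def _example_worker(task_queue, result_queue):
    while True:
        next_task = task_queue.get()
        if next_task is None:          # poison pill
            task_queue.task_done()
            break
        args, task_id = next_task
        result_queue.put(_example_target(*args, None, task_id))
        task_queue.task_done()


if __name__ == "__main__":
    task_queue = multiprocessing.JoinableQueue()
    result_queue = multiprocessing.Queue()
    workers = [multiprocessing.Process(target=_example_worker,
                                       args=(task_queue, result_queue))
               for _ in range(2)]
    for worker in workers:
        worker.start()
    for i in range(5):
        task_queue.put(((i,), "Task-{}".format(i)))
    for _ in workers:
        task_queue.put(None)           # one pill per worker
    task_queue.join()                  # all tasks (and pills) acknowledged
    for worker in workers:
        worker.join()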
def __queue_poll_thread_target(self, progress_signal):
    MiscUtils.debug_this_thread()
    self.__logger.info("Queue poll thread started")
    log_formatter = MiscUtils.get_default_log_formatter()
    while not self.__cleanup_started:
        log_record = self.__log_window_queue.get()
        if log_record is None:
            break
        if self.__window.isVisible():
            log_text = log_formatter.format(log_record)
            progress_signal.emit(log_text)
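# A minimal sketch of the pattern behind the poll loop above, using only the
# standard library: logging.handlers.QueueHandler pushes LogRecords onto a
# queue, a background thread formats and consumes them, and None acts as the
# shutdown signal. In the real class, emitting on the Qt progress signal
# replaces print(); the format string here is an illustrative assumption.
import logging
import logging.handlers
import queue
import threading

log_queue = queue.Queue()
logging.getLogger().addHandler(logging.handlers.QueueHandler(log_queue))
logging.getLogger().setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")


def poll_loop():
    while True:
        record = log_queue.get()
        if record is None:       # shutdown signal, mirrors the None check above
            break
        print(formatter.format(record))


poller = threading.Thread(target=poll_loop, daemon=True)
poller.start()
logging.info("Hello from the queue-backed logger")
log_queue.put(None)              # stop the poll thread
poller.join()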
def copy_exif_to_file(settings: Settings, original_file_path: str, new_file_path: str, media_file: MediaFile):
    # Copy all tags from the original file onto the converted file in place.
    args = [
        settings.path_exiftool, "-overwrite_original", "-tagsFromFile",
        original_file_path, new_file_path
    ]
    if ScannedFileType.VIDEO.name == media_file.file_type and media_file.video_rotation:
        # Preserve the rotation detected during indexing.
        args.insert(4, "-rotation={}".format(media_file.video_rotation))
    if ScannedFileType.IMAGE.name == media_file.file_type and media_file.extension in ["HEIC", "HEIF"]:
        # Exclude the Orientation tag when copying from HEIC/HEIF sources.
        args.insert(4, "-x")
        args.insert(5, "Orientation")
    MiscUtils.exec_subprocess(args, "EXIF copy failed")
def lookup_already_indexed_files(self, indexDB: IndexDB, scanned_files: List[ScannedFile]):
    IndexingHelper.__logger.info("BEGIN:: IndexDB lookup for indexed files")
    total_scanned_files = len(scanned_files)
    media_files_by_path = indexDB.get_all_media_files_by_path()
    for scanned_file_num, scanned_file in enumerate(scanned_files, start=1):
        if self.__indexing_stop_event.is_set():
            break
        if scanned_file.file_path in media_files_by_path:
            media_file = media_files_by_path[scanned_file.file_path]
            scanned_file.already_indexed = True
            # Timestamps changed: re-hash the file to see if the content changed too.
            if (scanned_file.creation_time != media_file.creation_time
                    or scanned_file.last_modification_time != media_file.last_modification_time):
                scanned_file.hash = MiscUtils.generate_hash(scanned_file.file_path)
                if scanned_file.hash != media_file.original_file_hash:
                    scanned_file.needs_reindex = True
        IndexingHelper.__logger.info(
            "Searched Index %s/%s: %s (AlreadyIndexed = %s, NeedsReindex = %s)",
            scanned_file_num, total_scanned_files, scanned_file.file_path,
            scanned_file.already_indexed, scanned_file.needs_reindex)
    IndexingHelper.__logger.info("END:: IndexDB lookup for indexed files")
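# Hedged sketch of a content-hash helper like MiscUtils.generate_hash, whose
# real implementation (algorithm, chunk size) is not shown in this section:
# stream the file in chunks so large media files are never loaded into memory
# at once. The function name and SHA-256 choice here are assumptions.
import hashlib


def generate_hash_sketch(file_path, chunk_size=1024 * 1024):
    digest = hashlib.sha256()
    with open(file_path, "rb") as file:
        for chunk in iter(lambda: file.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()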
def __init__(self, qt_threadpool: QtCore.QThreadPool):
    self.__qt_threadpool = qt_threadpool
    ui_file = QtCore.QFile(MiscUtils.get_abs_resource_path(LogWindow.__UI_FILE))
    ui_file.open(QtCore.QFile.ReadOnly)
    loader = QtUiTools.QUiLoader()
    self.__window: QtWidgets.QMainWindow = loader.load(ui_file)
    ui_file.close()

    self.__cleanup_started = False
    self.__window.setWindowTitle("View Logs")
    self.__log_window_queue = Queue()
    self.__log_window_queue_handler = QueueHandler(self.__log_window_queue)
    self.__log_window_queue_handler.setLevel(logging.INFO)
    logging.getLogger().addHandler(self.__log_window_queue_handler)

    self.qt_worker = QWorker(self.__queue_poll_thread_target)
    self.qt_worker.signals.progress.connect(self.__queue_poll_thread_progress)
    self.__qt_threadpool.start(self.qt_worker)

    self.__txt_log_display: QtWidgets.QPlainTextEdit = self.__window.findChild(
        QtWidgets.QPlainTextEdit, 'txt_log_display')
    self.__btn_clear: QtWidgets.QPushButton = self.__window.findChild(
        QtWidgets.QPushButton, 'btn_clear')
    self.__btn_log_dir: QtWidgets.QPushButton = self.__window.findChild(
        QtWidgets.QPushButton, 'btn_log_dir')
    self.__txt_log_display.setMaximumBlockCount(self.__LINES_TO_DISPLAY)
    self.__btn_clear.clicked.connect(self.__btn_clear_clicked)
    self.__btn_log_dir.clicked.connect(self.__btn_log_dir_clicked)
def convert_image_file(settings: Settings, media_file: MediaFile, original_file_path: str, save_file_path: str):
    # Sample: magick convert -resize 320x480 -quality 75 inputFile.cr2 outputfile.jpg
    args = [
        settings.path_magick, "convert", "-quality",
        str(settings.image_compression_quality),
        "{}[0]".format(original_file_path), save_file_path
    ]
    new_dimentions = MediaProcessor.get_new_dimentions(
        media_file.height, media_file.width, settings.image_max_dimension)
    if new_dimentions:
        args.insert(2, "-resize")
        args.insert(3, "{}x{}".format(new_dimentions['height'], new_dimentions['width']))
    MiscUtils.exec_subprocess(args, "Image conversion failed")
def __init__(self):
    # For in-memory, use: 'sqlite:///:memory:'
    db_file = 'sqlite:///' + os.path.join(MiscUtils.get_app_data_dir(), "index.db")
    self.__engine = create_engine(db_file, echo=False)
    DB_BASE.metadata.create_all(self.__engine)
    self.__session: Session = sessionmaker(bind=self.__engine)()
    IndexDB.__logger.info("Connected to IndexDB")
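# Hedged sketch of the SQLAlchemy pattern the constructor above relies on.
# DB_BASE here stands in for the project's declarative base, and the model and
# column names are illustrative, not the real MediaFile schema. With SQLAlchemy
# 1.4+, declarative_base and sessionmaker are importable from sqlalchemy.orm.
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

DB_BASE = declarative_base()


class ExampleMediaFile(DB_BASE):
    __tablename__ = "example_media_files"
    id = Column(Integer, primary_key=True)
    file_path = Column(String, unique=True)


engine = create_engine("sqlite:///:memory:", echo=False)
DB_BASE.metadata.create_all(engine)       # creates missing tables only
session = sessionmaker(bind=engine)()

session.add(ExampleMediaFile(file_path="/photos/example.jpg"))
session.commit()
print(session.query(ExampleMediaFile).filter_by(file_path="/photos/example.jpg").one().id)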
def convert_video_file(settings: Settings, media_file: MediaFile, original_file_path: str,
                       new_file_path: str, target_gpu: int):
    new_dimentions = MediaProcessor.get_new_dimentions(
        media_file.height, media_file.width, settings.video_max_dimension)
    audio_bitrate_arg = str(settings.video_audio_bitrate) + "k"
    # Adding ability to play converted videos in QuickTime: https://brandur.org/fragments/ffmpeg-h265
    if target_gpu < 0:
        # CPU Sample: ffmpeg -noautorotate -i input -c:v libx265 -crf 28 -tag:v hvc1 -c:a aac -ac 2 -vf scale=320:240 -b:a 128k -y output.mp4
        args = [
            settings.path_ffmpeg, "-noautorotate", "-i", original_file_path,
            "-c:v", "libx265", "-crf", str(settings.video_crf),
            "-tag:v", "hvc1", "-c:a", "aac", "-ac", "2",
            "-b:a", audio_bitrate_arg, "-y", new_file_path
        ]
        if new_dimentions:
            args.insert(14, "-vf")
            args.insert(15, "scale={}:{}".format(new_dimentions['width'], new_dimentions['height']))
    else:
        # GPU Sample: ffmpeg -noautorotate -vsync 0 -hwaccel cuda -hwaccel_device 0 -hwaccel_output_format cuda -i input -c:v hevc_nvenc -preset medium -rc vbr -cq 38 -gpu 0 -c:a aac -ac 2 -b:a 128k -tag:v hvc1 -vf scale_cuda=2560:1440 -y output.mp4
        args = [
            settings.path_ffmpeg, "-noautorotate", "-vsync", "0",
            "-hwaccel", "cuda", "-hwaccel_device", str(target_gpu),
            "-hwaccel_output_format", "cuda", "-i", original_file_path,
            "-c:v", "hevc_nvenc", "-preset", settings.video_nvenc_preset,
            "-rc", "vbr", "-cq", str(settings.video_crf),
            "-gpu", str(target_gpu), "-c:a", "aac", "-ac", "2",
            "-b:a", audio_bitrate_arg, "-tag:v", "hvc1", "-y", new_file_path
        ]
        if new_dimentions:
            args.insert(30, "-vf")
            args.insert(31, "scale_cuda={}:{}".format(new_dimentions['width'], new_dimentions['height']))
    MiscUtils.exec_subprocess(args, "Video conversion failed")
def create_media_file(path_exiftool: str, index_time: datetime, scanned_file: ScannedFile,
                      existing_media_file: MediaFile) -> MediaFile:
    file_path = scanned_file.file_path
    exif = ExifHelper.__get_exif_dict(path_exiftool, file_path)
    error_str = ExifHelper.__get_exif(exif, "Error")
    exif_file_type_str = ExifHelper.__get_exif(exif, "FileType")
    if error_str:
        ExifHelper.__logger.error("Error processing file. EXIF: %s", exif)
        if (error_str == 'File is empty' or error_str == 'File format error'
                or 'file is binary' in error_str):
            return None
    if exif_file_type_str == "TXT":
        ExifHelper.__logger.error("Possibly corrupt file. EXIF: %s", exif)
        return None
    media_file = existing_media_file if existing_media_file else MediaFile()
    media_file.parent_dir_path = scanned_file.parent_dir_path
    media_file.file_path = file_path
    media_file.extension = scanned_file.extension
    media_file.file_type = scanned_file.file_type.name
    media_file.is_raw = scanned_file.is_raw
    media_file.mime = ExifHelper.__get_mime(file_path, exif)
    media_file.original_size = os.path.getsize(file_path)
    media_file.creation_time = scanned_file.creation_time
    media_file.last_modification_time = scanned_file.last_modification_time
    media_file.original_file_hash = (scanned_file.hash if scanned_file.hash is not None
                                     else MiscUtils.generate_hash(file_path))
    media_file.converted_file_hash = None
    media_file.conversion_settings_hash = None
    media_file.index_time = index_time
    ExifHelper.__append_dimentions(media_file, exif)
    media_file.capture_date = ExifHelper.__get_capture_date(scanned_file, exif)
    media_file.camera_make = ExifHelper.__get_exif(exif, "Make")
    media_file.camera_model = ExifHelper.__get_exif(exif, "CameraModelName", "Model")
    media_file.lens_model = ExifHelper.__get_exif(exif, "LensModel", "LensType", "LensInfo")
    gps_info = ExifHelper.__get_gps_info(exif)
    media_file.gps_alt = gps_info.get('altitude')
    media_file.gps_lat = gps_info.get('latitude')
    media_file.gps_long = gps_info.get('longitude')
    exif_orientation = ExifHelper.__get_exif(exif, "Orientation", "CameraOrientation")
    media_file.view_rotation = ExifHelper.__get_view_rotation(exif_orientation)
    media_file.image_orientation = exif_orientation
    media_file.video_duration = ExifHelper.__get_video_duration(exif)
    ExifHelper.__append_video_rotation(media_file, exif)
    return media_file
def start_deletion(self, clearIndex: bool):
    MiscUtils.debug_this_thread()
    with IndexDB() as indexDB:
        if clearIndex:
            indexDB.clear_indexed_files()
            self.__logger.info("Index cleared")
        settings: Settings = indexDB.get_settings()
        MiscUtils.recursively_delete_children(settings.output_dir)
        MiscUtils.recursively_delete_children(settings.unknown_output_dir)
        self.__logger.info("Output directories cleared")
def __run_exiftool_command_line(cmd):
    """Handle the command line call.

    Keyword arguments:
    cmd -- a list

    Returns 0 on error, or a string with the command line output.
    """
    try:
        output = subprocess.Popen(cmd, **MiscUtils.subprocess_args())
        output = output.stdout.read()
        return output.strip()
    except (OSError, subprocess.SubprocessError):
        # Popen does not raise CalledProcessError; catch launch/IO failures instead.
        return 0
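# For contrast, a minimal sketch using subprocess.run: unlike Popen, run(...,
# check=True) does raise CalledProcessError on a non-zero exit code, which is
# why the helper above guards against launch failures rather than exit codes.
# Assumes exiftool is on PATH; "-ver" simply prints its version number.
import subprocess


def run_exiftool_version():
    try:
        completed = subprocess.run(["exiftool", "-ver"],
                                   capture_output=True, check=True)
        return completed.stdout.strip()
    except (OSError, subprocess.CalledProcessError):
        return 0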
def __init__(self, log_queue: Queue):
    super().__init__(QtGui.QIcon(MiscUtils.get_app_icon_path()))
    self.log_queue = log_queue
    self.preferences_window: PreferencesWindow = None
    self.log_window: LogWindow = None
    self.indexing_stop_event: Event = None
    self.observer = None
    self.indexDB = IndexDB()
    self.threadpool: QtCore.QThreadPool = QtCore.QThreadPool()
    self.__logger.debug("QT multithreading with thread pool size: %s",
                        self.threadpool.maxThreadCount())
    self.setToolTip("Batch Media Compressor")
    self.activated.connect(self.trayIcon_activated)

    tray_menu = QtWidgets.QMenu('Main Menu')
    self.startIndexAction = tray_menu.addAction('Start Processing', self.startIndexAction_triggered)
    self.stopIndexAction = tray_menu.addAction('Stop Processing', self.stopIndexAction_triggered)
    self.stopIndexAction.setEnabled(False)
    tray_menu.addSeparator()
    self.clearIndexAction = tray_menu.addAction('Clear Indexed Files', self.clearIndexAction_triggered)
    self.clearOutputDirsAction = tray_menu.addAction('Clear Output Directories', self.clearOutputDirsAction_triggered)
    tray_menu.addSeparator()
    self.editPrefAction = tray_menu.addAction('Edit Preferences', self.editPreferencesAction_triggered)
    self.viewLogsAction = tray_menu.addAction('View Logs', self.viewLogsAction_triggered)
    tray_menu.addSeparator()
    self.updateCheckAction = tray_menu.addAction('Check for Updates', self.updateCheckAction_triggered)
    self.coffeeAction = tray_menu.addAction('Buy me a Coffee', self.coffeeAction_triggered)
    tray_menu.addSeparator()
    tray_menu.addAction('Quit', self.quitMenuAction_triggered)
    self.setContextMenu(tray_menu)

    self.apply_process_changed_setting()
    if self.indexDB.get_settings().auto_update_check:
        self.update_check_worker = QWorker(self.auto_update_check)
        self.threadpool.start(self.update_check_worker)
def start_indexing(self):
    MiscUtils.debug_this_thread()
    with IndexDB() as indexDB:
        indexing_task = IndexingTask()
        indexing_task.settings = indexDB.get_settings()
        if self.settings_valid(indexing_task.settings):
            misc_utils = MiscUtils(indexing_task)
            misc_utils.create_root_marker()
            indexing_helper = IndexingHelper(indexing_task, self.log_queue, self.indexing_stop_event)
            (scanned_files, _) = indexing_helper.scan_dirs()
            indexing_helper.remove_slate_files(indexDB, scanned_files)
            indexing_helper.lookup_already_indexed_files(indexDB, scanned_files)
            if not self.indexing_stop_event.is_set():
                indexing_helper.create_media_files(scanned_files)
            if not self.indexing_stop_event.is_set():
                media_processor = MediaProcessor(indexing_task, self.log_queue, self.indexing_stop_event)
                media_processor.save_processed_files(indexDB)
            if not self.indexing_stop_event.is_set():
                misc_utils.cleanEmptyOutputDirs()
def get_settings(self):
    settings_path = MiscUtils.get_settings_path()
    settings: Settings = None
    if os.path.exists(settings_path) and os.path.isfile(settings_path):
        with open(settings_path) as file:
            try:
                settings_dict = json.load(file)
                settings = Settings()
                for key in settings_dict:
                    if key in settings.__dict__:  # Don't keep stale keys
                        settings.__dict__[key] = settings_dict[key]
            except Exception:
                logging.exception("Failed to load settings from JSON file. Restoring defaults.")
    if settings is None:
        settings = Settings()
        self.save_settings(settings)
    return settings
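# Minimal sketch of the stale-key filtering idea above: only keys that already
# exist on a freshly constructed settings object are copied from the JSON file,
# so options removed in newer versions silently drop out. The ExampleSettings
# class, its fields, and the file name are illustrative assumptions, not the
# project's Settings model.
import json


class ExampleSettings:
    def __init__(self):
        self.image_max_dimension = 1920
        self.video_crf = 28


def load_example_settings(path="example_settings.json"):
    settings = ExampleSettings()
    try:
        with open(path) as file:
            for key, value in json.load(file).items():
                if key in settings.__dict__:     # ignore stale/unknown keys
                    settings.__dict__[key] = value
    except (OSError, ValueError):
        pass                                     # keep defaults on any failure
    return settings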
def auto_update_check(self):
    MiscUtils.debug_this_thread()
    self.check_for_updates(False)
def save_settings(self, settings: Settings):
    settings_path = MiscUtils.get_settings_path()
    with open(settings_path, 'w') as file:
        data = settings.__dict__
        json.dump(data, file, sort_keys=True, indent=4)
def wait_and_get_results(self):
    self.__task_queue.join()
    for process in self.__processes:
        process.terminate()
    self.__logger.info("PyProcessPool tasks completed")
    return MiscUtils.get_all_from_queue(self.__result_queue)
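# Hedged sketch of what a helper like MiscUtils.get_all_from_queue presumably
# does (its real implementation is not shown in this section): drain a queue
# without blocking once the producers have finished. Both queue.Queue and
# multiprocessing.Queue raise queue.Empty from get_nowait().
import queue


def get_all_from_queue_sketch(q):
    items = []
    while True:
        try:
            items.append(q.get_nowait())
        except queue.Empty:
            return items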
def __btn_log_dir_clicked(self):
    path = os.path.realpath(MiscUtils.get_log_dir_path())
    webbrowser.open("file:///" + path)
def conversion_process_exec(media_file_path: str, target_gpu: int,
                            save_file_path_computation_lock: Lock,
                            indexDB: IndexDB, task_id: str):
    settings: Settings = indexDB.get_settings()
    media_file: MediaFile = indexDB.get_by_file_path(media_file_path)
    conversion_settings_hash: str = (settings.generate_image_settings_hash()
                                     if ScannedFileType.IMAGE.name == media_file.file_type
                                     else settings.generate_video_settings_hash())
    processing_start_time = time.time()
    original_file_path = media_file.file_path
    save_file_path = "UNKNOWN"
    try:
        with save_file_path_computation_lock:
            save_file_path = MediaProcessor.get_save_file_path(indexDB, media_file, settings)
        skip_conversion: bool = False
        if not media_file.capture_date and not settings.convert_unknown:
            # No capture date and conversion not requested for unknown files
            if os.path.exists(save_file_path):
                os.remove(save_file_path)
                logging.info("Deleted Previously Converted File %s: %s -> %s",
                             task_id, original_file_path, save_file_path)
            skip_conversion = True
        else:
            os.makedirs(os.path.dirname(save_file_path), exist_ok=True)
            if (not settings.overwrite_output_files
                    and os.path.exists(save_file_path)
                    # Converted file hash is None if the original file is re-indexed
                    and media_file.converted_file_hash == MiscUtils.generate_hash(save_file_path)
                    # Settings hash is None if the original file is re-indexed
                    and media_file.conversion_settings_hash == conversion_settings_hash):
                skip_conversion = True
        if not skip_conversion:
            if ScannedFileType.IMAGE.name == media_file.file_type:
                MediaProcessor.convert_image_file(settings, media_file, original_file_path, save_file_path)
            if ScannedFileType.VIDEO.name == media_file.file_type:
                MediaProcessor.convert_video_file(settings, media_file, original_file_path,
                                                  save_file_path, target_gpu)
            MediaProcessor.copy_exif_to_file(settings, original_file_path, save_file_path, media_file)
            media_file.converted_file_hash = MiscUtils.generate_hash(save_file_path)
            media_file.conversion_settings_hash = conversion_settings_hash
            with save_file_path_computation_lock:
                indexDB.insert_media_file(media_file)
            logging.info("Converted %s: %s -> %s (%s%%) (%ss)",
                         task_id, original_file_path, save_file_path,
                         round(os.path.getsize(save_file_path) / media_file.original_size * 100, 2),
                         round(time.time() - processing_start_time, 2))
        else:
            logging.info("Skipped Conversion %s: %s -> %s", task_id, original_file_path, save_file_path)
    except Exception:
        try:
            if os.path.exists(save_file_path):
                os.remove(save_file_path)  # Delete corrupt / invalid output file
        except Exception:
            pass
        logging.exception("Failed Processing %s: %s -> %s (%ss)",
                          task_id, original_file_path, save_file_path,
                          round(time.time() - processing_start_time, 2))
import logging
import multiprocessing
import sys
import threading
from multiprocessing import Manager, freeze_support

from PySide2 import QtCore, QtGui, QtWidgets
from zc.lockfile import LockError, LockFile

from pie import TrayIcon
from pie.util import MiscUtils

if __name__ == "__main__":
    if MiscUtils.running_in_pyinstaller_bundle():
        freeze_support()
        multiprocessing.set_start_method('spawn')
    QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_ShareOpenGLContexts)
    QtGui.QGuiApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
    MiscUtils.configure_logging()
    log_queue = Manager().Queue()
    logger_thread = threading.Thread(target=MiscUtils.logger_thread_exec, args=(log_queue,))
    logger_thread.start()

    app = QtWidgets.QApplication(sys.argv)
    app.setWindowIcon(QtGui.QIcon(MiscUtils.get_app_icon_path()))
    app.setApplicationDisplayName("Batch Media Compressor")  # TODO test + add org / ver
    app.setQuitOnLastWindowClosed(False)